Nov 23 14:41:43 crc systemd[1]: Starting Kubernetes Kubelet...
Nov 23 14:41:43 crc restorecon[4750]: Relabeled /var/lib/kubelet/config.json from system_u:object_r:unlabeled_t:s0 to system_u:object_r:container_var_lib_t:s0
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/device-plugins not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/device-plugins/kubelet.sock not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/volumes/kubernetes.io~configmap/nginx-conf/..2025_02_23_05_40_35.4114275528/nginx.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/22e96971 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/21c98286 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/0f1869e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/46889d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/5b6a5969 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/6c7921f5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4804f443 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/2a46b283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/a6b5573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4f88ee5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/5a4eee4b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/cd87c521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/38602af4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/1483b002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/0346718b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/d3ed4ada not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/3bb473a5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/8cd075a9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/00ab4760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/54a21c09 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/70478888 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/43802770 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/955a0edc not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/bca2d009 not reset as customized by admin to system_u:object_r:container_file_t:s0:c140,c1009
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/b295f9bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/bc46ea27 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5731fc1b not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5e1b2a3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/943f0936 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/3f764ee4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/8695e3f9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/aed7aa86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/c64d7448 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/0ba16bd2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/207a939f not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/54aa8cdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/1f5fa595 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/bf9c8153 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/47fba4ea not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/7ae55ce9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7906a268 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/ce43fa69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7fc7ea3a not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/d8c38b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/9ef015fb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/b9db6a41 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/b1733d79 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/afccd338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/9df0a185 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/18938cf8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/7ab4eb23 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/56930be6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_35.630010865 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/0d8e3722 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/d22b2e76 not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/e036759f not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/2734c483 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/57878fe7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/3f3c2e58 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/375bec3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/7bc41e08 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/48c7a72d not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/4b66701f not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/a5a1c202 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_40.1388695756 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/26f3df5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/6d8fb21d not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/50e94777 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208473b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/ec9e08ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3b787c39 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208eaed5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/93aa3a2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3c697968 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/ba950ec9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/cb5cdb37 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/f2df9827 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/fedaa673 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/9ca2df95 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/b2d7460e not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2207853c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/241c1c29 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2d910eaf not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/c6c0f2e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/399edc97 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8049f7cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/0cec5484 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/312446d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c406,c828
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8e56a35d not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/2d30ddb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/eca8053d not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/c3a25c9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c168,c522
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/b9609c22 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/e8b0eca9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/b36a9c3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/38af7b07 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/ae821620 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/baa23338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/2c534809 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/59b29eae not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/c91a8e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/4d87494a not reset as customized by admin to system_u:object_r:container_file_t:s0:c442,c857
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/1e33ca63 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/8dea7be2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d0b04a99 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d84f01e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/4109059b not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/a7258a3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/05bdf2b6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/f3261b51 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/315d045e not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/5fdcf278 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/d053f757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/c2850dc7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fcfb0b2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c7ac9b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fa0c0d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c609b6ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/2be6c296 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/89a32653 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/4eb9afeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/13af6efa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/b03f9724 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/e3d105cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/3aed4d83 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/0765fa6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/2cefc627 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/3dcc6345 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/365af391 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b1130c0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/236a5913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b9432e26 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/5ddb0e3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/986dc4fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/8a23ff9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/9728ae68 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/665f31d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 23 14:41:43 crc restorecon[4750]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/136c9b42 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/98a1575b not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/cac69136 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/5deb77a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/2ae53400 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/e46f2326 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/dc688d3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/3497c3cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/177eb008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/af5a2afa not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/d780cb1f not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/49b0f374 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/26fbb125 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 23 14:41:44 crc restorecon[4750]:
/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/cf14125a not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/b7f86972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/e51d739c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/88ba6a69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/669a9acf not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/5cd51231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/75349ec7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/15c26839 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/45023dcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/2bb66a50 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/64d03bdd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/ab8e7ca0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/bb9be25f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: 
/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/9a0b61d3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/d471b9d2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/8cb76b8e not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/11a00840 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/ec355a92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/992f735e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 23 14:41:44 crc 
restorecon[4750]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d59cdbbc not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/72133ff0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/c56c834c not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d13724c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/0a498258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa471982 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fc900d92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa7d68da not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/4bacf9b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/424021b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/fc2e31a3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/f51eefac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/c8997f2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/7481f599 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 23 14:41:44 crc restorecon[4750]: 
/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/fdafea19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/d0e1c571 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/ee398915 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/682bb6b8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a3e67855 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a989f289 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/915431bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/7796fdab not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/dcdb5f19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 23 14:41:44 crc restorecon[4750]: 
/var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/a3aaa88c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/5508e3e6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/160585de not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/e99f8da3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/8bc85570 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/a5861c91 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/84db1135 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/9e1a6043 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/c1aba1c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/d55ccd6d not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/971cc9f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/8f2e3dcf not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/ceb35e9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/1c192745 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/5209e501 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/f83de4df not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/e7b978ac not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 23 14:41:44 crc 
restorecon[4750]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/c64304a1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/5384386b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/cce3e3ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/8fb75465 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/740f573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/32fd1134 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/0a861bd3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/80363026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/bfa952a8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c129,c158 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..2025_02_23_05_33_31.333075221 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/793bf43d not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/7db1bb6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/4f6a0368 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/c12c7d86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/36c4a773 not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/4c1e98ae not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/a4c8115c not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/setup/7db1802e not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver/a008a7ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-syncer/2c836bac not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-regeneration-controller/0ce62299 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c97,c980 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-insecure-readyz/945d2457 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-check-endpoints/7d5c1dd8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/index.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/bundle-v1.15.0.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/channel.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/package.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc 
restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/bc8d0691 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/6b76097a not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/34d1af30 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/312ba61c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc 
restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/645d5dd1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/16e825f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/4cf51fc9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/2a23d348 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/075dbd49 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 23 14:41:44 
crc restorecon[4750]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/dd585ddd not reset as customized by admin to system_u:object_r:container_file_t:s0:c377,c642 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/17ebd0ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c343 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/005579f4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_23_11.1287037894 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 23 
14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/bf5f3b9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/af276eb7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/ea28e322 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/692e6683 not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/871746a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/4eb2e958 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 23 14:41:44 crc restorecon[4750]: 
/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/etc-hosts not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/ca9b62da not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/0edd6fce not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca not reset as customized by 
admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/containers/controller-manager/89b4555f not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 23 14:41:44 crc restorecon[4750]: 
/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/655fcd71 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/0d43c002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/e68efd17 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/9acf9b65 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/5ae3ff11 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/1e59206a not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/27af16d1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c304,c1017
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/7918e729 not reset as customized by admin to system_u:object_r:container_file_t:s0:c853,c893
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/5d976d0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c585,c981
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/d7f55cbb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/f0812073 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/1a56cbeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/7fdd437e not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/cdfb5652 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/fix-audit-permissions/fb93119e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver/f1e8fc0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver-check-endpoints/218511f3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server/serving-certs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/ca8af7b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/72cc8a75 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/6e8a3760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4c3455c0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/2278acb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4b453e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/3ec09bda not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2/cacerts.bin not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java/cacerts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl/ca-bundle.trust.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/email-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/objsign-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2ae6433e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fde84897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75680d2e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/openshift-service-serving-signer_1740288168.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/facfc4fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f5a969c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CFCA_EV_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9ef4a08a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ingress-operator_1740288202.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2f332aed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/248c8271.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d10a21f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ACCVRAIZ1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a94d09e5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c9a4d3b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40193066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd8c0d63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b936d1c6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CA_Disig_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4fd49c6c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM_SERVIDORES_SEGUROS.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b81b93f0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f9a69fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b30d5fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ANF_Secure_Server_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b433981b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93851c9e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9282e51c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7dd1bc4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Actalis_Authentication_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/930ac5d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f47b495.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e113c810.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5931b5bc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Commercial.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2b349938.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e48193cf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/302904dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a716d4ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Networking.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93bc0acc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/86212b19.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b727005e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbc54cab.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f51bb24c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c28a8a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9c8dfbd4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ccc52f49.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cb1c3204.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ce5e74ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd08c599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6d41d539.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb5fa911.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e35234b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8cb5ee0f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a7c655d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f8fc53da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/de6d66f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d41b5e2a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/41a3f684.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1df5a75f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_2011.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e36a6752.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b872f2b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9576d26b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/228f89db.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_ECC_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb717492.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d21b73c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b1b94ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/595e996b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_RSA_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b46e03d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/128f4b91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_3_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81f2d2b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Autoridad_de_Certificacion_Firmaprofesional_CIF_A62634068.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3bde41ac.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d16a5865.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_EC-384_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0179095f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ffa7f1eb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9482e63a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4dae3dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e359ba6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7e067d03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/95aff9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7746a63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Baltimore_CyberTrust_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/653b494a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3ad48a91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_2_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/54657681.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/82223c44.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8de2f56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d9dafe4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d96b65e2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee64a828.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40547a79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5a3f0ff8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a780d93.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/34d996fb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/eed8c118.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/89c02a45.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b1159c4c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d6325660.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4c339cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8312c4c1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_E1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8508e720.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5fdd185d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48bec511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/69105f4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b9bc432.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/32888f65.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b03dec0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/219d9499.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5acf816d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbf06781.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc99f41e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AAA_Certificate_Services.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/985c1f52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8794b4e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_BR_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7c037b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ef954a4e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_EV_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2add47b6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/90c5a3c8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0f3e76e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/53a1b57a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_EV_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5ad8a5d6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/68dd7389.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d04f354.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d6437c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/062cdee6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bd43e1dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7f3d5d1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c491639e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3513523f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/399e7759.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/feffd413.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d18e9066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/607986c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c90bc37d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]:
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1b0f7e5c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e08bfd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dd8e9d41.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed39abd0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a3418fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bc3f2570.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_High_Assurance_EV_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/244b5494.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81b9768f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4be590e0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_ECC_P384_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9846683b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/252252d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e8e7201.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_RSA4096_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d52c538d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c44cc0c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Trusted_Root_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75d1b2ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a2c66da8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ecccd8db.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust.net_Certification_Authority__2048_.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/aee5f10d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e7271e8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0e59380.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4c3982f2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b99d060.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf64f35b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0a775a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/002c0b4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cc450945.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_EC1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/106f3e4d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b3fb433b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4042bcee.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/02265526.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/455f1b52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0d69c7e1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9f727ac7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5e98733a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0cd152c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc4d6a89.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6187b673.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/FIRMAPROFESIONAL_CA_ROOT-A_WEB.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ba8887ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/068570d1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f081611a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48a195d8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GDCA_TrustAUTH_R5_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f6fa695.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab59055e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b92fd57f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GLOBALTRUST_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fa5da96b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ec40989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7719f463.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1001acf7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f013ecaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/626dceaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c559d742.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1d3472b9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9479c8c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a81e292b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4bfab552.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e071171e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/57bcb2da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_ECC_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab5346f4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5046c355.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_RSA_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/865fbdf9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da0cfd1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/85cde254.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_ECC_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbb3f32b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureSign_RootCA11.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5860aaa6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/31188b5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HiPKI_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c7f1359b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f15c80c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hongkong_Post_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/09789157.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/18856ac4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e09d511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Commercial_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cf701eeb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d06393bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Public_Sector_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/10531352.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Izenpe.com.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureTrust_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0ed035a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsec_e-Szigno_Root_CA_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8160b96c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8651083.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2c63f966.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_ECC_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d89cda1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/01419da9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_RSA_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7a5b843.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_RSA_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf53fb88.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9591a472.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3afde786.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Gold_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NAVER_Global_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3fb36b73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d39b0a2c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a89d74c2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd58d51e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7db1890.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NetLock_Arany__Class_Gold__F__tan__s__tv__ny.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/988a38cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/60afe812.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f39fc864.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5443e9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GB_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e73d606e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dfc0fe80.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b66938e9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e1eab7c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GC_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/773e07ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c899c73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d59297b8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ddcda989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_1_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/749e9e03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/52b525c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7e8dc79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a819ef2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/08063a00.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b483515.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/064e0aa9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1f58a078.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6f7454b3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7fa05551.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76faf6c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9339512a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f387163d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee37c333.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e18bfb83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e442e424.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fe8a2cd8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/23f4c490.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5cd81ad7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 23 14:41:44 crc restorecon[4750]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0c70a8d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7892ad52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SZAFIR_ROOT_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4f316efb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_RSA_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/06dc52d5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/583d0756.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0bf05006.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/88950faa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9046744a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c860d51.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_RSA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6fa5da56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/33ee480d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Secure_Global_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/63a2c897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_ECC_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bdacca6f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ff34af3f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbff3a01.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_ECC_RootCA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_C1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/406c9bb1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_C3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Services_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Silver_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/99e1b953.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/14bc7599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TUBITAK_Kamu_SM_SSL_Kok_Sertifikasi_-_Surum_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a3adc42.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f459871d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_ECC_Root_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_RSA_Root_2023.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TeliaSonera_Root_CA_v1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telia_Root_CA_v2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f103249.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f058632f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-certificates.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9bf03295.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/98aaf404.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1cef98f5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/073bfcc5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2923b3f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f249de83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/edcbddb5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P256_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b5697b0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ae85e5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b74d2bd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P384_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d887a5bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9aef356c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TunTrust_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd64f3fc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e13665f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Extended_Validation_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f5dc4f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da7377f6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Global_G2_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c01eb047.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/304d27c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed858448.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f30dd6ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/04f60c28.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_ECC_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fc5a8f99.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/35105088.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee532fd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/XRamp_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/706f604c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76579174.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d86cdd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/882de061.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f618aec.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a9d40e02.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e-Szigno_Root_CA_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e868b802.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/83e9984f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ePKI_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca6e4ad9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d6523ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4b718d9b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/869fbf79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/containers/registry/f8d22bdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/6e8bbfac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/54dd7996 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/a4f1bb05 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/207129da not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/c1df39e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/15b8f1cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/77bd6913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/2382c1b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/704ce128 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/70d16fe0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/bfb95535 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/57a8e8e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/1b9d3e5e not reset as customized by admin to system_u:object_r:container_file_t:s0:c107,c917
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/fddb173c not reset as customized by admin to system_u:object_r:container_file_t:s0:c202,c983
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/95d3c6c4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/bfb5fff5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/2aef40aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/c0391cad not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/1119e69d not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/660608b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/8220bd53 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/85f99d5c not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/4b0225f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/9c2a3394 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/e820b243 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/1ca52ea0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/e6988e45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/6655f00b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/98bc3986 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/08e3458a not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/2a191cb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/6c4eeefb not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/f61a549c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/24891863 not reset as customized by admin to system_u:object_r:container_file_t:s0:c37,c572
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/fbdfd89c not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/9b63b3bc not reset as customized by admin to system_u:object_r:container_file_t:s0:c37,c572
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/8acde6d6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/node-driver-registrar/59ecbba3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/csi-provisioner/685d4be3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/containers/route-controller-manager/feaea55e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]:
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/63709497 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/d966b7fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/f5773757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/81c9edb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/57bf57ee not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/86f5e6aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/0aabe31d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/d2af85c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: 
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/09d157d9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 
14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus/catalog.json not reset as customized by admin 
to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 23 14:41:44 crc restorecon[4750]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c0fe7256 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c30319e4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/e6b1dd45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/2bb643f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/920de426 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/70fa1e87 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/a1c12a2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/9442e6c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/5b45ec72 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/3c9f3a59 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/1091c11b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/9a6821c6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/ec0c35e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/517f37e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/6214fe78 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/ba189c8b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/351e4f31 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/c0f219ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/8069f607 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/559c3d82 not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/605ad488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/148df488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/3bf6dcb4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/022a2feb not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/938c3924 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/729fe23e not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/1fd5cbd4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/a96697e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/e155ddca not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/10dd0e0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/6f2c8392 not reset as customized by admin to system_u:object_r:container_file_t:s0:c267,c588
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/bd241ad9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/plugins not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/plugins/csi-hostpath not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/plugins/csi-hostpath/csi.sock not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/plugins/kubernetes.io not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/plugins/kubernetes.io/csi not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983 not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/vol_data.json not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 23 14:41:44 crc restorecon[4750]: /var/lib/kubelet/plugins_registry not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 23 14:41:44 crc restorecon[4750]: Relabeled /var/usrlocal/bin/kubenswrapper from system_u:object_r:bin_t:s0 to system_u:object_r:kubelet_exec_t:s0
Nov 23 14:41:45 crc kubenswrapper[5050]: Flag --container-runtime-endpoint has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Nov 23 14:41:45 crc kubenswrapper[5050]: Flag --minimum-container-ttl-duration has been deprecated, Use --eviction-hard or --eviction-soft instead. Will be removed in a future version.
Nov 23 14:41:45 crc kubenswrapper[5050]: Flag --volume-plugin-dir has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Nov 23 14:41:45 crc kubenswrapper[5050]: Flag --register-with-taints has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Nov 23 14:41:45 crc kubenswrapper[5050]: Flag --pod-infra-container-image has been deprecated, will be removed in a future release. Image garbage collector will get sandbox image information from CRI.
Nov 23 14:41:45 crc kubenswrapper[5050]: Flag --system-reserved has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.264078    5050 server.go:211] "--pod-infra-container-image will not be pruned by the image garbage collector in kubelet and should also be set in the remote runtime"
Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.275565    5050 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.275976    5050 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.276093    5050 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.276189    5050 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.276278    5050 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.276366    5050 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.276569    5050 feature_gate.go:330] unrecognized feature gate: NewOLM
Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.276692    5050 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.276810    5050 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.276930    5050 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.277049    5050 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.277164    5050 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.277296    5050 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.277408    5050 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.277592    5050 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.277701    5050 feature_gate.go:330] unrecognized feature gate: OVNObservability
Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.277815    5050 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.277912    5050 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.278013    5050 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.278106    5050 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.278194    5050 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.278281    5050 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.278385    5050 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.278515    5050 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.278613    5050 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.278755    5050 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.278859    5050 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.278967    5050 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.279059    5050 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.279148    5050 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.279236    5050 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.279335    5050 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.279426    5050 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.279582    5050 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.279607    5050 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.279621    5050 feature_gate.go:330] unrecognized feature gate: Example
Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.279632    5050 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.279643    5050 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.279653    5050 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.279669    5050 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.279682 5050 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.279693 5050 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.279704 5050 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.279714 5050 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.279726 5050 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.279736 5050 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.279745 5050 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.279755 5050 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.279765 5050 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.279775 5050 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.279785 5050 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.279796 5050 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.279805 5050 feature_gate.go:330] unrecognized feature gate: SignatureStores Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.279815 5050 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.279825 5050 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.279835 5050 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.279845 5050 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.279859 5050 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. 
Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.279874 5050 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.279886 5050 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.279897 5050 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.279908 5050 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.279919 5050 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.279930 5050 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.279940 5050 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.279954 5050 feature_gate.go:330] unrecognized feature gate: PinnedImages Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.279964 5050 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.279974 5050 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.279984 5050 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.279994 5050 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.280005 5050 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.280924 5050 flags.go:64] FLAG: --address="0.0.0.0" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.280960 5050 flags.go:64] FLAG: --allowed-unsafe-sysctls="[]" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.280982 5050 flags.go:64] FLAG: --anonymous-auth="true" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.280999 5050 flags.go:64] FLAG: --application-metrics-count-limit="100" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.281017 5050 flags.go:64] FLAG: --authentication-token-webhook="false" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.281030 5050 flags.go:64] FLAG: --authentication-token-webhook-cache-ttl="2m0s" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.281047 5050 flags.go:64] FLAG: --authorization-mode="AlwaysAllow" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.281064 5050 flags.go:64] FLAG: --authorization-webhook-cache-authorized-ttl="5m0s" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.281078 5050 flags.go:64] FLAG: --authorization-webhook-cache-unauthorized-ttl="30s" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.281091 5050 flags.go:64] FLAG: --boot-id-file="/proc/sys/kernel/random/boot_id" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.281104 5050 flags.go:64] FLAG: --bootstrap-kubeconfig="/etc/kubernetes/kubeconfig" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.281117 5050 flags.go:64] FLAG: --cert-dir="/var/lib/kubelet/pki" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.281129 5050 flags.go:64] FLAG: --cgroup-driver="cgroupfs" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.281144 5050 
flags.go:64] FLAG: --cgroup-root="" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.281156 5050 flags.go:64] FLAG: --cgroups-per-qos="true" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.281168 5050 flags.go:64] FLAG: --client-ca-file="" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.281180 5050 flags.go:64] FLAG: --cloud-config="" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.281191 5050 flags.go:64] FLAG: --cloud-provider="" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.281203 5050 flags.go:64] FLAG: --cluster-dns="[]" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.281222 5050 flags.go:64] FLAG: --cluster-domain="" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.281234 5050 flags.go:64] FLAG: --config="/etc/kubernetes/kubelet.conf" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.281249 5050 flags.go:64] FLAG: --config-dir="" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.281261 5050 flags.go:64] FLAG: --container-hints="/etc/cadvisor/container_hints.json" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.281274 5050 flags.go:64] FLAG: --container-log-max-files="5" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.281291 5050 flags.go:64] FLAG: --container-log-max-size="10Mi" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.281303 5050 flags.go:64] FLAG: --container-runtime-endpoint="/var/run/crio/crio.sock" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.281316 5050 flags.go:64] FLAG: --containerd="/run/containerd/containerd.sock" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.281329 5050 flags.go:64] FLAG: --containerd-namespace="k8s.io" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.281341 5050 flags.go:64] FLAG: --contention-profiling="false" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.281353 5050 flags.go:64] FLAG: --cpu-cfs-quota="true" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.281366 5050 flags.go:64] FLAG: --cpu-cfs-quota-period="100ms" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.281380 5050 flags.go:64] FLAG: --cpu-manager-policy="none" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.281392 5050 flags.go:64] FLAG: --cpu-manager-policy-options="" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.281407 5050 flags.go:64] FLAG: --cpu-manager-reconcile-period="10s" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.281420 5050 flags.go:64] FLAG: --enable-controller-attach-detach="true" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.281432 5050 flags.go:64] FLAG: --enable-debugging-handlers="true" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.281485 5050 flags.go:64] FLAG: --enable-load-reader="false" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.281499 5050 flags.go:64] FLAG: --enable-server="true" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.281511 5050 flags.go:64] FLAG: --enforce-node-allocatable="[pods]" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.281527 5050 flags.go:64] FLAG: --event-burst="100" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.281540 5050 flags.go:64] FLAG: --event-qps="50" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.281552 5050 flags.go:64] FLAG: --event-storage-age-limit="default=0" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.281565 5050 flags.go:64] FLAG: --event-storage-event-limit="default=0" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.281578 5050 flags.go:64] FLAG: --eviction-hard="" 
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.281594 5050 flags.go:64] FLAG: --eviction-max-pod-grace-period="0" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.281608 5050 flags.go:64] FLAG: --eviction-minimum-reclaim="" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.281620 5050 flags.go:64] FLAG: --eviction-pressure-transition-period="5m0s" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.281638 5050 flags.go:64] FLAG: --eviction-soft="" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.281651 5050 flags.go:64] FLAG: --eviction-soft-grace-period="" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.281663 5050 flags.go:64] FLAG: --exit-on-lock-contention="false" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.281676 5050 flags.go:64] FLAG: --experimental-allocatable-ignore-eviction="false" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.281688 5050 flags.go:64] FLAG: --experimental-mounter-path="" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.281700 5050 flags.go:64] FLAG: --fail-cgroupv1="false" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.281712 5050 flags.go:64] FLAG: --fail-swap-on="true" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.281724 5050 flags.go:64] FLAG: --feature-gates="" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.281740 5050 flags.go:64] FLAG: --file-check-frequency="20s" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.281753 5050 flags.go:64] FLAG: --global-housekeeping-interval="1m0s" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.281768 5050 flags.go:64] FLAG: --hairpin-mode="promiscuous-bridge" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.281781 5050 flags.go:64] FLAG: --healthz-bind-address="127.0.0.1" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.281794 5050 flags.go:64] FLAG: --healthz-port="10248" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.281807 5050 flags.go:64] FLAG: --help="false" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.281819 5050 flags.go:64] FLAG: --hostname-override="" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.281832 5050 flags.go:64] FLAG: --housekeeping-interval="10s" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.281845 5050 flags.go:64] FLAG: --http-check-frequency="20s" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.281860 5050 flags.go:64] FLAG: --image-credential-provider-bin-dir="" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.281873 5050 flags.go:64] FLAG: --image-credential-provider-config="" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.281884 5050 flags.go:64] FLAG: --image-gc-high-threshold="85" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.281896 5050 flags.go:64] FLAG: --image-gc-low-threshold="80" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.281908 5050 flags.go:64] FLAG: --image-service-endpoint="" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.281921 5050 flags.go:64] FLAG: --kernel-memcg-notification="false" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.281933 5050 flags.go:64] FLAG: --kube-api-burst="100" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.281945 5050 flags.go:64] FLAG: --kube-api-content-type="application/vnd.kubernetes.protobuf" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.281959 5050 flags.go:64] FLAG: --kube-api-qps="50" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.281971 5050 flags.go:64] FLAG: --kube-reserved="" Nov 23 14:41:45 
crc kubenswrapper[5050]: I1123 14:41:45.281984 5050 flags.go:64] FLAG: --kube-reserved-cgroup="" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.281996 5050 flags.go:64] FLAG: --kubeconfig="/var/lib/kubelet/kubeconfig" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.282008 5050 flags.go:64] FLAG: --kubelet-cgroups="" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.282021 5050 flags.go:64] FLAG: --local-storage-capacity-isolation="true" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.282033 5050 flags.go:64] FLAG: --lock-file="" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.282045 5050 flags.go:64] FLAG: --log-cadvisor-usage="false" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.282057 5050 flags.go:64] FLAG: --log-flush-frequency="5s" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.282070 5050 flags.go:64] FLAG: --log-json-info-buffer-size="0" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.282106 5050 flags.go:64] FLAG: --log-json-split-stream="false" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.282124 5050 flags.go:64] FLAG: --log-text-info-buffer-size="0" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.282136 5050 flags.go:64] FLAG: --log-text-split-stream="false" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.282148 5050 flags.go:64] FLAG: --logging-format="text" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.282160 5050 flags.go:64] FLAG: --machine-id-file="/etc/machine-id,/var/lib/dbus/machine-id" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.282173 5050 flags.go:64] FLAG: --make-iptables-util-chains="true" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.282186 5050 flags.go:64] FLAG: --manifest-url="" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.282199 5050 flags.go:64] FLAG: --manifest-url-header="" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.282215 5050 flags.go:64] FLAG: --max-housekeeping-interval="15s" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.282227 5050 flags.go:64] FLAG: --max-open-files="1000000" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.282244 5050 flags.go:64] FLAG: --max-pods="110" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.282257 5050 flags.go:64] FLAG: --maximum-dead-containers="-1" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.282270 5050 flags.go:64] FLAG: --maximum-dead-containers-per-container="1" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.282284 5050 flags.go:64] FLAG: --memory-manager-policy="None" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.282298 5050 flags.go:64] FLAG: --minimum-container-ttl-duration="6m0s" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.282312 5050 flags.go:64] FLAG: --minimum-image-ttl-duration="2m0s" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.282325 5050 flags.go:64] FLAG: --node-ip="192.168.126.11" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.282337 5050 flags.go:64] FLAG: --node-labels="node-role.kubernetes.io/control-plane=,node-role.kubernetes.io/master=,node.openshift.io/os_id=rhcos" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.282370 5050 flags.go:64] FLAG: --node-status-max-images="50" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.282383 5050 flags.go:64] FLAG: --node-status-update-frequency="10s" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.282395 5050 flags.go:64] FLAG: --oom-score-adj="-999" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 
14:41:45.282408 5050 flags.go:64] FLAG: --pod-cidr="" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.282419 5050 flags.go:64] FLAG: --pod-infra-container-image="quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:33549946e22a9ffa738fd94b1345f90921bc8f92fa6137784cb33c77ad806f9d" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.282474 5050 flags.go:64] FLAG: --pod-manifest-path="" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.282489 5050 flags.go:64] FLAG: --pod-max-pids="-1" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.282501 5050 flags.go:64] FLAG: --pods-per-core="0" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.282513 5050 flags.go:64] FLAG: --port="10250" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.282526 5050 flags.go:64] FLAG: --protect-kernel-defaults="false" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.282538 5050 flags.go:64] FLAG: --provider-id="" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.282550 5050 flags.go:64] FLAG: --qos-reserved="" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.282562 5050 flags.go:64] FLAG: --read-only-port="10255" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.282575 5050 flags.go:64] FLAG: --register-node="true" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.282587 5050 flags.go:64] FLAG: --register-schedulable="true" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.282598 5050 flags.go:64] FLAG: --register-with-taints="node-role.kubernetes.io/master=:NoSchedule" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.282622 5050 flags.go:64] FLAG: --registry-burst="10" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.282634 5050 flags.go:64] FLAG: --registry-qps="5" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.282646 5050 flags.go:64] FLAG: --reserved-cpus="" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.282661 5050 flags.go:64] FLAG: --reserved-memory="" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.282678 5050 flags.go:64] FLAG: --resolv-conf="/etc/resolv.conf" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.282727 5050 flags.go:64] FLAG: --root-dir="/var/lib/kubelet" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.282740 5050 flags.go:64] FLAG: --rotate-certificates="false" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.282752 5050 flags.go:64] FLAG: --rotate-server-certificates="false" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.282764 5050 flags.go:64] FLAG: --runonce="false" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.282776 5050 flags.go:64] FLAG: --runtime-cgroups="/system.slice/crio.service" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.282788 5050 flags.go:64] FLAG: --runtime-request-timeout="2m0s" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.282802 5050 flags.go:64] FLAG: --seccomp-default="false" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.282814 5050 flags.go:64] FLAG: --serialize-image-pulls="true" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.282827 5050 flags.go:64] FLAG: --storage-driver-buffer-duration="1m0s" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.282840 5050 flags.go:64] FLAG: --storage-driver-db="cadvisor" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.282853 5050 flags.go:64] FLAG: --storage-driver-host="localhost:8086" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.282866 5050 flags.go:64] FLAG: --storage-driver-password="root" Nov 23 14:41:45 crc 
kubenswrapper[5050]: I1123 14:41:45.282878 5050 flags.go:64] FLAG: --storage-driver-secure="false" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.282891 5050 flags.go:64] FLAG: --storage-driver-table="stats" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.282902 5050 flags.go:64] FLAG: --storage-driver-user="root" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.282913 5050 flags.go:64] FLAG: --streaming-connection-idle-timeout="4h0m0s" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.282925 5050 flags.go:64] FLAG: --sync-frequency="1m0s" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.282938 5050 flags.go:64] FLAG: --system-cgroups="" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.282949 5050 flags.go:64] FLAG: --system-reserved="cpu=200m,ephemeral-storage=350Mi,memory=350Mi" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.282969 5050 flags.go:64] FLAG: --system-reserved-cgroup="" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.282981 5050 flags.go:64] FLAG: --tls-cert-file="" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.282993 5050 flags.go:64] FLAG: --tls-cipher-suites="[]" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.283011 5050 flags.go:64] FLAG: --tls-min-version="" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.283023 5050 flags.go:64] FLAG: --tls-private-key-file="" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.283035 5050 flags.go:64] FLAG: --topology-manager-policy="none" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.283047 5050 flags.go:64] FLAG: --topology-manager-policy-options="" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.283059 5050 flags.go:64] FLAG: --topology-manager-scope="container" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.283072 5050 flags.go:64] FLAG: --v="2" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.283088 5050 flags.go:64] FLAG: --version="false" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.283105 5050 flags.go:64] FLAG: --vmodule="" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.283120 5050 flags.go:64] FLAG: --volume-plugin-dir="/etc/kubernetes/kubelet-plugins/volume/exec" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.283134 5050 flags.go:64] FLAG: --volume-stats-agg-period="1m0s" Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.283436 5050 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.283488 5050 feature_gate.go:330] unrecognized feature gate: PlatformOperators Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.283518 5050 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.283530 5050 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.283541 5050 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.283552 5050 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.283564 5050 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.283580 5050 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. 
Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.283596 5050 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.283609 5050 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.283621 5050 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.283634 5050 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.283646 5050 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.283658 5050 feature_gate.go:330] unrecognized feature gate: GatewayAPI Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.283670 5050 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.283681 5050 feature_gate.go:330] unrecognized feature gate: InsightsConfig Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.283691 5050 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.283701 5050 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.283711 5050 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.283720 5050 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.283732 5050 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.283743 5050 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.283753 5050 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.283764 5050 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.283778 5050 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.283792 5050 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.283803 5050 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.283814 5050 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.283827 5050 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. 
Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.283840 5050 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.283852 5050 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.283863 5050 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.283874 5050 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.283886 5050 feature_gate.go:330] unrecognized feature gate: OVNObservability Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.283897 5050 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.283909 5050 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.283919 5050 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.283930 5050 feature_gate.go:330] unrecognized feature gate: SignatureStores Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.283957 5050 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.283971 5050 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.283985 5050 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.283998 5050 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.284009 5050 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.284022 5050 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.284033 5050 feature_gate.go:330] unrecognized feature gate: NewOLM Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.284043 5050 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.284054 5050 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.284065 5050 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.284075 5050 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.284087 5050 feature_gate.go:330] unrecognized feature gate: PinnedImages Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.284097 5050 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.284107 5050 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.284117 5050 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.284127 5050 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.284137 5050 feature_gate.go:330] unrecognized 
feature gate: MultiArchInstallAWS Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.284147 5050 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.284157 5050 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.284166 5050 feature_gate.go:330] unrecognized feature gate: Example Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.284176 5050 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.284186 5050 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.284195 5050 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.284205 5050 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.284215 5050 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.284224 5050 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.284234 5050 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.284244 5050 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.284253 5050 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.284262 5050 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.284272 5050 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.284282 5050 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.284293 5050 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.284311 5050 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]} Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.300273 5050 server.go:491] "Kubelet version" kubeletVersion="v1.31.5" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.300970 5050 server.go:493] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK="" Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.301155 5050 feature_gate.go:330] unrecognized feature gate: Example Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.301175 5050 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.301187 5050 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.301198 5050 feature_gate.go:330] 
unrecognized feature gate: SetEIPForNLBIngressController Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.301209 5050 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.301219 5050 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.301229 5050 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.301241 5050 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.301251 5050 feature_gate.go:330] unrecognized feature gate: PlatformOperators Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.301261 5050 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.301270 5050 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.301280 5050 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.301289 5050 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.301299 5050 feature_gate.go:330] unrecognized feature gate: InsightsConfig Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.301309 5050 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.301319 5050 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.301329 5050 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.301339 5050 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.301349 5050 feature_gate.go:330] unrecognized feature gate: OVNObservability Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.301358 5050 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.301369 5050 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.301379 5050 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.301390 5050 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.301400 5050 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.301410 5050 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.301420 5050 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.301429 5050 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.301465 5050 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.301475 5050 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 
14:41:45.301489 5050 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.301686 5050 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.301698 5050 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.301709 5050 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.301720 5050 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.301733 5050 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.301745 5050 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.301756 5050 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.301771 5050 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.301785 5050 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.301795 5050 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.301805 5050 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.301815 5050 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.301825 5050 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.301835 5050 feature_gate.go:330] unrecognized feature gate: GatewayAPI Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.301845 5050 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.301856 5050 feature_gate.go:330] unrecognized feature gate: SignatureStores Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.301865 5050 feature_gate.go:330] unrecognized feature gate: NewOLM Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.301876 5050 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.301889 5050 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. 
Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.301902 5050 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.301915 5050 feature_gate.go:330] unrecognized feature gate: PinnedImages Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.301927 5050 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.301938 5050 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.301950 5050 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.301960 5050 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.301970 5050 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.301986 5050 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.301996 5050 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.302007 5050 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.302018 5050 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.302028 5050 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.302042 5050 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. 
Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.302055 5050 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.302067 5050 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.302078 5050 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.302090 5050 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.302100 5050 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.302112 5050 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.302123 5050 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.302134 5050 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.302147 5050 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.302165 5050 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]} Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.302545 5050 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.302563 5050 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.302575 5050 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.302585 5050 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.302595 5050 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.302606 5050 feature_gate.go:330] unrecognized feature gate: PlatformOperators Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.302616 5050 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.302627 5050 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.302637 5050 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.302647 5050 feature_gate.go:330] unrecognized feature gate: NewOLM Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.302657 5050 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.302667 5050 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.302679 5050 feature_gate.go:330] unrecognized feature gate: 
ConsolePluginContentSecurityPolicy Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.302691 5050 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.302737 5050 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.302750 5050 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.302762 5050 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.302776 5050 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.302787 5050 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.302798 5050 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.302808 5050 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.302819 5050 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.302829 5050 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.302839 5050 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.302849 5050 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.302859 5050 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.302868 5050 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.302878 5050 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.302888 5050 feature_gate.go:330] unrecognized feature gate: GatewayAPI Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.302899 5050 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.302910 5050 feature_gate.go:330] unrecognized feature gate: OVNObservability Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.302920 5050 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.302930 5050 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.302940 5050 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.302953 5050 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.302963 5050 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.302973 5050 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.302983 5050 feature_gate.go:330] unrecognized feature gate: SignatureStores Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.302993 5050 
feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.303003 5050 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.303012 5050 feature_gate.go:330] unrecognized feature gate: PinnedImages Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.303022 5050 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.303032 5050 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.303043 5050 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.303052 5050 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.303062 5050 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.303077 5050 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.303091 5050 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.303102 5050 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.303115 5050 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.303128 5050 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.303139 5050 feature_gate.go:330] unrecognized feature gate: Example Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.303150 5050 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.303160 5050 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.303171 5050 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.303185 5050 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.303201 5050 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. 
Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.303216 5050 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.303227 5050 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.303240 5050 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.303253 5050 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.303265 5050 feature_gate.go:330] unrecognized feature gate: InsightsConfig Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.303277 5050 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.303288 5050 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.303311 5050 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.303322 5050 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.303333 5050 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.303343 5050 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.303354 5050 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.303364 5050 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.303377 5050 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.303395 5050 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]} Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.303821 5050 server.go:940] "Client rotation is on, will bootstrap in background" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.311385 5050 bootstrap.go:85] "Current kubeconfig file contents are still valid, no bootstrap necessary" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.311599 5050 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-client-current.pem". 
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.314300 5050 server.go:997] "Starting client certificate rotation"
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.314349 5050 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate rotation is enabled
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.314602 5050 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2026-02-24 05:52:08 +0000 UTC, rotation deadline is 2026-01-04 01:24:29.316970425 +0000 UTC
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.314769 5050 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Waiting 994h42m44.002206986s for next certificate rotation
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.346806 5050 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.350267 5050 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.370952 5050 log.go:25] "Validated CRI v1 runtime API"
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.411059 5050 log.go:25] "Validated CRI v1 image API"
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.413393 5050 server.go:1437] "Using cgroup driver setting received from the CRI runtime" cgroupDriver="systemd"
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.421664 5050 fs.go:133] Filesystem UUIDs: map[0b076daa-c26a-46d2-b3a6-72a8dbc6e257:/dev/vda4 2025-11-23-14-36-53-00:/dev/sr0 7B77-95E7:/dev/vda2 de0497b0-db1b-465a-b278-03db02455c71:/dev/vda3]
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.421717 5050 fs.go:134] Filesystem partitions: map[/dev/shm:{mountpoint:/dev/shm major:0 minor:22 fsType:tmpfs blockSize:0} /dev/vda3:{mountpoint:/boot major:252 minor:3 fsType:ext4 blockSize:0} /dev/vda4:{mountpoint:/var major:252 minor:4 fsType:xfs blockSize:0} /run:{mountpoint:/run major:0 minor:24 fsType:tmpfs blockSize:0} /run/user/1000:{mountpoint:/run/user/1000 major:0 minor:42 fsType:tmpfs blockSize:0} /tmp:{mountpoint:/tmp major:0 minor:30 fsType:tmpfs blockSize:0} /var/lib/etcd:{mountpoint:/var/lib/etcd major:0 minor:43 fsType:tmpfs blockSize:0}]
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.447667 5050 manager.go:217] Machine: {Timestamp:2025-11-23 14:41:45.444251463 +0000 UTC m=+0.611247968 CPUVendorID:AuthenticAMD NumCores:12 NumPhysicalCores:1 NumSockets:12 CpuFrequency:2800000 MemoryCapacity:33654132736 SwapCapacity:0 MemoryByType:map[] NVMInfo:{MemoryModeCapacity:0 AppDirectModeCapacity:0 AvgPowerBudget:0} HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] MachineID:21801e6708c44f15b81395eb736a7cec SystemUUID:6e38a339-19e4-4f40-bd49-1fbe05dbb3f1 BootID:bf8d1c9f-d59a-4fb5-8c40-1d0a144375f2 Filesystems:[{Device:/run/user/1000 DeviceMajor:0 DeviceMinor:42 Capacity:3365412864 Type:vfs Inodes:821634 HasInodes:true} {Device:/var/lib/etcd DeviceMajor:0 DeviceMinor:43 Capacity:1073741824 Type:vfs Inodes:4108170 HasInodes:true} {Device:/dev/shm DeviceMajor:0 DeviceMinor:22 Capacity:16827064320 Type:vfs Inodes:4108170 HasInodes:true} {Device:/run DeviceMajor:0 DeviceMinor:24 Capacity:6730829824 Type:vfs Inodes:819200 HasInodes:true} {Device:/dev/vda4 DeviceMajor:252 DeviceMinor:4 Capacity:85292941312 Type:vfs Inodes:41679680 HasInodes:true} {Device:/tmp DeviceMajor:0 DeviceMinor:30 Capacity:16827068416 Type:vfs Inodes:1048576 HasInodes:true} {Device:/dev/vda3 DeviceMajor:252 DeviceMinor:3 Capacity:366869504 Type:vfs Inodes:98304 HasInodes:true}] DiskMap:map[252:0:{Name:vda Major:252 Minor:0 Size:214748364800 Scheduler:none}] NetworkDevices:[{Name:br-ex MacAddress:fa:16:3e:74:2b:75 Speed:0 Mtu:1500} {Name:br-int MacAddress:d6:39:55:2e:22:71 Speed:0 Mtu:1400} {Name:ens3 MacAddress:fa:16:3e:74:2b:75 Speed:-1 Mtu:1500} {Name:ens7 MacAddress:fa:16:3e:59:e6:7f Speed:-1 Mtu:1500} {Name:ens7.20 MacAddress:52:54:00:4c:c2:97 Speed:-1 Mtu:1496} {Name:ens7.21 MacAddress:52:54:00:a1:a0:d5 Speed:-1 Mtu:1496} {Name:ens7.22 MacAddress:52:54:00:d6:26:df Speed:-1 Mtu:1496} {Name:ens7.23 MacAddress:52:54:00:17:46:6c Speed:-1 Mtu:1496} {Name:eth10 MacAddress:3a:cd:2b:c2:56:49 Speed:0 Mtu:1500} {Name:ovn-k8s-mp0 MacAddress:0a:58:0a:d9:00:02 Speed:0 Mtu:1400} {Name:ovs-system MacAddress:16:b6:d4:56:93:06 Speed:0 Mtu:1500}] Topology:[{Id:0 Memory:33654132736 HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] Cores:[{Id:0 Threads:[0] Caches:[{Id:0 Size:32768 Type:Data Level:1} {Id:0 Size:32768 Type:Instruction Level:1} {Id:0 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:0 Size:16777216 Type:Unified Level:3}] SocketID:0 BookID: DrawerID:} {Id:0 Threads:[1] Caches:[{Id:1 Size:32768 Type:Data Level:1} {Id:1 Size:32768 Type:Instruction Level:1} {Id:1 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:1 Size:16777216 Type:Unified Level:3}] SocketID:1 BookID: DrawerID:} {Id:0 Threads:[10] Caches:[{Id:10 Size:32768 Type:Data Level:1} {Id:10 Size:32768 Type:Instruction Level:1} {Id:10 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:10 Size:16777216 Type:Unified Level:3}] SocketID:10 BookID: DrawerID:} {Id:0 Threads:[11] Caches:[{Id:11 Size:32768 Type:Data Level:1} {Id:11 Size:32768 Type:Instruction Level:1} {Id:11 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:11 Size:16777216 Type:Unified Level:3}] SocketID:11 BookID: DrawerID:} {Id:0 Threads:[2] Caches:[{Id:2 Size:32768 Type:Data Level:1} {Id:2 Size:32768 Type:Instruction Level:1} {Id:2 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:2 Size:16777216 Type:Unified Level:3}] SocketID:2 BookID: DrawerID:} {Id:0 Threads:[3] Caches:[{Id:3 Size:32768 Type:Data Level:1} {Id:3 Size:32768 Type:Instruction Level:1} {Id:3 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:3 Size:16777216 Type:Unified Level:3}] SocketID:3 BookID: DrawerID:} {Id:0 Threads:[4] Caches:[{Id:4 Size:32768 Type:Data Level:1} {Id:4 Size:32768 Type:Instruction Level:1} {Id:4 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:4 Size:16777216 Type:Unified Level:3}] SocketID:4 BookID: DrawerID:} {Id:0 Threads:[5] Caches:[{Id:5 Size:32768 Type:Data Level:1} {Id:5 Size:32768 Type:Instruction Level:1} {Id:5 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:5 Size:16777216 Type:Unified Level:3}] SocketID:5 BookID: DrawerID:} {Id:0 Threads:[6] Caches:[{Id:6 Size:32768 Type:Data Level:1} {Id:6 Size:32768 Type:Instruction Level:1} {Id:6 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:6 Size:16777216 Type:Unified Level:3}] SocketID:6 BookID: DrawerID:} {Id:0 Threads:[7] Caches:[{Id:7 Size:32768 Type:Data Level:1} {Id:7 Size:32768 Type:Instruction Level:1} {Id:7 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:7 Size:16777216 Type:Unified Level:3}] SocketID:7 BookID: DrawerID:} {Id:0 Threads:[8] Caches:[{Id:8 Size:32768 Type:Data Level:1} {Id:8 Size:32768 Type:Instruction Level:1} {Id:8 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:8 Size:16777216 Type:Unified Level:3}] SocketID:8 BookID: DrawerID:} {Id:0 Threads:[9] Caches:[{Id:9 Size:32768 Type:Data Level:1} {Id:9 Size:32768 Type:Instruction Level:1} {Id:9 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:9 Size:16777216 Type:Unified Level:3}] SocketID:9 BookID: DrawerID:}] Caches:[] Distances:[10]}] CloudProvider:Unknown InstanceType:Unknown InstanceID:None}
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.448056 5050 manager_no_libpfm.go:29] cAdvisor is build without cgo and/or libpfm support. Perf event counters are not available.
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.448323 5050 manager.go:233] Version: {KernelVersion:5.14.0-427.50.2.el9_4.x86_64 ContainerOsVersion:Red Hat Enterprise Linux CoreOS 418.94.202502100215-0 DockerVersion: DockerAPIVersion: CadvisorVersion: CadvisorRevision:}
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.451476 5050 swap_util.go:113] "Swap is on" /proc/swaps contents="Filename\t\t\t\tType\t\tSize\t\tUsed\t\tPriority"
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.451834 5050 container_manager_linux.go:267] "Container manager verified user specified cgroup-root exists" cgroupRoot=[]
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.451913 5050 container_manager_linux.go:272] "Creating Container Manager object based on Node Config" nodeConfig={"NodeName":"crc","RuntimeCgroupsName":"/system.slice/crio.service","SystemCgroupsName":"/system.slice","KubeletCgroupsName":"","KubeletOOMScoreAdj":-999,"ContainerRuntime":"","CgroupsPerQOS":true,"CgroupRoot":"/","CgroupDriver":"systemd","KubeletRootDir":"/var/lib/kubelet","ProtectKernelDefaults":true,"KubeReservedCgroupName":"","SystemReservedCgroupName":"","ReservedSystemCPUs":{},"EnforceNodeAllocatable":{"pods":{}},"KubeReserved":null,"SystemReserved":{"cpu":"200m","ephemeral-storage":"350Mi","memory":"350Mi"},"HardEvictionThresholds":[{"Signal":"memory.available","Operator":"LessThan","Value":{"Quantity":"100Mi","Percentage":0},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.1},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.15},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null}],"QOSReserved":{},"CPUManagerPolicy":"none","CPUManagerPolicyOptions":null,"TopologyManagerScope":"container","CPUManagerReconcilePeriod":10000000000,"ExperimentalMemoryManagerPolicy":"None","ExperimentalMemoryManagerReservedMemory":null,"PodPidsLimit":4096,"EnforceCPULimits":true,"CPUCFSQuotaPeriod":100000000,"TopologyManagerPolicy":"none","TopologyManagerPolicyOptions":null,"CgroupVersion":2}
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.452241 5050 topology_manager.go:138] "Creating topology manager with none policy"
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.452259 5050 container_manager_linux.go:303] "Creating device plugin manager"
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.452897 5050 manager.go:142] "Creating Device Plugin manager" path="/var/lib/kubelet/device-plugins/kubelet.sock"
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.454369 5050 server.go:66] "Creating device plugin registration server" version="v1beta1" socket="/var/lib/kubelet/device-plugins/kubelet.sock"
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.454671 5050 state_mem.go:36] "Initialized new in-memory state store"
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.454870 5050 server.go:1245] "Using root directory" path="/var/lib/kubelet"
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.461383 5050 kubelet.go:418] "Attempting to sync node with API server"
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.461435 5050 kubelet.go:313] "Adding static pod path" path="/etc/kubernetes/manifests"
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.461514 5050 file.go:69] "Watching path" path="/etc/kubernetes/manifests"
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.461538 5050 kubelet.go:324] "Adding apiserver pod source"
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.461558 5050 apiserver.go:42] "Waiting for node sync before watching apiserver pods"
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.468188 5050 kuberuntime_manager.go:262] "Container runtime initialized" containerRuntime="cri-o" version="1.31.5-4.rhaos4.18.gitdad78d5.el9" apiVersion="v1"
Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.469042 5050 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.32:6443: connect: connection refused
Nov 23 14:41:45 crc kubenswrapper[5050]: E1123 14:41:45.469189 5050 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.32:6443: connect: connection refused" logger="UnhandledError"
Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.469318 5050 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.32:6443: connect: connection refused
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.469488 5050 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-server-current.pem".
Nov 23 14:41:45 crc kubenswrapper[5050]: E1123 14:41:45.469487 5050 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.32:6443: connect: connection refused" logger="UnhandledError"
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.471125 5050 kubelet.go:854] "Not starting ClusterTrustBundle informer because we are in static kubelet mode"
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.473812 5050 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/portworx-volume"
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.473857 5050 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/empty-dir"
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.473869 5050 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/git-repo"
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.473880 5050 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/host-path"
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.473895 5050 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/nfs"
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.473904 5050 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/secret"
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.473914 5050 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/iscsi"
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.473928 5050 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/downward-api"
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.473940 5050 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/fc"
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.473950 5050 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/configmap"
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.473982 5050 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/projected"
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.473992 5050 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/local-volume"
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.476133 5050 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/csi"
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.476853 5050 server.go:1280] "Started kubelet"
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.478337 5050 server.go:163] "Starting to listen" address="0.0.0.0" port=10250
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.478347 5050 ratelimit.go:55] "Setting rate limiting for endpoint" service="podresources" qps=100 burstTokens=10
Nov 23 14:41:45 crc systemd[1]: Started Kubernetes Kubelet.
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.486813 5050 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate rotation is enabled
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.486867 5050 fs_resource_analyzer.go:67] "Starting FS ResourceAnalyzer"
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.487396 5050 server.go:236] "Starting to serve the podresources API" endpoint="unix:/var/lib/kubelet/pod-resources/kubelet.sock"
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.479194 5050 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.32:6443: connect: connection refused
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.487774 5050 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-11 05:12:28.987817735 +0000 UTC
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.487856 5050 certificate_manager.go:356] kubernetes.io/kubelet-serving: Waiting 1166h30m43.499967505s for next certificate rotation
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.488126 5050 volume_manager.go:287] "The desired_state_of_world populator starts"
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.488176 5050 volume_manager.go:289] "Starting Kubelet Volume Manager"
Nov 23 14:41:45 crc kubenswrapper[5050]: E1123 14:41:45.488158 5050 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found"
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.488399 5050 desired_state_of_world_populator.go:146] "Desired state populator starts to run"
Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.489253 5050 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.32:6443: connect: connection refused
Nov 23 14:41:45 crc kubenswrapper[5050]: E1123 14:41:45.489357 5050 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.32:6443: connect: connection refused" logger="UnhandledError"
Nov 23 14:41:45 crc kubenswrapper[5050]: E1123 14:41:45.490093 5050 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.32:6443: connect: connection refused" interval="200ms"
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.490996 5050 factory.go:55] Registering systemd factory
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.491298 5050 factory.go:221] Registration of the systemd container factory successfully
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.491811 5050 factory.go:153] Registering CRI-O factory
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.491851 5050 factory.go:221] Registration of the crio container factory successfully
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.492014 5050 factory.go:219] Registration of the containerd container factory failed: unable to create containerd client: containerd: cannot unix dial containerd api service: dial unix /run/containerd/containerd.sock: connect: no such file or directory
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.492076 5050 factory.go:103] Registering Raw factory
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.492105 5050 manager.go:1196] Started watching for new ooms in manager
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.493324 5050 manager.go:319] Starting recovery of all containers
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.495178 5050 server.go:460] "Adding debug handlers to kubelet server"
Nov 23 14:41:45 crc kubenswrapper[5050]: E1123 14:41:45.499716 5050 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.102.83.32:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.187aa9cdcf17e67f default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-23 14:41:45.476810367 +0000 UTC m=+0.643806862,LastTimestamp:2025-11-23 14:41:45.476810367 +0000 UTC m=+0.643806862,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.504948 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.505060 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.505085 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.505103 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.505118 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.505136 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.505160 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.505178 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.505203 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.505222 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="44663579-783b-4372-86d6-acf235a62d72" volumeName="kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.505246 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.505265 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.505282 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.505302 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.505321 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.505342 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.505361 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.505381 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.505400 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.505421 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.505439 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.505491 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.505513 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.505536 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.505565 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.505584 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.505611 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.505632 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.505650 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.505668 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.505727 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.505749 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.505773 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.505795 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.505816 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.505840 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.505861 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.505881 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.505901 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.505922 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.505994 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.506022 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.506042 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" volumeName="kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.506062 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.506082 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3b6479f0-333b-4a96-9adf-2099afdc2447" volumeName="kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.506104 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.506126 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.506146 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.506167 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.506190 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.506209 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.506229 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.506258 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.506280 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.506301 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.506322 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.506341 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.506358 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.506376 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.506393 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.506415 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.506436 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.506483 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.506505 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.506526 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.506544 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.506569 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.506584 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.506601 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.506649 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.506663 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.506677 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.506695 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.506710 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.506724 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.506738 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.506753 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.509233 5050 reconstruct.go:144] "Volume is marked device as uncertain and added into the actual state" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" deviceMountPath="/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount"
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.509292 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.509311 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.509327 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.509344 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.509363 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.509378 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.509393 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.509407 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.509422 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.509439 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.509483 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.509500 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49ef4625-1d3a-4a9f-b595-c2433d32326d" volumeName="kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.509518 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.509539 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" volumeName="kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.509561 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.509582 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.509597 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.509612 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.509626 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.509640 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.509654 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d751cbb-f2e2-430d-9754-c882a5e924a5" volumeName="kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.509674 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.509695 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.509711 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.509726 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.509739 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.509755 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.509782 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.509801 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.509823 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.509844 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.509882 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.509902 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.509923 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.509939 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.509955 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.509974 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.509994 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.510013 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.510031 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.510051 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.510069 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.510090 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.510108 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.510124 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.510138 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.512786 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.512861 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.512896 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.512929 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.512958 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.512987 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.513025 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.513059 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.513086 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.513117 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.513145 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.513176 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.513209 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.513234 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" seLinuxMountContext=""
Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.513257 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5"
volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" seLinuxMountContext="" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.513283 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" seLinuxMountContext="" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.513310 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" seLinuxMountContext="" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.513342 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" seLinuxMountContext="" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.513376 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" seLinuxMountContext="" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.513407 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" seLinuxMountContext="" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.513437 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" seLinuxMountContext="" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.513504 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" seLinuxMountContext="" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.513535 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" seLinuxMountContext="" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.513564 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" seLinuxMountContext="" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.513591 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" seLinuxMountContext="" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.513624 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" 
seLinuxMountContext="" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.513664 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" seLinuxMountContext="" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.513694 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" seLinuxMountContext="" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.513724 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" seLinuxMountContext="" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.513747 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" seLinuxMountContext="" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.513769 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" seLinuxMountContext="" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.513887 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" seLinuxMountContext="" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.513911 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" seLinuxMountContext="" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.513933 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" seLinuxMountContext="" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.513955 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb" seLinuxMountContext="" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.513976 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" seLinuxMountContext="" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.513999 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" 
seLinuxMountContext="" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.514028 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" seLinuxMountContext="" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.514064 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" seLinuxMountContext="" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.514094 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5" seLinuxMountContext="" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.514124 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" seLinuxMountContext="" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.514157 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" seLinuxMountContext="" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.514188 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" seLinuxMountContext="" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.514218 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" seLinuxMountContext="" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.514251 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" seLinuxMountContext="" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.514274 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" seLinuxMountContext="" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.514296 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" seLinuxMountContext="" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.514319 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" 
seLinuxMountContext="" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.514348 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" seLinuxMountContext="" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.514375 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" seLinuxMountContext="" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.514404 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" seLinuxMountContext="" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.514426 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" seLinuxMountContext="" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.514553 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" seLinuxMountContext="" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.514592 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" seLinuxMountContext="" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.514621 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" seLinuxMountContext="" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.514653 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script" seLinuxMountContext="" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.514684 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" seLinuxMountContext="" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.514712 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" seLinuxMountContext="" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.514745 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" seLinuxMountContext="" Nov 23 14:41:45 crc 
kubenswrapper[5050]: I1123 14:41:45.514777 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" seLinuxMountContext="" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.514845 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" seLinuxMountContext="" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.514878 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" seLinuxMountContext="" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.514913 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" seLinuxMountContext="" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.514943 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" seLinuxMountContext="" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.514974 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" seLinuxMountContext="" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.514999 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" seLinuxMountContext="" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.515020 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" seLinuxMountContext="" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.515042 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" seLinuxMountContext="" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.515064 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" seLinuxMountContext="" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.515085 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" seLinuxMountContext="" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 
14:41:45.515114 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" seLinuxMountContext="" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.515147 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" seLinuxMountContext="" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.515176 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" seLinuxMountContext="" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.515206 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" seLinuxMountContext="" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.515235 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" seLinuxMountContext="" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.515257 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" seLinuxMountContext="" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.515281 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" seLinuxMountContext="" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.515303 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" seLinuxMountContext="" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.515324 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" seLinuxMountContext="" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.515345 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" seLinuxMountContext="" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.515367 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" seLinuxMountContext="" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 
14:41:45.515389 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" seLinuxMountContext="" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.515427 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" seLinuxMountContext="" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.515485 5050 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" seLinuxMountContext="" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.515513 5050 reconstruct.go:97] "Volume reconstruction finished" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.515532 5050 reconciler.go:26] "Reconciler: start to sync state" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.532690 5050 manager.go:324] Recovery completed Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.544072 5050 kubelet_network_linux.go:50] "Initialized iptables rules." protocol="IPv4" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.546951 5050 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.547189 5050 kubelet_network_linux.go:50] "Initialized iptables rules." protocol="IPv6" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.547234 5050 status_manager.go:217] "Starting to sync pod status with apiserver" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.547268 5050 kubelet.go:2335] "Starting kubelet main sync loop" Nov 23 14:41:45 crc kubenswrapper[5050]: E1123 14:41:45.547320 5050 kubelet.go:2359] "Skipping pod synchronization" err="[container runtime status check may not have completed yet, PLEG is not healthy: pleg has yet to be successful]" Nov 23 14:41:45 crc kubenswrapper[5050]: W1123 14:41:45.548912 5050 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.32:6443: connect: connection refused Nov 23 14:41:45 crc kubenswrapper[5050]: E1123 14:41:45.548974 5050 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.32:6443: connect: connection refused" logger="UnhandledError" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.549959 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.549995 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.550015 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.552638 5050 cpu_manager.go:225] "Starting CPU manager" 
policy="none" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.552680 5050 cpu_manager.go:226] "Reconciling" reconcilePeriod="10s" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.552703 5050 state_mem.go:36] "Initialized new in-memory state store" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.573429 5050 policy_none.go:49] "None policy: Start" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.575343 5050 memory_manager.go:170] "Starting memorymanager" policy="None" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.575397 5050 state_mem.go:35] "Initializing new in-memory state store" Nov 23 14:41:45 crc kubenswrapper[5050]: E1123 14:41:45.589093 5050 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.631105 5050 manager.go:334] "Starting Device Plugin manager" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.631427 5050 manager.go:513] "Failed to read data from checkpoint" checkpoint="kubelet_internal_checkpoint" err="checkpoint is not found" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.631466 5050 server.go:79] "Starting device plugin registration server" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.631987 5050 eviction_manager.go:189] "Eviction manager: starting control loop" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.632016 5050 container_log_manager.go:189] "Initializing container log rotate workers" workers=1 monitorPeriod="10s" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.632529 5050 plugin_watcher.go:51] "Plugin Watcher Start" path="/var/lib/kubelet/plugins_registry" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.632641 5050 plugin_manager.go:116] "The desired_state_of_world populator (plugin watcher) starts" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.632657 5050 plugin_manager.go:118] "Starting Kubelet Plugin Manager" Nov 23 14:41:45 crc kubenswrapper[5050]: E1123 14:41:45.640154 5050 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.647624 5050 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-etcd/etcd-crc","openshift-kube-apiserver/kube-apiserver-crc","openshift-kube-controller-manager/kube-controller-manager-crc","openshift-kube-scheduler/openshift-kube-scheduler-crc","openshift-machine-config-operator/kube-rbac-proxy-crio-crc"] Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.647802 5050 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.649427 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.649509 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.649526 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.649711 5050 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.650161 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-etcd/etcd-crc" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.650307 5050 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.650700 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.650732 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.650743 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.650845 5050 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.651069 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.651132 5050 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.651879 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.651901 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.651913 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.651980 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.652033 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.652039 5050 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.652048 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.652211 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.652244 5050 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.652595 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.652744 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.652856 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.652612 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.652993 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.653005 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.652972 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.653162 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.653172 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.653181 5050 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.653648 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.653737 5050 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.654229 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.654262 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.654276 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.654485 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.654516 5050 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.655416 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.655490 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.655510 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.655857 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.655894 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.655988 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:41:45 crc kubenswrapper[5050]: E1123 14:41:45.691100 5050 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.32:6443: connect: connection refused" interval="400ms" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.717912 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.717973 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.717995 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.718018 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.718116 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 
14:41:45.718225 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.718299 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.718344 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.718414 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.718439 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.718476 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.718537 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.718661 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.718730 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.718780 5050 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.732707 5050 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.739085 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.739242 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.739258 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.739298 5050 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 23 14:41:45 crc kubenswrapper[5050]: E1123 14:41:45.739943 5050 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.32:6443: connect: connection refused" node="crc" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.820719 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.821234 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.821283 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.821312 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.821368 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.820989 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.821324 5050 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.821483 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.821548 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.821615 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.821633 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.821663 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.821688 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.821711 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.821744 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.821767 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"usr-local-bin\" (UniqueName: 
\"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.821763 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.821788 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.821813 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.821827 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.821846 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.821804 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.821886 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.821883 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.821888 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.821922 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"data-dir\" 
(UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.821837 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.822001 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.822030 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.822186 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.940620 5050 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.942356 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.942407 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.942426 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.942495 5050 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 23 14:41:45 crc kubenswrapper[5050]: E1123 14:41:45.943061 5050 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.32:6443: connect: connection refused" node="crc" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.975923 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc" Nov 23 14:41:45 crc kubenswrapper[5050]: I1123 14:41:45.984887 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 23 14:41:46 crc kubenswrapper[5050]: I1123 14:41:46.005096 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 23 14:41:46 crc kubenswrapper[5050]: I1123 14:41:46.027468 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 23 14:41:46 crc kubenswrapper[5050]: W1123 14:41:46.032738 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2139d3e2895fc6797b9c76a1b4c9886d.slice/crio-fb6f75d79687ca9925676354f3bed0aa211f5a7afb5bd62cf6c30b6a15b49831 WatchSource:0}: Error finding container fb6f75d79687ca9925676354f3bed0aa211f5a7afb5bd62cf6c30b6a15b49831: Status 404 returned error can't find the container with id fb6f75d79687ca9925676354f3bed0aa211f5a7afb5bd62cf6c30b6a15b49831 Nov 23 14:41:46 crc kubenswrapper[5050]: W1123 14:41:46.033215 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf614b9022728cf315e60c057852e563e.slice/crio-0299b69fce325c5be8c08e2b9680a4ecd974815cec73a1772147827f14313640 WatchSource:0}: Error finding container 0299b69fce325c5be8c08e2b9680a4ecd974815cec73a1772147827f14313640: Status 404 returned error can't find the container with id 0299b69fce325c5be8c08e2b9680a4ecd974815cec73a1772147827f14313640 Nov 23 14:41:46 crc kubenswrapper[5050]: I1123 14:41:46.035697 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 23 14:41:46 crc kubenswrapper[5050]: W1123 14:41:46.040780 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf4b27818a5e8e43d0dc095d08835c792.slice/crio-eaa353c9b8a5f6d6201cb331ddab03816ea3c62cd41c374f634bcdf7486aad69 WatchSource:0}: Error finding container eaa353c9b8a5f6d6201cb331ddab03816ea3c62cd41c374f634bcdf7486aad69: Status 404 returned error can't find the container with id eaa353c9b8a5f6d6201cb331ddab03816ea3c62cd41c374f634bcdf7486aad69 Nov 23 14:41:46 crc kubenswrapper[5050]: W1123 14:41:46.055957 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3dcd261975c3d6b9a6ad6367fd4facd3.slice/crio-1eac17de3f6782d9c50b720db8916f0e0e003378e54d46054184387c465869f8 WatchSource:0}: Error finding container 1eac17de3f6782d9c50b720db8916f0e0e003378e54d46054184387c465869f8: Status 404 returned error can't find the container with id 1eac17de3f6782d9c50b720db8916f0e0e003378e54d46054184387c465869f8 Nov 23 14:41:46 crc kubenswrapper[5050]: E1123 14:41:46.092649 5050 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.32:6443: connect: connection refused" interval="800ms" Nov 23 14:41:46 crc kubenswrapper[5050]: W1123 14:41:46.302891 5050 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.32:6443: connect: connection refused Nov 23 14:41:46 crc kubenswrapper[5050]: E1123 14:41:46.303007 5050 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.32:6443: connect: connection refused" logger="UnhandledError" Nov 23 14:41:46 crc kubenswrapper[5050]: 
I1123 14:41:46.343980 5050 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 23 14:41:46 crc kubenswrapper[5050]: I1123 14:41:46.345597 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:41:46 crc kubenswrapper[5050]: I1123 14:41:46.345662 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:41:46 crc kubenswrapper[5050]: I1123 14:41:46.345679 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:41:46 crc kubenswrapper[5050]: I1123 14:41:46.345722 5050 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 23 14:41:46 crc kubenswrapper[5050]: E1123 14:41:46.346366 5050 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.32:6443: connect: connection refused" node="crc" Nov 23 14:41:46 crc kubenswrapper[5050]: I1123 14:41:46.489267 5050 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.32:6443: connect: connection refused Nov 23 14:41:46 crc kubenswrapper[5050]: W1123 14:41:46.506974 5050 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.32:6443: connect: connection refused Nov 23 14:41:46 crc kubenswrapper[5050]: E1123 14:41:46.507090 5050 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.32:6443: connect: connection refused" logger="UnhandledError" Nov 23 14:41:46 crc kubenswrapper[5050]: I1123 14:41:46.554077 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"1eac17de3f6782d9c50b720db8916f0e0e003378e54d46054184387c465869f8"} Nov 23 14:41:46 crc kubenswrapper[5050]: I1123 14:41:46.555875 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"388de9d649fb1e43db6858301c8108079f809458f2ce32eda7c33d09d77c4c66"} Nov 23 14:41:46 crc kubenswrapper[5050]: I1123 14:41:46.557021 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"eaa353c9b8a5f6d6201cb331ddab03816ea3c62cd41c374f634bcdf7486aad69"} Nov 23 14:41:46 crc kubenswrapper[5050]: I1123 14:41:46.558402 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"0299b69fce325c5be8c08e2b9680a4ecd974815cec73a1772147827f14313640"} Nov 23 14:41:46 crc kubenswrapper[5050]: I1123 14:41:46.560781 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" 
event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"fb6f75d79687ca9925676354f3bed0aa211f5a7afb5bd62cf6c30b6a15b49831"} Nov 23 14:41:46 crc kubenswrapper[5050]: W1123 14:41:46.589672 5050 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.32:6443: connect: connection refused Nov 23 14:41:46 crc kubenswrapper[5050]: E1123 14:41:46.589782 5050 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.32:6443: connect: connection refused" logger="UnhandledError" Nov 23 14:41:46 crc kubenswrapper[5050]: E1123 14:41:46.893852 5050 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.32:6443: connect: connection refused" interval="1.6s" Nov 23 14:41:46 crc kubenswrapper[5050]: W1123 14:41:46.926800 5050 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.32:6443: connect: connection refused Nov 23 14:41:46 crc kubenswrapper[5050]: E1123 14:41:46.926914 5050 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.32:6443: connect: connection refused" logger="UnhandledError" Nov 23 14:41:47 crc kubenswrapper[5050]: I1123 14:41:47.147492 5050 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 23 14:41:47 crc kubenswrapper[5050]: I1123 14:41:47.149216 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:41:47 crc kubenswrapper[5050]: I1123 14:41:47.149283 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:41:47 crc kubenswrapper[5050]: I1123 14:41:47.149298 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:41:47 crc kubenswrapper[5050]: I1123 14:41:47.149342 5050 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 23 14:41:47 crc kubenswrapper[5050]: E1123 14:41:47.150011 5050 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.32:6443: connect: connection refused" node="crc" Nov 23 14:41:47 crc kubenswrapper[5050]: I1123 14:41:47.488873 5050 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.32:6443: connect: connection refused Nov 23 14:41:47 crc kubenswrapper[5050]: I1123 14:41:47.568148 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" 
event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"b7b8be59b2e91f123c3d88b93237121402f84e9f54aeeaa0fae57baaa6238140"} Nov 23 14:41:47 crc kubenswrapper[5050]: I1123 14:41:47.568232 5050 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 23 14:41:47 crc kubenswrapper[5050]: I1123 14:41:47.568255 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"f3e232fb0da11b5caaec81c7b6ae7185d121508d99fce67c77e1d5d52289dad8"} Nov 23 14:41:47 crc kubenswrapper[5050]: I1123 14:41:47.568282 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"374a569595342d2e7684dc020e82bd80b35965aff751215c6c926005ea040798"} Nov 23 14:41:47 crc kubenswrapper[5050]: I1123 14:41:47.568304 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"63b17cb181aa6b22312393ce35127c8e06ae80cd1feaaa001d7cb34cdc111456"} Nov 23 14:41:47 crc kubenswrapper[5050]: I1123 14:41:47.569404 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:41:47 crc kubenswrapper[5050]: I1123 14:41:47.569471 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:41:47 crc kubenswrapper[5050]: I1123 14:41:47.569480 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:41:47 crc kubenswrapper[5050]: I1123 14:41:47.571344 5050 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="c08fa32aae98e7b7046d01d9b2a5ed244b6982aba34db8397e57dad1e08d571e" exitCode=0 Nov 23 14:41:47 crc kubenswrapper[5050]: I1123 14:41:47.571466 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"c08fa32aae98e7b7046d01d9b2a5ed244b6982aba34db8397e57dad1e08d571e"} Nov 23 14:41:47 crc kubenswrapper[5050]: I1123 14:41:47.571549 5050 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 23 14:41:47 crc kubenswrapper[5050]: I1123 14:41:47.573454 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:41:47 crc kubenswrapper[5050]: I1123 14:41:47.573493 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:41:47 crc kubenswrapper[5050]: I1123 14:41:47.573506 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:41:47 crc kubenswrapper[5050]: I1123 14:41:47.577239 5050 generic.go:334] "Generic (PLEG): container finished" podID="3dcd261975c3d6b9a6ad6367fd4facd3" containerID="ced1c27a73cf758143ad23a5852d2211c66925e2c429b91954344f6d78ad5ffd" exitCode=0 Nov 23 14:41:47 crc kubenswrapper[5050]: I1123 14:41:47.577384 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" 
event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerDied","Data":"ced1c27a73cf758143ad23a5852d2211c66925e2c429b91954344f6d78ad5ffd"} Nov 23 14:41:47 crc kubenswrapper[5050]: I1123 14:41:47.577601 5050 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 23 14:41:47 crc kubenswrapper[5050]: I1123 14:41:47.579365 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:41:47 crc kubenswrapper[5050]: I1123 14:41:47.579399 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:41:47 crc kubenswrapper[5050]: I1123 14:41:47.579410 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:41:47 crc kubenswrapper[5050]: I1123 14:41:47.580566 5050 generic.go:334] "Generic (PLEG): container finished" podID="d1b160f5dda77d281dd8e69ec8d817f9" containerID="3e2428cf4682b20bc4bfea576e90dcfd5de93218b167a7d27e9c6af5496a7fd8" exitCode=0 Nov 23 14:41:47 crc kubenswrapper[5050]: I1123 14:41:47.580737 5050 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 23 14:41:47 crc kubenswrapper[5050]: I1123 14:41:47.580728 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerDied","Data":"3e2428cf4682b20bc4bfea576e90dcfd5de93218b167a7d27e9c6af5496a7fd8"} Nov 23 14:41:47 crc kubenswrapper[5050]: I1123 14:41:47.582338 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:41:47 crc kubenswrapper[5050]: I1123 14:41:47.582407 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:41:47 crc kubenswrapper[5050]: I1123 14:41:47.582438 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:41:47 crc kubenswrapper[5050]: I1123 14:41:47.584311 5050 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="1a63464a6e2f79d8e5a7e0eed9f72b684e0071cad60d3a801754762be4a2735c" exitCode=0 Nov 23 14:41:47 crc kubenswrapper[5050]: I1123 14:41:47.584384 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"1a63464a6e2f79d8e5a7e0eed9f72b684e0071cad60d3a801754762be4a2735c"} Nov 23 14:41:47 crc kubenswrapper[5050]: I1123 14:41:47.584561 5050 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 23 14:41:47 crc kubenswrapper[5050]: I1123 14:41:47.586095 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:41:47 crc kubenswrapper[5050]: I1123 14:41:47.586152 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:41:47 crc kubenswrapper[5050]: I1123 14:41:47.586172 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:41:47 crc kubenswrapper[5050]: I1123 14:41:47.588743 5050 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 23 14:41:47 crc kubenswrapper[5050]: I1123 
14:41:47.589824 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:41:47 crc kubenswrapper[5050]: I1123 14:41:47.589868 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:41:47 crc kubenswrapper[5050]: I1123 14:41:47.589884 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:41:48 crc kubenswrapper[5050]: I1123 14:41:48.489235 5050 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.32:6443: connect: connection refused Nov 23 14:41:48 crc kubenswrapper[5050]: E1123 14:41:48.494840 5050 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.32:6443: connect: connection refused" interval="3.2s" Nov 23 14:41:48 crc kubenswrapper[5050]: I1123 14:41:48.591344 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"0e3f802f93600b6a56df2a1709f042c01893f2d619ae5d173a014975b1d33d8d"} Nov 23 14:41:48 crc kubenswrapper[5050]: I1123 14:41:48.591503 5050 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 23 14:41:48 crc kubenswrapper[5050]: I1123 14:41:48.592980 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:41:48 crc kubenswrapper[5050]: I1123 14:41:48.593021 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:41:48 crc kubenswrapper[5050]: I1123 14:41:48.593034 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:41:48 crc kubenswrapper[5050]: I1123 14:41:48.597157 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"beaf638703b4b64fabd10eb14bad058dc0dae74261a462e7f2fddd8aa3811cf7"} Nov 23 14:41:48 crc kubenswrapper[5050]: I1123 14:41:48.597210 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"456093b33927a232192a5fa7ee1a4228e9f37da58e5e09cb33018ae6f5b14b2c"} Nov 23 14:41:48 crc kubenswrapper[5050]: I1123 14:41:48.597229 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"ccfc14c36ecee0d8502415582be6053b096ea77504bf5de67f77bf6f74c0936b"} Nov 23 14:41:48 crc kubenswrapper[5050]: I1123 14:41:48.597243 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"358800bef7b7e16e58003aa5e2411212334a5b1a33c80d223eb7d40ad13867f0"} Nov 23 14:41:48 crc kubenswrapper[5050]: I1123 14:41:48.601130 5050 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" 
containerID="8fbecb8ad747ae37afc9903477aed58b6f1781df2a0a05a89844a65afe1e8db1" exitCode=0 Nov 23 14:41:48 crc kubenswrapper[5050]: I1123 14:41:48.601249 5050 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 23 14:41:48 crc kubenswrapper[5050]: I1123 14:41:48.601267 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"8fbecb8ad747ae37afc9903477aed58b6f1781df2a0a05a89844a65afe1e8db1"} Nov 23 14:41:48 crc kubenswrapper[5050]: I1123 14:41:48.602247 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:41:48 crc kubenswrapper[5050]: I1123 14:41:48.602291 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:41:48 crc kubenswrapper[5050]: I1123 14:41:48.602307 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:41:48 crc kubenswrapper[5050]: I1123 14:41:48.606879 5050 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 23 14:41:48 crc kubenswrapper[5050]: I1123 14:41:48.607034 5050 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 23 14:41:48 crc kubenswrapper[5050]: I1123 14:41:48.607279 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"60d5ec902c3bb132cd6e58cfbb8e7fdbaf26f34ca50654f9bd028dbb05d7aaec"} Nov 23 14:41:48 crc kubenswrapper[5050]: I1123 14:41:48.607350 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"65320ae48f1453657b5ff41429b099e308d536f2b7e176941a1f258e77922eeb"} Nov 23 14:41:48 crc kubenswrapper[5050]: I1123 14:41:48.607377 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"7eb678421bc625e01448e7eeb5914cc18880228c6d483129f5e0565dc2d00e49"} Nov 23 14:41:48 crc kubenswrapper[5050]: I1123 14:41:48.608129 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:41:48 crc kubenswrapper[5050]: I1123 14:41:48.608197 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:41:48 crc kubenswrapper[5050]: I1123 14:41:48.608214 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:41:48 crc kubenswrapper[5050]: I1123 14:41:48.608577 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:41:48 crc kubenswrapper[5050]: I1123 14:41:48.608618 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:41:48 crc kubenswrapper[5050]: I1123 14:41:48.608652 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:41:48 crc kubenswrapper[5050]: W1123 14:41:48.686952 5050 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list 
*v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.32:6443: connect: connection refused Nov 23 14:41:48 crc kubenswrapper[5050]: E1123 14:41:48.687124 5050 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.32:6443: connect: connection refused" logger="UnhandledError" Nov 23 14:41:48 crc kubenswrapper[5050]: W1123 14:41:48.729392 5050 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.32:6443: connect: connection refused Nov 23 14:41:48 crc kubenswrapper[5050]: E1123 14:41:48.729511 5050 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.32:6443: connect: connection refused" logger="UnhandledError" Nov 23 14:41:48 crc kubenswrapper[5050]: I1123 14:41:48.750915 5050 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 23 14:41:48 crc kubenswrapper[5050]: I1123 14:41:48.752430 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:41:48 crc kubenswrapper[5050]: I1123 14:41:48.752524 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:41:48 crc kubenswrapper[5050]: I1123 14:41:48.752540 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:41:48 crc kubenswrapper[5050]: I1123 14:41:48.752573 5050 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 23 14:41:48 crc kubenswrapper[5050]: E1123 14:41:48.753185 5050 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.32:6443: connect: connection refused" node="crc" Nov 23 14:41:48 crc kubenswrapper[5050]: I1123 14:41:48.799253 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 23 14:41:49 crc kubenswrapper[5050]: W1123 14:41:49.098605 5050 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.32:6443: connect: connection refused Nov 23 14:41:49 crc kubenswrapper[5050]: E1123 14:41:49.098958 5050 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.32:6443: connect: connection refused" logger="UnhandledError" Nov 23 14:41:49 crc kubenswrapper[5050]: I1123 14:41:49.616711 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" 
event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"add979609b9ecf2a5fa0f8a3cbce096448f38cbbc808342bb55c9b05ad846c6a"} Nov 23 14:41:49 crc kubenswrapper[5050]: I1123 14:41:49.616951 5050 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 23 14:41:49 crc kubenswrapper[5050]: I1123 14:41:49.618919 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:41:49 crc kubenswrapper[5050]: I1123 14:41:49.618993 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:41:49 crc kubenswrapper[5050]: I1123 14:41:49.619013 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:41:49 crc kubenswrapper[5050]: I1123 14:41:49.621197 5050 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="7730abc79341516bfd6ab720c24b63dde6811f5cb55ffcb97d539798919316e0" exitCode=0 Nov 23 14:41:49 crc kubenswrapper[5050]: I1123 14:41:49.621358 5050 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 23 14:41:49 crc kubenswrapper[5050]: I1123 14:41:49.621382 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"7730abc79341516bfd6ab720c24b63dde6811f5cb55ffcb97d539798919316e0"} Nov 23 14:41:49 crc kubenswrapper[5050]: I1123 14:41:49.621435 5050 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 23 14:41:49 crc kubenswrapper[5050]: I1123 14:41:49.621514 5050 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 23 14:41:49 crc kubenswrapper[5050]: I1123 14:41:49.621364 5050 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 23 14:41:49 crc kubenswrapper[5050]: I1123 14:41:49.621438 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 23 14:41:49 crc kubenswrapper[5050]: I1123 14:41:49.623244 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:41:49 crc kubenswrapper[5050]: I1123 14:41:49.623286 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:41:49 crc kubenswrapper[5050]: I1123 14:41:49.623296 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:41:49 crc kubenswrapper[5050]: I1123 14:41:49.623481 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:41:49 crc kubenswrapper[5050]: I1123 14:41:49.623520 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:41:49 crc kubenswrapper[5050]: I1123 14:41:49.623543 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:41:49 crc kubenswrapper[5050]: I1123 14:41:49.623731 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:41:49 crc kubenswrapper[5050]: I1123 14:41:49.623817 5050 kubelet_node_status.go:724] "Recording event message for 
node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:41:49 crc kubenswrapper[5050]: I1123 14:41:49.623777 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:41:49 crc kubenswrapper[5050]: I1123 14:41:49.623857 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:41:49 crc kubenswrapper[5050]: I1123 14:41:49.623891 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:41:49 crc kubenswrapper[5050]: I1123 14:41:49.623921 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:41:50 crc kubenswrapper[5050]: I1123 14:41:50.020154 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 23 14:41:50 crc kubenswrapper[5050]: I1123 14:41:50.407366 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 23 14:41:50 crc kubenswrapper[5050]: I1123 14:41:50.417238 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 23 14:41:50 crc kubenswrapper[5050]: I1123 14:41:50.630585 5050 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 23 14:41:50 crc kubenswrapper[5050]: I1123 14:41:50.630803 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"9ef767626bb19848d65f0ab4784ab89c6dd01e946bb44194657ddcb916428493"} Nov 23 14:41:50 crc kubenswrapper[5050]: I1123 14:41:50.630896 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"9f37b90309df2f4e858d172ffe27b40646f255748687b32f477f567c4d1a43af"} Nov 23 14:41:50 crc kubenswrapper[5050]: I1123 14:41:50.630925 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"0d2753e992e9f888af80f252d2bf30cdcf3133189be578613fd4ecb36f6e9309"} Nov 23 14:41:50 crc kubenswrapper[5050]: I1123 14:41:50.630938 5050 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 23 14:41:50 crc kubenswrapper[5050]: I1123 14:41:50.631063 5050 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 23 14:41:50 crc kubenswrapper[5050]: I1123 14:41:50.631175 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 23 14:41:50 crc kubenswrapper[5050]: I1123 14:41:50.631624 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:41:50 crc kubenswrapper[5050]: I1123 14:41:50.631651 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:41:50 crc kubenswrapper[5050]: I1123 14:41:50.631662 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:41:50 crc kubenswrapper[5050]: I1123 14:41:50.632202 5050 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientMemory" Nov 23 14:41:50 crc kubenswrapper[5050]: I1123 14:41:50.632274 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:41:50 crc kubenswrapper[5050]: I1123 14:41:50.632306 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:41:50 crc kubenswrapper[5050]: I1123 14:41:50.632425 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:41:50 crc kubenswrapper[5050]: I1123 14:41:50.632471 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:41:50 crc kubenswrapper[5050]: I1123 14:41:50.632484 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:41:51 crc kubenswrapper[5050]: I1123 14:41:51.641283 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"ce02806c5da3f21a6a9eaca7cd3ddf730da45c674f635620974ad72234dd73e5"} Nov 23 14:41:51 crc kubenswrapper[5050]: I1123 14:41:51.641385 5050 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 23 14:41:51 crc kubenswrapper[5050]: I1123 14:41:51.641515 5050 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 23 14:41:51 crc kubenswrapper[5050]: I1123 14:41:51.641399 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"47aaa5eaeda38a95ff67a26db661cf6b8fc449310817e4c5846cbfa6af92e6e1"} Nov 23 14:41:51 crc kubenswrapper[5050]: I1123 14:41:51.642773 5050 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 23 14:41:51 crc kubenswrapper[5050]: I1123 14:41:51.644204 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:41:51 crc kubenswrapper[5050]: I1123 14:41:51.644233 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:41:51 crc kubenswrapper[5050]: I1123 14:41:51.644263 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:41:51 crc kubenswrapper[5050]: I1123 14:41:51.644274 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:41:51 crc kubenswrapper[5050]: I1123 14:41:51.644280 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:41:51 crc kubenswrapper[5050]: I1123 14:41:51.644300 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:41:51 crc kubenswrapper[5050]: I1123 14:41:51.645142 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:41:51 crc kubenswrapper[5050]: I1123 14:41:51.645185 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:41:51 crc kubenswrapper[5050]: I1123 14:41:51.645195 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:41:51 
crc kubenswrapper[5050]: I1123 14:41:51.954379 5050 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 23 14:41:51 crc kubenswrapper[5050]: I1123 14:41:51.956620 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:41:51 crc kubenswrapper[5050]: I1123 14:41:51.956701 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:41:51 crc kubenswrapper[5050]: I1123 14:41:51.956724 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:41:51 crc kubenswrapper[5050]: I1123 14:41:51.956768 5050 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 23 14:41:52 crc kubenswrapper[5050]: I1123 14:41:52.644815 5050 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 23 14:41:52 crc kubenswrapper[5050]: I1123 14:41:52.646142 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:41:52 crc kubenswrapper[5050]: I1123 14:41:52.646192 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:41:52 crc kubenswrapper[5050]: I1123 14:41:52.646220 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:41:53 crc kubenswrapper[5050]: I1123 14:41:53.427774 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 23 14:41:53 crc kubenswrapper[5050]: I1123 14:41:53.428154 5050 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 23 14:41:53 crc kubenswrapper[5050]: I1123 14:41:53.430493 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:41:53 crc kubenswrapper[5050]: I1123 14:41:53.430564 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:41:53 crc kubenswrapper[5050]: I1123 14:41:53.430589 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:41:53 crc kubenswrapper[5050]: I1123 14:41:53.621000 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 23 14:41:53 crc kubenswrapper[5050]: I1123 14:41:53.621296 5050 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 23 14:41:53 crc kubenswrapper[5050]: I1123 14:41:53.623568 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:41:53 crc kubenswrapper[5050]: I1123 14:41:53.623642 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:41:53 crc kubenswrapper[5050]: I1123 14:41:53.623664 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:41:54 crc kubenswrapper[5050]: I1123 14:41:54.521529 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-etcd/etcd-crc" Nov 23 14:41:54 crc kubenswrapper[5050]: I1123 14:41:54.521738 5050 kubelet_node_status.go:401] "Setting node annotation to enable volume controller 
attach/detach" Nov 23 14:41:54 crc kubenswrapper[5050]: I1123 14:41:54.523182 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:41:54 crc kubenswrapper[5050]: I1123 14:41:54.523220 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:41:54 crc kubenswrapper[5050]: I1123 14:41:54.523235 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:41:55 crc kubenswrapper[5050]: I1123 14:41:55.589737 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-etcd/etcd-crc" Nov 23 14:41:55 crc kubenswrapper[5050]: I1123 14:41:55.590707 5050 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 23 14:41:55 crc kubenswrapper[5050]: I1123 14:41:55.595363 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:41:55 crc kubenswrapper[5050]: I1123 14:41:55.595421 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:41:55 crc kubenswrapper[5050]: I1123 14:41:55.595434 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:41:55 crc kubenswrapper[5050]: E1123 14:41:55.640418 5050 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Nov 23 14:41:55 crc kubenswrapper[5050]: I1123 14:41:55.678494 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 23 14:41:55 crc kubenswrapper[5050]: I1123 14:41:55.678728 5050 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 23 14:41:55 crc kubenswrapper[5050]: I1123 14:41:55.679889 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:41:55 crc kubenswrapper[5050]: I1123 14:41:55.679966 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:41:55 crc kubenswrapper[5050]: I1123 14:41:55.679991 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:41:58 crc kubenswrapper[5050]: I1123 14:41:58.678707 5050 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 23 14:41:58 crc kubenswrapper[5050]: I1123 14:41:58.678862 5050 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Nov 23 14:41:58 crc kubenswrapper[5050]: I1123 14:41:58.809336 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 23 14:41:58 crc 
kubenswrapper[5050]: I1123 14:41:58.809534 5050 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 23 14:41:58 crc kubenswrapper[5050]: I1123 14:41:58.810793 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:41:58 crc kubenswrapper[5050]: I1123 14:41:58.810827 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:41:58 crc kubenswrapper[5050]: I1123 14:41:58.810838 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:41:59 crc kubenswrapper[5050]: I1123 14:41:59.247125 5050 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Liveness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:51716->192.168.126.11:17697: read: connection reset by peer" start-of-body= Nov 23 14:41:59 crc kubenswrapper[5050]: I1123 14:41:59.247191 5050 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:51716->192.168.126.11:17697: read: connection reset by peer" Nov 23 14:41:59 crc kubenswrapper[5050]: I1123 14:41:59.489724 5050 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": net/http: TLS handshake timeout Nov 23 14:41:59 crc kubenswrapper[5050]: I1123 14:41:59.667764 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Nov 23 14:41:59 crc kubenswrapper[5050]: I1123 14:41:59.669529 5050 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="add979609b9ecf2a5fa0f8a3cbce096448f38cbbc808342bb55c9b05ad846c6a" exitCode=255 Nov 23 14:41:59 crc kubenswrapper[5050]: I1123 14:41:59.669582 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"add979609b9ecf2a5fa0f8a3cbce096448f38cbbc808342bb55c9b05ad846c6a"} Nov 23 14:41:59 crc kubenswrapper[5050]: I1123 14:41:59.669860 5050 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 23 14:41:59 crc kubenswrapper[5050]: I1123 14:41:59.671322 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:41:59 crc kubenswrapper[5050]: I1123 14:41:59.671351 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:41:59 crc kubenswrapper[5050]: I1123 14:41:59.671360 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:41:59 crc kubenswrapper[5050]: I1123 14:41:59.671906 5050 scope.go:117] "RemoveContainer" containerID="add979609b9ecf2a5fa0f8a3cbce096448f38cbbc808342bb55c9b05ad846c6a" Nov 23 14:41:59 crc kubenswrapper[5050]: W1123 14:41:59.806274 5050 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to 
list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": net/http: TLS handshake timeout Nov 23 14:41:59 crc kubenswrapper[5050]: I1123 14:41:59.806378 5050 trace.go:236] Trace[1851507647]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (23-Nov-2025 14:41:49.804) (total time: 10002ms): Nov 23 14:41:59 crc kubenswrapper[5050]: Trace[1851507647]: ---"Objects listed" error:Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": net/http: TLS handshake timeout 10002ms (14:41:59.806) Nov 23 14:41:59 crc kubenswrapper[5050]: Trace[1851507647]: [10.002209207s] [10.002209207s] END Nov 23 14:41:59 crc kubenswrapper[5050]: E1123 14:41:59.806403 5050 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": net/http: TLS handshake timeout" logger="UnhandledError" Nov 23 14:42:00 crc kubenswrapper[5050]: I1123 14:42:00.020799 5050 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="Get \"https://192.168.126.11:6443/livez\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 23 14:42:00 crc kubenswrapper[5050]: I1123 14:42:00.020880 5050 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="Get \"https://192.168.126.11:6443/livez\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Nov 23 14:42:00 crc kubenswrapper[5050]: I1123 14:42:00.094817 5050 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403} Nov 23 14:42:00 crc kubenswrapper[5050]: I1123 14:42:00.094907 5050 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403" Nov 23 14:42:00 crc kubenswrapper[5050]: I1123 14:42:00.674796 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Nov 23 14:42:00 crc kubenswrapper[5050]: I1123 14:42:00.677151 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"10cf72fab955e4ec81917127043d9e4a2f9d427a03edc22120ab2ae711fdc5a4"} Nov 23 14:42:00 crc kubenswrapper[5050]: I1123 14:42:00.677351 5050 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 23 14:42:00 crc kubenswrapper[5050]: I1123 14:42:00.678598 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 23 14:42:00 crc kubenswrapper[5050]: I1123 14:42:00.678645 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:00 crc kubenswrapper[5050]: I1123 14:42:00.678658 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:03 crc kubenswrapper[5050]: I1123 14:42:03.165793 5050 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160 Nov 23 14:42:04 crc kubenswrapper[5050]: I1123 14:42:04.556143 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-etcd/etcd-crc" Nov 23 14:42:04 crc kubenswrapper[5050]: I1123 14:42:04.556438 5050 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 23 14:42:04 crc kubenswrapper[5050]: I1123 14:42:04.558304 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:04 crc kubenswrapper[5050]: I1123 14:42:04.558355 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:04 crc kubenswrapper[5050]: I1123 14:42:04.558367 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:04 crc kubenswrapper[5050]: I1123 14:42:04.578868 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-etcd/etcd-crc" Nov 23 14:42:04 crc kubenswrapper[5050]: I1123 14:42:04.690492 5050 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 23 14:42:04 crc kubenswrapper[5050]: I1123 14:42:04.691746 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:04 crc kubenswrapper[5050]: I1123 14:42:04.691791 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:04 crc kubenswrapper[5050]: I1123 14:42:04.691802 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.024926 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.025093 5050 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.025238 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.026646 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.026708 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.026723 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.028980 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 23 14:42:05 crc kubenswrapper[5050]: E1123 14:42:05.082234 5050 
controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": context deadline exceeded" interval="6.4s" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.084659 5050 trace.go:236] Trace[375640897]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (23-Nov-2025 14:41:52.635) (total time: 12448ms): Nov 23 14:42:05 crc kubenswrapper[5050]: Trace[375640897]: ---"Objects listed" error: 12448ms (14:42:05.084) Nov 23 14:42:05 crc kubenswrapper[5050]: Trace[375640897]: [12.44875617s] [12.44875617s] END Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.084686 5050 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160 Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.086776 5050 trace.go:236] Trace[1284681532]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (23-Nov-2025 14:41:52.794) (total time: 12291ms): Nov 23 14:42:05 crc kubenswrapper[5050]: Trace[1284681532]: ---"Objects listed" error: 12291ms (14:42:05.086) Nov 23 14:42:05 crc kubenswrapper[5050]: Trace[1284681532]: [12.291936484s] [12.291936484s] END Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.086811 5050 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160 Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.087018 5050 reconstruct.go:205] "DevicePaths of reconstructed volumes updated" Nov 23 14:42:05 crc kubenswrapper[5050]: E1123 14:42:05.087247 5050 kubelet_node_status.go:99] "Unable to register node with API server" err="nodes \"crc\" is forbidden: autoscaling.openshift.io/ManagedNode infra config cache not synchronized" node="crc" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.087573 5050 trace.go:236] Trace[930062895]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (23-Nov-2025 14:41:53.288) (total time: 11798ms): Nov 23 14:42:05 crc kubenswrapper[5050]: Trace[930062895]: ---"Objects listed" error: 11798ms (14:42:05.087) Nov 23 14:42:05 crc kubenswrapper[5050]: Trace[930062895]: [11.798668443s] [11.798668443s] END Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.087619 5050 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160 Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.475422 5050 apiserver.go:52] "Watching apiserver" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.481217 5050 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66 Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.482209 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/node-resolver-7btqb","openshift-network-console/networking-console-plugin-85b44fc459-gdk6g","openshift-network-diagnostics/network-check-source-55646444c4-trplf","openshift-network-diagnostics/network-check-target-xd92c","openshift-network-node-identity/network-node-identity-vrzqb","openshift-network-operator/iptables-alerter-4ln5h","openshift-network-operator/network-operator-58b4c7f79c-55gtf"] Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.482804 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.482899 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.482942 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 23 14:42:05 crc kubenswrapper[5050]: E1123 14:42:05.483198 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 23 14:42:05 crc kubenswrapper[5050]: E1123 14:42:05.483235 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.483304 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.483471 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.483606 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.483636 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/node-resolver-7btqb" Nov 23 14:42:05 crc kubenswrapper[5050]: E1123 14:42:05.483654 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.485888 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.485961 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.486072 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.486087 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.486717 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.486945 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.487099 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.487185 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.487404 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.487504 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.487555 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.487719 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.489093 5050 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.503693 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.519833 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.533046 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.548658 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.561776 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.577846 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-7btqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"963992d0-f69e-4327-9789-4571451c1838\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8vphk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:05Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-7btqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 
23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.588712 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.588747 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.588767 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.588787 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.588807 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.588825 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.588841 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.588857 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.588902 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.588919 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Nov 23 14:42:05 crc 
kubenswrapper[5050]: I1123 14:42:05.588937 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.588956 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.589006 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.589024 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.589043 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.589081 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.589101 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.589122 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.589139 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.589157 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Nov 23 
14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.589176 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.589192 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.589209 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.589226 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.589242 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.589260 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.589302 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.589322 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.589339 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.589357 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: 
\"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.589374 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.589391 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.589409 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.589438 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.589472 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.589515 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.589532 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.589548 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.589589 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.589605 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: 
\"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.589622 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.589640 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.589657 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.589673 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.589692 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.589713 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.589735 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.589752 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.589770 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.589787 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.589804 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.589824 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.589842 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.589860 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.589878 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.589896 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.589914 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.589933 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.589950 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.589967 5050 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.589987 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.590004 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.590023 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.590040 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") pod \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\" (UID: \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.590055 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.590071 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.590090 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.590106 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.590125 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.590170 5050 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.590188 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.590203 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.590218 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.590335 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.590353 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.590371 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.590392 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.590410 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.590461 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Nov 23 14:42:05 crc 
kubenswrapper[5050]: I1123 14:42:05.590480 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") "
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.590495 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") "
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.590511 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") "
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.590529 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") "
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.590548 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") "
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.590565 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") "
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.590582 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") "
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.590600 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") "
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.590617 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") "
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.590634 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") pod \"44663579-783b-4372-86d6-acf235a62d72\" (UID: \"44663579-783b-4372-86d6-acf235a62d72\") "
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.590651 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") "
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.590670 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") "
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.590688 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") "
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.590704 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") "
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.590722 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") "
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.590738 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") "
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.590756 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") "
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.590775 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") pod \"49ef4625-1d3a-4a9f-b595-c2433d32326d\" (UID: \"49ef4625-1d3a-4a9f-b595-c2433d32326d\") "
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.590794 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") "
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.590813 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.590828 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") "
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.590800 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.590845 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") "
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.591076 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") "
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.591113 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") "
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.591149 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") "
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.591185 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") "
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.591229 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") "
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.591263 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.591296 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") "
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.591331 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") "
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.591356 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") "
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.591383 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") "
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.591409 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.591432 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") "
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.591477 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") "
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.591505 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") "
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.591536 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.591567 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") "
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.591594 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") "
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.591618 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.591649 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") "
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.591676 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") pod \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\" (UID: \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\") "
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.591702 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") "
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.591723 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") "
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.591747 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") "
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.591768 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") "
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.591792 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") "
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.591817 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") "
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.591839 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") "
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.589241 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" (OuterVolumeSpecName: "kube-api-access-249nr") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "kube-api-access-249nr". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.591878 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" (OuterVolumeSpecName: "kube-api-access-lzf88") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "kube-api-access-lzf88". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.591945 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" (OuterVolumeSpecName: "kube-api-access-7c4vf") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "kube-api-access-7c4vf". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.591953 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" (OuterVolumeSpecName: "kube-api-access-mg5zb") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "kube-api-access-mg5zb". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.589901 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" (OuterVolumeSpecName: "utilities") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.590126 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" (OuterVolumeSpecName: "config") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.590311 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" (OuterVolumeSpecName: "config") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.592028 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.590373 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" (OuterVolumeSpecName: "kube-api-access-wxkg8") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "kube-api-access-wxkg8". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.590584 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.590647 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" (OuterVolumeSpecName: "kube-api-access-v47cf") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "kube-api-access-v47cf". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.590924 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.590996 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.592110 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.591005 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.592109 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.591121 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.591193 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" (OuterVolumeSpecName: "kube-api-access-2w9zh") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "kube-api-access-2w9zh". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.591234 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.591356 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" (OuterVolumeSpecName: "kube-api-access-fcqwp") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "kube-api-access-fcqwp". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.591501 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" (OuterVolumeSpecName: "kube-api-access-2d4wz") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "kube-api-access-2d4wz". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.591558 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" (OuterVolumeSpecName: "kube-api-access-dbsvg") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "kube-api-access-dbsvg". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.591600 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" (OuterVolumeSpecName: "config") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.591614 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.591640 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.591764 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" (OuterVolumeSpecName: "kube-api-access-qg5z5") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "kube-api-access-qg5z5". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.592342 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.591855 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.592200 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.592304 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" (OuterVolumeSpecName: "images") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.592429 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" (OuterVolumeSpecName: "utilities") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.592494 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" (OuterVolumeSpecName: "multus-daemon-config") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "multus-daemon-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.603085 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.603527 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.604166 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" (OuterVolumeSpecName: "kube-api-access-pj782") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "kube-api-access-pj782". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.604201 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.604252 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" (OuterVolumeSpecName: "config") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.604396 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.604595 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.604575 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" (OuterVolumeSpecName: "config") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.604644 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" (OuterVolumeSpecName: "client-ca") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.604833 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" (OuterVolumeSpecName: "kube-api-access-bf2bz") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "kube-api-access-bf2bz". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.604862 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" (OuterVolumeSpecName: "client-ca") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.604880 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.605080 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" (OuterVolumeSpecName: "kube-api-access-279lb") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "kube-api-access-279lb". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.605101 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" (OuterVolumeSpecName: "package-server-manager-serving-cert") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "package-server-manager-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.605151 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" (OuterVolumeSpecName: "service-ca") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.607956 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" (OuterVolumeSpecName: "kube-api-access-nzwt7") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "kube-api-access-nzwt7". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.608181 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.604869 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" (OuterVolumeSpecName: "kube-api-access-w4xd4") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "kube-api-access-w4xd4". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.608611 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" (OuterVolumeSpecName: "image-import-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "image-import-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.609135 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.609216 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" (OuterVolumeSpecName: "kube-api-access-vt5rc") pod "44663579-783b-4372-86d6-acf235a62d72" (UID: "44663579-783b-4372-86d6-acf235a62d72"). InnerVolumeSpecName "kube-api-access-vt5rc". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.609503 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" (OuterVolumeSpecName: "webhook-certs") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "webhook-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.609647 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" (OuterVolumeSpecName: "config") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.609884 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" (OuterVolumeSpecName: "machine-approver-tls") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "machine-approver-tls". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.610153 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" (OuterVolumeSpecName: "kube-api-access-xcgwh") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "kube-api-access-xcgwh". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.610535 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" (OuterVolumeSpecName: "samples-operator-tls") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "samples-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.615128 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" (OuterVolumeSpecName: "signing-key") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.615637 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.615815 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.616569 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" (OuterVolumeSpecName: "kube-api-access-mnrrd") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "kube-api-access-mnrrd". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.615483 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.617099 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.617477 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" (OuterVolumeSpecName: "mcd-auth-proxy-config") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "mcd-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.617573 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" (OuterVolumeSpecName: "service-ca") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 23 14:42:05 crc kubenswrapper[5050]: E1123 14:42:05.617615 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-23 14:42:06.117579298 +0000 UTC m=+21.284575783 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.617651 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.618048 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" (OuterVolumeSpecName: "kube-api-access-pjr6v") pod "49ef4625-1d3a-4a9f-b595-c2433d32326d" (UID: "49ef4625-1d3a-4a9f-b595-c2433d32326d"). InnerVolumeSpecName "kube-api-access-pjr6v". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.618066 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.618149 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.618239 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.618706 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" (OuterVolumeSpecName: "control-plane-machine-set-operator-tls") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "control-plane-machine-set-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.618838 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" (OuterVolumeSpecName: "cert") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.618969 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.619353 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.619515 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" (OuterVolumeSpecName: "kube-api-access-pcxfs") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "kube-api-access-pcxfs". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.619656 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" (OuterVolumeSpecName: "default-certificate") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "default-certificate". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.621645 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.621932 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.622546 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" (OuterVolumeSpecName: "kube-api-access-8tdtz") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "kube-api-access-8tdtz". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.622754 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" (OuterVolumeSpecName: "node-bootstrap-token") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "node-bootstrap-token". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.622869 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.623205 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.623260 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" (OuterVolumeSpecName: "kube-api-access-kfwg7") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "kube-api-access-kfwg7". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.623505 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" (OuterVolumeSpecName: "config") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.623647 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.591861 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") "
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.623724 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") "
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.623746 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") "
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.623765 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.623792 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") "
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.623815 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") "
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.623811 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.623836 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") "
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.624143 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.624535 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.624622 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") "
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.624623 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" (OuterVolumeSpecName: "kube-api-access-zkvpv") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "kube-api-access-zkvpv". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.624652 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") "
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.624677 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") "
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.624703 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") "
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.624723 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") "
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.624743 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") "
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.624764 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.624787 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") "
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.624806 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") "
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.624825 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") "
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.624844 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") "
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.624866 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") "
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.624891 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" (OuterVolumeSpecName: "kube-api-access-w9rds") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "kube-api-access-w9rds". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.624901 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") "
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.624969 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") "
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.625003 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") "
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.625103 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" (OuterVolumeSpecName: "kube-api-access-qs4fp") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "kube-api-access-qs4fp". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.625235 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "trusted-ca-bundle".
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.625380 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.625421 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.625476 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.625505 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.625533 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.625568 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.625596 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.625623 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.625650 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.625661 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" (OuterVolumeSpecName: "kube-api-access-9xfj7") pod "5225d0e4-402f-4861-b410-819f433b1803" 
(UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "kube-api-access-9xfj7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.625675 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.625799 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.625941 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" (OuterVolumeSpecName: "kube-api-access-rnphk") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "kube-api-access-rnphk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.625991 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" (OuterVolumeSpecName: "kube-api-access-4d4hj") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "kube-api-access-4d4hj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.626014 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.589505 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" (OuterVolumeSpecName: "kube-api-access-x4zgh") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "kube-api-access-x4zgh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.626403 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.626683 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.627029 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.627058 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.625932 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.627700 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.627746 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.627780 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" (OuterVolumeSpecName: "mcc-auth-proxy-config") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "mcc-auth-proxy-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.627789 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.627848 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.628119 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.628163 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.628191 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.628223 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.628290 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.628892 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" (OuterVolumeSpecName: "ovn-control-plane-metrics-cert") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovn-control-plane-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.629072 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" (OuterVolumeSpecName: "etcd-service-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-service-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.629480 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" (OuterVolumeSpecName: "certs") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.629551 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" (OuterVolumeSpecName: "kube-api-access-w7l8j") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "kube-api-access-w7l8j". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.629594 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.629637 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.629734 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" (OuterVolumeSpecName: "config") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.629840 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" (OuterVolumeSpecName: "machine-api-operator-tls") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "machine-api-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.629982 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" (OuterVolumeSpecName: "config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.630202 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.630262 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" (OuterVolumeSpecName: "kube-api-access-zgdk5") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "kube-api-access-zgdk5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.630268 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" (OuterVolumeSpecName: "kube-api-access-x2m85") pod "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" (UID: "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d"). InnerVolumeSpecName "kube-api-access-x2m85". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.630464 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.630573 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.630649 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.630717 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.630784 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.630852 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.630926 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.631021 5050 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.631103 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.631166 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.631228 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.631301 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.631365 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.631432 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.631531 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.631601 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.631673 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Nov 23 14:42:05 crc 
kubenswrapper[5050]: I1123 14:42:05.631748 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.631816 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.632230 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.632300 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.632340 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.632381 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.630481 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.630533 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" (OuterVolumeSpecName: "kube-api-access-tk88c") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "kube-api-access-tk88c". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.630576 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "env-overrides". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.630580 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" (OuterVolumeSpecName: "kube-api-access-lz9wn") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "kube-api-access-lz9wn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.630676 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" (OuterVolumeSpecName: "image-registry-operator-tls") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "image-registry-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.630804 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" (OuterVolumeSpecName: "kube-api-access-x7zkh") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "kube-api-access-x7zkh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.630831 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" (OuterVolumeSpecName: "config") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.630897 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.631041 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" (OuterVolumeSpecName: "audit") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "audit". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.631174 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" (OuterVolumeSpecName: "kube-api-access-sb6h7") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "kube-api-access-sb6h7". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.631616 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" (OuterVolumeSpecName: "available-featuregates") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "available-featuregates". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.631683 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.631963 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.632162 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.632216 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" (OuterVolumeSpecName: "apiservice-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "apiservice-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.632607 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" (OuterVolumeSpecName: "config") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.632703 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.632678 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" (OuterVolumeSpecName: "kube-api-access-s4n52") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). 
InnerVolumeSpecName "kube-api-access-s4n52". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.632761 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.634862 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" (OuterVolumeSpecName: "kube-api-access-cfbct") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "kube-api-access-cfbct". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.634924 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.634945 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" (OuterVolumeSpecName: "kube-api-access-htfz6") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "kube-api-access-htfz6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.634968 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.635120 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.635171 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" (OuterVolumeSpecName: "webhook-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "webhook-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.635202 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.635247 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/963992d0-f69e-4327-9789-4571451c1838-hosts-file\") pod \"node-resolver-7btqb\" (UID: \"963992d0-f69e-4327-9789-4571451c1838\") " pod="openshift-dns/node-resolver-7btqb" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.635192 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" (OuterVolumeSpecName: "kube-api-access-ngvvp") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "kube-api-access-ngvvp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.635341 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.635417 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.635517 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" (OuterVolumeSpecName: "config") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.636382 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" (OuterVolumeSpecName: "etcd-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.636921 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "metrics-tls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.637063 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.637083 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" (OuterVolumeSpecName: "stats-auth") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "stats-auth". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.637138 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" (OuterVolumeSpecName: "kube-api-access-fqsjt") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "kube-api-access-fqsjt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.637144 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.637182 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.637331 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" (OuterVolumeSpecName: "kube-api-access-gf66m") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "kube-api-access-gf66m". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.637374 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.637582 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-operator-metrics". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.637710 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.637902 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" (OuterVolumeSpecName: "kube-api-access-jhbk2") pod "bd23aa5c-e532-4e53-bccf-e79f130c5ae8" (UID: "bd23aa5c-e532-4e53-bccf-e79f130c5ae8"). InnerVolumeSpecName "kube-api-access-jhbk2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.637964 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.637544 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.638083 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.638316 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" (OuterVolumeSpecName: "kube-api-access-jkwtn") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "kube-api-access-jkwtn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.638514 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" (OuterVolumeSpecName: "tmpfs") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "tmpfs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.638618 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.638787 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.638846 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.638889 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.638808 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.639113 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" (OuterVolumeSpecName: "config") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.639505 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.639589 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.639200 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.639987 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" (OuterVolumeSpecName: "serviceca") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "serviceca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.640083 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.640111 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.640129 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.640236 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 23 14:42:05 crc kubenswrapper[5050]: E1123 14:42:05.640266 5050 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.641168 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" (OuterVolumeSpecName: "kube-api-access-d6qdx") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "kube-api-access-d6qdx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.641227 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" (OuterVolumeSpecName: "signing-cabundle") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-cabundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.641711 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" (OuterVolumeSpecName: "kube-api-access-xcphl") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "kube-api-access-xcphl". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.642124 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" (OuterVolumeSpecName: "utilities") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 14:42:05 crc kubenswrapper[5050]: E1123 14:42:05.642182 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-23 14:42:06.142154008 +0000 UTC m=+21.309150503 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.642533 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.642672 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.642574 5050 swap_util.go:74] "error creating dir to test if tmpfs noswap is enabled. Assuming not supported" mount path="" error="stat /var/lib/kubelet/plugins/kubernetes.io/empty-dir: no such file or directory" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.642682 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" (OuterVolumeSpecName: "kube-api-access-6g6sz") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "kube-api-access-6g6sz". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.642933 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.643017 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.642932 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" (OuterVolumeSpecName: "cni-sysctl-allowlist") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-sysctl-allowlist". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 14:42:05 crc kubenswrapper[5050]: E1123 14:42:05.643042 5050 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.642720 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 23 14:42:05 crc kubenswrapper[5050]: E1123 14:42:05.643197 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-23 14:42:06.143105423 +0000 UTC m=+21.310102128 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.643379 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.643529 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.643578 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8vphk\" (UniqueName: \"kubernetes.io/projected/963992d0-f69e-4327-9789-4571451c1838-kube-api-access-8vphk\") pod \"node-resolver-7btqb\" (UID: \"963992d0-f69e-4327-9789-4571451c1838\") " pod="openshift-dns/node-resolver-7btqb" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.644034 5050 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.644057 5050 reconciler_common.go:293] "Volume detached for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.644075 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.644094 5050 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.644107 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.644123 5050 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.644133 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " 
pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.644136 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.644186 5050 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.644200 5050 reconciler_common.go:293] "Volume detached for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.644214 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.644228 5050 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.644244 5050 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.644259 5050 reconciler_common.go:293] "Volume detached for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.644272 5050 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.644285 5050 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.644299 5050 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.644313 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.644326 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.644340 5050 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: 
\"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.644353 5050 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.644366 5050 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.644380 5050 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.644394 5050 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.644412 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.644425 5050 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.644437 5050 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.644468 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.644482 5050 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.644496 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.644509 5050 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.644522 5050 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.644534 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") on 
node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.644550 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.644569 5050 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.644587 5050 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.644608 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.644622 5050 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.644636 5050 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.644649 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.644662 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.644675 5050 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.644689 5050 reconciler_common.go:293] "Volume detached for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.644703 5050 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.644720 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.644735 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bf2bz\" (UniqueName: 
\"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.644751 5050 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.644765 5050 reconciler_common.go:293] "Volume detached for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.644780 5050 reconciler_common.go:293] "Volume detached for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.644793 5050 reconciler_common.go:293] "Volume detached for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.644806 5050 reconciler_common.go:293] "Volume detached for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.644820 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.644833 5050 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.644882 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.644898 5050 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.644913 5050 reconciler_common.go:293] "Volume detached for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.644954 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.644974 5050 reconciler_common.go:293] "Volume detached for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 
14:42:05.644966 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.644987 5050 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.645001 5050 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.645015 5050 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.645027 5050 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.645040 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.645054 5050 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.645091 5050 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.645106 5050 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.645245 5050 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.645262 5050 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.645275 5050 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.645289 5050 reconciler_common.go:293] "Volume detached for volume \"cert\" (UniqueName: 
\"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.645300 5050 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.645335 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.645348 5050 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.645361 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.645373 5050 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.645386 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.645398 5050 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.645411 5050 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.645427 5050 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.645440 5050 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.645469 5050 reconciler_common.go:293] "Volume detached for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.645481 5050 reconciler_common.go:293] "Volume detached for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.645494 5050 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.645506 5050 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.645518 5050 reconciler_common.go:293] "Volume detached for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.645533 5050 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.645549 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.645562 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.645579 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.645598 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.645613 5050 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.645628 5050 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.645640 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.645653 5050 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.645665 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.645677 5050 reconciler_common.go:293] "Volume detached for volume \"image-import-ca\" (UniqueName: 
\"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.645690 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.645703 5050 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.645715 5050 reconciler_common.go:293] "Volume detached for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.645728 5050 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.645740 5050 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.645756 5050 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.645769 5050 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.645782 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.645796 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.645809 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.645822 5050 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.645834 5050 reconciler_common.go:293] "Volume detached for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.645847 5050 reconciler_common.go:293] "Volume detached for volume 
\"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.645861 5050 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.645873 5050 reconciler_common.go:293] "Volume detached for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.645886 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.645929 5050 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.645944 5050 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.645956 5050 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.645997 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.646010 5050 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.646023 5050 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.646038 5050 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.646052 5050 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.646066 5050 reconciler_common.go:293] "Volume detached for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.646104 5050 reconciler_common.go:293] "Volume detached for volume 
\"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.646119 5050 reconciler_common.go:293] "Volume detached for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.646132 5050 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.646144 5050 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.646157 5050 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.646169 5050 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.646182 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.646195 5050 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.646209 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.646222 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.646235 5050 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.646248 5050 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.646263 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.646277 5050 reconciler_common.go:293] "Volume detached for 
volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.646291 5050 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.646304 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.646318 5050 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.646331 5050 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.646345 5050 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.646358 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.646371 5050 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.646383 5050 reconciler_common.go:293] "Volume detached for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.646395 5050 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.646408 5050 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.646422 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.646435 5050 reconciler_common.go:293] "Volume detached for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.646478 5050 reconciler_common.go:293] "Volume detached for volume 
\"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.646492 5050 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.646505 5050 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.646520 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.646533 5050 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.646545 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.646558 5050 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.646573 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.646590 5050 reconciler_common.go:293] "Volume detached for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.646607 5050 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.646624 5050 reconciler_common.go:293] "Volume detached for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.646637 5050 reconciler_common.go:293] "Volume detached for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.646649 5050 reconciler_common.go:293] "Volume detached for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.646661 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zgdk5\" (UniqueName: 
\"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.646676 5050 reconciler_common.go:293] "Volume detached for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.646689 5050 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.646829 5050 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.646857 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.647033 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.647048 5050 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.647176 5050 reconciler_common.go:293] "Volume detached for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.647196 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.647209 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.647246 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.647260 5050 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.647375 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" (OuterVolumeSpecName: "utilities") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.654004 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.656703 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.656952 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "service-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 14:42:05 crc kubenswrapper[5050]: E1123 14:42:05.658667 5050 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 23 14:42:05 crc kubenswrapper[5050]: E1123 14:42:05.658688 5050 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 23 14:42:05 crc kubenswrapper[5050]: E1123 14:42:05.658724 5050 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.658739 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 23 14:42:05 crc kubenswrapper[5050]: E1123 14:42:05.658813 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-23 14:42:06.158768247 +0000 UTC m=+21.325764732 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.658869 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.665362 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.665880 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.666074 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" (OuterVolumeSpecName: "kube-api-access-6ccd8") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "kube-api-access-6ccd8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.666150 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" (OuterVolumeSpecName: "config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.666793 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" (OuterVolumeSpecName: "images") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.667133 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.667575 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 23 14:42:05 crc kubenswrapper[5050]: E1123 14:42:05.667790 5050 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 23 14:42:05 crc kubenswrapper[5050]: E1123 14:42:05.667809 5050 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 23 14:42:05 crc kubenswrapper[5050]: E1123 14:42:05.667824 5050 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 23 14:42:05 crc kubenswrapper[5050]: E1123 14:42:05.667882 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-23 14:42:06.167859867 +0000 UTC m=+21.334856352 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.676003 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" (OuterVolumeSpecName: "config") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.678003 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" (OuterVolumeSpecName: "kube-api-access-d4lsv") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "kube-api-access-d4lsv". 
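[editor's note] The repeated `object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered` errors come from the kubelet's own configmap/secret cache, which only serves objects for pods it has (re)registered after restart; they do not necessarily mean the ConfigMap is absent from the API server. A hypothetical check, under the same assumed kubeconfig path as the sketch above, that queries the API server directly for the two objects the projected volume kube-api-access-cqllr needs:

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile) // assumed kubeconfig path
	if err != nil {
		panic(err)
	}
	cs := kubernetes.NewForConfigOrDie(cfg)

	for _, name := range []string{"kube-root-ca.crt", "openshift-service-ca.crt"} {
		cm, err := cs.CoreV1().ConfigMaps("openshift-network-diagnostics").Get(context.TODO(), name, metav1.GetOptions{})
		if err != nil {
			fmt.Println(name, "missing on the API server:", err)
			continue
		}
		// If this prints, the object exists and the kubelet error was a
		// registration-ordering issue in the node-local cache, not a
		// missing ConfigMap.
		fmt.Println(name, "exists, resourceVersion", cm.ResourceVersion)
	}
}

[end note]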
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.678274 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.681658 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.684701 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" (OuterVolumeSpecName: "config-volume") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.684856 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.684902 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.685216 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.685811 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "installation-pull-secrets". 
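[editor's note] Every `status_manager.go:875 "Failed to update status for pod"` entry in this section fails the same way: the kubelet's patch to the pod's status subresource is rejected because the API server cannot reach the `pod.network-node-identity.openshift.io` mutating webhook on 127.0.0.1:9743. A minimal sketch (assumed kubeconfig path, pod names taken from the log) reproducing the same call path with client-go:

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile) // assumed kubeconfig path
	if err != nil {
		panic(err)
	}
	cs := kubernetes.NewForConfigOrDie(cfg)

	// Strategic-merge patch against the "status" subresource, the same
	// shape of request the kubelet's status manager issues.
	patch := []byte(`{"status":{"conditions":[{"type":"Ready","status":"False","reason":"ContainersNotReady"}]}}`)
	_, err = cs.CoreV1().Pods("openshift-network-node-identity").Patch(
		context.TODO(),
		"network-node-identity-vrzqb",
		types.StrategicMergePatchType,
		patch,
		metav1.PatchOptions{},
		"status",
	)
	if err != nil {
		// While the webhook endpoint is down, this returns the same
		// "failed calling webhook ... connection refused" error text
		// recorded in the entries above.
		fmt.Println("status patch rejected:", err)
	}
}

Because the pod that serves the webhook (network-node-identity-vrzqb) is itself still being recreated in this window, status updates for every pod fail with the identical error until its endpoint comes back. [end note]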
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.686763 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.687289 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.688820 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" (OuterVolumeSpecName: "console-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.692014 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.696353 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.707348 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-7btqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"963992d0-f69e-4327-9789-4571451c1838\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8vphk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:05Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-7btqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.707425 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.715232 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.719799 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "ca-trust-extracted". 
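[editor's note] To see why a pod-status patch routes through 127.0.0.1:9743 at all, one can inspect the cluster's mutating webhook configurations. A sketch, under the same assumed kubeconfig path, that prints each webhook's name, target, and failure policy; the log shows pod.network-node-identity.openshift.io using a direct URL (https://127.0.0.1:9743/pod) rather than a Service reference:

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile) // assumed kubeconfig path
	if err != nil {
		panic(err)
	}
	cs := kubernetes.NewForConfigOrDie(cfg)

	list, err := cs.AdmissionregistrationV1().MutatingWebhookConfigurations().List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for _, whc := range list.Items {
		for _, wh := range whc.Webhooks {
			target := "<service>"
			if wh.ClientConfig.URL != nil {
				target = *wh.ClientConfig.URL
			}
			policy := ""
			if wh.FailurePolicy != nil {
				// A failurePolicy of Fail turns an unreachable webhook
				// into the "Internal error occurred" rejections above.
				policy = string(*wh.FailurePolicy)
			}
			fmt.Printf("%s -> %s (failurePolicy=%s)\n", wh.Name, target, policy)
		}
	}
}

[end note]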
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.726188 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.735789 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 14:42:05 crc kubenswrapper[5050]: E1123 14:42:05.736659 5050 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf4b27818a5e8e43d0dc095d08835c792.slice/crio-10cf72fab955e4ec81917127043d9e4a2f9d427a03edc22120ab2ae711fdc5a4.scope\": RecentStats: unable to find data in memory cache]" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.739121 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.744413 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.748772 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.748801 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.748818 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/963992d0-f69e-4327-9789-4571451c1838-hosts-file\") pod \"node-resolver-7btqb\" (UID: \"963992d0-f69e-4327-9789-4571451c1838\") " pod="openshift-dns/node-resolver-7btqb" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.748877 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8vphk\" (UniqueName: \"kubernetes.io/projected/963992d0-f69e-4327-9789-4571451c1838-kube-api-access-8vphk\") pod \"node-resolver-7btqb\" (UID: \"963992d0-f69e-4327-9789-4571451c1838\") " pod="openshift-dns/node-resolver-7btqb" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.748910 5050 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.748923 5050 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.748920 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: 
\"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.748934 5050 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.748985 5050 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.748997 5050 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.749011 5050 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.749022 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.749035 5050 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.749045 5050 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.749055 5050 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.749064 5050 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.749074 5050 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.749084 5050 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.749094 5050 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.749104 5050 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: 
\"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.749116 5050 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.749126 5050 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.749136 5050 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.749145 5050 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.749155 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.749165 5050 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.749175 5050 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.749185 5050 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.749215 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.749257 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/963992d0-f69e-4327-9789-4571451c1838-hosts-file\") pod \"node-resolver-7btqb\" (UID: \"963992d0-f69e-4327-9789-4571451c1838\") " pod="openshift-dns/node-resolver-7btqb" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.772688 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8vphk\" (UniqueName: \"kubernetes.io/projected/963992d0-f69e-4327-9789-4571451c1838-kube-api-access-8vphk\") pod \"node-resolver-7btqb\" (UID: \"963992d0-f69e-4327-9789-4571451c1838\") " pod="openshift-dns/node-resolver-7btqb" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.800926 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.808295 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 23 14:42:05 crc kubenswrapper[5050]: W1123 14:42:05.813013 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod37a5e44f_9a88_4405_be8a_b645485e7312.slice/crio-c4b072b03b04f49288b136338c57654dc2f53447ab46af53c00f69f7b6039edd WatchSource:0}: Error finding container c4b072b03b04f49288b136338c57654dc2f53447ab46af53c00f69f7b6039edd: Status 404 returned error can't find the container with id c4b072b03b04f49288b136338c57654dc2f53447ab46af53c00f69f7b6039edd Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.817388 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 23 14:42:05 crc kubenswrapper[5050]: I1123 14:42:05.822108 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/node-resolver-7btqb" Nov 23 14:42:05 crc kubenswrapper[5050]: W1123 14:42:05.822350 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podef543e1b_8068_4ea3_b32a_61027b32e95d.slice/crio-87256a189144e63ec19564c72a96d472ddd70ad33f8d874e5ecae4b032c250fb WatchSource:0}: Error finding container 87256a189144e63ec19564c72a96d472ddd70ad33f8d874e5ecae4b032c250fb: Status 404 returned error can't find the container with id 87256a189144e63ec19564c72a96d472ddd70ad33f8d874e5ecae4b032c250fb Nov 23 14:42:05 crc kubenswrapper[5050]: W1123 14:42:05.834697 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd75a4c96_2883_4a0b_bab2_0fab2b6c0b49.slice/crio-70403832731aa77f32f9c60e249d5eadb5c07495597958622e2b92e8ef896d07 WatchSource:0}: Error finding container 70403832731aa77f32f9c60e249d5eadb5c07495597958622e2b92e8ef896d07: Status 404 returned error can't find the container with id 70403832731aa77f32f9c60e249d5eadb5c07495597958622e2b92e8ef896d07 Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.124260 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.132115 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.141521 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.151646 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0a2166ba-b7f4-4eb1-95bc-4f44d90e72ea\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://358800bef7b7e16e58003aa5e2411212334a5b1a33c80d223eb7d40ad13867f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://456093b33927a232192a5fa7ee1a4228e9f37da58e5e09cb33018ae6f5b14b2c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ccfc14c36ecee0d8502415582be6053b096ea77504bf5de67f77bf6f74c0936b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://10cf72fab955e4ec81917127043d9e4a2f9d427a03edc22120ab2ae711fdc5a4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://add979609b9ecf2a5fa0f8a3cbce096448f38cbbc808342bb55c9b05ad846c6a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-23T14:41:59Z\\\",\\\"message\\\":\\\"W1123 14:41:48.828917 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1123 
14:41:48.829321 1 crypto.go:601] Generating new CA for check-endpoints-signer@1763908908 cert, and key in /tmp/serving-cert-4159908995/serving-signer.crt, /tmp/serving-cert-4159908995/serving-signer.key\\\\nI1123 14:41:49.008540 1 observer_polling.go:159] Starting file observer\\\\nW1123 14:41:49.011283 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1123 14:41:49.011473 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1123 14:41:49.013711 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4159908995/tls.crt::/tmp/serving-cert-4159908995/tls.key\\\\\\\"\\\\nF1123 14:41:59.241588 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://beaf638703b4b64fabd10eb14bad058dc0dae74261a462e7f2fddd8aa3811cf7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a63464a6e2f79d8e5a7e0eed9f72b684e0071cad60d3a801754762be4a2735c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a63464a6e2f79d8e5a7e0eed9f72b684e0071cad60d3a801754762be4a2735c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:41:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:41:45Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.152860 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 23 14:42:06 crc kubenswrapper[5050]: E1123 14:42:06.152957 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-23 14:42:07.152934062 +0000 UTC m=+22.319930547 (durationBeforeRetry 1s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.153027 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.153059 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 23 14:42:06 crc kubenswrapper[5050]: E1123 14:42:06.153153 5050 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 23 14:42:06 crc kubenswrapper[5050]: E1123 14:42:06.153182 5050 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 23 14:42:06 crc kubenswrapper[5050]: E1123 14:42:06.153207 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-23 14:42:07.153197739 +0000 UTC m=+22.320194224 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 23 14:42:06 crc kubenswrapper[5050]: E1123 14:42:06.153226 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-23 14:42:07.153215589 +0000 UTC m=+22.320212074 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.161984 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.174670 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.184821 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.195919 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.208080 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/kube-controller-manager-crc"] Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.208543 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.218557 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-7btqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"963992d0-f69e-4327-9789-4571451c1838\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8vphk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:05Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-7btqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 
23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.229202 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.239562 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.248730 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.254366 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.254404 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 23 14:42:06 crc kubenswrapper[5050]: E1123 14:42:06.254550 5050 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 23 14:42:06 crc kubenswrapper[5050]: E1123 14:42:06.254566 5050 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 23 14:42:06 crc kubenswrapper[5050]: E1123 14:42:06.254579 5050 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod 
openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 23 14:42:06 crc kubenswrapper[5050]: E1123 14:42:06.254625 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-23 14:42:07.25460984 +0000 UTC m=+22.421606325 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 23 14:42:06 crc kubenswrapper[5050]: E1123 14:42:06.254622 5050 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 23 14:42:06 crc kubenswrapper[5050]: E1123 14:42:06.254670 5050 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 23 14:42:06 crc kubenswrapper[5050]: E1123 14:42:06.254688 5050 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 23 14:42:06 crc kubenswrapper[5050]: E1123 14:42:06.254765 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-23 14:42:07.254743493 +0000 UTC m=+22.421739988 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.259225 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.267677 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-7btqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"963992d0-f69e-4327-9789-4571451c1838\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8vphk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:05Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-7btqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 
23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.277878 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8861091e-df98-4ea6-a853-e76e7dec48c5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://374a569595342d2e7684dc020e82bd80b35965aff751215c6c926005ea040798\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://63b17cb181aa6b22312393ce35127c8e06ae80cd1feaaa001d7cb34cdc111456\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3e232fb0da11b5caaec81c7b6ae7185d121508d99fce67c77e1d5d52289dad8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs
\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7b8be59b2e91f123c3d88b93237121402f84e9f54aeeaa0fae57baaa6238140\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:41:45Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.288237 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.297606 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0a2166ba-b7f4-4eb1-95bc-4f44d90e72ea\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://358800bef7b7e16e58003aa5e2411212334a5b1a33c80d223eb7d40ad13867f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://456093b33927a232192a5fa7ee1a4228e9f37da58e5e09cb33018ae6f5b14b2c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ccfc14c36ecee0d8502415582be6053b096ea77504bf5de67f77bf6f74c0936b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://10cf72fab955e4ec81917127043d9e4a2f9d427a03edc22120ab2ae711fdc5a4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://add979609b9ecf2a5fa0f8a3cbce096448f38cbbc808342bb55c9b05ad846c6a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-23T14:41:59Z\\\",\\\"message\\\":\\\"W1123 14:41:48.828917 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1123 
14:41:48.829321 1 crypto.go:601] Generating new CA for check-endpoints-signer@1763908908 cert, and key in /tmp/serving-cert-4159908995/serving-signer.crt, /tmp/serving-cert-4159908995/serving-signer.key\\\\nI1123 14:41:49.008540 1 observer_polling.go:159] Starting file observer\\\\nW1123 14:41:49.011283 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1123 14:41:49.011473 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1123 14:41:49.013711 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4159908995/tls.crt::/tmp/serving-cert-4159908995/tls.key\\\\\\\"\\\\nF1123 14:41:59.241588 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://beaf638703b4b64fabd10eb14bad058dc0dae74261a462e7f2fddd8aa3811cf7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a63464a6e2f79d8e5a7e0eed9f72b684e0071cad60d3a801754762be4a2735c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a63464a6e2f79d8e5a7e0eed9f72b684e0071cad60d3a801754762be4a2735c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:41:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:41:45Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.311566 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.548331 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.548568 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 23 14:42:06 crc kubenswrapper[5050]: E1123 14:42:06.548960 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 23 14:42:06 crc kubenswrapper[5050]: E1123 14:42:06.549207 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.637566 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-qvjn2"] Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.638631 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-daemon-hlrlq"] Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.638910 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-qvjn2" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.639201 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.641176 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.641241 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.641789 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.641933 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.642491 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.642857 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.644626 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.644668 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.644814 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.645055 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-j8fzz"] Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.647377 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.649640 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-additional-cni-plugins-km979"] Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.651633 5050 
util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.652691 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-km979" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.666215 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.668145 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.668274 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.668318 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.668363 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.668412 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.668494 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.668613 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.669155 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.678888 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0a2166ba-b7f4-4eb1-95bc-4f44d90e72ea\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://358800bef7b7e16e58003aa5e2411212334a5b1a33c80d223eb7d40ad13867f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://456093b33927a232192a5fa7ee1a4228e9f37da58e5e09cb33018ae6f5b14b2c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ccfc14c36ecee0d8502415582be6053b096ea77504bf5de67f77bf6f74c0936b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://10cf72fab955e4ec81917127043d9e4a2f9d427a03edc22120ab2ae711fdc5a4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://add979609b9ecf2a5fa0f8a3cbce096448f38cbbc808342bb55c9b05ad846c6a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-23T14:41:59Z\\\",\\\"message\\\":\\\"W1123 14:41:48.828917 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1123 
14:41:48.829321 1 crypto.go:601] Generating new CA for check-endpoints-signer@1763908908 cert, and key in /tmp/serving-cert-4159908995/serving-signer.crt, /tmp/serving-cert-4159908995/serving-signer.key\\\\nI1123 14:41:49.008540 1 observer_polling.go:159] Starting file observer\\\\nW1123 14:41:49.011283 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1123 14:41:49.011473 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1123 14:41:49.013711 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4159908995/tls.crt::/tmp/serving-cert-4159908995/tls.key\\\\\\\"\\\\nF1123 14:41:59.241588 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://beaf638703b4b64fabd10eb14bad058dc0dae74261a462e7f2fddd8aa3811cf7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a63464a6e2f79d8e5a7e0eed9f72b684e0071cad60d3a801754762be4a2735c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a63464a6e2f79d8e5a7e0eed9f72b684e0071cad60d3a801754762be4a2735c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:41:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:41:45Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:06Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.692916 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:06Z is after 2025-08-24T17:21:41Z"
Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.705038 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:06Z is after 2025-08-24T17:21:41Z"
Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.708305 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log"
Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.709146 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log"
Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.711000 5050 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="10cf72fab955e4ec81917127043d9e4a2f9d427a03edc22120ab2ae711fdc5a4" exitCode=255
Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.711045 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"10cf72fab955e4ec81917127043d9e4a2f9d427a03edc22120ab2ae711fdc5a4"}
Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.711150 5050 scope.go:117] "RemoveContainer" containerID="add979609b9ecf2a5fa0f8a3cbce096448f38cbbc808342bb55c9b05ad846c6a"
Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.713188 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"ac91134c1368426cb845f20e6c777b0b613d93b1761d097051a7e63dae4203a3"}
Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.713230 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"4ef730e87b3ef157558a92f93526e4dec23e71b48f5cb08a30242f143182a5d0"}
Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.713244 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"87256a189144e63ec19564c72a96d472ddd70ad33f8d874e5ecae4b032c250fb"}
Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.714590 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"719b29f3f0e7ff3aa3d105c9f4d9fb8d180d13494fcef94b204dece23c0f763d"}
Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.714621 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"c4b072b03b04f49288b136338c57654dc2f53447ab46af53c00f69f7b6039edd"}
Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.716079 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-7btqb" event={"ID":"963992d0-f69e-4327-9789-4571451c1838","Type":"ContainerStarted","Data":"c4b1570d42f2b86c042b9abcb595bf36b35789f24d12573f94503c7383849647"}
Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.716158 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-7btqb" event={"ID":"963992d0-f69e-4327-9789-4571451c1838","Type":"ContainerStarted","Data":"cefe6c9ccab3d40142f54998db8208afbf141171c8de8c5b559ab02e16d6b87a"}
Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.717081 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"70403832731aa77f32f9c60e249d5eadb5c07495597958622e2b92e8ef896d07"}
Nov 23 14:42:06 crc kubenswrapper[5050]: E1123 14:42:06.722656 5050 kubelet.go:1929] "Failed creating a mirror pod for" err="pods \"kube-apiserver-crc\" already exists" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.722923 5050 scope.go:117] "RemoveContainer" containerID="10cf72fab955e4ec81917127043d9e4a2f9d427a03edc22120ab2ae711fdc5a4"
Nov 23 14:42:06 crc kubenswrapper[5050]: E1123 14:42:06.723174 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792"
Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.724472 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-qvjn2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"abdac21e-18fc-460d-bd3b-73bed66b8ab9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"message\\\":\\\"containers with unready status:
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bggpq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-qvjn2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:06Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.739468 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8861091e-df98-4ea6-a853-e76e7dec48c5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://374a569595342d2e7684dc020e82bd80b35965aff751215c6c926005ea040798\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://63b17cb181aa6b22312393ce35127c8e06ae80cd1feaaa001d7cb34cdc111456\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3e232fb0da11b5caaec81c7b6ae7185d121508d99fce67c77e1d5d52289dad8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7b8be59b2e91f123c3d88b93237121402f84e9f54aeeaa0fae57baaa6238140\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:41:45Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:06Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.752264 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:06Z is after 2025-08-24T17:21:41Z"
Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.758671 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/1d998909-9470-47ef-87e8-b34f0473682f-proxy-tls\") pod \"machine-config-daemon-hlrlq\" (UID: \"1d998909-9470-47ef-87e8-b34f0473682f\") " pod="openshift-machine-config-operator/machine-config-daemon-hlrlq"
Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.758705 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/6573c043-542c-47ae-a2ba-f70b8baf60c2-host-slash\") pod \"ovnkube-node-j8fzz\" (UID: \"6573c043-542c-47ae-a2ba-f70b8baf60c2\") " pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz"
Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.758726 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/1d7b7de2-b361-44b9-ba10-5d4f889abc9b-cni-binary-copy\") pod \"multus-additional-cni-plugins-km979\" (UID: \"1d7b7de2-b361-44b9-ba10-5d4f889abc9b\") " pod="openshift-multus/multus-additional-cni-plugins-km979"
Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.758745 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/1d7b7de2-b361-44b9-ba10-5d4f889abc9b-tuning-conf-dir\") pod \"multus-additional-cni-plugins-km979\" (UID: \"1d7b7de2-b361-44b9-ba10-5d4f889abc9b\") " pod="openshift-multus/multus-additional-cni-plugins-km979"
Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.758766 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/abdac21e-18fc-460d-bd3b-73bed66b8ab9-multus-conf-dir\") pod \"multus-qvjn2\" (UID: \"abdac21e-18fc-460d-bd3b-73bed66b8ab9\") " pod="openshift-multus/multus-qvjn2"
Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.758786 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6573c043-542c-47ae-a2ba-f70b8baf60c2-ovn-node-metrics-cert\") pod \"ovnkube-node-j8fzz\" (UID: \"6573c043-542c-47ae-a2ba-f70b8baf60c2\") " pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz"
Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.758807 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5mtwf\" (UniqueName: \"kubernetes.io/projected/6573c043-542c-47ae-a2ba-f70b8baf60c2-kube-api-access-5mtwf\") pod \"ovnkube-node-j8fzz\" (UID: \"6573c043-542c-47ae-a2ba-f70b8baf60c2\") " pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz"
Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.758824 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/abdac21e-18fc-460d-bd3b-73bed66b8ab9-host-run-netns\") pod \"multus-qvjn2\" (UID: \"abdac21e-18fc-460d-bd3b-73bed66b8ab9\") " pod="openshift-multus/multus-qvjn2"
Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.758844 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/1d998909-9470-47ef-87e8-b34f0473682f-rootfs\") pod \"machine-config-daemon-hlrlq\" (UID: \"1d998909-9470-47ef-87e8-b34f0473682f\") " pod="openshift-machine-config-operator/machine-config-daemon-hlrlq"
Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.758875 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6573c043-542c-47ae-a2ba-f70b8baf60c2-env-overrides\") pod \"ovnkube-node-j8fzz\" (UID: \"6573c043-542c-47ae-a2ba-f70b8baf60c2\") " pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz"
Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.758971 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/6573c043-542c-47ae-a2ba-f70b8baf60c2-systemd-units\") pod \"ovnkube-node-j8fzz\" (UID: \"6573c043-542c-47ae-a2ba-f70b8baf60c2\") " pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz"
Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.759140 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/abdac21e-18fc-460d-bd3b-73bed66b8ab9-etc-kubernetes\") pod \"multus-qvjn2\" (UID: \"abdac21e-18fc-460d-bd3b-73bed66b8ab9\") " pod="openshift-multus/multus-qvjn2"
Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.759189 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bggpq\" (UniqueName: \"kubernetes.io/projected/abdac21e-18fc-460d-bd3b-73bed66b8ab9-kube-api-access-bggpq\") pod \"multus-qvjn2\" (UID: \"abdac21e-18fc-460d-bd3b-73bed66b8ab9\") " pod="openshift-multus/multus-qvjn2"
Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.759212 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/abdac21e-18fc-460d-bd3b-73bed66b8ab9-os-release\") pod \"multus-qvjn2\" (UID: \"abdac21e-18fc-460d-bd3b-73bed66b8ab9\") " pod="openshift-multus/multus-qvjn2"
Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.759246 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/abdac21e-18fc-460d-bd3b-73bed66b8ab9-host-var-lib-cni-multus\") pod \"multus-qvjn2\" (UID: \"abdac21e-18fc-460d-bd3b-73bed66b8ab9\") " pod="openshift-multus/multus-qvjn2"
Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.759283 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/abdac21e-18fc-460d-bd3b-73bed66b8ab9-host-var-lib-kubelet\") pod \"multus-qvjn2\" (UID: \"abdac21e-18fc-460d-bd3b-73bed66b8ab9\") " pod="openshift-multus/multus-qvjn2"
Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.759302 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/abdac21e-18fc-460d-bd3b-73bed66b8ab9-multus-daemon-config\") pod \"multus-qvjn2\" (UID: \"abdac21e-18fc-460d-bd3b-73bed66b8ab9\") " pod="openshift-multus/multus-qvjn2"
Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.759360 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6573c043-542c-47ae-a2ba-f70b8baf60c2-ovnkube-config\") pod \"ovnkube-node-j8fzz\" (UID: \"6573c043-542c-47ae-a2ba-f70b8baf60c2\") " pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz"
Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.759395 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/abdac21e-18fc-460d-bd3b-73bed66b8ab9-multus-cni-dir\") pod \"multus-qvjn2\" (UID: \"abdac21e-18fc-460d-bd3b-73bed66b8ab9\") " pod="openshift-multus/multus-qvjn2"
Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.759424 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/1d998909-9470-47ef-87e8-b34f0473682f-mcd-auth-proxy-config\") pod \"machine-config-daemon-hlrlq\" (UID: \"1d998909-9470-47ef-87e8-b34f0473682f\") " pod="openshift-machine-config-operator/machine-config-daemon-hlrlq"
Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.759472 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/abdac21e-18fc-460d-bd3b-73bed66b8ab9-cni-binary-copy\") pod \"multus-qvjn2\" (UID: \"abdac21e-18fc-460d-bd3b-73bed66b8ab9\") " pod="openshift-multus/multus-qvjn2"
Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.759503 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/1d7b7de2-b361-44b9-ba10-5d4f889abc9b-system-cni-dir\") pod \"multus-additional-cni-plugins-km979\" (UID: \"1d7b7de2-b361-44b9-ba10-5d4f889abc9b\") " pod="openshift-multus/multus-additional-cni-plugins-km979"
Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.759520 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/6573c043-542c-47ae-a2ba-f70b8baf60c2-run-openvswitch\") pod \"ovnkube-node-j8fzz\" (UID: \"6573c043-542c-47ae-a2ba-f70b8baf60c2\") " pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz"
Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.759574 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/6573c043-542c-47ae-a2ba-f70b8baf60c2-log-socket\") pod \"ovnkube-node-j8fzz\" (UID: \"6573c043-542c-47ae-a2ba-f70b8baf60c2\") " pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz"
Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.759594 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/abdac21e-18fc-460d-bd3b-73bed66b8ab9-cnibin\") pod \"multus-qvjn2\" (UID: \"abdac21e-18fc-460d-bd3b-73bed66b8ab9\") " pod="openshift-multus/multus-qvjn2"
Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.759619 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/6573c043-542c-47ae-a2ba-f70b8baf60c2-run-systemd\") pod \"ovnkube-node-j8fzz\" (UID: \"6573c043-542c-47ae-a2ba-f70b8baf60c2\") " pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz"
Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.759642 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/6573c043-542c-47ae-a2ba-f70b8baf60c2-run-ovn\") pod \"ovnkube-node-j8fzz\" (UID: \"6573c043-542c-47ae-a2ba-f70b8baf60c2\") " pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz"
Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.759665 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/6573c043-542c-47ae-a2ba-f70b8baf60c2-host-run-netns\") pod \"ovnkube-node-j8fzz\" (UID: \"6573c043-542c-47ae-a2ba-f70b8baf60c2\") " pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz"
Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.759735 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/1d7b7de2-b361-44b9-ba10-5d4f889abc9b-cnibin\") pod \"multus-additional-cni-plugins-km979\" (UID: \"1d7b7de2-b361-44b9-ba10-5d4f889abc9b\") " pod="openshift-multus/multus-additional-cni-plugins-km979"
Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.759767 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/abdac21e-18fc-460d-bd3b-73bed66b8ab9-host-var-lib-cni-bin\") pod \"multus-qvjn2\" (UID: \"abdac21e-18fc-460d-bd3b-73bed66b8ab9\") " pod="openshift-multus/multus-qvjn2"
Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.759792 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/abdac21e-18fc-460d-bd3b-73bed66b8ab9-host-run-multus-certs\") pod \"multus-qvjn2\" (UID: \"abdac21e-18fc-460d-bd3b-73bed66b8ab9\") " pod="openshift-multus/multus-qvjn2"
Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.759832 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/6573c043-542c-47ae-a2ba-f70b8baf60c2-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-j8fzz\" (UID: \"6573c043-542c-47ae-a2ba-f70b8baf60c2\") " pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz"
Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.759852 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/6573c043-542c-47ae-a2ba-f70b8baf60c2-etc-openvswitch\") pod \"ovnkube-node-j8fzz\" (UID: \"6573c043-542c-47ae-a2ba-f70b8baf60c2\") " pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz"
Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.759872 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/6573c043-542c-47ae-a2ba-f70b8baf60c2-host-run-ovn-kubernetes\") pod \"ovnkube-node-j8fzz\" (UID: \"6573c043-542c-47ae-a2ba-f70b8baf60c2\") " pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz"
Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.759909 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6573c043-542c-47ae-a2ba-f70b8baf60c2-ovnkube-script-lib\") pod \"ovnkube-node-j8fzz\" (UID: \"6573c043-542c-47ae-a2ba-f70b8baf60c2\") " pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz"
Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.759937 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/abdac21e-18fc-460d-bd3b-73bed66b8ab9-multus-socket-dir-parent\") pod \"multus-qvjn2\" (UID: \"abdac21e-18fc-460d-bd3b-73bed66b8ab9\") " pod="openshift-multus/multus-qvjn2"
Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.759968 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/1d7b7de2-b361-44b9-ba10-5d4f889abc9b-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-km979\" (UID: \"1d7b7de2-b361-44b9-ba10-5d4f889abc9b\") " pod="openshift-multus/multus-additional-cni-plugins-km979"
Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.759996 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/6573c043-542c-47ae-a2ba-f70b8baf60c2-host-kubelet\") pod \"ovnkube-node-j8fzz\" (UID: \"6573c043-542c-47ae-a2ba-f70b8baf60c2\") " pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz"
Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.760019 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/6573c043-542c-47ae-a2ba-f70b8baf60c2-var-lib-openvswitch\") pod \"ovnkube-node-j8fzz\" (UID: \"6573c043-542c-47ae-a2ba-f70b8baf60c2\") " pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz"
Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.760063 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/6573c043-542c-47ae-a2ba-f70b8baf60c2-host-cni-bin\") pod \"ovnkube-node-j8fzz\" (UID: \"6573c043-542c-47ae-a2ba-f70b8baf60c2\") " pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz"
Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.760088 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/abdac21e-18fc-460d-bd3b-73bed66b8ab9-system-cni-dir\") pod \"multus-qvjn2\" (UID: \"abdac21e-18fc-460d-bd3b-73bed66b8ab9\") " pod="openshift-multus/multus-qvjn2"
Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.760119 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/abdac21e-18fc-460d-bd3b-73bed66b8ab9-host-run-k8s-cni-cncf-io\") pod \"multus-qvjn2\" (UID: \"abdac21e-18fc-460d-bd3b-73bed66b8ab9\") " pod="openshift-multus/multus-qvjn2"
Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.760141 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/abdac21e-18fc-460d-bd3b-73bed66b8ab9-hostroot\") pod \"multus-qvjn2\" (UID: \"abdac21e-18fc-460d-bd3b-73bed66b8ab9\") " pod="openshift-multus/multus-qvjn2"
Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.760310 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/6573c043-542c-47ae-a2ba-f70b8baf60c2-node-log\") pod \"ovnkube-node-j8fzz\" (UID: \"6573c043-542c-47ae-a2ba-f70b8baf60c2\") " pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz"
Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.760601 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/1d7b7de2-b361-44b9-ba10-5d4f889abc9b-os-release\") pod \"multus-additional-cni-plugins-km979\" (UID: \"1d7b7de2-b361-44b9-ba10-5d4f889abc9b\") " pod="openshift-multus/multus-additional-cni-plugins-km979"
Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.760662 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b4rwq\" (UniqueName: \"kubernetes.io/projected/1d7b7de2-b361-44b9-ba10-5d4f889abc9b-kube-api-access-b4rwq\") pod \"multus-additional-cni-plugins-km979\" (UID: \"1d7b7de2-b361-44b9-ba10-5d4f889abc9b\") " pod="openshift-multus/multus-additional-cni-plugins-km979"
Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.760733 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/6573c043-542c-47ae-a2ba-f70b8baf60c2-host-cni-netd\") pod \"ovnkube-node-j8fzz\" (UID: \"6573c043-542c-47ae-a2ba-f70b8baf60c2\") " pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz"
Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.760773 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jx4bd\" (UniqueName: \"kubernetes.io/projected/1d998909-9470-47ef-87e8-b34f0473682f-kube-api-access-jx4bd\") pod \"machine-config-daemon-hlrlq\" (UID: \"1d998909-9470-47ef-87e8-b34f0473682f\") " pod="openshift-machine-config-operator/machine-config-daemon-hlrlq"
Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.765331 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:06Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.777157 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook 
approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:06Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.788686 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: 
[iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:06Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.799247 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-7btqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"963992d0-f69e-4327-9789-4571451c1838\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8vphk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:05Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-7btqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:06Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.811481 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:06Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.826967 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:06Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.842677 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-7btqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"963992d0-f69e-4327-9789-4571451c1838\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c4b1570d42f2b86c042b9abcb595bf36b35789f24d12573f94503c7383849647\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8vphk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:05Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-7btqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call 
webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:06Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.862198 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/abdac21e-18fc-460d-bd3b-73bed66b8ab9-multus-conf-dir\") pod \"multus-qvjn2\" (UID: \"abdac21e-18fc-460d-bd3b-73bed66b8ab9\") " pod="openshift-multus/multus-qvjn2" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.862496 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6573c043-542c-47ae-a2ba-f70b8baf60c2-env-overrides\") pod \"ovnkube-node-j8fzz\" (UID: \"6573c043-542c-47ae-a2ba-f70b8baf60c2\") " pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.862376 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/abdac21e-18fc-460d-bd3b-73bed66b8ab9-multus-conf-dir\") pod \"multus-qvjn2\" (UID: \"abdac21e-18fc-460d-bd3b-73bed66b8ab9\") " pod="openshift-multus/multus-qvjn2" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.862583 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6573c043-542c-47ae-a2ba-f70b8baf60c2-ovn-node-metrics-cert\") pod \"ovnkube-node-j8fzz\" (UID: \"6573c043-542c-47ae-a2ba-f70b8baf60c2\") " pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.862684 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5mtwf\" (UniqueName: \"kubernetes.io/projected/6573c043-542c-47ae-a2ba-f70b8baf60c2-kube-api-access-5mtwf\") pod \"ovnkube-node-j8fzz\" (UID: \"6573c043-542c-47ae-a2ba-f70b8baf60c2\") " pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.862716 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/abdac21e-18fc-460d-bd3b-73bed66b8ab9-host-run-netns\") pod \"multus-qvjn2\" (UID: \"abdac21e-18fc-460d-bd3b-73bed66b8ab9\") " pod="openshift-multus/multus-qvjn2" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.862744 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/1d998909-9470-47ef-87e8-b34f0473682f-rootfs\") pod \"machine-config-daemon-hlrlq\" (UID: \"1d998909-9470-47ef-87e8-b34f0473682f\") " pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.862778 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/abdac21e-18fc-460d-bd3b-73bed66b8ab9-etc-kubernetes\") pod \"multus-qvjn2\" (UID: \"abdac21e-18fc-460d-bd3b-73bed66b8ab9\") " pod="openshift-multus/multus-qvjn2" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.862839 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bggpq\" (UniqueName: \"kubernetes.io/projected/abdac21e-18fc-460d-bd3b-73bed66b8ab9-kube-api-access-bggpq\") pod \"multus-qvjn2\" (UID: 
\"abdac21e-18fc-460d-bd3b-73bed66b8ab9\") " pod="openshift-multus/multus-qvjn2" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.862879 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/6573c043-542c-47ae-a2ba-f70b8baf60c2-systemd-units\") pod \"ovnkube-node-j8fzz\" (UID: \"6573c043-542c-47ae-a2ba-f70b8baf60c2\") " pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.862904 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6573c043-542c-47ae-a2ba-f70b8baf60c2-ovnkube-config\") pod \"ovnkube-node-j8fzz\" (UID: \"6573c043-542c-47ae-a2ba-f70b8baf60c2\") " pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.862931 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/abdac21e-18fc-460d-bd3b-73bed66b8ab9-multus-cni-dir\") pod \"multus-qvjn2\" (UID: \"abdac21e-18fc-460d-bd3b-73bed66b8ab9\") " pod="openshift-multus/multus-qvjn2" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.862922 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/1d998909-9470-47ef-87e8-b34f0473682f-rootfs\") pod \"machine-config-daemon-hlrlq\" (UID: \"1d998909-9470-47ef-87e8-b34f0473682f\") " pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.862970 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/6573c043-542c-47ae-a2ba-f70b8baf60c2-systemd-units\") pod \"ovnkube-node-j8fzz\" (UID: \"6573c043-542c-47ae-a2ba-f70b8baf60c2\") " pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.862957 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/abdac21e-18fc-460d-bd3b-73bed66b8ab9-os-release\") pod \"multus-qvjn2\" (UID: \"abdac21e-18fc-460d-bd3b-73bed66b8ab9\") " pod="openshift-multus/multus-qvjn2" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.862929 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/abdac21e-18fc-460d-bd3b-73bed66b8ab9-etc-kubernetes\") pod \"multus-qvjn2\" (UID: \"abdac21e-18fc-460d-bd3b-73bed66b8ab9\") " pod="openshift-multus/multus-qvjn2" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.863038 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/abdac21e-18fc-460d-bd3b-73bed66b8ab9-host-var-lib-cni-multus\") pod \"multus-qvjn2\" (UID: \"abdac21e-18fc-460d-bd3b-73bed66b8ab9\") " pod="openshift-multus/multus-qvjn2" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.863063 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/abdac21e-18fc-460d-bd3b-73bed66b8ab9-host-var-lib-kubelet\") pod \"multus-qvjn2\" (UID: \"abdac21e-18fc-460d-bd3b-73bed66b8ab9\") " pod="openshift-multus/multus-qvjn2" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.863081 5050 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/abdac21e-18fc-460d-bd3b-73bed66b8ab9-multus-daemon-config\") pod \"multus-qvjn2\" (UID: \"abdac21e-18fc-460d-bd3b-73bed66b8ab9\") " pod="openshift-multus/multus-qvjn2" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.863104 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/1d998909-9470-47ef-87e8-b34f0473682f-mcd-auth-proxy-config\") pod \"machine-config-daemon-hlrlq\" (UID: \"1d998909-9470-47ef-87e8-b34f0473682f\") " pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.863135 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/6573c043-542c-47ae-a2ba-f70b8baf60c2-run-openvswitch\") pod \"ovnkube-node-j8fzz\" (UID: \"6573c043-542c-47ae-a2ba-f70b8baf60c2\") " pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.863548 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/abdac21e-18fc-460d-bd3b-73bed66b8ab9-os-release\") pod \"multus-qvjn2\" (UID: \"abdac21e-18fc-460d-bd3b-73bed66b8ab9\") " pod="openshift-multus/multus-qvjn2" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.863575 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/6573c043-542c-47ae-a2ba-f70b8baf60c2-log-socket\") pod \"ovnkube-node-j8fzz\" (UID: \"6573c043-542c-47ae-a2ba-f70b8baf60c2\") " pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.863665 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/abdac21e-18fc-460d-bd3b-73bed66b8ab9-cnibin\") pod \"multus-qvjn2\" (UID: \"abdac21e-18fc-460d-bd3b-73bed66b8ab9\") " pod="openshift-multus/multus-qvjn2" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.863711 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/abdac21e-18fc-460d-bd3b-73bed66b8ab9-cni-binary-copy\") pod \"multus-qvjn2\" (UID: \"abdac21e-18fc-460d-bd3b-73bed66b8ab9\") " pod="openshift-multus/multus-qvjn2" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.863134 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/abdac21e-18fc-460d-bd3b-73bed66b8ab9-host-var-lib-cni-multus\") pod \"multus-qvjn2\" (UID: \"abdac21e-18fc-460d-bd3b-73bed66b8ab9\") " pod="openshift-multus/multus-qvjn2" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.863754 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/1d7b7de2-b361-44b9-ba10-5d4f889abc9b-system-cni-dir\") pod \"multus-additional-cni-plugins-km979\" (UID: \"1d7b7de2-b361-44b9-ba10-5d4f889abc9b\") " pod="openshift-multus/multus-additional-cni-plugins-km979" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.863792 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: 
\"kubernetes.io/host-path/6573c043-542c-47ae-a2ba-f70b8baf60c2-run-systemd\") pod \"ovnkube-node-j8fzz\" (UID: \"6573c043-542c-47ae-a2ba-f70b8baf60c2\") " pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.863803 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/6573c043-542c-47ae-a2ba-f70b8baf60c2-log-socket\") pod \"ovnkube-node-j8fzz\" (UID: \"6573c043-542c-47ae-a2ba-f70b8baf60c2\") " pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.863827 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/6573c043-542c-47ae-a2ba-f70b8baf60c2-run-ovn\") pod \"ovnkube-node-j8fzz\" (UID: \"6573c043-542c-47ae-a2ba-f70b8baf60c2\") " pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.863865 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/6573c043-542c-47ae-a2ba-f70b8baf60c2-host-run-netns\") pod \"ovnkube-node-j8fzz\" (UID: \"6573c043-542c-47ae-a2ba-f70b8baf60c2\") " pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.863898 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/1d7b7de2-b361-44b9-ba10-5d4f889abc9b-cnibin\") pod \"multus-additional-cni-plugins-km979\" (UID: \"1d7b7de2-b361-44b9-ba10-5d4f889abc9b\") " pod="openshift-multus/multus-additional-cni-plugins-km979" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.863941 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/abdac21e-18fc-460d-bd3b-73bed66b8ab9-host-var-lib-cni-bin\") pod \"multus-qvjn2\" (UID: \"abdac21e-18fc-460d-bd3b-73bed66b8ab9\") " pod="openshift-multus/multus-qvjn2" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.863987 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/abdac21e-18fc-460d-bd3b-73bed66b8ab9-host-run-multus-certs\") pod \"multus-qvjn2\" (UID: \"abdac21e-18fc-460d-bd3b-73bed66b8ab9\") " pod="openshift-multus/multus-qvjn2" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.864024 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/6573c043-542c-47ae-a2ba-f70b8baf60c2-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-j8fzz\" (UID: \"6573c043-542c-47ae-a2ba-f70b8baf60c2\") " pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.864064 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/1d7b7de2-b361-44b9-ba10-5d4f889abc9b-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-km979\" (UID: \"1d7b7de2-b361-44b9-ba10-5d4f889abc9b\") " pod="openshift-multus/multus-additional-cni-plugins-km979" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.864105 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: 
\"kubernetes.io/host-path/6573c043-542c-47ae-a2ba-f70b8baf60c2-host-kubelet\") pod \"ovnkube-node-j8fzz\" (UID: \"6573c043-542c-47ae-a2ba-f70b8baf60c2\") " pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.864142 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/6573c043-542c-47ae-a2ba-f70b8baf60c2-var-lib-openvswitch\") pod \"ovnkube-node-j8fzz\" (UID: \"6573c043-542c-47ae-a2ba-f70b8baf60c2\") " pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.864140 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6573c043-542c-47ae-a2ba-f70b8baf60c2-env-overrides\") pod \"ovnkube-node-j8fzz\" (UID: \"6573c043-542c-47ae-a2ba-f70b8baf60c2\") " pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.864176 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/6573c043-542c-47ae-a2ba-f70b8baf60c2-etc-openvswitch\") pod \"ovnkube-node-j8fzz\" (UID: \"6573c043-542c-47ae-a2ba-f70b8baf60c2\") " pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.864218 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/6573c043-542c-47ae-a2ba-f70b8baf60c2-etc-openvswitch\") pod \"ovnkube-node-j8fzz\" (UID: \"6573c043-542c-47ae-a2ba-f70b8baf60c2\") " pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.864308 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/1d7b7de2-b361-44b9-ba10-5d4f889abc9b-cnibin\") pod \"multus-additional-cni-plugins-km979\" (UID: \"1d7b7de2-b361-44b9-ba10-5d4f889abc9b\") " pod="openshift-multus/multus-additional-cni-plugins-km979" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.864309 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/6573c043-542c-47ae-a2ba-f70b8baf60c2-host-run-netns\") pod \"ovnkube-node-j8fzz\" (UID: \"6573c043-542c-47ae-a2ba-f70b8baf60c2\") " pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.864356 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/abdac21e-18fc-460d-bd3b-73bed66b8ab9-cnibin\") pod \"multus-qvjn2\" (UID: \"abdac21e-18fc-460d-bd3b-73bed66b8ab9\") " pod="openshift-multus/multus-qvjn2" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.864384 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/abdac21e-18fc-460d-bd3b-73bed66b8ab9-host-var-lib-cni-bin\") pod \"multus-qvjn2\" (UID: \"abdac21e-18fc-460d-bd3b-73bed66b8ab9\") " pod="openshift-multus/multus-qvjn2" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.864419 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/abdac21e-18fc-460d-bd3b-73bed66b8ab9-host-var-lib-kubelet\") pod \"multus-qvjn2\" (UID: \"abdac21e-18fc-460d-bd3b-73bed66b8ab9\") " 
pod="openshift-multus/multus-qvjn2" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.864483 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/6573c043-542c-47ae-a2ba-f70b8baf60c2-run-openvswitch\") pod \"ovnkube-node-j8fzz\" (UID: \"6573c043-542c-47ae-a2ba-f70b8baf60c2\") " pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.864594 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/abdac21e-18fc-460d-bd3b-73bed66b8ab9-host-run-multus-certs\") pod \"multus-qvjn2\" (UID: \"abdac21e-18fc-460d-bd3b-73bed66b8ab9\") " pod="openshift-multus/multus-qvjn2" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.864599 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/abdac21e-18fc-460d-bd3b-73bed66b8ab9-multus-daemon-config\") pod \"multus-qvjn2\" (UID: \"abdac21e-18fc-460d-bd3b-73bed66b8ab9\") " pod="openshift-multus/multus-qvjn2" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.864608 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6573c043-542c-47ae-a2ba-f70b8baf60c2-ovnkube-config\") pod \"ovnkube-node-j8fzz\" (UID: \"6573c043-542c-47ae-a2ba-f70b8baf60c2\") " pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.864629 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/6573c043-542c-47ae-a2ba-f70b8baf60c2-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-j8fzz\" (UID: \"6573c043-542c-47ae-a2ba-f70b8baf60c2\") " pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.864676 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/6573c043-542c-47ae-a2ba-f70b8baf60c2-host-kubelet\") pod \"ovnkube-node-j8fzz\" (UID: \"6573c043-542c-47ae-a2ba-f70b8baf60c2\") " pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.864730 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/6573c043-542c-47ae-a2ba-f70b8baf60c2-var-lib-openvswitch\") pod \"ovnkube-node-j8fzz\" (UID: \"6573c043-542c-47ae-a2ba-f70b8baf60c2\") " pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.863714 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/abdac21e-18fc-460d-bd3b-73bed66b8ab9-multus-cni-dir\") pod \"multus-qvjn2\" (UID: \"abdac21e-18fc-460d-bd3b-73bed66b8ab9\") " pod="openshift-multus/multus-qvjn2" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.864788 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/6573c043-542c-47ae-a2ba-f70b8baf60c2-run-systemd\") pod \"ovnkube-node-j8fzz\" (UID: \"6573c043-542c-47ae-a2ba-f70b8baf60c2\") " pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.864821 5050 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/6573c043-542c-47ae-a2ba-f70b8baf60c2-run-ovn\") pod \"ovnkube-node-j8fzz\" (UID: \"6573c043-542c-47ae-a2ba-f70b8baf60c2\") " pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.864830 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/1d7b7de2-b361-44b9-ba10-5d4f889abc9b-system-cni-dir\") pod \"multus-additional-cni-plugins-km979\" (UID: \"1d7b7de2-b361-44b9-ba10-5d4f889abc9b\") " pod="openshift-multus/multus-additional-cni-plugins-km979" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.863569 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/abdac21e-18fc-460d-bd3b-73bed66b8ab9-host-run-netns\") pod \"multus-qvjn2\" (UID: \"abdac21e-18fc-460d-bd3b-73bed66b8ab9\") " pod="openshift-multus/multus-qvjn2" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.864885 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/1d998909-9470-47ef-87e8-b34f0473682f-mcd-auth-proxy-config\") pod \"machine-config-daemon-hlrlq\" (UID: \"1d998909-9470-47ef-87e8-b34f0473682f\") " pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.864971 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/6573c043-542c-47ae-a2ba-f70b8baf60c2-host-run-ovn-kubernetes\") pod \"ovnkube-node-j8fzz\" (UID: \"6573c043-542c-47ae-a2ba-f70b8baf60c2\") " pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.865042 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/6573c043-542c-47ae-a2ba-f70b8baf60c2-host-run-ovn-kubernetes\") pod \"ovnkube-node-j8fzz\" (UID: \"6573c043-542c-47ae-a2ba-f70b8baf60c2\") " pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.865050 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6573c043-542c-47ae-a2ba-f70b8baf60c2-ovnkube-script-lib\") pod \"ovnkube-node-j8fzz\" (UID: \"6573c043-542c-47ae-a2ba-f70b8baf60c2\") " pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.865118 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/abdac21e-18fc-460d-bd3b-73bed66b8ab9-multus-socket-dir-parent\") pod \"multus-qvjn2\" (UID: \"abdac21e-18fc-460d-bd3b-73bed66b8ab9\") " pod="openshift-multus/multus-qvjn2" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.865135 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/abdac21e-18fc-460d-bd3b-73bed66b8ab9-cni-binary-copy\") pod \"multus-qvjn2\" (UID: \"abdac21e-18fc-460d-bd3b-73bed66b8ab9\") " pod="openshift-multus/multus-qvjn2" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.865199 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-socket-dir-parent\" (UniqueName: 
\"kubernetes.io/host-path/abdac21e-18fc-460d-bd3b-73bed66b8ab9-multus-socket-dir-parent\") pod \"multus-qvjn2\" (UID: \"abdac21e-18fc-460d-bd3b-73bed66b8ab9\") " pod="openshift-multus/multus-qvjn2" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.865255 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/6573c043-542c-47ae-a2ba-f70b8baf60c2-node-log\") pod \"ovnkube-node-j8fzz\" (UID: \"6573c043-542c-47ae-a2ba-f70b8baf60c2\") " pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.865305 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/6573c043-542c-47ae-a2ba-f70b8baf60c2-host-cni-bin\") pod \"ovnkube-node-j8fzz\" (UID: \"6573c043-542c-47ae-a2ba-f70b8baf60c2\") " pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.865341 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/abdac21e-18fc-460d-bd3b-73bed66b8ab9-system-cni-dir\") pod \"multus-qvjn2\" (UID: \"abdac21e-18fc-460d-bd3b-73bed66b8ab9\") " pod="openshift-multus/multus-qvjn2" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.865396 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/abdac21e-18fc-460d-bd3b-73bed66b8ab9-system-cni-dir\") pod \"multus-qvjn2\" (UID: \"abdac21e-18fc-460d-bd3b-73bed66b8ab9\") " pod="openshift-multus/multus-qvjn2" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.865395 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/abdac21e-18fc-460d-bd3b-73bed66b8ab9-host-run-k8s-cni-cncf-io\") pod \"multus-qvjn2\" (UID: \"abdac21e-18fc-460d-bd3b-73bed66b8ab9\") " pod="openshift-multus/multus-qvjn2" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.865435 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/6573c043-542c-47ae-a2ba-f70b8baf60c2-host-cni-bin\") pod \"ovnkube-node-j8fzz\" (UID: \"6573c043-542c-47ae-a2ba-f70b8baf60c2\") " pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.865465 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/abdac21e-18fc-460d-bd3b-73bed66b8ab9-hostroot\") pod \"multus-qvjn2\" (UID: \"abdac21e-18fc-460d-bd3b-73bed66b8ab9\") " pod="openshift-multus/multus-qvjn2" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.865608 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/1d7b7de2-b361-44b9-ba10-5d4f889abc9b-os-release\") pod \"multus-additional-cni-plugins-km979\" (UID: \"1d7b7de2-b361-44b9-ba10-5d4f889abc9b\") " pod="openshift-multus/multus-additional-cni-plugins-km979" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.865622 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/1d7b7de2-b361-44b9-ba10-5d4f889abc9b-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-km979\" (UID: \"1d7b7de2-b361-44b9-ba10-5d4f889abc9b\") " 
pod="openshift-multus/multus-additional-cni-plugins-km979" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.865642 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b4rwq\" (UniqueName: \"kubernetes.io/projected/1d7b7de2-b361-44b9-ba10-5d4f889abc9b-kube-api-access-b4rwq\") pod \"multus-additional-cni-plugins-km979\" (UID: \"1d7b7de2-b361-44b9-ba10-5d4f889abc9b\") " pod="openshift-multus/multus-additional-cni-plugins-km979" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.865673 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jx4bd\" (UniqueName: \"kubernetes.io/projected/1d998909-9470-47ef-87e8-b34f0473682f-kube-api-access-jx4bd\") pod \"machine-config-daemon-hlrlq\" (UID: \"1d998909-9470-47ef-87e8-b34f0473682f\") " pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.865704 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/6573c043-542c-47ae-a2ba-f70b8baf60c2-node-log\") pod \"ovnkube-node-j8fzz\" (UID: \"6573c043-542c-47ae-a2ba-f70b8baf60c2\") " pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.865745 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/abdac21e-18fc-460d-bd3b-73bed66b8ab9-host-run-k8s-cni-cncf-io\") pod \"multus-qvjn2\" (UID: \"abdac21e-18fc-460d-bd3b-73bed66b8ab9\") " pod="openshift-multus/multus-qvjn2" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.865777 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6573c043-542c-47ae-a2ba-f70b8baf60c2-ovnkube-script-lib\") pod \"ovnkube-node-j8fzz\" (UID: \"6573c043-542c-47ae-a2ba-f70b8baf60c2\") " pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.865855 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/abdac21e-18fc-460d-bd3b-73bed66b8ab9-hostroot\") pod \"multus-qvjn2\" (UID: \"abdac21e-18fc-460d-bd3b-73bed66b8ab9\") " pod="openshift-multus/multus-qvjn2" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.865868 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/1d7b7de2-b361-44b9-ba10-5d4f889abc9b-os-release\") pod \"multus-additional-cni-plugins-km979\" (UID: \"1d7b7de2-b361-44b9-ba10-5d4f889abc9b\") " pod="openshift-multus/multus-additional-cni-plugins-km979" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.865906 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/6573c043-542c-47ae-a2ba-f70b8baf60c2-host-cni-netd\") pod \"ovnkube-node-j8fzz\" (UID: \"6573c043-542c-47ae-a2ba-f70b8baf60c2\") " pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.865977 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/6573c043-542c-47ae-a2ba-f70b8baf60c2-host-cni-netd\") pod \"ovnkube-node-j8fzz\" (UID: \"6573c043-542c-47ae-a2ba-f70b8baf60c2\") " pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" Nov 23 14:42:06 crc 
kubenswrapper[5050]: I1123 14:42:06.866010 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/1d998909-9470-47ef-87e8-b34f0473682f-proxy-tls\") pod \"machine-config-daemon-hlrlq\" (UID: \"1d998909-9470-47ef-87e8-b34f0473682f\") " pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.866170 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/6573c043-542c-47ae-a2ba-f70b8baf60c2-host-slash\") pod \"ovnkube-node-j8fzz\" (UID: \"6573c043-542c-47ae-a2ba-f70b8baf60c2\") " pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.866224 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/6573c043-542c-47ae-a2ba-f70b8baf60c2-host-slash\") pod \"ovnkube-node-j8fzz\" (UID: \"6573c043-542c-47ae-a2ba-f70b8baf60c2\") " pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.866264 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/1d7b7de2-b361-44b9-ba10-5d4f889abc9b-tuning-conf-dir\") pod \"multus-additional-cni-plugins-km979\" (UID: \"1d7b7de2-b361-44b9-ba10-5d4f889abc9b\") " pod="openshift-multus/multus-additional-cni-plugins-km979" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.866305 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/1d7b7de2-b361-44b9-ba10-5d4f889abc9b-cni-binary-copy\") pod \"multus-additional-cni-plugins-km979\" (UID: \"1d7b7de2-b361-44b9-ba10-5d4f889abc9b\") " pod="openshift-multus/multus-additional-cni-plugins-km979" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.867113 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/1d7b7de2-b361-44b9-ba10-5d4f889abc9b-cni-binary-copy\") pod \"multus-additional-cni-plugins-km979\" (UID: \"1d7b7de2-b361-44b9-ba10-5d4f889abc9b\") " pod="openshift-multus/multus-additional-cni-plugins-km979" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.867522 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/1d7b7de2-b361-44b9-ba10-5d4f889abc9b-tuning-conf-dir\") pod \"multus-additional-cni-plugins-km979\" (UID: \"1d7b7de2-b361-44b9-ba10-5d4f889abc9b\") " pod="openshift-multus/multus-additional-cni-plugins-km979" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.870150 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6573c043-542c-47ae-a2ba-f70b8baf60c2-ovn-node-metrics-cert\") pod \"ovnkube-node-j8fzz\" (UID: \"6573c043-542c-47ae-a2ba-f70b8baf60c2\") " pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.872863 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/1d998909-9470-47ef-87e8-b34f0473682f-proxy-tls\") pod \"machine-config-daemon-hlrlq\" (UID: \"1d998909-9470-47ef-87e8-b34f0473682f\") " pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" Nov 23 14:42:06 crc 
kubenswrapper[5050]: I1123 14:42:06.880535 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6573c043-542c-47ae-a2ba-f70b8baf60c2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nb
db\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/servicea
ccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-j8fzz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:06Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.887924 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jx4bd\" (UniqueName: \"kubernetes.io/projected/1d998909-9470-47ef-87e8-b34f0473682f-kube-api-access-jx4bd\") pod \"machine-config-daemon-hlrlq\" (UID: \"1d998909-9470-47ef-87e8-b34f0473682f\") " pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.888753 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bggpq\" (UniqueName: \"kubernetes.io/projected/abdac21e-18fc-460d-bd3b-73bed66b8ab9-kube-api-access-bggpq\") pod \"multus-qvjn2\" (UID: \"abdac21e-18fc-460d-bd3b-73bed66b8ab9\") " pod="openshift-multus/multus-qvjn2" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.893721 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b4rwq\" (UniqueName: \"kubernetes.io/projected/1d7b7de2-b361-44b9-ba10-5d4f889abc9b-kube-api-access-b4rwq\") pod \"multus-additional-cni-plugins-km979\" (UID: \"1d7b7de2-b361-44b9-ba10-5d4f889abc9b\") " pod="openshift-multus/multus-additional-cni-plugins-km979" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.896065 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5mtwf\" (UniqueName: \"kubernetes.io/projected/6573c043-542c-47ae-a2ba-f70b8baf60c2-kube-api-access-5mtwf\") pod \"ovnkube-node-j8fzz\" (UID: \"6573c043-542c-47ae-a2ba-f70b8baf60c2\") " pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.906862 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:06Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.955259 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.958681 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-qvjn2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"abdac21e-18fc-460d-bd3b-73bed66b8ab9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bggpq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}]
,\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-qvjn2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:06Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:06 crc kubenswrapper[5050]: W1123 14:42:06.969326 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1d998909_9470_47ef_87e8_b34f0473682f.slice/crio-696dc7c926b8afdb50a9bc560d5babd1dfb12733d70d9975062ce7c25a51a67c WatchSource:0}: Error finding container 696dc7c926b8afdb50a9bc560d5babd1dfb12733d70d9975062ce7c25a51a67c: Status 404 returned error can't find the container with id 696dc7c926b8afdb50a9bc560d5babd1dfb12733d70d9975062ce7c25a51a67c Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.979745 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-qvjn2" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.985418 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.986728 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-km979" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1d7b7de2-b361-44b9-ba10-5d4f889abc9b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-km979\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:06Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:06 crc kubenswrapper[5050]: I1123 14:42:06.993100 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-km979" Nov 23 14:42:07 crc kubenswrapper[5050]: I1123 14:42:07.022304 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8861091e-df98-4ea6-a853-e76e7dec48c5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://374a569595342d2e7684dc020e82bd80b35965aff751215c6c926005ea040798\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://63b17cb181aa6b22312393ce35127c8e06ae80cd1feaaa001d7cb34cdc111456\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3e232fb0da11b5caaec81c7b6ae7185d121508d99fce67c77e1d5d52289dad8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\
\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7b8be59b2e91f123c3d88b93237121402f84e9f54aeeaa0fae57baaa6238140\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:41:45Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:07Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:07 crc kubenswrapper[5050]: I1123 14:42:07.049372 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://719b29f3f0e7ff3aa3d105c9f4d9fb8d180d13494fcef94b204dece23c0f763d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:07Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:07 crc kubenswrapper[5050]: I1123 14:42:07.071196 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac91134c1368426cb845f20e6c777b0b613d93b1761d097051a7e63dae4203a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ef730e87b3ef157558a92f93526e4dec23e71b48f5cb08a30242f143182a5d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:07Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:07 crc kubenswrapper[5050]: I1123 14:42:07.089187 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1d998909-9470-47ef-87e8-b34f0473682f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jx4bd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jx4bd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-hlrlq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:07Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:07 crc kubenswrapper[5050]: I1123 14:42:07.107158 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0a2166ba-b7f4-4eb1-95bc-4f44d90e72ea\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://358800bef7b7e16e58003aa5e2411212334a5b1a33c80d223eb7d40ad13867f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://456093b33927a232192a5fa7ee1a4228e9f37da58e5e09cb33018ae6f5b14b2c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ccfc14c36ecee0d8502415582be6053b096ea77504bf5de67f77bf6f74c0936b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://10cf72fab955e4ec81917127043d9e4a2f9d427a03edc22120ab2ae711fdc5a4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://add979609b9ecf2a5fa0f8a3cbce096448f38cbbc808342bb55c9b05ad846c6a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-23T14:41:59Z\\\",\\\"message\\\":\\\"W1123 14:41:48.828917 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1123 14:41:48.829321 1 crypto.go:601] Generating new CA for check-endpoints-signer@1763908908 cert, and key in /tmp/serving-cert-4159908995/serving-signer.crt, /tmp/serving-cert-4159908995/serving-signer.key\\\\nI1123 14:41:49.008540 1 observer_polling.go:159] Starting file observer\\\\nW1123 14:41:49.011283 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1123 14:41:49.011473 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1123 14:41:49.013711 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4159908995/tls.crt::/tmp/serving-cert-4159908995/tls.key\\\\\\\"\\\\nF1123 14:41:59.241588 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://10cf72fab955e4ec81917127043d9e4a2f9d427a03edc22120ab2ae711fdc5a4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"ed_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1763908925\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1763908925\\\\\\\\\\\\\\\" (2025-11-23 13:42:05 +0000 UTC to 2026-11-23 13:42:05 +0000 UTC (now=2025-11-23 14:42:05.669138051 +0000 UTC))\\\\\\\"\\\\nI1123 14:42:05.669209 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1123 14:42:05.669265 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1123 14:42:05.668647 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1123 14:42:05.669361 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1123 14:42:05.669413 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1123 14:42:05.668681 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" 
name=\\\\\\\"serving-cert::/tmp/serving-cert-3465722692/tls.crt::/tmp/serving-cert-3465722692/tls.key\\\\\\\"\\\\nI1123 14:42:05.669629 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1123 14:42:05.669660 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1123 14:42:05.668685 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1123 14:42:05.669884 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1123 14:42:05.668719 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1123 14:42:05.669899 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1123 14:42:05.668776 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-23T14:41:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://beaf638703b4b64fabd10eb14bad058dc0dae74261a462e7f2fddd8aa3811cf7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a63464a6e2f79d8e5a7e0eed9f72b684e0071cad60d3a801754762be4a2735c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a63464a6e2f79d8e5a7e0eed9f72b684e0071cad60d3a801754762be4a2735c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:41:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:41:45Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:07Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:07 crc kubenswrapper[5050]: I1123 14:42:07.124207 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:07Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:07 crc kubenswrapper[5050]: I1123 14:42:07.168696 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 23 14:42:07 crc kubenswrapper[5050]: I1123 14:42:07.168814 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 23 14:42:07 crc kubenswrapper[5050]: I1123 14:42:07.168863 5050 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 23 14:42:07 crc kubenswrapper[5050]: E1123 14:42:07.168907 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-23 14:42:09.168869011 +0000 UTC m=+24.335865496 (durationBeforeRetry 2s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 23 14:42:07 crc kubenswrapper[5050]: E1123 14:42:07.168964 5050 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 23 14:42:07 crc kubenswrapper[5050]: E1123 14:42:07.169008 5050 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 23 14:42:07 crc kubenswrapper[5050]: E1123 14:42:07.169033 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-23 14:42:09.169011095 +0000 UTC m=+24.336007790 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 23 14:42:07 crc kubenswrapper[5050]: E1123 14:42:07.169067 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-23 14:42:09.169058576 +0000 UTC m=+24.336055061 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 23 14:42:07 crc kubenswrapper[5050]: I1123 14:42:07.270157 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 23 14:42:07 crc kubenswrapper[5050]: I1123 14:42:07.270226 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 23 14:42:07 crc kubenswrapper[5050]: E1123 14:42:07.270356 5050 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 23 14:42:07 crc kubenswrapper[5050]: E1123 14:42:07.270373 5050 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 23 14:42:07 crc kubenswrapper[5050]: E1123 14:42:07.270386 5050 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 23 14:42:07 crc kubenswrapper[5050]: E1123 14:42:07.270434 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-23 14:42:09.270418546 +0000 UTC m=+24.437415031 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 23 14:42:07 crc kubenswrapper[5050]: E1123 14:42:07.270768 5050 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 23 14:42:07 crc kubenswrapper[5050]: E1123 14:42:07.270787 5050 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 23 14:42:07 crc kubenswrapper[5050]: E1123 14:42:07.270795 5050 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 23 14:42:07 crc kubenswrapper[5050]: E1123 14:42:07.270817 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-23 14:42:09.270810206 +0000 UTC m=+24.437806691 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 23 14:42:07 crc kubenswrapper[5050]: I1123 14:42:07.548286 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 23 14:42:07 crc kubenswrapper[5050]: E1123 14:42:07.549272 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 23 14:42:07 crc kubenswrapper[5050]: I1123 14:42:07.558126 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="01ab3dd5-8196-46d0-ad33-122e2ca51def" path="/var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes" Nov 23 14:42:07 crc kubenswrapper[5050]: I1123 14:42:07.558926 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" path="/var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes" Nov 23 14:42:07 crc kubenswrapper[5050]: I1123 14:42:07.560702 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09efc573-dbb6-4249-bd59-9b87aba8dd28" path="/var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes" Nov 23 14:42:07 crc kubenswrapper[5050]: I1123 14:42:07.561919 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b574797-001e-440a-8f4e-c0be86edad0f" path="/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes" Nov 23 14:42:07 crc kubenswrapper[5050]: I1123 14:42:07.563057 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b78653f-4ff9-4508-8672-245ed9b561e3" path="/var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes" Nov 23 14:42:07 crc kubenswrapper[5050]: I1123 14:42:07.564085 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1386a44e-36a2-460c-96d0-0359d2b6f0f5" path="/var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes" Nov 23 14:42:07 crc kubenswrapper[5050]: I1123 14:42:07.565041 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1bf7eb37-55a3-4c65-b768-a94c82151e69" path="/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes" Nov 23 14:42:07 crc kubenswrapper[5050]: I1123 14:42:07.566045 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1d611f23-29be-4491-8495-bee1670e935f" path="/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes" Nov 23 14:42:07 crc kubenswrapper[5050]: I1123 14:42:07.567072 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20b0d48f-5fd6-431c-a545-e3c800c7b866" path="/var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/volumes" Nov 23 14:42:07 crc kubenswrapper[5050]: I1123 14:42:07.568010 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" path="/var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes" Nov 23 14:42:07 crc kubenswrapper[5050]: I1123 14:42:07.570107 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="22c825df-677d-4ca6-82db-3454ed06e783" path="/var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes" Nov 23 14:42:07 crc kubenswrapper[5050]: I1123 14:42:07.571145 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="25e176fe-21b4-4974-b1ed-c8b94f112a7f" path="/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes" Nov 23 14:42:07 crc kubenswrapper[5050]: I1123 14:42:07.571861 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" path="/var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes" Nov 23 14:42:07 crc kubenswrapper[5050]: I1123 14:42:07.572616 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31d8b7a1-420e-4252-a5b7-eebe8a111292" 
path="/var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes" Nov 23 14:42:07 crc kubenswrapper[5050]: I1123 14:42:07.573371 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ab1a177-2de0-46d9-b765-d0d0649bb42e" path="/var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/volumes" Nov 23 14:42:07 crc kubenswrapper[5050]: I1123 14:42:07.574156 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" path="/var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes" Nov 23 14:42:07 crc kubenswrapper[5050]: I1123 14:42:07.575012 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="43509403-f426-496e-be36-56cef71462f5" path="/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes" Nov 23 14:42:07 crc kubenswrapper[5050]: I1123 14:42:07.575654 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="44663579-783b-4372-86d6-acf235a62d72" path="/var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/volumes" Nov 23 14:42:07 crc kubenswrapper[5050]: I1123 14:42:07.576409 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="496e6271-fb68-4057-954e-a0d97a4afa3f" path="/var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes" Nov 23 14:42:07 crc kubenswrapper[5050]: I1123 14:42:07.577231 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" path="/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes" Nov 23 14:42:07 crc kubenswrapper[5050]: I1123 14:42:07.577911 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49ef4625-1d3a-4a9f-b595-c2433d32326d" path="/var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/volumes" Nov 23 14:42:07 crc kubenswrapper[5050]: I1123 14:42:07.581278 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4bb40260-dbaa-4fb0-84df-5e680505d512" path="/var/lib/kubelet/pods/4bb40260-dbaa-4fb0-84df-5e680505d512/volumes" Nov 23 14:42:07 crc kubenswrapper[5050]: I1123 14:42:07.582177 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5225d0e4-402f-4861-b410-819f433b1803" path="/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes" Nov 23 14:42:07 crc kubenswrapper[5050]: I1123 14:42:07.583224 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5441d097-087c-4d9a-baa8-b210afa90fc9" path="/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes" Nov 23 14:42:07 crc kubenswrapper[5050]: I1123 14:42:07.584386 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57a731c4-ef35-47a8-b875-bfb08a7f8011" path="/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes" Nov 23 14:42:07 crc kubenswrapper[5050]: I1123 14:42:07.585519 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b88f790-22fa-440e-b583-365168c0b23d" path="/var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/volumes" Nov 23 14:42:07 crc kubenswrapper[5050]: I1123 14:42:07.586954 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5fe579f8-e8a6-4643-bce5-a661393c4dde" path="/var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/volumes" Nov 23 14:42:07 crc kubenswrapper[5050]: I1123 14:42:07.587939 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6402fda4-df10-493c-b4e5-d0569419652d" 
path="/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes" Nov 23 14:42:07 crc kubenswrapper[5050]: I1123 14:42:07.589027 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6509e943-70c6-444c-bc41-48a544e36fbd" path="/var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes" Nov 23 14:42:07 crc kubenswrapper[5050]: I1123 14:42:07.590030 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6731426b-95fe-49ff-bb5f-40441049fde2" path="/var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/volumes" Nov 23 14:42:07 crc kubenswrapper[5050]: I1123 14:42:07.591808 5050 kubelet_volumes.go:152] "Cleaned up orphaned volume subpath from pod" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volume-subpaths/run-systemd/ovnkube-controller/6" Nov 23 14:42:07 crc kubenswrapper[5050]: I1123 14:42:07.591967 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volumes" Nov 23 14:42:07 crc kubenswrapper[5050]: I1123 14:42:07.593949 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7539238d-5fe0-46ed-884e-1c3b566537ec" path="/var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes" Nov 23 14:42:07 crc kubenswrapper[5050]: I1123 14:42:07.594768 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7583ce53-e0fe-4a16-9e4d-50516596a136" path="/var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes" Nov 23 14:42:07 crc kubenswrapper[5050]: I1123 14:42:07.595387 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7bb08738-c794-4ee8-9972-3a62ca171029" path="/var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes" Nov 23 14:42:07 crc kubenswrapper[5050]: I1123 14:42:07.596907 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87cf06ed-a83f-41a7-828d-70653580a8cb" path="/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes" Nov 23 14:42:07 crc kubenswrapper[5050]: I1123 14:42:07.597892 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" path="/var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes" Nov 23 14:42:07 crc kubenswrapper[5050]: I1123 14:42:07.598645 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="925f1c65-6136-48ba-85aa-3a3b50560753" path="/var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes" Nov 23 14:42:07 crc kubenswrapper[5050]: I1123 14:42:07.599601 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" path="/var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/volumes" Nov 23 14:42:07 crc kubenswrapper[5050]: I1123 14:42:07.600587 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d4552c7-cd75-42dd-8880-30dd377c49a4" path="/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes" Nov 23 14:42:07 crc kubenswrapper[5050]: I1123 14:42:07.601275 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" path="/var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/volumes" Nov 23 14:42:07 crc kubenswrapper[5050]: I1123 14:42:07.604745 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a31745f5-9847-4afe-82a5-3161cc66ca93" 
path="/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes" Nov 23 14:42:07 crc kubenswrapper[5050]: I1123 14:42:07.608260 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" path="/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes" Nov 23 14:42:07 crc kubenswrapper[5050]: I1123 14:42:07.609794 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6312bbd-5731-4ea0-a20f-81d5a57df44a" path="/var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/volumes" Nov 23 14:42:07 crc kubenswrapper[5050]: I1123 14:42:07.611680 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" path="/var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes" Nov 23 14:42:07 crc kubenswrapper[5050]: I1123 14:42:07.612512 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" path="/var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes" Nov 23 14:42:07 crc kubenswrapper[5050]: I1123 14:42:07.614147 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" path="/var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/volumes" Nov 23 14:42:07 crc kubenswrapper[5050]: I1123 14:42:07.615482 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf126b07-da06-4140-9a57-dfd54fc6b486" path="/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes" Nov 23 14:42:07 crc kubenswrapper[5050]: I1123 14:42:07.616946 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c03ee662-fb2f-4fc4-a2c1-af487c19d254" path="/var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes" Nov 23 14:42:07 crc kubenswrapper[5050]: I1123 14:42:07.618847 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" path="/var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/volumes" Nov 23 14:42:07 crc kubenswrapper[5050]: I1123 14:42:07.620028 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e7e6199b-1264-4501-8953-767f51328d08" path="/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes" Nov 23 14:42:07 crc kubenswrapper[5050]: I1123 14:42:07.621753 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="efdd0498-1daa-4136-9a4a-3b948c2293fc" path="/var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/volumes" Nov 23 14:42:07 crc kubenswrapper[5050]: I1123 14:42:07.623213 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" path="/var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/volumes" Nov 23 14:42:07 crc kubenswrapper[5050]: I1123 14:42:07.625274 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fda69060-fa79-4696-b1a6-7980f124bf7c" path="/var/lib/kubelet/pods/fda69060-fa79-4696-b1a6-7980f124bf7c/volumes" Nov 23 14:42:07 crc kubenswrapper[5050]: I1123 14:42:07.722748 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" event={"ID":"1d998909-9470-47ef-87e8-b34f0473682f","Type":"ContainerStarted","Data":"5bbc40aad4e1fff72cfeaf901bbe2edde01b909690cec06e0828aee8a4c93e8b"} Nov 23 14:42:07 crc kubenswrapper[5050]: I1123 14:42:07.722843 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" event={"ID":"1d998909-9470-47ef-87e8-b34f0473682f","Type":"ContainerStarted","Data":"b97978773311507f492064330c0ebbaf7862255a02347599c52ce6bbd13d8922"} Nov 23 14:42:07 crc kubenswrapper[5050]: I1123 14:42:07.722865 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" event={"ID":"1d998909-9470-47ef-87e8-b34f0473682f","Type":"ContainerStarted","Data":"696dc7c926b8afdb50a9bc560d5babd1dfb12733d70d9975062ce7c25a51a67c"} Nov 23 14:42:07 crc kubenswrapper[5050]: I1123 14:42:07.726074 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log" Nov 23 14:42:07 crc kubenswrapper[5050]: I1123 14:42:07.730292 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-km979" event={"ID":"1d7b7de2-b361-44b9-ba10-5d4f889abc9b","Type":"ContainerStarted","Data":"32b4eece06b72ca080adc2e1fab7f1db357c926b47433f2d374afaa45dad6b38"} Nov 23 14:42:07 crc kubenswrapper[5050]: I1123 14:42:07.731058 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-km979" event={"ID":"1d7b7de2-b361-44b9-ba10-5d4f889abc9b","Type":"ContainerStarted","Data":"93a2c08363578c427e9b4ff16d8296a68d9bebb698e59cb94e3e41fba4e0199e"} Nov 23 14:42:07 crc kubenswrapper[5050]: I1123 14:42:07.731665 5050 scope.go:117] "RemoveContainer" containerID="10cf72fab955e4ec81917127043d9e4a2f9d427a03edc22120ab2ae711fdc5a4" Nov 23 14:42:07 crc kubenswrapper[5050]: E1123 14:42:07.731909 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" Nov 23 14:42:07 crc kubenswrapper[5050]: I1123 14:42:07.734963 5050 generic.go:334] "Generic (PLEG): container finished" podID="6573c043-542c-47ae-a2ba-f70b8baf60c2" containerID="6c68e73aa649285bedc9716826bdf5e41580614fba6de910e2dc1fa5bb524dcc" exitCode=0 Nov 23 14:42:07 crc kubenswrapper[5050]: I1123 14:42:07.735026 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" event={"ID":"6573c043-542c-47ae-a2ba-f70b8baf60c2","Type":"ContainerDied","Data":"6c68e73aa649285bedc9716826bdf5e41580614fba6de910e2dc1fa5bb524dcc"} Nov 23 14:42:07 crc kubenswrapper[5050]: I1123 14:42:07.735074 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" event={"ID":"6573c043-542c-47ae-a2ba-f70b8baf60c2","Type":"ContainerStarted","Data":"9a3221887c76c30560f37a45f62e8988b6432c08dc2eedb0b050b5a6cbff0244"} Nov 23 14:42:07 crc kubenswrapper[5050]: I1123 14:42:07.737370 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-qvjn2" event={"ID":"abdac21e-18fc-460d-bd3b-73bed66b8ab9","Type":"ContainerStarted","Data":"7d239bbc61595516c6e76763dd66ee795750adb765c56bb5d9c967c1e5897fcf"} Nov 23 14:42:07 crc kubenswrapper[5050]: I1123 14:42:07.737418 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-qvjn2" 
event={"ID":"abdac21e-18fc-460d-bd3b-73bed66b8ab9","Type":"ContainerStarted","Data":"bbd671c608bd29f8510cccc38420ac3c179869eea508df2e1d31e5d4a4a81160"} Nov 23 14:42:07 crc kubenswrapper[5050]: I1123 14:42:07.746108 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:07Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:07 crc kubenswrapper[5050]: I1123 14:42:07.765165 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: 
[iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:07Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:07 crc kubenswrapper[5050]: I1123 14:42:07.777716 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-7btqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"963992d0-f69e-4327-9789-4571451c1838\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c4b1570d42f2b86c042b9abcb595bf36b35789f24d12573f94503c7383849647\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8vphk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:05Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-7btqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:07Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:07 crc kubenswrapper[5050]: I1123 14:42:07.800566 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6573c043-542c-47ae-a2ba-f70b8baf60c2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"message\\\":\\\"containers 
with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\
"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name
\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-j8fzz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not 
yet valid: current time 2025-11-23T14:42:07Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:07 crc kubenswrapper[5050]: I1123 14:42:07.818642 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:07Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:07 crc kubenswrapper[5050]: I1123 14:42:07.837584 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-qvjn2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"abdac21e-18fc-460d-bd3b-73bed66b8ab9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bggpq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-qvjn2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": 
failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:07Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:07 crc kubenswrapper[5050]: I1123 14:42:07.851774 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8861091e-df98-4ea6-a853-e76e7dec48c5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://374a569595342d2e7684dc020e82bd80b35965aff751215c6c926005ea040798\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://63b17cb181aa6b22312393ce35127c8e06ae80cd1feaaa001d7cb34cdc111456\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3e232fb0da11b5caaec81c7b6ae7185d121508d99fce67c77e1d5d52289dad8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\
":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7b8be59b2e91f123c3d88b93237121402f84e9f54aeeaa0fae57baaa6238140\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:41:45Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:07Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:07 crc kubenswrapper[5050]: I1123 14:42:07.868632 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://719b29f3f0e7ff3aa3d105c9f4d9fb8d180d13494fcef94b204dece23c0f763d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:07Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:07 crc kubenswrapper[5050]: I1123 14:42:07.882287 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac91134c1368426cb845f20e6c777b0b613d93b1761d097051a7e63dae4203a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ef730e87b3ef157558a92f93526e4dec23e71b48f5cb08a30242f143182a5d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:07Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:07 crc kubenswrapper[5050]: I1123 14:42:07.893700 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1d998909-9470-47ef-87e8-b34f0473682f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5bbc40aad4e1fff72cfeaf901bbe2edde01b909690cec06e0828aee8a4c93e8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jx4bd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b97978773311507f492064330c0ebbaf7862255a02347599c52ce6bbd13d8922\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jx4bd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-hlrlq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:07Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:07 crc kubenswrapper[5050]: I1123 14:42:07.915957 5050 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-km979" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1d7b7de2-b361-44b9-ba10-5d4f889abc9b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\
\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting
\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-km979\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:07Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:07 crc kubenswrapper[5050]: I1123 14:42:07.934342 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0a2166ba-b7f4-4eb1-95bc-4f44d90e72ea\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://358800bef7b7e16e58003aa5e2411212334a5b1a33c80d223eb7d40ad13867f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://456093b33927a232192a5fa7ee1a4228e9f37da58e5e09cb33018ae6f5b14b2c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ccfc14c36ecee0d8502415582be6053b096ea77504bf5de67f77bf6f74c0936b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://10cf72fab955e4ec81917127043d9e4a2f9d427a03edc22120ab2ae711fdc5a4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://add979609b9ecf2a5fa0f8a3cbce096448f38cbbc808342bb55c9b05ad846c6a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-23T14:41:59Z\\\",\\\"message\\\":\\\"W1123 14:41:48.828917 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1123 
14:41:48.829321 1 crypto.go:601] Generating new CA for check-endpoints-signer@1763908908 cert, and key in /tmp/serving-cert-4159908995/serving-signer.crt, /tmp/serving-cert-4159908995/serving-signer.key\\\\nI1123 14:41:49.008540 1 observer_polling.go:159] Starting file observer\\\\nW1123 14:41:49.011283 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1123 14:41:49.011473 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1123 14:41:49.013711 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4159908995/tls.crt::/tmp/serving-cert-4159908995/tls.key\\\\\\\"\\\\nF1123 14:41:59.241588 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://10cf72fab955e4ec81917127043d9e4a2f9d427a03edc22120ab2ae711fdc5a4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"ed_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1763908925\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1763908925\\\\\\\\\\\\\\\" (2025-11-23 13:42:05 +0000 UTC to 2026-11-23 13:42:05 +0000 UTC (now=2025-11-23 14:42:05.669138051 +0000 UTC))\\\\\\\"\\\\nI1123 14:42:05.669209 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1123 14:42:05.669265 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1123 14:42:05.668647 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1123 14:42:05.669361 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1123 14:42:05.669413 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1123 14:42:05.668681 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3465722692/tls.crt::/tmp/serving-cert-3465722692/tls.key\\\\\\\"\\\\nI1123 14:42:05.669629 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1123 14:42:05.669660 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1123 14:42:05.668685 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1123 14:42:05.669884 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1123 14:42:05.668719 1 
configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1123 14:42:05.669899 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1123 14:42:05.668776 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-23T14:41:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://beaf638703b4b64fabd10eb14bad058dc0dae74261a462e7f2fddd8aa3811cf7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a63464a6e2f79d8e5a7e0eed9f72b684e0071cad60d3a801754762be4a2735c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a63464a6e2f79d8e5a7e0eed9f72b684e0071cad60d3a801754762be4a2735c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:41:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:41:45Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:07Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:07 crc kubenswrapper[5050]: I1123 14:42:07.947258 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:07Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:07 crc kubenswrapper[5050]: I1123 14:42:07.962133 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be 
located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:07Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:07 crc kubenswrapper[5050]: I1123 14:42:07.974055 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-7btqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"963992d0-f69e-4327-9789-4571451c1838\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c4b1570d42f2b86c042b9abcb595bf36b35789f24d12573f94503c7383849647\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8vphk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:05Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-7btqb\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:07Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:07 crc kubenswrapper[5050]: I1123 14:42:07.992725 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6573c043-542c-47ae-a2ba-f70b8baf60c2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release
-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\"
,\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\
\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c68e73aa649285bedc9716826bdf5e41580614fba6de910e2dc1fa5bb524dcc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6c68e73aa649285bedc9716826bdf5e41580614fba6de910e2dc1fa5bb524dcc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-j8fzz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:07Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:08 crc kubenswrapper[5050]: I1123 14:42:08.008041 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:08Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:08 crc kubenswrapper[5050]: I1123 14:42:08.036523 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-qvjn2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"abdac21e-18fc-460d-bd3b-73bed66b8ab9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d239bbc61595516c6e76763dd66ee795750adb765c56bb5d9c967c1e5897fcf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mo
untPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bggpq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-qvjn2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:08Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:08 crc kubenswrapper[5050]: I1123 14:42:08.053184 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:08Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:08 crc kubenswrapper[5050]: I1123 14:42:08.066474 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://719b29f3f0e7ff3aa3d105c9f4d9fb8d180d13494fcef94b204dece23c0f763d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:08Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:08 crc kubenswrapper[5050]: I1123 14:42:08.084461 5050 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac91134c1368426cb845f20e6c777b0b613d93b1761d097051a7e63dae4203a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ef730e87b3ef157558a92f93526e4dec23e71b48f5cb08a30242f143182a5d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:08Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:08 crc kubenswrapper[5050]: I1123 14:42:08.104484 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1d998909-9470-47ef-87e8-b34f0473682f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5bbc40aad4e1fff72cfeaf901bbe2edde01b909690cec06e0828aee8a4c93e8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jx4bd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b97978773311507f492064330c0ebbaf7862255a02347599c52ce6bbd13d8922\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jx4bd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-hlrlq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:08Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:08 crc kubenswrapper[5050]: I1123 14:42:08.122128 5050 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-km979" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1d7b7de2-b361-44b9-ba10-5d4f889abc9b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://32b4eece06b72ca080adc2e1fab7f1db357c926b47433f2d374afaa45dad6b38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"l
astState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f
6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-km979\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:08Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:08 crc kubenswrapper[5050]: I1123 14:42:08.154269 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8861091e-df98-4ea6-a853-e76e7dec48c5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://374a569595342d2e7684dc020e82bd80b35965aff751215c6c926005ea040798\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://63b17cb181aa6b22312393ce35127c8e06ae80cd1feaaa001d7cb34cdc111456\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\
\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3e232fb0da11b5caaec81c7b6ae7185d121508d99fce67c77e1d5d52289dad8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7b8be59b2e91f123c3d88b93237121402f84e9f54aeeaa0fae57baaa6238140\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:41:45Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:08Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:08 crc kubenswrapper[5050]: I1123 14:42:08.174173 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0a2166ba-b7f4-4eb1-95bc-4f44d90e72ea\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://358800bef7b7e16e58003aa5e2411212334a5b1a33c80d223eb7d40ad13867f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://456093b33927a232192a5fa7ee1a4228e9f37da58e5e09cb33018ae6f5b14b2c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ccfc14c36ecee0d8502415582be6053b096ea77504bf5de67f77bf6f74c0936b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://10cf72fab955e4ec81917127043d9e4a2f9d427a03edc22120ab2ae711fdc5a4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\
\":\\\"cri-o://10cf72fab955e4ec81917127043d9e4a2f9d427a03edc22120ab2ae711fdc5a4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"ed_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1763908925\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1763908925\\\\\\\\\\\\\\\" (2025-11-23 13:42:05 +0000 UTC to 2026-11-23 13:42:05 +0000 UTC (now=2025-11-23 14:42:05.669138051 +0000 UTC))\\\\\\\"\\\\nI1123 14:42:05.669209 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1123 14:42:05.669265 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1123 14:42:05.668647 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1123 14:42:05.669361 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1123 14:42:05.669413 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1123 14:42:05.668681 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3465722692/tls.crt::/tmp/serving-cert-3465722692/tls.key\\\\\\\"\\\\nI1123 14:42:05.669629 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1123 14:42:05.669660 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1123 14:42:05.668685 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1123 14:42:05.669884 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1123 14:42:05.668719 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1123 14:42:05.669899 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1123 14:42:05.668776 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-23T14:41:59Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://beaf638703b4b64fabd10eb14bad058dc0dae74261a462e7f2fddd8aa3811cf7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a63464a6e2f79d8e5a7e0eed9f72b684e0071cad60d3a801754762be4a2735c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a63464a6e2f79d8e5a7e0eed9f72b684e0071cad60d3a801754762be4a2735c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:41:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:41:45Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:08Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:08 crc kubenswrapper[5050]: I1123 14:42:08.218863 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:08Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:08 crc kubenswrapper[5050]: I1123 14:42:08.547716 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 23 14:42:08 crc kubenswrapper[5050]: I1123 14:42:08.547796 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 23 14:42:08 crc kubenswrapper[5050]: E1123 14:42:08.548425 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 23 14:42:08 crc kubenswrapper[5050]: E1123 14:42:08.548497 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 23 14:42:08 crc kubenswrapper[5050]: I1123 14:42:08.599903 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/node-ca-7cjg9"] Nov 23 14:42:08 crc kubenswrapper[5050]: I1123 14:42:08.600306 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/node-ca-7cjg9" Nov 23 14:42:08 crc kubenswrapper[5050]: I1123 14:42:08.603025 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Nov 23 14:42:08 crc kubenswrapper[5050]: I1123 14:42:08.603039 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Nov 23 14:42:08 crc kubenswrapper[5050]: I1123 14:42:08.603272 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Nov 23 14:42:08 crc kubenswrapper[5050]: I1123 14:42:08.603366 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Nov 23 14:42:08 crc kubenswrapper[5050]: I1123 14:42:08.620377 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0a2166ba-b7f4-4eb1-95bc-4f44d90e72ea\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://358800bef7b7e16e58003aa5e2411212334a5b1a33c80d223eb7d40ad13867f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://456093b33927a232192a5fa7ee1a4228e9f37da58e5e09cb33018ae6f5b14b2c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ccfc14c36ecee0d8502415582be6053b096ea77504bf5de67f77bf6f74c0936b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://10cf72fab955e4ec81917127043d9e4a2f9d427a03edc22120ab2ae711fdc5a4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://10cf72fab955e4ec81917127043d9e4a2f9d427a03edc22120ab2ae711fdc5a4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"ed_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed 
loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1763908925\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1763908925\\\\\\\\\\\\\\\" (2025-11-23 13:42:05 +0000 UTC to 2026-11-23 13:42:05 +0000 UTC (now=2025-11-23 14:42:05.669138051 +0000 UTC))\\\\\\\"\\\\nI1123 14:42:05.669209 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1123 14:42:05.669265 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1123 14:42:05.668647 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1123 14:42:05.669361 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1123 14:42:05.669413 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1123 14:42:05.668681 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3465722692/tls.crt::/tmp/serving-cert-3465722692/tls.key\\\\\\\"\\\\nI1123 14:42:05.669629 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1123 14:42:05.669660 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1123 14:42:05.668685 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1123 14:42:05.669884 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1123 14:42:05.668719 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1123 14:42:05.669899 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1123 14:42:05.668776 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-23T14:41:59Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://beaf638703b4b64fabd10eb14bad058dc0dae74261a462e7f2fddd8aa3811cf7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a63464a6e2f79d8e5a7e0eed9f72b684e0071cad60d3a801754762be4a2735c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a63464a6e2f79d8e5a7e0eed9f72b684e0071cad60d3a801754762be4a2735c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:41:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:41:45Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:08Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:08 crc kubenswrapper[5050]: I1123 14:42:08.635714 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:08Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:08 crc kubenswrapper[5050]: I1123 14:42:08.649619 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-7btqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"963992d0-f69e-4327-9789-4571451c1838\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c4b1570d42f2b86c042b9abcb595bf36b35789f24d12573f94503c7383849647\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8vphk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\
\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:05Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-7btqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:08Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:08 crc kubenswrapper[5050]: I1123 14:42:08.668593 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6573c043-542c-47ae-a2ba-f70b8baf60c2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c68e73aa649285bedc9716826bdf5e41580614fba6de910e2dc1fa5bb524dcc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6c68e73aa649285bedc9716826bdf5e41580614fba6de910e2dc1fa5bb524dcc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-j8fzz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:08Z 
is after 2025-08-24T17:21:41Z" Nov 23 14:42:08 crc kubenswrapper[5050]: I1123 14:42:08.686953 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/7f65245a-e7ea-4b29-9534-5dbe5a7ee271-host\") pod \"node-ca-7cjg9\" (UID: \"7f65245a-e7ea-4b29-9534-5dbe5a7ee271\") " pod="openshift-image-registry/node-ca-7cjg9" Nov 23 14:42:08 crc kubenswrapper[5050]: I1123 14:42:08.687030 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/7f65245a-e7ea-4b29-9534-5dbe5a7ee271-serviceca\") pod \"node-ca-7cjg9\" (UID: \"7f65245a-e7ea-4b29-9534-5dbe5a7ee271\") " pod="openshift-image-registry/node-ca-7cjg9" Nov 23 14:42:08 crc kubenswrapper[5050]: I1123 14:42:08.687226 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-586ds\" (UniqueName: \"kubernetes.io/projected/7f65245a-e7ea-4b29-9534-5dbe5a7ee271-kube-api-access-586ds\") pod \"node-ca-7cjg9\" (UID: \"7f65245a-e7ea-4b29-9534-5dbe5a7ee271\") " pod="openshift-image-registry/node-ca-7cjg9" Nov 23 14:42:08 crc kubenswrapper[5050]: I1123 14:42:08.690519 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:08Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:08 crc kubenswrapper[5050]: I1123 14:42:08.703086 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:08Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:08 crc kubenswrapper[5050]: I1123 14:42:08.714564 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7cjg9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7f65245a-e7ea-4b29-9534-5dbe5a7ee271\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-586ds\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:08Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7cjg9\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:08Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:08 crc kubenswrapper[5050]: I1123 14:42:08.730426 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:08Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:08 crc kubenswrapper[5050]: I1123 14:42:08.742161 5050 generic.go:334] "Generic (PLEG): container finished" podID="1d7b7de2-b361-44b9-ba10-5d4f889abc9b" containerID="32b4eece06b72ca080adc2e1fab7f1db357c926b47433f2d374afaa45dad6b38" exitCode=0 Nov 23 14:42:08 crc kubenswrapper[5050]: I1123 14:42:08.742247 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-km979" event={"ID":"1d7b7de2-b361-44b9-ba10-5d4f889abc9b","Type":"ContainerDied","Data":"32b4eece06b72ca080adc2e1fab7f1db357c926b47433f2d374afaa45dad6b38"} Nov 23 14:42:08 crc kubenswrapper[5050]: I1123 14:42:08.745673 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" 
event={"ID":"6573c043-542c-47ae-a2ba-f70b8baf60c2","Type":"ContainerStarted","Data":"cfa445fb3677a09126d124eea1348466a338713c6728a7ef138d9ffa43bec9eb"} Nov 23 14:42:08 crc kubenswrapper[5050]: I1123 14:42:08.745701 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" event={"ID":"6573c043-542c-47ae-a2ba-f70b8baf60c2","Type":"ContainerStarted","Data":"bd601391a5bb150716ee97b765dd442d4bf9929eb01dfd17fdce39f327b4698f"} Nov 23 14:42:08 crc kubenswrapper[5050]: I1123 14:42:08.745711 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" event={"ID":"6573c043-542c-47ae-a2ba-f70b8baf60c2","Type":"ContainerStarted","Data":"2d06dfc9e93891495a387aa8a8e24b4029173b25c953dff4d2e80dbb30f4b03e"} Nov 23 14:42:08 crc kubenswrapper[5050]: I1123 14:42:08.745756 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" event={"ID":"6573c043-542c-47ae-a2ba-f70b8baf60c2","Type":"ContainerStarted","Data":"af3da6e6807f5d42cb8607efbabab0c5d472ddb5c02dd1c33f9f0f3ea8eba9c6"} Nov 23 14:42:08 crc kubenswrapper[5050]: I1123 14:42:08.745770 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" event={"ID":"6573c043-542c-47ae-a2ba-f70b8baf60c2","Type":"ContainerStarted","Data":"34b2a56f4af5df28c458b7f7d4588f81564c734a117bbf4d08e1fccfa43fd387"} Nov 23 14:42:08 crc kubenswrapper[5050]: I1123 14:42:08.747455 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"ced068547f7e7c0807e996ab8500b30cda68a4c31363b415f66516b883221972"} Nov 23 14:42:08 crc kubenswrapper[5050]: I1123 14:42:08.748266 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-qvjn2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"abdac21e-18fc-460d-bd3b-73bed66b8ab9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d239bbc61595516c6e76763dd66ee795750adb765c56bb5d9c967c1e5897fcf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bggpq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-qvjn2\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:08Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:08 crc kubenswrapper[5050]: I1123 14:42:08.761532 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac91134c1368426cb845f20e6c777b0b613d93b1761d097051a7e63dae4203a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ef730e87b3ef157558a92f93526e4dec23e71b48f5cb08a30242f143182a5d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:08Z is after 
2025-08-24T17:21:41Z" Nov 23 14:42:08 crc kubenswrapper[5050]: I1123 14:42:08.774712 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1d998909-9470-47ef-87e8-b34f0473682f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5bbc40aad4e1fff72cfeaf901bbe2edde01b909690cec06e0828aee8a4c93e8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jx4bd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b97978773311507f492064330c0ebbaf7862255a02347599c52ce6bbd13d8922\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jx4bd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-hlrlq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed 
to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:08Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:08 crc kubenswrapper[5050]: I1123 14:42:08.788642 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/7f65245a-e7ea-4b29-9534-5dbe5a7ee271-serviceca\") pod \"node-ca-7cjg9\" (UID: \"7f65245a-e7ea-4b29-9534-5dbe5a7ee271\") " pod="openshift-image-registry/node-ca-7cjg9" Nov 23 14:42:08 crc kubenswrapper[5050]: I1123 14:42:08.788725 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-586ds\" (UniqueName: \"kubernetes.io/projected/7f65245a-e7ea-4b29-9534-5dbe5a7ee271-kube-api-access-586ds\") pod \"node-ca-7cjg9\" (UID: \"7f65245a-e7ea-4b29-9534-5dbe5a7ee271\") " pod="openshift-image-registry/node-ca-7cjg9" Nov 23 14:42:08 crc kubenswrapper[5050]: I1123 14:42:08.788744 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/7f65245a-e7ea-4b29-9534-5dbe5a7ee271-host\") pod \"node-ca-7cjg9\" (UID: \"7f65245a-e7ea-4b29-9534-5dbe5a7ee271\") " pod="openshift-image-registry/node-ca-7cjg9" Nov 23 14:42:08 crc kubenswrapper[5050]: I1123 14:42:08.788801 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/7f65245a-e7ea-4b29-9534-5dbe5a7ee271-host\") pod \"node-ca-7cjg9\" (UID: \"7f65245a-e7ea-4b29-9534-5dbe5a7ee271\") " pod="openshift-image-registry/node-ca-7cjg9" Nov 23 14:42:08 crc kubenswrapper[5050]: I1123 14:42:08.789481 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-km979" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1d7b7de2-b361-44b9-ba10-5d4f889abc9b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://32b4eece06b72ca080adc2e1fab7f1db357c926b47433f2d374afaa45dad6b38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"}
,{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-km979\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:08Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:08 crc kubenswrapper[5050]: I1123 14:42:08.789693 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serviceca\" (UniqueName: 
\"kubernetes.io/configmap/7f65245a-e7ea-4b29-9534-5dbe5a7ee271-serviceca\") pod \"node-ca-7cjg9\" (UID: \"7f65245a-e7ea-4b29-9534-5dbe5a7ee271\") " pod="openshift-image-registry/node-ca-7cjg9" Nov 23 14:42:08 crc kubenswrapper[5050]: I1123 14:42:08.806009 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8861091e-df98-4ea6-a853-e76e7dec48c5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://374a569595342d2e7684dc020e82bd80b35965aff751215c6c926005ea040798\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://63b17cb181aa6b22312393ce35127c8e06ae80cd1feaaa001d7cb34cdc111456\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3e232fb0da11b5caaec81c7b6ae7185d121508d99fce67c77e1d5d52289dad8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-1
1-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7b8be59b2e91f123c3d88b93237121402f84e9f54aeeaa0fae57baaa6238140\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:41:45Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:08Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:08 crc kubenswrapper[5050]: I1123 14:42:08.810379 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-586ds\" (UniqueName: \"kubernetes.io/projected/7f65245a-e7ea-4b29-9534-5dbe5a7ee271-kube-api-access-586ds\") pod \"node-ca-7cjg9\" (UID: \"7f65245a-e7ea-4b29-9534-5dbe5a7ee271\") " pod="openshift-image-registry/node-ca-7cjg9" Nov 23 14:42:08 crc kubenswrapper[5050]: I1123 14:42:08.822615 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://719b29f3f0e7ff3aa3d105c9f4d9fb8d180d13494fcef94b204dece23c0f763d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:08Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:08 crc kubenswrapper[5050]: I1123 14:42:08.835375 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:08Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:08 crc kubenswrapper[5050]: I1123 14:42:08.849432 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-qvjn2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"abdac21e-18fc-460d-bd3b-73bed66b8ab9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d239bbc61595516c6e76763dd66ee795750adb765c56bb5d9c967c1e5897fcf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cn
i/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bggpq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-qvjn2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:08Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:08 crc kubenswrapper[5050]: I1123 14:42:08.860530 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7cjg9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7f65245a-e7ea-4b29-9534-5dbe5a7ee271\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"message\\\":\\\"containers with unready status: 
[node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-586ds\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:08Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7cjg9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:08Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:08 crc kubenswrapper[5050]: I1123 14:42:08.873919 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1d998909-9470-47ef-87e8-b34f0473682f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5bbc40aad4e1fff72cfeaf901bbe2edde01b909690cec06e0828aee8a4c93e8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jx4bd\\\",\\\"rea
dOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b97978773311507f492064330c0ebbaf7862255a02347599c52ce6bbd13d8922\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jx4bd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-hlrlq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:08Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:08 crc kubenswrapper[5050]: I1123 14:42:08.890377 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-km979" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1d7b7de2-b361-44b9-ba10-5d4f889abc9b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://32b4eece06b72ca080adc2e1fab7f1db357c926b47433f2d374afaa45dad6b38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://32b4eece06b72ca080adc2e1fab7f1db357c926b47433f2d374afaa45dad6b38\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reaso
n\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-km979\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:08Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:08 crc 
kubenswrapper[5050]: I1123 14:42:08.904565 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8861091e-df98-4ea6-a853-e76e7dec48c5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://374a569595342d2e7684dc020e82bd80b35965aff751215c6c926005ea040798\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://63b17cb181aa6b22312393ce35127c8e06ae80cd1feaaa001d7cb34cdc111456\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3e232fb0da11b5caaec81c7b6ae7185d121508d99fce67c77e1d5d52289dad8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\
":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7b8be59b2e91f123c3d88b93237121402f84e9f54aeeaa0fae57baaa6238140\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:41:45Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:08Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:08 crc kubenswrapper[5050]: I1123 14:42:08.921935 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://719b29f3f0e7ff3aa3d105c9f4d9fb8d180d13494fcef94b204dece23c0f763d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:08Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:08 crc kubenswrapper[5050]: I1123 14:42:08.936892 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac91134c1368426cb845f20e6c777b0b613d93b1761d097051a7e63dae4203a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ef730e87b3ef157558a92f93526e4dec23e71b48f5cb08a30242f143182a5d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:08Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:08 crc kubenswrapper[5050]: I1123 14:42:08.947909 5050 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 23 14:42:08 crc kubenswrapper[5050]: I1123 14:42:08.949051 5050 scope.go:117] "RemoveContainer" containerID="10cf72fab955e4ec81917127043d9e4a2f9d427a03edc22120ab2ae711fdc5a4" Nov 23 14:42:08 crc kubenswrapper[5050]: E1123 14:42:08.949374 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" Nov 23 14:42:08 crc kubenswrapper[5050]: I1123 14:42:08.954434 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0a2166ba-b7f4-4eb1-95bc-4f44d90e72ea\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://358800bef7b7e16e58003aa5e2411212334a5b1a33c80d223eb7d40ad13867f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://456093b33927a232192a5fa7ee1a4228e9f37da58e5e09cb33018ae6f5b14b2c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ccfc14c36ecee0d8502415582be6053b096ea77504bf5de67f77bf6f74c0936b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://10cf72fab955e4ec81917127043d9e4a2f9d427a03edc22120ab2ae711fdc5a4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://10cf72fab955e4ec81917127043d9e4a2f9d427a03edc22120ab2ae711fdc5a4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"ed_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed 
loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1763908925\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1763908925\\\\\\\\\\\\\\\" (2025-11-23 13:42:05 +0000 UTC to 2026-11-23 13:42:05 +0000 UTC (now=2025-11-23 14:42:05.669138051 +0000 UTC))\\\\\\\"\\\\nI1123 14:42:05.669209 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1123 14:42:05.669265 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1123 14:42:05.668647 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1123 14:42:05.669361 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1123 14:42:05.669413 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1123 14:42:05.668681 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3465722692/tls.crt::/tmp/serving-cert-3465722692/tls.key\\\\\\\"\\\\nI1123 14:42:05.669629 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1123 14:42:05.669660 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1123 14:42:05.668685 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1123 14:42:05.669884 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1123 14:42:05.668719 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1123 14:42:05.669899 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1123 14:42:05.668776 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-23T14:41:59Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://beaf638703b4b64fabd10eb14bad058dc0dae74261a462e7f2fddd8aa3811cf7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a63464a6e2f79d8e5a7e0eed9f72b684e0071cad60d3a801754762be4a2735c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a63464a6e2f79d8e5a7e0eed9f72b684e0071cad60d3a801754762be4a2735c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:41:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:41:45Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:08Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:08 crc kubenswrapper[5050]: I1123 14:42:08.971921 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:08Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:09 crc kubenswrapper[5050]: I1123 14:42:09.002038 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6573c043-542c-47ae-a2ba-f70b8baf60c2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c68e73aa649285bedc9716826bdf5e41580614fba6de910e2dc1fa5bb524dcc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6c68e73aa649285bedc9716826bdf5e41580614fba6de910e2dc1fa5bb524dcc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-j8fzz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:08Z 
is after 2025-08-24T17:21:41Z" Nov 23 14:42:09 crc kubenswrapper[5050]: I1123 14:42:09.014162 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:09Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:09 crc kubenswrapper[5050]: I1123 14:42:09.027363 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ced068547f7e7c0807e996ab8500b30cda68a4c31363b415f66516b883221972\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:09Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:09 crc kubenswrapper[5050]: I1123 14:42:09.029422 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/node-ca-7cjg9" Nov 23 14:42:09 crc kubenswrapper[5050]: I1123 14:42:09.039524 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-7btqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"963992d0-f69e-4327-9789-4571451c1838\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c4b1570d42f2b86c042b9abcb595bf36b35789f24d12573f94503c7383849647\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8vphk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:05Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-7btqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:09Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:09 crc kubenswrapper[5050]: W1123 14:42:09.051239 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7f65245a_e7ea_4b29_9534_5dbe5a7ee271.slice/crio-bdbfec0d4d4b923b8b8794115bf391505af46635ad8bffa67e4c811afe879d34 WatchSource:0}: Error finding container bdbfec0d4d4b923b8b8794115bf391505af46635ad8bffa67e4c811afe879d34: Status 404 returned error can't find the container with id bdbfec0d4d4b923b8b8794115bf391505af46635ad8bffa67e4c811afe879d34 Nov 23 14:42:09 crc kubenswrapper[5050]: I1123 14:42:09.193380 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 23 14:42:09 crc kubenswrapper[5050]: I1123 14:42:09.193514 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 23 14:42:09 crc kubenswrapper[5050]: E1123 14:42:09.193675 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-23 14:42:13.193620941 +0000 UTC m=+28.360617476 (durationBeforeRetry 4s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 23 14:42:09 crc kubenswrapper[5050]: I1123 14:42:09.193961 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 23 14:42:09 crc kubenswrapper[5050]: E1123 14:42:09.193844 5050 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 23 14:42:09 crc kubenswrapper[5050]: E1123 14:42:09.194059 5050 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 23 14:42:09 crc kubenswrapper[5050]: E1123 14:42:09.194069 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-23 14:42:13.194047592 +0000 UTC m=+28.361044287 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 23 14:42:09 crc kubenswrapper[5050]: E1123 14:42:09.194118 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-23 14:42:13.194106834 +0000 UTC m=+28.361103319 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 23 14:42:09 crc kubenswrapper[5050]: I1123 14:42:09.295203 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 23 14:42:09 crc kubenswrapper[5050]: I1123 14:42:09.295293 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 23 14:42:09 crc kubenswrapper[5050]: E1123 14:42:09.295420 5050 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 23 14:42:09 crc kubenswrapper[5050]: E1123 14:42:09.295460 5050 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 23 14:42:09 crc kubenswrapper[5050]: E1123 14:42:09.295479 5050 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 23 14:42:09 crc kubenswrapper[5050]: E1123 14:42:09.295481 5050 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 23 14:42:09 crc kubenswrapper[5050]: E1123 14:42:09.295496 5050 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 23 14:42:09 crc kubenswrapper[5050]: E1123 14:42:09.295501 5050 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 23 14:42:09 crc kubenswrapper[5050]: E1123 14:42:09.295567 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-23 14:42:13.295541966 +0000 UTC m=+28.462538641 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 23 14:42:09 crc kubenswrapper[5050]: E1123 14:42:09.295593 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-23 14:42:13.295584367 +0000 UTC m=+28.462581052 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 23 14:42:09 crc kubenswrapper[5050]: I1123 14:42:09.547907 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 23 14:42:09 crc kubenswrapper[5050]: E1123 14:42:09.548073 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 23 14:42:09 crc kubenswrapper[5050]: I1123 14:42:09.753919 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-7cjg9" event={"ID":"7f65245a-e7ea-4b29-9534-5dbe5a7ee271","Type":"ContainerStarted","Data":"cf73369e7515e9d61bf8fc0b0bd940f15e49a8e84b3e181dad104bd56c2d9b2d"} Nov 23 14:42:09 crc kubenswrapper[5050]: I1123 14:42:09.754015 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-7cjg9" event={"ID":"7f65245a-e7ea-4b29-9534-5dbe5a7ee271","Type":"ContainerStarted","Data":"bdbfec0d4d4b923b8b8794115bf391505af46635ad8bffa67e4c811afe879d34"} Nov 23 14:42:09 crc kubenswrapper[5050]: I1123 14:42:09.757021 5050 generic.go:334] "Generic (PLEG): container finished" podID="1d7b7de2-b361-44b9-ba10-5d4f889abc9b" containerID="86867dd85be2aec28e680e55a6fb723964f26082eff07f656c00bd79cd2d3cd2" exitCode=0 Nov 23 14:42:09 crc kubenswrapper[5050]: I1123 14:42:09.757129 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-km979" event={"ID":"1d7b7de2-b361-44b9-ba10-5d4f889abc9b","Type":"ContainerDied","Data":"86867dd85be2aec28e680e55a6fb723964f26082eff07f656c00bd79cd2d3cd2"} Nov 23 14:42:09 crc kubenswrapper[5050]: I1123 14:42:09.763380 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" event={"ID":"6573c043-542c-47ae-a2ba-f70b8baf60c2","Type":"ContainerStarted","Data":"d9de298aa47ed7038c4f4ed742abb7a13f112d7f417637eb11d31a1f1accf0b1"} Nov 23 14:42:09 crc kubenswrapper[5050]: I1123 14:42:09.781393 5050 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:09Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:09 crc kubenswrapper[5050]: I1123 14:42:09.798970 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-qvjn2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"abdac21e-18fc-460d-bd3b-73bed66b8ab9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d239bbc61595516c6e76763dd66ee795750adb765c56bb5d9c967c1e5897fcf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bggpq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-qvjn2\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:09Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:09 crc kubenswrapper[5050]: I1123 14:42:09.815269 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7cjg9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7f65245a-e7ea-4b29-9534-5dbe5a7ee271\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf73369e7515e9d61bf8fc0b0bd940f15e49a8e84b3e181dad104bd56c2d9b2d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-586ds\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:08Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7cjg9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:09Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:09 crc kubenswrapper[5050]: I1123 14:42:09.831044 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8861091e-df98-4ea6-a853-e76e7dec48c5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://374a569595342d2e7684dc020e82bd80b35965aff751215c6c926005ea040798\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://63b17cb181aa6b22312393ce35127c8e06ae80cd1feaaa001d7cb34cdc111456\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3e232fb0da11b5caaec81c7b6ae7185d121508d99fce67c77e1d5d52289dad8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7b8be59b2e91f123c3d88b93237121402f84e9f54aeeaa0fae57baaa6238140\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:41:45Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:09Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:09 crc kubenswrapper[5050]: I1123 14:42:09.847846 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://719b29f3f0e7ff3aa3d105c9f4d9fb8d180d13494fcef94b204dece23c0f763d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:09Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:09 crc kubenswrapper[5050]: I1123 14:42:09.862813 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac91134c1368426cb845f20e6c777b0b613d93b1761d097051a7e63dae4203a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ef730e87b3ef157558a92f93526e4dec23e71b48f5cb08a30242f143182a5d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:09Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:09 crc kubenswrapper[5050]: I1123 14:42:09.878096 5050 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1d998909-9470-47ef-87e8-b34f0473682f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5bbc40aad4e1fff72cfeaf901bbe2edde01b909690cec06e0828aee8a4c93e8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jx4bd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b97978773311507f492064330c0ebbaf7862255a02347599c52ce6bbd13d8922\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jx4bd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-hlrlq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:09Z is after 2025-08-24T17:21:41Z" Nov 23 
14:42:09 crc kubenswrapper[5050]: I1123 14:42:09.898948 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-km979" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1d7b7de2-b361-44b9-ba10-5d4f889abc9b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://32b4eece06b72ca080adc2e1fab7f1db357c926b47433f2d374afaa45dad6b38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://32b4eece06b72ca080adc2e1fab7f1db357c926b47433f2d374afaa45dad6b38\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\
\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\
\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-km979\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:09Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:09 crc kubenswrapper[5050]: I1123 14:42:09.916679 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0a2166ba-b7f4-4eb1-95bc-4f44d90e72ea\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://358800bef7b7e16e58003aa5e2411212334a5b1a33c80d223eb7d40ad13867f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://456093b33927a232192a5fa7ee1a4228e9f37da58e5e09cb33018ae6f5b14b2c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ccfc14c36ecee0d8502415582be6053b096ea77504bf5de67f77bf6f74c0936b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://10cf72fab955e4ec81917127043d9e4a2f9d427a03edc22120ab2ae711fdc5a4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://10cf72fab955e4ec81917127043d9e4a2f9d427a03edc22120ab2ae711fdc5a4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"ed_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed 
loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1763908925\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1763908925\\\\\\\\\\\\\\\" (2025-11-23 13:42:05 +0000 UTC to 2026-11-23 13:42:05 +0000 UTC (now=2025-11-23 14:42:05.669138051 +0000 UTC))\\\\\\\"\\\\nI1123 14:42:05.669209 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1123 14:42:05.669265 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1123 14:42:05.668647 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1123 14:42:05.669361 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1123 14:42:05.669413 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1123 14:42:05.668681 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3465722692/tls.crt::/tmp/serving-cert-3465722692/tls.key\\\\\\\"\\\\nI1123 14:42:05.669629 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1123 14:42:05.669660 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1123 14:42:05.668685 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1123 14:42:05.669884 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1123 14:42:05.668719 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1123 14:42:05.669899 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1123 14:42:05.668776 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-23T14:41:59Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://beaf638703b4b64fabd10eb14bad058dc0dae74261a462e7f2fddd8aa3811cf7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a63464a6e2f79d8e5a7e0eed9f72b684e0071cad60d3a801754762be4a2735c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a63464a6e2f79d8e5a7e0eed9f72b684e0071cad60d3a801754762be4a2735c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:41:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:41:45Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:09Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:09 crc kubenswrapper[5050]: I1123 14:42:09.929486 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:09Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:09 crc kubenswrapper[5050]: I1123 14:42:09.947948 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:09Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:09 crc kubenswrapper[5050]: I1123 14:42:09.965352 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ced068547f7e7c0807e996ab8500b30cda68a4c31363b415f66516b883221972\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:09Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:09 crc kubenswrapper[5050]: I1123 14:42:09.980249 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-7btqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"963992d0-f69e-4327-9789-4571451c1838\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c4b1570d42f2b86c042b9abcb595bf36b35789f24d12573f94503c7383849647\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8vphk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:05Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-7btqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:09Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:10 crc kubenswrapper[5050]: I1123 14:42:10.002651 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6573c043-542c-47ae-a2ba-f70b8baf60c2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":
\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"rea
dOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c68e73aa649285bedc9716826bdf5e41580614fba6de910e2dc1fa5bb524dcc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6c68e73aa649285bedc9716826bdf5e41580614fba6de910e2dc1fa5bb524dcc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06
Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-j8fzz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:10Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:10 crc kubenswrapper[5050]: I1123 14:42:10.019888 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-km979" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1d7b7de2-b361-44b9-ba10-5d4f889abc9b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://32b4eece06b72ca080adc2e1fab7f1db357c926b47433f2d374afaa45dad6b38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://32b4eece06b72ca080adc2e1fab7f1db357c926b47433f2d374afaa45dad6b38\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86867dd85be2aec28e680e55a6fb723964f26082eff07f656c00bd79cd2d3cd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://86867dd85be2aec28e680e55a6fb723964f26082eff07f656c00bd79cd2d3cd2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-
23T14:42:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-km979\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:10Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:10 crc kubenswrapper[5050]: I1123 14:42:10.034312 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8861091e-df98-4ea6-a853-e76e7dec48c5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://374a569595342d2e7684dc020e82bd80b35965aff751215c6c926005ea040798\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://63b17cb181aa6b22312393ce35127c8e06ae80cd1feaaa001d7cb34cdc111456\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3e232fb0da11b5caaec81c7b6ae7185d121508d99fce67c77e1d5d52289dad8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf
5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7b8be59b2e91f123c3d88b93237121402f84e9f54aeeaa0fae57baaa6238140\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:41:45Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:10Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:10 crc kubenswrapper[5050]: I1123 14:42:10.053797 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://719b29f3f0e7ff3aa3d105c9f4d9fb8d180d13494fcef94b204dece23c0f763d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:10Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:10 crc kubenswrapper[5050]: I1123 14:42:10.067697 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac91134c1368426cb845f20e6c777b0b613d93b1761d097051a7e63dae4203a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ef730e87b3ef157558a92f93526e4dec23e71b48f5cb08a30242f143182a5d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:10Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:10 crc kubenswrapper[5050]: I1123 14:42:10.078844 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1d998909-9470-47ef-87e8-b34f0473682f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5bbc40aad4e1fff72cfeaf901bbe2edde01b909690cec06e0828aee8a4c93e8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jx4bd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b97978773311507f492064330c0ebbaf7862255a02347599c52ce6bbd13d8922\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jx4bd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-hlrlq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:10Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:10 crc kubenswrapper[5050]: I1123 14:42:10.096132 5050 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0a2166ba-b7f4-4eb1-95bc-4f44d90e72ea\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://358800bef7b7e16e58003aa5e2411212334a5b1a33c80d223eb7d40ad13867f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://456093b33927a232192a5fa7ee1a4228e9f37da58e5e09cb33018ae6f5b14b2c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ccfc14c36ecee0d8502415582be6053b096ea77504bf5de67f77bf6f74c0936b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}},\\\"volumeMounts\\\":[{\\\
"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://10cf72fab955e4ec81917127043d9e4a2f9d427a03edc22120ab2ae711fdc5a4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://10cf72fab955e4ec81917127043d9e4a2f9d427a03edc22120ab2ae711fdc5a4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"ed_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1763908925\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1763908925\\\\\\\\\\\\\\\" (2025-11-23 13:42:05 +0000 UTC to 2026-11-23 13:42:05 +0000 UTC (now=2025-11-23 14:42:05.669138051 +0000 UTC))\\\\\\\"\\\\nI1123 14:42:05.669209 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1123 14:42:05.669265 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1123 14:42:05.668647 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1123 14:42:05.669361 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1123 14:42:05.669413 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1123 14:42:05.668681 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3465722692/tls.crt::/tmp/serving-cert-3465722692/tls.key\\\\\\\"\\\\nI1123 14:42:05.669629 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1123 14:42:05.669660 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1123 14:42:05.668685 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1123 14:42:05.669884 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1123 14:42:05.668719 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1123 14:42:05.669899 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1123 14:42:05.668776 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-23T14:41:59Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://beaf638703b4b64fabd10eb14bad058dc0dae74261a462e7f2fddd8aa3811cf7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a63464a6e2f79d8e5a7e0eed9f72b684e0071cad60d3a801754762be4a2735c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a63464a6e2f79d8e5a7e0eed9f72b684e0071cad60d3a801754762be4a2735c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:41:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:41:45Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:10Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:10 crc kubenswrapper[5050]: I1123 14:42:10.110231 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:10Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:10 crc kubenswrapper[5050]: I1123 14:42:10.121897 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:10Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:10 crc kubenswrapper[5050]: I1123 14:42:10.133330 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ced068547f7e7c0807e996ab8500b30cda68a4c31363b415f66516b883221972\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:10Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:10 crc kubenswrapper[5050]: I1123 14:42:10.146192 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-7btqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"963992d0-f69e-4327-9789-4571451c1838\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c4b1570d42f2b86c042b9abcb595bf36b35789f24d12573f94503c7383849647\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8vphk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:05Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-7btqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:10Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:10 crc kubenswrapper[5050]: I1123 14:42:10.164475 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6573c043-542c-47ae-a2ba-f70b8baf60c2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":
\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"rea
dOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c68e73aa649285bedc9716826bdf5e41580614fba6de910e2dc1fa5bb524dcc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6c68e73aa649285bedc9716826bdf5e41580614fba6de910e2dc1fa5bb524dcc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06
Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-j8fzz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:10Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:10 crc kubenswrapper[5050]: I1123 14:42:10.183227 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:10Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:10 crc kubenswrapper[5050]: I1123 14:42:10.202302 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-qvjn2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"abdac21e-18fc-460d-bd3b-73bed66b8ab9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d239bbc61595516c6e76763dd66ee795750adb765c56bb5d9c967c1e5897fcf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bggpq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-qvjn2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:10Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:10 crc kubenswrapper[5050]: I1123 14:42:10.215708 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7cjg9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7f65245a-e7ea-4b29-9534-5dbe5a7ee271\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf73369e7515e9d61bf8fc0b0bd940f15e49a8e84b3e181dad104bd56c2d9b2d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-586ds\\\",\\\"readOnly\\\":true
,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:08Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7cjg9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:10Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:10 crc kubenswrapper[5050]: I1123 14:42:10.548147 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 23 14:42:10 crc kubenswrapper[5050]: I1123 14:42:10.548240 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 23 14:42:10 crc kubenswrapper[5050]: E1123 14:42:10.548349 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 23 14:42:10 crc kubenswrapper[5050]: E1123 14:42:10.548596 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 23 14:42:10 crc kubenswrapper[5050]: I1123 14:42:10.772088 5050 generic.go:334] "Generic (PLEG): container finished" podID="1d7b7de2-b361-44b9-ba10-5d4f889abc9b" containerID="c72b02501d9b33ae056e91b10f13e50327984b1cabcd69b5066780aa4fde2682" exitCode=0 Nov 23 14:42:10 crc kubenswrapper[5050]: I1123 14:42:10.772158 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-km979" event={"ID":"1d7b7de2-b361-44b9-ba10-5d4f889abc9b","Type":"ContainerDied","Data":"c72b02501d9b33ae056e91b10f13e50327984b1cabcd69b5066780aa4fde2682"} Nov 23 14:42:10 crc kubenswrapper[5050]: I1123 14:42:10.795248 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:10Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:11 crc kubenswrapper[5050]: I1123 14:42:11.071611 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-qvjn2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"abdac21e-18fc-460d-bd3b-73bed66b8ab9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d239bbc61595516c6e76763dd66ee795750adb765c56bb5d9c967c1e5897fcf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bggpq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-qvjn2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:11Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:11 crc kubenswrapper[5050]: I1123 14:42:11.096021 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7cjg9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7f65245a-e7ea-4b29-9534-5dbe5a7ee271\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf73369e7515e9d61bf8fc0b0bd940f15e49a8e84b3e181dad104bd56c2d9b2d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-586ds\\\",\\\"readOnly\\\":true
,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:08Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7cjg9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:11Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:11 crc kubenswrapper[5050]: I1123 14:42:11.121560 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8861091e-df98-4ea6-a853-e76e7dec48c5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://374a569595342d2e7684dc020e82bd80b35965aff751215c6c926005ea040798\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://63b17cb181aa6b22312393ce35127c8e06ae80cd1feaaa001d7cb34cdc111456\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3e232fb0da11b5caaec81c7b6ae7185d121508d99fce67c77e1d5d52289dad8\\\",\\\"image\\\":\\\"quay.io/crcont/openshif
t-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7b8be59b2e91f123c3d88b93237121402f84e9f54aeeaa0fae57baaa6238140\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:41:45Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:11Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:11 crc kubenswrapper[5050]: I1123 14:42:11.143718 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://719b29f3f0e7ff3aa3d105c9f4d9fb8d180d13494fcef94b204dece23c0f763d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:11Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:11 crc kubenswrapper[5050]: I1123 14:42:11.165120 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac91134c1368426cb845f20e6c777b0b613d93b1761d097051a7e63dae4203a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ef730e87b3ef157558a92f93526e4dec23e71b48f5cb08a30242f143182a5d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:11Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:11 crc kubenswrapper[5050]: I1123 14:42:11.180724 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1d998909-9470-47ef-87e8-b34f0473682f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5bbc40aad4e1fff72cfeaf901bbe2edde01b909690cec06e0828aee8a4c93e8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jx4bd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b97978773311507f492064330c0ebbaf7862255a02347599c52ce6bbd13d8922\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jx4bd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-hlrlq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:11Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:11 crc kubenswrapper[5050]: I1123 14:42:11.204597 5050 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-km979" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1d7b7de2-b361-44b9-ba10-5d4f889abc9b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://32b4eece06b72ca080adc2e1fab7f1db357c926b47433f2d374afaa45dad6b38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://32b4eece06b72ca080adc2e1fab7f1db357c926b47433f2d374afaa45dad6b38\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\
\\"cri-o://86867dd85be2aec28e680e55a6fb723964f26082eff07f656c00bd79cd2d3cd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://86867dd85be2aec28e680e55a6fb723964f26082eff07f656c00bd79cd2d3cd2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c72b02501d9b33ae056e91b10f13e50327984b1cabcd69b5066780aa4fde2682\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c72b02501d9b33ae056e91b10f13e50327984b1cabcd69b5066780aa4fde2682\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\
\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-km979\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:11Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:11 crc kubenswrapper[5050]: I1123 14:42:11.227345 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0a2166ba-b7f4-4eb1-95bc-4f44d90e72ea\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://358800bef7b7e16e58003aa5e2411212334a5b1a33c80d223eb7d40ad13867f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://456093b33927a232192a5fa7ee1a4228e9f37da58e5e09cb33018ae6f5b14b2c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ccfc14c36ecee0d8502415582be6053b096ea77504bf5de67f77bf6f74c0936b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://10cf72fab955e4ec81917127043d9e4a2f9d427a03edc22120ab2ae711fdc5a4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://10cf72fab955e4ec81917127043d9e4a2f9d427a03edc22120ab2ae711fdc5a4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"ed_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed 
loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1763908925\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1763908925\\\\\\\\\\\\\\\" (2025-11-23 13:42:05 +0000 UTC to 2026-11-23 13:42:05 +0000 UTC (now=2025-11-23 14:42:05.669138051 +0000 UTC))\\\\\\\"\\\\nI1123 14:42:05.669209 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1123 14:42:05.669265 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1123 14:42:05.668647 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1123 14:42:05.669361 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1123 14:42:05.669413 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1123 14:42:05.668681 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3465722692/tls.crt::/tmp/serving-cert-3465722692/tls.key\\\\\\\"\\\\nI1123 14:42:05.669629 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1123 14:42:05.669660 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1123 14:42:05.668685 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1123 14:42:05.669884 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1123 14:42:05.668719 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1123 14:42:05.669899 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1123 14:42:05.668776 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-23T14:41:59Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://beaf638703b4b64fabd10eb14bad058dc0dae74261a462e7f2fddd8aa3811cf7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a63464a6e2f79d8e5a7e0eed9f72b684e0071cad60d3a801754762be4a2735c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a63464a6e2f79d8e5a7e0eed9f72b684e0071cad60d3a801754762be4a2735c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:41:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:41:45Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:11Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:11 crc kubenswrapper[5050]: I1123 14:42:11.242254 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:11Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:11 crc kubenswrapper[5050]: I1123 14:42:11.260672 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:11Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:11 crc kubenswrapper[5050]: I1123 14:42:11.277604 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ced068547f7e7c0807e996ab8500b30cda68a4c31363b415f66516b883221972\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:11Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:11 crc kubenswrapper[5050]: I1123 14:42:11.289129 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-7btqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"963992d0-f69e-4327-9789-4571451c1838\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c4b1570d42f2b86c042b9abcb595bf36b35789f24d12573f94503c7383849647\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8vphk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:05Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-7btqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:11Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:11 crc kubenswrapper[5050]: I1123 14:42:11.309805 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6573c043-542c-47ae-a2ba-f70b8baf60c2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":
\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"rea
dOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c68e73aa649285bedc9716826bdf5e41580614fba6de910e2dc1fa5bb524dcc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6c68e73aa649285bedc9716826bdf5e41580614fba6de910e2dc1fa5bb524dcc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06
Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-j8fzz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:11Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:11 crc kubenswrapper[5050]: I1123 14:42:11.487624 5050 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 23 14:42:11 crc kubenswrapper[5050]: I1123 14:42:11.489717 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:11 crc kubenswrapper[5050]: I1123 14:42:11.489760 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:11 crc kubenswrapper[5050]: I1123 14:42:11.489772 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:11 crc kubenswrapper[5050]: I1123 14:42:11.489888 5050 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 23 14:42:11 crc kubenswrapper[5050]: I1123 14:42:11.500346 5050 kubelet_node_status.go:115] "Node was previously registered" node="crc" Nov 23 14:42:11 crc kubenswrapper[5050]: I1123 14:42:11.500602 5050 kubelet_node_status.go:79] "Successfully registered node" node="crc" Nov 23 14:42:11 crc kubenswrapper[5050]: I1123 14:42:11.501473 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:11 crc kubenswrapper[5050]: I1123 14:42:11.501514 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:11 crc kubenswrapper[5050]: I1123 14:42:11.501526 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:11 crc kubenswrapper[5050]: I1123 14:42:11.501544 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:11 crc kubenswrapper[5050]: I1123 14:42:11.501559 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:11Z","lastTransitionTime":"2025-11-23T14:42:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:11 crc kubenswrapper[5050]: E1123 14:42:11.514800 5050 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:42:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:42:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:11Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:42:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:42:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:11Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bf8d1c9f-d59a-4fb5-8c40-1d0a144375f2\\\",\\\"systemUUID\\\":\\\"6e38a339-19e4-4f40-bd49-1fbe05dbb3f1\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:11Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:11 crc kubenswrapper[5050]: I1123 14:42:11.519709 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:11 crc kubenswrapper[5050]: I1123 14:42:11.519751 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 23 14:42:11 crc kubenswrapper[5050]: I1123 14:42:11.519763 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:11 crc kubenswrapper[5050]: I1123 14:42:11.519782 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:11 crc kubenswrapper[5050]: I1123 14:42:11.519795 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:11Z","lastTransitionTime":"2025-11-23T14:42:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:11 crc kubenswrapper[5050]: E1123 14:42:11.536151 5050 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:42:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:42:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:11Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:42:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:42:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:11Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bf8d1c9f-d59a-4fb5-8c40-1d0a144375f2\\\",\\\"systemUUID\\\":\\\"6e38a339-19e4-4f40-bd49-1fbe05dbb3f1\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:11Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:11 crc kubenswrapper[5050]: I1123 14:42:11.545642 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:11 crc kubenswrapper[5050]: I1123 14:42:11.545687 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 23 14:42:11 crc kubenswrapper[5050]: I1123 14:42:11.545702 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:11 crc kubenswrapper[5050]: I1123 14:42:11.545722 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:11 crc kubenswrapper[5050]: I1123 14:42:11.545739 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:11Z","lastTransitionTime":"2025-11-23T14:42:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:11 crc kubenswrapper[5050]: I1123 14:42:11.547520 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 23 14:42:11 crc kubenswrapper[5050]: E1123 14:42:11.547643 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 23 14:42:11 crc kubenswrapper[5050]: E1123 14:42:11.558911 5050 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:42:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:42:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:11Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:42:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:42:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:11Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bf8d1c9f-d59a-4fb5-8c40-1d0a144375f2\\\",\\\"systemUUID\\\":\\\"6e38a339-19e4-4f40-bd49-1fbe05dbb3f1\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:11Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:11 crc kubenswrapper[5050]: I1123 14:42:11.563706 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:11 crc kubenswrapper[5050]: I1123 14:42:11.563742 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 23 14:42:11 crc kubenswrapper[5050]: I1123 14:42:11.563754 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:11 crc kubenswrapper[5050]: I1123 14:42:11.563772 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:11 crc kubenswrapper[5050]: I1123 14:42:11.563784 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:11Z","lastTransitionTime":"2025-11-23T14:42:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:11 crc kubenswrapper[5050]: E1123 14:42:11.576784 5050 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:42:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:42:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:11Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:42:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:42:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:11Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bf8d1c9f-d59a-4fb5-8c40-1d0a144375f2\\\",\\\"systemUUID\\\":\\\"6e38a339-19e4-4f40-bd49-1fbe05dbb3f1\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:11Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:11 crc kubenswrapper[5050]: I1123 14:42:11.581432 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:11 crc kubenswrapper[5050]: I1123 14:42:11.581518 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 23 14:42:11 crc kubenswrapper[5050]: I1123 14:42:11.581544 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:11 crc kubenswrapper[5050]: I1123 14:42:11.581578 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:11 crc kubenswrapper[5050]: I1123 14:42:11.581598 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:11Z","lastTransitionTime":"2025-11-23T14:42:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:11 crc kubenswrapper[5050]: E1123 14:42:11.599312 5050 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:42:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:42:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:11Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:42:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:42:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:11Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bf8d1c9f-d59a-4fb5-8c40-1d0a144375f2\\\",\\\"systemUUID\\\":\\\"6e38a339-19e4-4f40-bd49-1fbe05dbb3f1\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:11Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:11 crc kubenswrapper[5050]: E1123 14:42:11.599429 5050 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 23 14:42:11 crc kubenswrapper[5050]: I1123 14:42:11.601313 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 23 14:42:11 crc kubenswrapper[5050]: I1123 14:42:11.601346 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:11 crc kubenswrapper[5050]: I1123 14:42:11.601356 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:11 crc kubenswrapper[5050]: I1123 14:42:11.601372 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:11 crc kubenswrapper[5050]: I1123 14:42:11.601384 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:11Z","lastTransitionTime":"2025-11-23T14:42:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:11 crc kubenswrapper[5050]: I1123 14:42:11.704221 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:11 crc kubenswrapper[5050]: I1123 14:42:11.704297 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:11 crc kubenswrapper[5050]: I1123 14:42:11.704316 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:11 crc kubenswrapper[5050]: I1123 14:42:11.704344 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:11 crc kubenswrapper[5050]: I1123 14:42:11.704366 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:11Z","lastTransitionTime":"2025-11-23T14:42:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:11 crc kubenswrapper[5050]: I1123 14:42:11.779858 5050 generic.go:334] "Generic (PLEG): container finished" podID="1d7b7de2-b361-44b9-ba10-5d4f889abc9b" containerID="ba558b2174a35efd061ce97a84b1b785916ea34d481d58cf4582a5bf29d6b24a" exitCode=0 Nov 23 14:42:11 crc kubenswrapper[5050]: I1123 14:42:11.779961 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-km979" event={"ID":"1d7b7de2-b361-44b9-ba10-5d4f889abc9b","Type":"ContainerDied","Data":"ba558b2174a35efd061ce97a84b1b785916ea34d481d58cf4582a5bf29d6b24a"} Nov 23 14:42:11 crc kubenswrapper[5050]: I1123 14:42:11.787120 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" event={"ID":"6573c043-542c-47ae-a2ba-f70b8baf60c2","Type":"ContainerStarted","Data":"7009f69906eef1a76bebe08606ec9a9b7e61ce6217caaf0b784d0943db4bd51f"} Nov 23 14:42:11 crc kubenswrapper[5050]: I1123 14:42:11.799496 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:11Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:11 crc kubenswrapper[5050]: I1123 14:42:11.807107 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:11 crc kubenswrapper[5050]: I1123 14:42:11.807177 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:11 crc kubenswrapper[5050]: I1123 14:42:11.807202 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:11 crc kubenswrapper[5050]: I1123 14:42:11.807239 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:11 crc kubenswrapper[5050]: I1123 14:42:11.807265 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:11Z","lastTransitionTime":"2025-11-23T14:42:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:11 crc kubenswrapper[5050]: I1123 14:42:11.816734 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ced068547f7e7c0807e996ab8500b30cda68a4c31363b415f66516b883221972\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:11Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:11 crc kubenswrapper[5050]: I1123 14:42:11.841221 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-7btqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"963992d0-f69e-4327-9789-4571451c1838\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c4b1570d42f2b86c042b9abcb595bf36b35789f24d12573f94503c7383849647\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8vphk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:05Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-7btqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:11Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:11 crc kubenswrapper[5050]: I1123 14:42:11.887676 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6573c043-542c-47ae-a2ba-f70b8baf60c2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":
\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"rea
dOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c68e73aa649285bedc9716826bdf5e41580614fba6de910e2dc1fa5bb524dcc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6c68e73aa649285bedc9716826bdf5e41580614fba6de910e2dc1fa5bb524dcc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06
Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-j8fzz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:11Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:11 crc kubenswrapper[5050]: I1123 14:42:11.909375 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:11Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:11 crc kubenswrapper[5050]: I1123 14:42:11.910222 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:11 crc kubenswrapper[5050]: I1123 14:42:11.910267 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:11 crc kubenswrapper[5050]: I1123 14:42:11.910276 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:11 crc kubenswrapper[5050]: I1123 14:42:11.910295 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:11 crc kubenswrapper[5050]: I1123 14:42:11.910310 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:11Z","lastTransitionTime":"2025-11-23T14:42:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:11 crc kubenswrapper[5050]: I1123 14:42:11.922329 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-qvjn2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"abdac21e-18fc-460d-bd3b-73bed66b8ab9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d239bbc61595516c6e76763dd66ee795750adb765c56bb5d9c967c1e5897fcf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bggpq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-qvjn2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:11Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:11 crc kubenswrapper[5050]: I1123 14:42:11.936880 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7cjg9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7f65245a-e7ea-4b29-9534-5dbe5a7ee271\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf73369e7515e9d61bf8fc0b0bd940f15e49a8e84b3e181dad104bd56c2d9b2d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-586ds\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:08Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7cjg9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:11Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:11 crc kubenswrapper[5050]: I1123 14:42:11.949777 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8861091e-df98-4ea6-a853-e76e7dec48c5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://374a569595342d2e7684dc020e82bd80b35965aff751215c6c926005ea040798\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://63b17cb181aa6b22312393ce35127c8e06ae80cd1feaaa001d7cb34cdc111456\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3e232fb0da11b5caaec81c7b6ae7185d121508d99fce67c77e1d5d52289dad8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7b8be59b2e91f123c3d88b93237121402f84e9f54aeeaa0fae57baaa6238140\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:41:45Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:11Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:11 crc kubenswrapper[5050]: I1123 14:42:11.965849 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://719b29f3f0e7ff3aa3d105c9f4d9fb8d180d13494fcef94b204dece23c0f763d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:11Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:11 crc kubenswrapper[5050]: I1123 14:42:11.978348 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac91134c1368426cb845f20e6c777b0b613d93b1761d097051a7e63dae4203a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ef730e87b3ef157558a92f93526e4dec23e71b48f5cb08a30242f143182a5d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:11Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:11 crc kubenswrapper[5050]: I1123 14:42:11.990547 5050 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1d998909-9470-47ef-87e8-b34f0473682f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5bbc40aad4e1fff72cfeaf901bbe2edde01b909690cec06e0828aee8a4c93e8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jx4bd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b97978773311507f492064330c0ebbaf7862255a02347599c52ce6bbd13d8922\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jx4bd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-hlrlq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:11Z is after 2025-08-24T17:21:41Z" Nov 23 
14:42:12 crc kubenswrapper[5050]: I1123 14:42:12.006397 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-km979" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1d7b7de2-b361-44b9-ba10-5d4f889abc9b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://32b4eece06b72ca080adc2e1fab7f1db357c926b47433f2d374afaa45dad6b38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://32b4eece06b72ca080adc2e1fab7f1db357c926b47433f2d374afaa45dad6b38\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\
\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86867dd85be2aec28e680e55a6fb723964f26082eff07f656c00bd79cd2d3cd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://86867dd85be2aec28e680e55a6fb723964f26082eff07f656c00bd79cd2d3cd2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c72b02501d9b33ae056e91b10f13e50327984b1cabcd69b5066780aa4fde2682\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c72b02501d9b33ae056e91b10f13e50327984b1cabcd69b5066780aa4fde2682\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba558b2174a35efd061ce97a84b1b785916ea34d481d58cf4582a5bf29d6b24a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba558b2174a35efd061ce97a84b1b785916ea34d481d58cf4582a5bf29d6b24a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:11Z\\\",\\\"reason\\\":\\\"Completed\\\",
\\\"startedAt\\\":\\\"2025-11-23T14:42:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-km979\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:12Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:12 crc kubenswrapper[5050]: I1123 14:42:12.013360 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:12 crc kubenswrapper[5050]: I1123 14:42:12.013401 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:12 crc kubenswrapper[5050]: I1123 14:42:12.013410 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:12 crc kubenswrapper[5050]: I1123 14:42:12.013458 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:12 crc kubenswrapper[5050]: I1123 14:42:12.013470 5050 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:12Z","lastTransitionTime":"2025-11-23T14:42:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:12 crc kubenswrapper[5050]: I1123 14:42:12.021127 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0a2166ba-b7f4-4eb1-95bc-4f44d90e72ea\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://358800bef7b7e16e58003aa5e2411212334a5b1a33c80d223eb7d40ad13867f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://456093b33927a232192a5fa7ee1a4228e9f37da58e5e09cb33018ae6f5b14b2c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ccfc14c36ecee0d8502415582be6053b096ea77504bf5de67f77bf6f74c0936b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apis
erver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://10cf72fab955e4ec81917127043d9e4a2f9d427a03edc22120ab2ae711fdc5a4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://10cf72fab955e4ec81917127043d9e4a2f9d427a03edc22120ab2ae711fdc5a4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"ed_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1763908925\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1763908925\\\\\\\\\\\\\\\" (2025-11-23 13:42:05 +0000 UTC to 2026-11-23 13:42:05 +0000 UTC (now=2025-11-23 14:42:05.669138051 +0000 UTC))\\\\\\\"\\\\nI1123 14:42:05.669209 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1123 14:42:05.669265 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1123 14:42:05.668647 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1123 14:42:05.669361 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1123 14:42:05.669413 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1123 14:42:05.668681 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3465722692/tls.crt::/tmp/serving-cert-3465722692/tls.key\\\\\\\"\\\\nI1123 14:42:05.669629 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1123 14:42:05.669660 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1123 14:42:05.668685 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1123 14:42:05.669884 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1123 14:42:05.668719 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1123 14:42:05.669899 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1123 14:42:05.668776 1 
cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-23T14:41:59Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://beaf638703b4b64fabd10eb14bad058dc0dae74261a462e7f2fddd8aa3811cf7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a63464a6e2f79d8e5a7e0eed9f72b684e0071cad60d3a801754762be4a2735c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a63464a6e2f79d8e5a7e0eed9f72b684e0071cad60d3a801754762be4a2735c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:41:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:41:45Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:12Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:12 crc kubenswrapper[5050]: I1123 14:42:12.033408 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:12Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:12 crc kubenswrapper[5050]: I1123 14:42:12.116633 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:12 crc kubenswrapper[5050]: I1123 14:42:12.116693 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:12 crc kubenswrapper[5050]: I1123 14:42:12.116708 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:12 crc kubenswrapper[5050]: I1123 14:42:12.116732 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:12 crc kubenswrapper[5050]: I1123 14:42:12.116747 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:12Z","lastTransitionTime":"2025-11-23T14:42:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:12 crc kubenswrapper[5050]: I1123 14:42:12.220305 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:12 crc kubenswrapper[5050]: I1123 14:42:12.220359 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:12 crc kubenswrapper[5050]: I1123 14:42:12.220369 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:12 crc kubenswrapper[5050]: I1123 14:42:12.220413 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:12 crc kubenswrapper[5050]: I1123 14:42:12.220427 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:12Z","lastTransitionTime":"2025-11-23T14:42:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:12 crc kubenswrapper[5050]: I1123 14:42:12.324353 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:12 crc kubenswrapper[5050]: I1123 14:42:12.324425 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:12 crc kubenswrapper[5050]: I1123 14:42:12.324450 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:12 crc kubenswrapper[5050]: I1123 14:42:12.324516 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:12 crc kubenswrapper[5050]: I1123 14:42:12.324537 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:12Z","lastTransitionTime":"2025-11-23T14:42:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:12 crc kubenswrapper[5050]: I1123 14:42:12.427665 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:12 crc kubenswrapper[5050]: I1123 14:42:12.427755 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:12 crc kubenswrapper[5050]: I1123 14:42:12.427782 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:12 crc kubenswrapper[5050]: I1123 14:42:12.427817 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:12 crc kubenswrapper[5050]: I1123 14:42:12.427839 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:12Z","lastTransitionTime":"2025-11-23T14:42:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:12 crc kubenswrapper[5050]: I1123 14:42:12.530733 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:12 crc kubenswrapper[5050]: I1123 14:42:12.530781 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:12 crc kubenswrapper[5050]: I1123 14:42:12.530796 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:12 crc kubenswrapper[5050]: I1123 14:42:12.530820 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:12 crc kubenswrapper[5050]: I1123 14:42:12.530834 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:12Z","lastTransitionTime":"2025-11-23T14:42:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:12 crc kubenswrapper[5050]: I1123 14:42:12.548430 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 23 14:42:12 crc kubenswrapper[5050]: I1123 14:42:12.548473 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 23 14:42:12 crc kubenswrapper[5050]: E1123 14:42:12.548669 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 23 14:42:12 crc kubenswrapper[5050]: E1123 14:42:12.548669 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 23 14:42:12 crc kubenswrapper[5050]: I1123 14:42:12.634278 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:12 crc kubenswrapper[5050]: I1123 14:42:12.634333 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:12 crc kubenswrapper[5050]: I1123 14:42:12.634347 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:12 crc kubenswrapper[5050]: I1123 14:42:12.634372 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:12 crc kubenswrapper[5050]: I1123 14:42:12.634389 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:12Z","lastTransitionTime":"2025-11-23T14:42:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:12 crc kubenswrapper[5050]: I1123 14:42:12.737314 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:12 crc kubenswrapper[5050]: I1123 14:42:12.737379 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:12 crc kubenswrapper[5050]: I1123 14:42:12.737396 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:12 crc kubenswrapper[5050]: I1123 14:42:12.737420 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:12 crc kubenswrapper[5050]: I1123 14:42:12.737437 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:12Z","lastTransitionTime":"2025-11-23T14:42:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:12 crc kubenswrapper[5050]: I1123 14:42:12.797371 5050 generic.go:334] "Generic (PLEG): container finished" podID="1d7b7de2-b361-44b9-ba10-5d4f889abc9b" containerID="c9cd2247c7d101274e0f7a09b531266720489fb8ed77f89e7ccd1ee74882508d" exitCode=0 Nov 23 14:42:12 crc kubenswrapper[5050]: I1123 14:42:12.797440 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-km979" event={"ID":"1d7b7de2-b361-44b9-ba10-5d4f889abc9b","Type":"ContainerDied","Data":"c9cd2247c7d101274e0f7a09b531266720489fb8ed77f89e7ccd1ee74882508d"} Nov 23 14:42:12 crc kubenswrapper[5050]: I1123 14:42:12.826228 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0a2166ba-b7f4-4eb1-95bc-4f44d90e72ea\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://358800bef7b7e16e58003aa5e2411212334a5b1a33c80d223eb7d40ad13867f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://456093b33927a232192a5fa7ee1a4228e9f37da58e5e09cb33018ae6f5b14b2c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kuber
netes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ccfc14c36ecee0d8502415582be6053b096ea77504bf5de67f77bf6f74c0936b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://10cf72fab955e4ec81917127043d9e4a2f9d427a03edc22120ab2ae711fdc5a4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://10cf72fab955e4ec81917127043d9e4a2f9d427a03edc22120ab2ae711fdc5a4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"ed_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1763908925\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1763908925\\\\\\\\\\\\\\\" (2025-11-23 13:42:05 +0000 UTC to 2026-11-23 13:42:05 +0000 UTC (now=2025-11-23 14:42:05.669138051 +0000 UTC))\\\\\\\"\\\\nI1123 14:42:05.669209 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1123 14:42:05.669265 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1123 14:42:05.668647 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1123 14:42:05.669361 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1123 14:42:05.669413 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1123 14:42:05.668681 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3465722692/tls.crt::/tmp/serving-cert-3465722692/tls.key\\\\\\\"\\\\nI1123 14:42:05.669629 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1123 14:42:05.669660 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1123 14:42:05.668685 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1123 14:42:05.669884 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1123 14:42:05.668719 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" 
name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1123 14:42:05.669899 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1123 14:42:05.668776 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-23T14:41:59Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://beaf638703b4b64fabd10eb14bad058dc0dae74261a462e7f2fddd8aa3811cf7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a63464a6e2f79d8e5a7e0eed9f72b684e0071cad60d3a801754762be4a2735c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a63464a6e2f79d8e5a7e0eed9f72b684e0071cad60d3a801754762be4a2735c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:41:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:41:45Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:12Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:12 crc kubenswrapper[5050]: I1123 14:42:12.840960 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:12 crc kubenswrapper[5050]: I1123 14:42:12.841044 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" 
Nov 23 14:42:12 crc kubenswrapper[5050]: I1123 14:42:12.841062 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:12 crc kubenswrapper[5050]: I1123 14:42:12.841095 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:12 crc kubenswrapper[5050]: I1123 14:42:12.841115 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:12Z","lastTransitionTime":"2025-11-23T14:42:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:12 crc kubenswrapper[5050]: I1123 14:42:12.850431 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:12Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:12 crc kubenswrapper[5050]: I1123 14:42:12.867430 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-7btqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"963992d0-f69e-4327-9789-4571451c1838\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c4b1570d42f2b86c042b9abcb595bf36b35789f24d12573f94503c7383849647\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8vphk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:05Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-7btqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-23T14:42:12Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:12 crc kubenswrapper[5050]: I1123 14:42:12.900950 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6573c043-542c-47ae-a2ba-f70b8baf60c2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\
"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\"
,\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c68e73aa649285bedc9716826bdf5e4158
0614fba6de910e2dc1fa5bb524dcc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6c68e73aa649285bedc9716826bdf5e41580614fba6de910e2dc1fa5bb524dcc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-j8fzz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:12Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:12 crc kubenswrapper[5050]: I1123 14:42:12.919668 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:12Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:12 crc kubenswrapper[5050]: I1123 14:42:12.942404 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ced068547f7e7c0807e996ab8500b30cda68a4c31363b415f66516b883221972\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:12Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:12 crc kubenswrapper[5050]: I1123 14:42:12.944813 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:12 crc kubenswrapper[5050]: I1123 14:42:12.944882 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 23 14:42:12 crc kubenswrapper[5050]: I1123 14:42:12.944948 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:12 crc kubenswrapper[5050]: I1123 14:42:12.945020 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:12 crc kubenswrapper[5050]: I1123 14:42:12.945064 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:12Z","lastTransitionTime":"2025-11-23T14:42:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:12 crc kubenswrapper[5050]: I1123 14:42:12.961833 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7cjg9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7f65245a-e7ea-4b29-9534-5dbe5a7ee271\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf73369e7515e9d61bf8fc0b0bd940f15e49a8e84b3e181dad104bd56c2d9b2d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-586ds\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:08Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7cjg9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-23T14:42:12Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:12 crc kubenswrapper[5050]: I1123 14:42:12.978816 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:12Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:12 crc kubenswrapper[5050]: I1123 14:42:12.998305 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-qvjn2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"abdac21e-18fc-460d-bd3b-73bed66b8ab9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d239bbc61595516c6e76763dd66ee795750adb765c56bb5d9c967c1e5897fcf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bggpq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-qvjn2\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:12Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:13 crc kubenswrapper[5050]: I1123 14:42:13.015850 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac91134c1368426cb845f20e6c777b0b613d93b1761d097051a7e63dae4203a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ef730e87b3ef157558a92f93526e4dec23e71b48f5cb08a30242f143182a5d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:13Z is after 
2025-08-24T17:21:41Z" Nov 23 14:42:13 crc kubenswrapper[5050]: I1123 14:42:13.035067 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1d998909-9470-47ef-87e8-b34f0473682f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5bbc40aad4e1fff72cfeaf901bbe2edde01b909690cec06e0828aee8a4c93e8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jx4bd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b97978773311507f492064330c0ebbaf7862255a02347599c52ce6bbd13d8922\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jx4bd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-hlrlq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed 
to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:13Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:13 crc kubenswrapper[5050]: I1123 14:42:13.048389 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:13 crc kubenswrapper[5050]: I1123 14:42:13.048435 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:13 crc kubenswrapper[5050]: I1123 14:42:13.048452 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:13 crc kubenswrapper[5050]: I1123 14:42:13.048522 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:13 crc kubenswrapper[5050]: I1123 14:42:13.048541 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:13Z","lastTransitionTime":"2025-11-23T14:42:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:13 crc kubenswrapper[5050]: I1123 14:42:13.053969 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-km979" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1d7b7de2-b361-44b9-ba10-5d4f889abc9b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://32b4eece06b72ca080adc2e1fab7f1db357c926b47433f2d374afaa45dad6b38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://32b4eece06b72ca080adc2e1fab7f1db357c926b47433f2d374afaa45dad6b38\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86867dd85be2aec28e680e55a6fb723964f26082eff07f656c00bd79cd2d3cd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://86867dd85be2aec28e680e55a6fb723964f26082eff07f656c00bd79cd2d3cd2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c72b02501d9b33ae056e91b10f13e50327984b1cabcd69b5066780aa4fde2682\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c72b02501d9b33ae056e91b10f13e50327984b1cabcd69b5066780aa4fde2682\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba558b2174a35efd061ce97a84b1b785916ea34d481d58cf4582a5bf29d6b24a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba558b2174a35efd061ce97a84b1b785916ea34d481d58cf4582a5bf29d6b24a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9cd2247c7d101274e0f7a09b531266720489fb8ed77f89e7ccd1ee74882508d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c9cd2247c7d101274e0f7a09b531266720489fb8ed77f89e7ccd1ee74882508d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-km979\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:13Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:13 crc kubenswrapper[5050]: I1123 14:42:13.069010 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8861091e-df98-4ea6-a853-e76e7dec48c5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://374a569595342d2e7684dc020e82bd80b35965aff751215c6c926005ea040798\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://63b17cb181aa6b22312393ce35127c8e06ae80cd1feaaa001d7cb34cdc111456\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3e232fb0da11b5caaec81c7b6ae7185d121508d99fce67c77e1d5d52289dad8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7b8be59b2e91f123c3d88b93237121402f84e9f54aeeaa0fae57baaa6238140\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:41:45Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:13Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:13 crc kubenswrapper[5050]: I1123 14:42:13.085500 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://719b29f3f0e7ff3aa3d105c9f4d9fb8d180d13494fcef94b204dece23c0f763d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:13Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:13 crc kubenswrapper[5050]: I1123 14:42:13.152814 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:13 crc kubenswrapper[5050]: I1123 14:42:13.152895 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:13 crc kubenswrapper[5050]: I1123 14:42:13.152912 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:13 crc kubenswrapper[5050]: I1123 14:42:13.152939 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:13 crc kubenswrapper[5050]: I1123 14:42:13.152959 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:13Z","lastTransitionTime":"2025-11-23T14:42:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:13 crc kubenswrapper[5050]: I1123 14:42:13.255854 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:13 crc kubenswrapper[5050]: I1123 14:42:13.255905 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:13 crc kubenswrapper[5050]: I1123 14:42:13.255919 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:13 crc kubenswrapper[5050]: I1123 14:42:13.255941 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:13 crc kubenswrapper[5050]: I1123 14:42:13.255954 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:13Z","lastTransitionTime":"2025-11-23T14:42:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:13 crc kubenswrapper[5050]: I1123 14:42:13.284787 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 23 14:42:13 crc kubenswrapper[5050]: I1123 14:42:13.285008 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 23 14:42:13 crc kubenswrapper[5050]: E1123 14:42:13.285102 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-23 14:42:21.28505068 +0000 UTC m=+36.452047175 (durationBeforeRetry 8s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 23 14:42:13 crc kubenswrapper[5050]: I1123 14:42:13.285168 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 23 14:42:13 crc kubenswrapper[5050]: E1123 14:42:13.285200 5050 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 23 14:42:13 crc kubenswrapper[5050]: E1123 14:42:13.285295 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-23 14:42:21.285263986 +0000 UTC m=+36.452260651 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 23 14:42:13 crc kubenswrapper[5050]: E1123 14:42:13.285356 5050 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 23 14:42:13 crc kubenswrapper[5050]: E1123 14:42:13.285429 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-23 14:42:21.28541519 +0000 UTC m=+36.452411685 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 23 14:42:13 crc kubenswrapper[5050]: I1123 14:42:13.358793 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:13 crc kubenswrapper[5050]: I1123 14:42:13.358864 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:13 crc kubenswrapper[5050]: I1123 14:42:13.358882 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:13 crc kubenswrapper[5050]: I1123 14:42:13.358909 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:13 crc kubenswrapper[5050]: I1123 14:42:13.358927 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:13Z","lastTransitionTime":"2025-11-23T14:42:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:13 crc kubenswrapper[5050]: I1123 14:42:13.386970 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 23 14:42:13 crc kubenswrapper[5050]: I1123 14:42:13.387028 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 23 14:42:13 crc kubenswrapper[5050]: E1123 14:42:13.387198 5050 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 23 14:42:13 crc kubenswrapper[5050]: E1123 14:42:13.387223 5050 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 23 14:42:13 crc kubenswrapper[5050]: E1123 14:42:13.387241 5050 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 23 14:42:13 crc kubenswrapper[5050]: E1123 14:42:13.387303 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-23 14:42:21.387284713 +0000 UTC m=+36.554281208 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 23 14:42:13 crc kubenswrapper[5050]: E1123 14:42:13.387932 5050 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 23 14:42:13 crc kubenswrapper[5050]: E1123 14:42:13.388009 5050 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 23 14:42:13 crc kubenswrapper[5050]: E1123 14:42:13.388038 5050 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 23 14:42:13 crc kubenswrapper[5050]: E1123 14:42:13.388149 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-23 14:42:21.388117635 +0000 UTC m=+36.555114150 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 23 14:42:13 crc kubenswrapper[5050]: I1123 14:42:13.462358 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:13 crc kubenswrapper[5050]: I1123 14:42:13.462419 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:13 crc kubenswrapper[5050]: I1123 14:42:13.462434 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:13 crc kubenswrapper[5050]: I1123 14:42:13.462462 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:13 crc kubenswrapper[5050]: I1123 14:42:13.462499 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:13Z","lastTransitionTime":"2025-11-23T14:42:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:13 crc kubenswrapper[5050]: I1123 14:42:13.548340 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 23 14:42:13 crc kubenswrapper[5050]: E1123 14:42:13.548545 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 23 14:42:13 crc kubenswrapper[5050]: I1123 14:42:13.565785 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:13 crc kubenswrapper[5050]: I1123 14:42:13.565822 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:13 crc kubenswrapper[5050]: I1123 14:42:13.565836 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:13 crc kubenswrapper[5050]: I1123 14:42:13.565853 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:13 crc kubenswrapper[5050]: I1123 14:42:13.565866 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:13Z","lastTransitionTime":"2025-11-23T14:42:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:13 crc kubenswrapper[5050]: I1123 14:42:13.669930 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:13 crc kubenswrapper[5050]: I1123 14:42:13.670007 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:13 crc kubenswrapper[5050]: I1123 14:42:13.670049 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:13 crc kubenswrapper[5050]: I1123 14:42:13.670098 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:13 crc kubenswrapper[5050]: I1123 14:42:13.670125 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:13Z","lastTransitionTime":"2025-11-23T14:42:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:13 crc kubenswrapper[5050]: I1123 14:42:13.774006 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:13 crc kubenswrapper[5050]: I1123 14:42:13.774070 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:13 crc kubenswrapper[5050]: I1123 14:42:13.774082 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:13 crc kubenswrapper[5050]: I1123 14:42:13.774104 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:13 crc kubenswrapper[5050]: I1123 14:42:13.774121 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:13Z","lastTransitionTime":"2025-11-23T14:42:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:13 crc kubenswrapper[5050]: I1123 14:42:13.806389 5050 generic.go:334] "Generic (PLEG): container finished" podID="1d7b7de2-b361-44b9-ba10-5d4f889abc9b" containerID="0873c53f2aa7003b92b7460595f79f52b70796f9e38c133da799b5c49785d857" exitCode=0 Nov 23 14:42:13 crc kubenswrapper[5050]: I1123 14:42:13.806467 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-km979" event={"ID":"1d7b7de2-b361-44b9-ba10-5d4f889abc9b","Type":"ContainerDied","Data":"0873c53f2aa7003b92b7460595f79f52b70796f9e38c133da799b5c49785d857"} Nov 23 14:42:13 crc kubenswrapper[5050]: I1123 14:42:13.815293 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" event={"ID":"6573c043-542c-47ae-a2ba-f70b8baf60c2","Type":"ContainerStarted","Data":"df06d11d5998c885ff8bd3dd1fd314bc3157a4177aafe218655e8cf5b3ea64d9"} Nov 23 14:42:13 crc kubenswrapper[5050]: I1123 14:42:13.815744 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" Nov 23 14:42:13 crc kubenswrapper[5050]: I1123 14:42:13.815806 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" Nov 23 14:42:13 crc kubenswrapper[5050]: I1123 14:42:13.834176 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:13Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:13 crc kubenswrapper[5050]: I1123 14:42:13.850520 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" Nov 23 14:42:13 crc kubenswrapper[5050]: I1123 14:42:13.852662 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" Nov 23 14:42:13 crc kubenswrapper[5050]: I1123 14:42:13.855494 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ced068547f7e7c0807e996ab8500b30cda68a4c31363b415f66516b883221972\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:13Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:13 crc kubenswrapper[5050]: I1123 14:42:13.876566 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-7btqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"963992d0-f69e-4327-9789-4571451c1838\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c4b1570d42f2b86c042b9abcb595bf36b35789f24d12573f94503c7383849647\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8vphk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:05Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-7btqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:13Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:13 crc kubenswrapper[5050]: I1123 14:42:13.877296 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:13 crc kubenswrapper[5050]: I1123 14:42:13.877343 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:13 crc kubenswrapper[5050]: I1123 14:42:13.877354 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:13 crc kubenswrapper[5050]: I1123 14:42:13.877378 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:13 crc kubenswrapper[5050]: I1123 14:42:13.877390 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:13Z","lastTransitionTime":"2025-11-23T14:42:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:13 crc kubenswrapper[5050]: I1123 14:42:13.906986 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6573c043-542c-47ae-a2ba-f70b8baf60c2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"
ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",
\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"c
ri-o://6c68e73aa649285bedc9716826bdf5e41580614fba6de910e2dc1fa5bb524dcc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6c68e73aa649285bedc9716826bdf5e41580614fba6de910e2dc1fa5bb524dcc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-j8fzz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:13Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:13 crc kubenswrapper[5050]: I1123 14:42:13.924579 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:13Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:13 crc kubenswrapper[5050]: I1123 14:42:13.939594 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-qvjn2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"abdac21e-18fc-460d-bd3b-73bed66b8ab9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d239bbc61595516c6e76763dd66ee795750adb765c56bb5d9c967c1e5897fcf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bggpq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-qvjn2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:13Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:13 crc kubenswrapper[5050]: I1123 14:42:13.950998 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7cjg9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7f65245a-e7ea-4b29-9534-5dbe5a7ee271\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf73369e7515e9d61bf8fc0b0bd940f15e49a8e84b3e181dad104bd56c2d9b2d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-586ds\\\",\\\"readOnly\\\":true
,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:08Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7cjg9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:13Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:13 crc kubenswrapper[5050]: I1123 14:42:13.967719 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-km979" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1d7b7de2-b361-44b9-ba10-5d4f889abc9b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://32b4eece06b72ca080adc2e1fab7f1db357c926b47433f2d374afaa45dad6b38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://32b4eece06b72ca080adc2e1fab7f1db357c926b47433f2d374afaa45dad6b38\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86867dd85be2aec28e680e55a6fb723964f26082eff07f656c00bd79cd2d3cd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://86867dd85be2aec28e680e55a6fb723964f26082eff07f656c00bd79cd2d3cd2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c72b02501d9b33ae056e91b10f13e50327984b1cabcd69b5066780aa4fde2682\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c72b02501d9b33ae056e91b10f13e50327984b1cabcd69b5066780aa4fde2682\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba558b2174a35efd061ce97a84b1b785916ea34d481d58cf4582a5bf29d6b24a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba558b2174a35efd061ce97a84b1b785916ea34d481d58cf4582a5bf29d6b24a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9cd2247c7d101274e0f7a09b531266720489fb8ed77f89e7ccd1ee74882508d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c9cd2247c7d101274e0f7a09b531266720489fb8ed77f89e7ccd1ee74882508d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0873c53f2aa7003b92b7460595f79f52b70796f9e38c133da799b5c49785d857\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0873c53f2aa7003b92b7460595f79f52b70796f9e38c133da799b5c49785d857\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-km979\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:13Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:13 crc kubenswrapper[5050]: I1123 14:42:13.980558 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:13 crc kubenswrapper[5050]: I1123 14:42:13.980606 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:13 crc kubenswrapper[5050]: I1123 14:42:13.980618 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:13 crc kubenswrapper[5050]: I1123 14:42:13.980639 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:13 crc kubenswrapper[5050]: I1123 14:42:13.980656 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:13Z","lastTransitionTime":"2025-11-23T14:42:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:13 crc kubenswrapper[5050]: I1123 14:42:13.982194 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8861091e-df98-4ea6-a853-e76e7dec48c5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://374a569595342d2e7684dc020e82bd80b35965aff751215c6c926005ea040798\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://63b17cb181aa6b22312393ce35127c8e06ae80cd1feaaa001d7cb34cdc111456\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3e232fb0da11b5caaec81c7b6ae7185d121508d99fce67c77e1d5d52289dad8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7b8be59b2e91f123c3d88b93237121402f84e9f54aeeaa0fae57baaa6238140\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:41:45Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:13Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:13 crc kubenswrapper[5050]: I1123 14:42:13.997415 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://719b29f3f0e7ff3aa3d105c9f4d9fb8d180d13494fcef94b204dece23c0f763d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for 
pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:13Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:14 crc kubenswrapper[5050]: I1123 14:42:14.011858 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac91134c1368426cb845f20e6c777b0b613d93b1761d097051a7e63dae4203a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ef730e87b3ef157558a92f93526e4dec23e71b48f5cb08a30242f143182a5d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:14Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:14 crc kubenswrapper[5050]: I1123 14:42:14.023604 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1d998909-9470-47ef-87e8-b34f0473682f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5bbc40aad4e1fff72cfeaf901bbe2edde01b909690cec06e0828aee8a4c93e8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jx4bd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b97978773311507f492064330c0ebbaf7862255a02347599c52ce6bbd13d8922\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jx4bd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-hlrlq\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:14Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:14 crc kubenswrapper[5050]: I1123 14:42:14.040959 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0a2166ba-b7f4-4eb1-95bc-4f44d90e72ea\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://358800bef7b7e16e58003aa5e2411212334a5b1a33c80d223eb7d40ad13867f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://456093b33927a232192a5fa7ee1a4228e9f37da58e5e09cb33018ae6f5b14b2c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ccfc14c36ecee0d8502415582be6053b096ea77504bf5de67f77bf6f74c0936b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.i
o/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://10cf72fab955e4ec81917127043d9e4a2f9d427a03edc22120ab2ae711fdc5a4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://10cf72fab955e4ec81917127043d9e4a2f9d427a03edc22120ab2ae711fdc5a4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"ed_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1763908925\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1763908925\\\\\\\\\\\\\\\" (2025-11-23 13:42:05 +0000 UTC to 2026-11-23 13:42:05 +0000 UTC (now=2025-11-23 14:42:05.669138051 +0000 UTC))\\\\\\\"\\\\nI1123 14:42:05.669209 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1123 14:42:05.669265 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1123 14:42:05.668647 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1123 14:42:05.669361 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1123 14:42:05.669413 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1123 14:42:05.668681 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3465722692/tls.crt::/tmp/serving-cert-3465722692/tls.key\\\\\\\"\\\\nI1123 14:42:05.669629 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1123 14:42:05.669660 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1123 14:42:05.668685 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1123 14:42:05.669884 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1123 14:42:05.668719 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1123 14:42:05.669899 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1123 14:42:05.668776 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-23T14:41:59Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://beaf638703b4b64fabd10eb14bad058dc0dae74261a462e7f2fddd8aa3811cf7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a63464a6e2f79d8e5a7e0eed9f72b684e0071cad60d3a801754762be4a2735c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a63464a6e2f79d8e5a7e0eed9f72b684e0071cad60d3a801754762be4a2735c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:41:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:41:45Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:14Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:14 crc kubenswrapper[5050]: I1123 14:42:14.055590 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:14Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:14 crc kubenswrapper[5050]: I1123 14:42:14.071463 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8861091e-df98-4ea6-a853-e76e7dec48c5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://374a569595342d2e7684dc020e82bd80b35965aff751215c6c926005ea040798\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://63b17cb181aa6b22312393ce35127c8e06ae80cd1feaaa001d7cb34cdc111456\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3e232fb0da11b5caaec81c7b6ae7185d121508d99fce67c77e1d5d52289dad8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7b8be59b2e91f123c3d88b93237121402f84e9f54aeeaa0fae57baaa6238140\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:41:45Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:14Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:14 crc kubenswrapper[5050]: I1123 14:42:14.084437 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:14 crc kubenswrapper[5050]: I1123 14:42:14.084538 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:14 crc kubenswrapper[5050]: I1123 14:42:14.084561 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:14 crc kubenswrapper[5050]: I1123 14:42:14.084587 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:14 crc kubenswrapper[5050]: I1123 14:42:14.084604 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:14Z","lastTransitionTime":"2025-11-23T14:42:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:14 crc kubenswrapper[5050]: I1123 14:42:14.094071 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://719b29f3f0e7ff3aa3d105c9f4d9fb8d180d13494fcef94b204dece23c0f763d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:14Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:14 crc kubenswrapper[5050]: I1123 14:42:14.110810 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac91134c1368426cb845f20e6c777b0b613d93b1761d097051a7e63dae4203a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ef730e87b3ef157558a92f93526e4dec23e71b48f5cb08a30242f143182a5d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:14Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:14 crc kubenswrapper[5050]: I1123 14:42:14.126961 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1d998909-9470-47ef-87e8-b34f0473682f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5bbc40aad4e1fff72cfeaf901bbe2edde01b909690cec06e0828aee8a4c93e8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jx4bd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b97978773311507f492064330c0ebbaf7862255a02347599c52ce6bbd13d8922\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jx4bd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-hlrlq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:14Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:14 crc kubenswrapper[5050]: I1123 14:42:14.144618 5050 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-km979" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1d7b7de2-b361-44b9-ba10-5d4f889abc9b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://32b4eece06b72ca080adc2e1fab7f1db357c926b47433f2d374afaa45dad6b38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://32b4eece06b72ca080adc2e1fab7f1db357c926b47433f2d374afaa45dad6b38\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86867dd85be2aec28e680e55a6fb723964f26082eff07f656c00bd79cd2d3cd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a168
8df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://86867dd85be2aec28e680e55a6fb723964f26082eff07f656c00bd79cd2d3cd2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c72b02501d9b33ae056e91b10f13e50327984b1cabcd69b5066780aa4fde2682\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c72b02501d9b33ae056e91b10f13e50327984b1cabcd69b5066780aa4fde2682\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba558b2174a35efd061ce97a84b1b785916ea34d481d58cf4582a5bf29d6b24a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba558b2174a35efd061ce97a84b1b785916ea34d481d58cf4582a5bf29d6b24a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"
/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9cd2247c7d101274e0f7a09b531266720489fb8ed77f89e7ccd1ee74882508d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c9cd2247c7d101274e0f7a09b531266720489fb8ed77f89e7ccd1ee74882508d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0873c53f2aa7003b92b7460595f79f52b70796f9e38c133da799b5c49785d857\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0873c53f2aa7003b92b7460595f79f52b70796f9e38c133da799b5c49785d857\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-km979\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:14Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:14 crc kubenswrapper[5050]: I1123 14:42:14.160772 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0a2166ba-b7f4-4eb1-95bc-4f44d90e72ea\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://358800bef7b7e16e58003aa5e2411212334a5b1a33c80d223eb7d40ad13867f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://456093b33927a232192a5fa7ee1a4228e9f37da58e5e09cb33018ae6f5b14b2c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ccfc14c36ecee0d8502415582be6053b096ea77504bf5de67f77bf6f74c0936b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://10cf72fab955e4ec81917127043d9e4a2f9d427a03edc22120ab2ae711fdc5a4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://10cf72fab955e4ec81917127043d9e4a2f9d427a03edc22120ab2ae711fdc5a4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"ed_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1763908925\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1763908925\\\\\\\\\\\\\\\" (2025-11-23 13:42:05 +0000 UTC to 2026-11-23 13:42:05 +0000 UTC (now=2025-11-23 14:42:05.669138051 +0000 UTC))\\\\\\\"\\\\nI1123 14:42:05.669209 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1123 14:42:05.669265 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1123 14:42:05.668647 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1123 14:42:05.669361 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1123 14:42:05.669413 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1123 14:42:05.668681 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3465722692/tls.crt::/tmp/serving-cert-3465722692/tls.key\\\\\\\"\\\\nI1123 14:42:05.669629 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1123 14:42:05.669660 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1123 14:42:05.668685 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1123 14:42:05.669884 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1123 14:42:05.668719 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1123 14:42:05.669899 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1123 14:42:05.668776 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-23T14:41:59Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://beaf638703b4b64fabd10eb14bad058dc0dae74261a462e7f2fddd8aa3811cf7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a63464a6e2f79d8e5a7e0eed9f72b684e0071cad60d3a801754762be4a2735c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a63464a6e2f79d8e5a7e0eed9f72b684e0071cad60d3a801754762be4a2735c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:41:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:41:45Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:14Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:14 crc kubenswrapper[5050]: I1123 14:42:14.173275 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:14Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:14 crc kubenswrapper[5050]: I1123 14:42:14.186286 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:14Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:14 crc kubenswrapper[5050]: I1123 14:42:14.188005 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:14 crc kubenswrapper[5050]: I1123 14:42:14.188035 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:14 crc kubenswrapper[5050]: I1123 14:42:14.188045 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:14 crc kubenswrapper[5050]: I1123 14:42:14.188062 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:14 crc kubenswrapper[5050]: I1123 14:42:14.188072 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:14Z","lastTransitionTime":"2025-11-23T14:42:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:14 crc kubenswrapper[5050]: I1123 14:42:14.199259 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ced068547f7e7c0807e996ab8500b30cda68a4c31363b415f66516b883221972\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:14Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:14 crc kubenswrapper[5050]: I1123 14:42:14.211021 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-7btqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"963992d0-f69e-4327-9789-4571451c1838\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c4b1570d42f2b86c042b9abcb595bf36b35789f24d12573f94503c7383849647\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8vphk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:05Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-7btqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:14Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:14 crc kubenswrapper[5050]: I1123 14:42:14.252447 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6573c043-542c-47ae-a2ba-f70b8baf60c2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d06dfc9e93891495a387aa8a8e24b4029173b25c953dff4d2e80dbb30f4b03e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd601391a5bb150716ee97b765dd442d4bf9929eb01dfd17fdce39f327b4698f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9de298aa47ed7038c4f4ed742abb7a13f112d7f417637eb11d31a1f1accf0b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfa445fb3677a09126d124eea1348466a338713c6728a7ef138d9ffa43bec9eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://af3da6e6807f5d42cb8607efbabab0c5d472ddb5c02dd1c33f9f0f3ea8eba9c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://34b2a56f4af5df28c458b7f7d4588f81564c734a117bbf4d08e1fccfa43fd387\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://df06d11d5998c885ff8bd3dd1fd314bc3157a4177aafe218655e8cf5b3ea64d9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7009f69906eef1a76bebe08606ec9a9b7e61ce6217caaf0b784d0943db4bd51f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPat
h\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c68e73aa649285bedc9716826bdf5e41580614fba6de910e2dc1fa5bb524dcc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6c68e73aa649285bedc9716826bdf5e41580614fba6de910e2dc1fa5bb524dcc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-j8fzz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:14Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:14 crc kubenswrapper[5050]: I1123 14:42:14.268233 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not 
be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:14Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:14 crc kubenswrapper[5050]: I1123 14:42:14.288741 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-qvjn2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"abdac21e-18fc-460d-bd3b-73bed66b8ab9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d239bbc61595516c6e76763dd66ee795750adb765c56bb5d9c967c1e5897fcf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"
/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bggpq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-qvjn2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:14Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:14 crc kubenswrapper[5050]: I1123 14:42:14.290654 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:14 crc kubenswrapper[5050]: I1123 14:42:14.290692 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:14 crc kubenswrapper[5050]: I1123 14:42:14.290747 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:14 crc kubenswrapper[5050]: I1123 14:42:14.290769 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:14 crc kubenswrapper[5050]: I1123 14:42:14.290779 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:14Z","lastTransitionTime":"2025-11-23T14:42:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:14 crc kubenswrapper[5050]: I1123 14:42:14.304387 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7cjg9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7f65245a-e7ea-4b29-9534-5dbe5a7ee271\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf73369e7515e9d61bf8fc0b0bd940f15e49a8e84b3e181dad104bd56c2d9b2d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-586ds\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:08Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7cjg9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:14Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:14 crc kubenswrapper[5050]: I1123 14:42:14.393701 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:14 crc kubenswrapper[5050]: I1123 14:42:14.393773 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:14 crc kubenswrapper[5050]: I1123 14:42:14.393800 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:14 crc kubenswrapper[5050]: I1123 14:42:14.393837 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:14 crc kubenswrapper[5050]: I1123 14:42:14.393865 5050 setters.go:603] "Node became not 
ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:14Z","lastTransitionTime":"2025-11-23T14:42:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:14 crc kubenswrapper[5050]: I1123 14:42:14.499907 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:14 crc kubenswrapper[5050]: I1123 14:42:14.499999 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:14 crc kubenswrapper[5050]: I1123 14:42:14.500030 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:14 crc kubenswrapper[5050]: I1123 14:42:14.500079 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:14 crc kubenswrapper[5050]: I1123 14:42:14.500118 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:14Z","lastTransitionTime":"2025-11-23T14:42:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:14 crc kubenswrapper[5050]: I1123 14:42:14.548673 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 23 14:42:14 crc kubenswrapper[5050]: I1123 14:42:14.548763 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 23 14:42:14 crc kubenswrapper[5050]: E1123 14:42:14.549032 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 23 14:42:14 crc kubenswrapper[5050]: E1123 14:42:14.549083 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 23 14:42:14 crc kubenswrapper[5050]: I1123 14:42:14.602674 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:14 crc kubenswrapper[5050]: I1123 14:42:14.602722 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:14 crc kubenswrapper[5050]: I1123 14:42:14.602737 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:14 crc kubenswrapper[5050]: I1123 14:42:14.602758 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:14 crc kubenswrapper[5050]: I1123 14:42:14.602773 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:14Z","lastTransitionTime":"2025-11-23T14:42:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:14 crc kubenswrapper[5050]: I1123 14:42:14.705250 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:14 crc kubenswrapper[5050]: I1123 14:42:14.705304 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:14 crc kubenswrapper[5050]: I1123 14:42:14.705324 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:14 crc kubenswrapper[5050]: I1123 14:42:14.705351 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:14 crc kubenswrapper[5050]: I1123 14:42:14.705369 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:14Z","lastTransitionTime":"2025-11-23T14:42:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:14 crc kubenswrapper[5050]: I1123 14:42:14.808406 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:14 crc kubenswrapper[5050]: I1123 14:42:14.808467 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:14 crc kubenswrapper[5050]: I1123 14:42:14.808478 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:14 crc kubenswrapper[5050]: I1123 14:42:14.808496 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:14 crc kubenswrapper[5050]: I1123 14:42:14.808509 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:14Z","lastTransitionTime":"2025-11-23T14:42:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:14 crc kubenswrapper[5050]: I1123 14:42:14.822995 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-km979" event={"ID":"1d7b7de2-b361-44b9-ba10-5d4f889abc9b","Type":"ContainerStarted","Data":"0a024193010036bae37aa1c744cd832dced7ca4893acc76efe5c2b9db65834a8"} Nov 23 14:42:14 crc kubenswrapper[5050]: I1123 14:42:14.823109 5050 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 23 14:42:14 crc kubenswrapper[5050]: I1123 14:42:14.842334 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7cjg9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7f65245a-e7ea-4b29-9534-5dbe5a7ee271\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf73369e7515e9d61bf8fc0b0bd940f15e49a8e84b3e181dad104bd56c2d9b2d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-586ds\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:08Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7cjg9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:14Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:14 crc kubenswrapper[5050]: I1123 14:42:14.861948 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:14Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:14 crc kubenswrapper[5050]: I1123 14:42:14.877924 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-qvjn2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"abdac21e-18fc-460d-bd3b-73bed66b8ab9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d239bbc61595516c6e76763dd66ee795750adb765c56bb5d9c967c1e5897fcf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"syste
m-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bggpq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-qvjn2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:14Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:14 crc kubenswrapper[5050]: I1123 14:42:14.899839 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac91134c1368426cb845f20e6c777b0b613d93b1761d097051a7e63dae4203a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ef730e87b3ef157558a92f93526e4dec23e71b48f5cb08a30242f143182a5d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:14Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:14 crc kubenswrapper[5050]: I1123 14:42:14.911622 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:14 crc kubenswrapper[5050]: I1123 14:42:14.911683 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:14 crc kubenswrapper[5050]: I1123 14:42:14.911694 5050 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 23 14:42:14 crc kubenswrapper[5050]: I1123 14:42:14.911716 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:14 crc kubenswrapper[5050]: I1123 14:42:14.911729 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:14Z","lastTransitionTime":"2025-11-23T14:42:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:14 crc kubenswrapper[5050]: I1123 14:42:14.916541 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1d998909-9470-47ef-87e8-b34f0473682f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5bbc40aad4e1fff72cfeaf901bbe2edde01b909690cec06e0828aee8a4c93e8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jx4bd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b97978773311507f492064330c0ebbaf7862255a02347599c52ce6bbd13d8922\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\
\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jx4bd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-hlrlq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:14Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:14 crc kubenswrapper[5050]: I1123 14:42:14.937874 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-km979" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1d7b7de2-b361-44b9-ba10-5d4f889abc9b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a024193010036bae37aa1c744cd832dced7ca4893acc76efe5c2b9db65834a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://32b4eece06b72ca080adc2e1fab7f1db357c926b47433f2d374afaa45dad6b38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://32b4eece06b72ca080adc2e1fab7f1db3
57c926b47433f2d374afaa45dad6b38\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86867dd85be2aec28e680e55a6fb723964f26082eff07f656c00bd79cd2d3cd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://86867dd85be2aec28e680e55a6fb723964f26082eff07f656c00bd79cd2d3cd2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c72b02501d9b33ae056e91b10f13e50327984b1cabcd69b5066780aa4fde2682\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c72b02501d9b33ae056e91b10f13e50327984b1cabcd69b5066780aa4fde2682\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba558b2174a35efd061ce97a84b1b785916ea34d481d58cf4582a5bf29d6b24a\\\",\\\"image\\
\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba558b2174a35efd061ce97a84b1b785916ea34d481d58cf4582a5bf29d6b24a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9cd2247c7d101274e0f7a09b531266720489fb8ed77f89e7ccd1ee74882508d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c9cd2247c7d101274e0f7a09b531266720489fb8ed77f89e7ccd1ee74882508d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0873c53f2aa7003b92b7460595f79f52b70796f9e38c133da799b5c49785d857\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0873c53f2aa7003b92b7460595f79f52b70796f9e38c133da799b5c49785d857\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kub
e-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-km979\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:14Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:14 crc kubenswrapper[5050]: I1123 14:42:14.953652 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8861091e-df98-4ea6-a853-e76e7dec48c5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://374a569595342d2e7684dc020e82bd80b35965aff751215c6c926005ea040798\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://63b17cb181aa6b22312393ce35127c8e06ae80cd1feaaa001d7cb34cdc111456\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3e232fb0da11b5caaec81c7b6ae7185d121508d99fce67c77e1d5d52289dad8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-mana
ger-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7b8be59b2e91f123c3d88b93237121402f84e9f54aeeaa0fae57baaa6238140\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:41:45Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:14Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:14 crc kubenswrapper[5050]: I1123 14:42:14.977067 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://719b29f3f0e7ff3aa3d105c9f4d9fb8d180d13494fcef94b204dece23c0f763d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:14Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:14 crc kubenswrapper[5050]: I1123 14:42:14.996349 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0a2166ba-b7f4-4eb1-95bc-4f44d90e72ea\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://358800bef7b7e16e58003aa5e2411212334a5b1a33c80d223eb7d40ad13867f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://456093b33927a232192a5fa7ee1a4228e9f37da58e5e09cb33018ae6f5b14b2c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ccfc14c36ecee0d8502415582be6053b096ea77504bf5de67f77bf6f74c0936b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://10cf72fab955e4ec81917127043d9e4a2f9d427a03edc22120ab2ae711fdc5a4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://10cf72fab955e4ec81917127043d9e4a2f9d427a03edc22120ab2ae711fdc5a4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"ed_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed 
loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1763908925\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1763908925\\\\\\\\\\\\\\\" (2025-11-23 13:42:05 +0000 UTC to 2026-11-23 13:42:05 +0000 UTC (now=2025-11-23 14:42:05.669138051 +0000 UTC))\\\\\\\"\\\\nI1123 14:42:05.669209 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1123 14:42:05.669265 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1123 14:42:05.668647 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1123 14:42:05.669361 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1123 14:42:05.669413 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1123 14:42:05.668681 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3465722692/tls.crt::/tmp/serving-cert-3465722692/tls.key\\\\\\\"\\\\nI1123 14:42:05.669629 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1123 14:42:05.669660 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1123 14:42:05.668685 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1123 14:42:05.669884 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1123 14:42:05.668719 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1123 14:42:05.669899 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1123 14:42:05.668776 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-23T14:41:59Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://beaf638703b4b64fabd10eb14bad058dc0dae74261a462e7f2fddd8aa3811cf7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a63464a6e2f79d8e5a7e0eed9f72b684e0071cad60d3a801754762be4a2735c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a63464a6e2f79d8e5a7e0eed9f72b684e0071cad60d3a801754762be4a2735c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:41:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:41:45Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:14Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:15 crc kubenswrapper[5050]: I1123 14:42:15.015140 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:15 crc kubenswrapper[5050]: I1123 14:42:15.015451 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:15 crc kubenswrapper[5050]: I1123 14:42:15.015687 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:15 crc kubenswrapper[5050]: I1123 14:42:15.015835 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:15 crc kubenswrapper[5050]: I1123 14:42:15.016054 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:15Z","lastTransitionTime":"2025-11-23T14:42:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:15 crc kubenswrapper[5050]: I1123 14:42:15.019631 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:15Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:15 crc kubenswrapper[5050]: I1123 14:42:15.035925 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-7btqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"963992d0-f69e-4327-9789-4571451c1838\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c4b1570d42f2b86c042b9abcb595bf36b35789f24d12573f94503c7383849647\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8vphk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:05Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-7btqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:15Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:15 crc kubenswrapper[5050]: I1123 14:42:15.063854 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6573c043-542c-47ae-a2ba-f70b8baf60c2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d06dfc9e93891495a387aa8a8e24b4029173b25c953dff4d2e80dbb30f4b03e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd601391a5bb150716ee97b765dd442d4bf9929eb01dfd17fdce39f327b4698f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9de298aa47ed7038c4f4ed742abb7a13f112d7f417637eb11d31a1f1accf0b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfa445fb3677a09126d124eea1348466a338713c6728a7ef138d9ffa43bec9eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://af3da6e6807f5d42cb8607efbabab0c5d472ddb5c02dd1c33f9f0f3ea8eba9c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://34b2a56f4af5df28c458b7f7d4588f81564c734a117bbf4d08e1fccfa43fd387\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://df06d11d5998c885ff8bd3dd1fd314bc3157a4177aafe218655e8cf5b3ea64d9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7009f69906eef1a76bebe08606ec9a9b7e61ce6217caaf0b784d0943db4bd51f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPat
h\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c68e73aa649285bedc9716826bdf5e41580614fba6de910e2dc1fa5bb524dcc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6c68e73aa649285bedc9716826bdf5e41580614fba6de910e2dc1fa5bb524dcc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-j8fzz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:15Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:15 crc kubenswrapper[5050]: I1123 14:42:15.086059 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be 
located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:15Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:15 crc kubenswrapper[5050]: I1123 14:42:15.105142 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ced068547f7e7c0807e996ab8500b30cda68a4c31363b415f66516b883221972\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:15Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:15 crc kubenswrapper[5050]: I1123 14:42:15.119687 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:15 crc kubenswrapper[5050]: I1123 14:42:15.119721 5050 kubelet_node_status.go:724] "Recording 
event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:15 crc kubenswrapper[5050]: I1123 14:42:15.119733 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:15 crc kubenswrapper[5050]: I1123 14:42:15.119750 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:15 crc kubenswrapper[5050]: I1123 14:42:15.119763 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:15Z","lastTransitionTime":"2025-11-23T14:42:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:15 crc kubenswrapper[5050]: I1123 14:42:15.222753 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:15 crc kubenswrapper[5050]: I1123 14:42:15.222815 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:15 crc kubenswrapper[5050]: I1123 14:42:15.222833 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:15 crc kubenswrapper[5050]: I1123 14:42:15.222861 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:15 crc kubenswrapper[5050]: I1123 14:42:15.222880 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:15Z","lastTransitionTime":"2025-11-23T14:42:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:15 crc kubenswrapper[5050]: I1123 14:42:15.324933 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:15 crc kubenswrapper[5050]: I1123 14:42:15.324975 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:15 crc kubenswrapper[5050]: I1123 14:42:15.324985 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:15 crc kubenswrapper[5050]: I1123 14:42:15.325000 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:15 crc kubenswrapper[5050]: I1123 14:42:15.325008 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:15Z","lastTransitionTime":"2025-11-23T14:42:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:15 crc kubenswrapper[5050]: I1123 14:42:15.427707 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:15 crc kubenswrapper[5050]: I1123 14:42:15.427737 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:15 crc kubenswrapper[5050]: I1123 14:42:15.427745 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:15 crc kubenswrapper[5050]: I1123 14:42:15.427760 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:15 crc kubenswrapper[5050]: I1123 14:42:15.427768 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:15Z","lastTransitionTime":"2025-11-23T14:42:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:15 crc kubenswrapper[5050]: I1123 14:42:15.530940 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:15 crc kubenswrapper[5050]: I1123 14:42:15.530976 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:15 crc kubenswrapper[5050]: I1123 14:42:15.530986 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:15 crc kubenswrapper[5050]: I1123 14:42:15.531002 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:15 crc kubenswrapper[5050]: I1123 14:42:15.531012 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:15Z","lastTransitionTime":"2025-11-23T14:42:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:15 crc kubenswrapper[5050]: I1123 14:42:15.547870 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 23 14:42:15 crc kubenswrapper[5050]: E1123 14:42:15.547989 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 23 14:42:15 crc kubenswrapper[5050]: I1123 14:42:15.565672 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-qvjn2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"abdac21e-18fc-460d-bd3b-73bed66b8ab9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d239bbc61595516c6e76763dd66ee795750adb765c56bb5d9c967c1e5897fcf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bggpq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\
\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-qvjn2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:15Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:15 crc kubenswrapper[5050]: I1123 14:42:15.582147 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7cjg9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7f65245a-e7ea-4b29-9534-5dbe5a7ee271\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf73369e7515e9d61bf8fc0b0bd940f15e49a8e84b3e181dad104bd56c2d9b2d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-586ds\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:08Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7cjg9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:15Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:15 crc kubenswrapper[5050]: I1123 14:42:15.602447 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:15Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:15 crc kubenswrapper[5050]: I1123 14:42:15.629347 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://719b29f3f0e7ff3aa3d105c9f4d9fb8d180d13494fcef94b204dece23c0f763d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:15Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:15 crc kubenswrapper[5050]: I1123 14:42:15.635423 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:15 crc kubenswrapper[5050]: I1123 14:42:15.635535 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:15 crc kubenswrapper[5050]: I1123 14:42:15.635556 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:15 crc kubenswrapper[5050]: I1123 14:42:15.635587 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:15 crc kubenswrapper[5050]: I1123 14:42:15.635618 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:15Z","lastTransitionTime":"2025-11-23T14:42:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:15 crc kubenswrapper[5050]: I1123 14:42:15.649791 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac91134c1368426cb845f20e6c777b0b613d93b1761d097051a7e63dae4203a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ef730e87b3ef157558a92f93526e4dec23e71b48f5cb08a30242f143182a5d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:15Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:15 crc kubenswrapper[5050]: I1123 14:42:15.667106 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1d998909-9470-47ef-87e8-b34f0473682f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5bbc40aad4e1fff72cfeaf901bbe2edde01b909690cec06e0828aee8a4c93e8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jx4bd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b97978773311507f492064330c0ebbaf7862255a02347599c52ce6bbd13d8922\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jx4bd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-hlrlq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:15Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:15 crc kubenswrapper[5050]: I1123 14:42:15.683828 5050 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-km979" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1d7b7de2-b361-44b9-ba10-5d4f889abc9b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a024193010036bae37aa1c744cd832dced7ca4893acc76efe5c2b9db65834a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://32b4eece06b72ca080adc2e1fab7f1db357c926b47433f2d374afaa45dad6b38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://32b4eece06b72ca080adc2e1fab7f1db357c926b47433f2d374afaa45dad6b38\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86867dd85be2aec28e680e55a6fb723964f26082eff07f656c00bd79cd2d3cd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2c
c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://86867dd85be2aec28e680e55a6fb723964f26082eff07f656c00bd79cd2d3cd2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c72b02501d9b33ae056e91b10f13e50327984b1cabcd69b5066780aa4fde2682\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c72b02501d9b33ae056e91b10f13e50327984b1cabcd69b5066780aa4fde2682\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba558b2174a35efd061ce97a84b1b785916ea34d481d58cf4582a5bf29d6b24a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba558b2174a35efd061ce97a84b1b785916ea34d481d58cf4582a5bf29d6b24a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-re
lease\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9cd2247c7d101274e0f7a09b531266720489fb8ed77f89e7ccd1ee74882508d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c9cd2247c7d101274e0f7a09b531266720489fb8ed77f89e7ccd1ee74882508d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0873c53f2aa7003b92b7460595f79f52b70796f9e38c133da799b5c49785d857\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0873c53f2aa7003b92b7460595f79f52b70796f9e38c133da799b5c49785d857\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-km979\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:15Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:15 crc kubenswrapper[5050]: I1123 14:42:15.698107 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8861091e-df98-4ea6-a853-e76e7dec48c5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://374a569595342d2e7684dc020e82bd80b35965aff751215c6c926005ea040798\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://63b17cb181aa6b22312393ce35127c8e06ae80cd1feaaa001d7cb34cdc111456\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3e232fb0da11b5caaec81c7b6ae7185d121508d99fce67c77e1d5d52289dad8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7b8be59b2e91f123c3d88b93237121402f84e9f54aeeaa0fae57baaa6238140\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:41:45Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:15Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:15 crc kubenswrapper[5050]: I1123 14:42:15.715056 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0a2166ba-b7f4-4eb1-95bc-4f44d90e72ea\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://358800bef7b7e16e58003aa5e2411212334a5b1a33c80d223eb7d40ad13867f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://456093b33927a232192a5fa7ee1a4228e9f37da58e5e09cb33018ae6f5b14b2c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ccfc14c36ecee0d8502415582be6053b096ea77504bf5de67f77bf6f74c0936b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://10cf72fab955e4ec81917127043d9e4a2f9d427a03edc22120ab2ae711fdc5a4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://10cf72fab955e4ec81917127043d9e4a2f9d427a03edc22120ab2ae711fdc5a4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"ed_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed 
loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1763908925\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1763908925\\\\\\\\\\\\\\\" (2025-11-23 13:42:05 +0000 UTC to 2026-11-23 13:42:05 +0000 UTC (now=2025-11-23 14:42:05.669138051 +0000 UTC))\\\\\\\"\\\\nI1123 14:42:05.669209 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1123 14:42:05.669265 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1123 14:42:05.668647 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1123 14:42:05.669361 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1123 14:42:05.669413 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1123 14:42:05.668681 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3465722692/tls.crt::/tmp/serving-cert-3465722692/tls.key\\\\\\\"\\\\nI1123 14:42:05.669629 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1123 14:42:05.669660 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1123 14:42:05.668685 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1123 14:42:05.669884 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1123 14:42:05.668719 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1123 14:42:05.669899 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1123 14:42:05.668776 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-23T14:41:59Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://beaf638703b4b64fabd10eb14bad058dc0dae74261a462e7f2fddd8aa3811cf7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a63464a6e2f79d8e5a7e0eed9f72b684e0071cad60d3a801754762be4a2735c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a63464a6e2f79d8e5a7e0eed9f72b684e0071cad60d3a801754762be4a2735c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:41:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:41:45Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:15Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:15 crc kubenswrapper[5050]: I1123 14:42:15.728965 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:15Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:15 crc kubenswrapper[5050]: I1123 14:42:15.738387 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:15 crc kubenswrapper[5050]: I1123 14:42:15.738421 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:15 crc kubenswrapper[5050]: I1123 14:42:15.738433 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:15 crc kubenswrapper[5050]: I1123 14:42:15.738489 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:15 crc kubenswrapper[5050]: I1123 14:42:15.738503 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:15Z","lastTransitionTime":"2025-11-23T14:42:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:15 crc kubenswrapper[5050]: I1123 14:42:15.746433 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ced068547f7e7c0807e996ab8500b30cda68a4c31363b415f66516b883221972\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:15Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:15 crc kubenswrapper[5050]: I1123 14:42:15.758415 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-7btqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"963992d0-f69e-4327-9789-4571451c1838\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c4b1570d42f2b86c042b9abcb595bf36b35789f24d12573f94503c7383849647\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8vphk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:05Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-7btqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:15Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:15 crc kubenswrapper[5050]: I1123 14:42:15.788819 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6573c043-542c-47ae-a2ba-f70b8baf60c2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d06dfc9e93891495a387aa8a8e24b4029173b25c953dff4d2e80dbb30f4b03e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd601391a5bb150716ee97b765dd442d4bf9929eb01dfd17fdce39f327b4698f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9de298aa47ed7038c4f4ed742abb7a13f112d7f417637eb11d31a1f1accf0b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfa445fb3677a09126d124eea1348466a338713c6728a7ef138d9ffa43bec9eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://af3da6e6807f5d42cb8607efbabab0c5d472ddb5c02dd1c33f9f0f3ea8eba9c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://34b2a56f4af5df28c458b7f7d4588f81564c734a117bbf4d08e1fccfa43fd387\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://df06d11d5998c885ff8bd3dd1fd314bc3157a4177aafe218655e8cf5b3ea64d9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7009f69906eef1a76bebe08606ec9a9b7e61ce6217caaf0b784d0943db4bd51f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPat
h\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c68e73aa649285bedc9716826bdf5e41580614fba6de910e2dc1fa5bb524dcc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6c68e73aa649285bedc9716826bdf5e41580614fba6de910e2dc1fa5bb524dcc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-j8fzz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:15Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:15 crc kubenswrapper[5050]: I1123 14:42:15.806139 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be 
located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:15Z is after 2025-08-24T17:21:41Z"
Nov 23 14:42:15 crc kubenswrapper[5050]: I1123 14:42:15.826444 5050 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Nov 23 14:42:15 crc kubenswrapper[5050]: I1123 14:42:15.841007 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 23 14:42:15 crc kubenswrapper[5050]: I1123 14:42:15.841054 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 23 14:42:15 crc kubenswrapper[5050]: I1123 14:42:15.841064 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 23 14:42:15 crc kubenswrapper[5050]: I1123 14:42:15.841078 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 23 14:42:15 crc kubenswrapper[5050]: I1123 14:42:15.841088 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:15Z","lastTransitionTime":"2025-11-23T14:42:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 23 14:42:15 crc kubenswrapper[5050]: I1123 14:42:15.944432 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 23 14:42:15 crc kubenswrapper[5050]: I1123 14:42:15.944504 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 23 14:42:15 crc kubenswrapper[5050]: I1123 14:42:15.944517 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 23 14:42:15 crc kubenswrapper[5050]: I1123 14:42:15.944537 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 23 14:42:15 crc kubenswrapper[5050]: I1123 14:42:15.944549 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:15Z","lastTransitionTime":"2025-11-23T14:42:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 23 14:42:16 crc kubenswrapper[5050]: I1123 14:42:16.047261 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 23 14:42:16 crc kubenswrapper[5050]: I1123 14:42:16.047307 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 23 14:42:16 crc kubenswrapper[5050]: I1123 14:42:16.047318 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 23 14:42:16 crc kubenswrapper[5050]: I1123 14:42:16.047334 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 23 14:42:16 crc kubenswrapper[5050]: I1123 14:42:16.047372 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:16Z","lastTransitionTime":"2025-11-23T14:42:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 23 14:42:16 crc kubenswrapper[5050]: I1123 14:42:16.149701 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 23 14:42:16 crc kubenswrapper[5050]: I1123 14:42:16.149747 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 23 14:42:16 crc kubenswrapper[5050]: I1123 14:42:16.149760 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 23 14:42:16 crc kubenswrapper[5050]: I1123 14:42:16.149781 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 23 14:42:16 crc kubenswrapper[5050]: I1123 14:42:16.149795 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:16Z","lastTransitionTime":"2025-11-23T14:42:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 23 14:42:16 crc kubenswrapper[5050]: I1123 14:42:16.252013 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 23 14:42:16 crc kubenswrapper[5050]: I1123 14:42:16.252050 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 23 14:42:16 crc kubenswrapper[5050]: I1123 14:42:16.252060 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 23 14:42:16 crc kubenswrapper[5050]: I1123 14:42:16.252075 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 23 14:42:16 crc kubenswrapper[5050]: I1123 14:42:16.252084 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:16Z","lastTransitionTime":"2025-11-23T14:42:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 23 14:42:16 crc kubenswrapper[5050]: I1123 14:42:16.356293 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 23 14:42:16 crc kubenswrapper[5050]: I1123 14:42:16.356331 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 23 14:42:16 crc kubenswrapper[5050]: I1123 14:42:16.356340 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 23 14:42:16 crc kubenswrapper[5050]: I1123 14:42:16.356355 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 23 14:42:16 crc kubenswrapper[5050]: I1123 14:42:16.356364 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:16Z","lastTransitionTime":"2025-11-23T14:42:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 23 14:42:16 crc kubenswrapper[5050]: I1123 14:42:16.460691 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 23 14:42:16 crc kubenswrapper[5050]: I1123 14:42:16.460759 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 23 14:42:16 crc kubenswrapper[5050]: I1123 14:42:16.460772 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 23 14:42:16 crc kubenswrapper[5050]: I1123 14:42:16.460814 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 23 14:42:16 crc kubenswrapper[5050]: I1123 14:42:16.460832 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:16Z","lastTransitionTime":"2025-11-23T14:42:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 23 14:42:16 crc kubenswrapper[5050]: I1123 14:42:16.547823 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 23 14:42:16 crc kubenswrapper[5050]: I1123 14:42:16.547969 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 23 14:42:16 crc kubenswrapper[5050]: E1123 14:42:16.548153 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 23 14:42:16 crc kubenswrapper[5050]: E1123 14:42:16.548371 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 23 14:42:16 crc kubenswrapper[5050]: I1123 14:42:16.564047 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 23 14:42:16 crc kubenswrapper[5050]: I1123 14:42:16.564105 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 23 14:42:16 crc kubenswrapper[5050]: I1123 14:42:16.564117 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 23 14:42:16 crc kubenswrapper[5050]: I1123 14:42:16.564137 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 23 14:42:16 crc kubenswrapper[5050]: I1123 14:42:16.564154 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:16Z","lastTransitionTime":"2025-11-23T14:42:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 23 14:42:16 crc kubenswrapper[5050]: I1123 14:42:16.666894 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 23 14:42:16 crc kubenswrapper[5050]: I1123 14:42:16.666932 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 23 14:42:16 crc kubenswrapper[5050]: I1123 14:42:16.666941 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 23 14:42:16 crc kubenswrapper[5050]: I1123 14:42:16.666955 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 23 14:42:16 crc kubenswrapper[5050]: I1123 14:42:16.666964 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:16Z","lastTransitionTime":"2025-11-23T14:42:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 23 14:42:16 crc kubenswrapper[5050]: I1123 14:42:16.769693 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 23 14:42:16 crc kubenswrapper[5050]: I1123 14:42:16.769987 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 23 14:42:16 crc kubenswrapper[5050]: I1123 14:42:16.770095 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 23 14:42:16 crc kubenswrapper[5050]: I1123 14:42:16.770228 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 23 14:42:16 crc kubenswrapper[5050]: I1123 14:42:16.770321 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:16Z","lastTransitionTime":"2025-11-23T14:42:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 23 14:42:16 crc kubenswrapper[5050]: I1123 14:42:16.831798 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-j8fzz_6573c043-542c-47ae-a2ba-f70b8baf60c2/ovnkube-controller/0.log"
Nov 23 14:42:16 crc kubenswrapper[5050]: I1123 14:42:16.835253 5050 generic.go:334] "Generic (PLEG): container finished" podID="6573c043-542c-47ae-a2ba-f70b8baf60c2" containerID="df06d11d5998c885ff8bd3dd1fd314bc3157a4177aafe218655e8cf5b3ea64d9" exitCode=1
Nov 23 14:42:16 crc kubenswrapper[5050]: I1123 14:42:16.835304 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" event={"ID":"6573c043-542c-47ae-a2ba-f70b8baf60c2","Type":"ContainerDied","Data":"df06d11d5998c885ff8bd3dd1fd314bc3157a4177aafe218655e8cf5b3ea64d9"}
Nov 23 14:42:16 crc kubenswrapper[5050]: I1123 14:42:16.836184 5050 scope.go:117] "RemoveContainer" containerID="df06d11d5998c885ff8bd3dd1fd314bc3157a4177aafe218655e8cf5b3ea64d9"
Nov 23 14:42:16 crc kubenswrapper[5050]: I1123 14:42:16.850200 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0a2166ba-b7f4-4eb1-95bc-4f44d90e72ea\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://358800bef7b7e16e58003aa5e2411212334a5b1a33c80d223eb7d40ad13867f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://456093b33927a232192a5fa7ee1a4228e9f37da58e5e09cb33018ae6f5b14b2c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ccfc14c36ecee0d8502415582be6053b096ea77504bf5de67f77bf6f74c0936b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://10cf72fab955e4ec81917127043d9e4a2f9d427a03edc22120ab2ae711fdc5a4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://10cf72fab955e4ec81917127043d9e4a2f9d427a03edc22120ab2ae711fdc5a4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"ed_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed 
loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1763908925\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1763908925\\\\\\\\\\\\\\\" (2025-11-23 13:42:05 +0000 UTC to 2026-11-23 13:42:05 +0000 UTC (now=2025-11-23 14:42:05.669138051 +0000 UTC))\\\\\\\"\\\\nI1123 14:42:05.669209 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1123 14:42:05.669265 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1123 14:42:05.668647 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1123 14:42:05.669361 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1123 14:42:05.669413 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1123 14:42:05.668681 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3465722692/tls.crt::/tmp/serving-cert-3465722692/tls.key\\\\\\\"\\\\nI1123 14:42:05.669629 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1123 14:42:05.669660 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1123 14:42:05.668685 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1123 14:42:05.669884 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1123 14:42:05.668719 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1123 14:42:05.669899 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1123 14:42:05.668776 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-23T14:41:59Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://beaf638703b4b64fabd10eb14bad058dc0dae74261a462e7f2fddd8aa3811cf7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a63464a6e2f79d8e5a7e0eed9f72b684e0071cad60d3a801754762be4a2735c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a63464a6e2f79d8e5a7e0eed9f72b684e0071cad60d3a801754762be4a2735c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:41:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:41:45Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:16Z is after 2025-08-24T17:21:41Z"
Nov 23 14:42:16 crc kubenswrapper[5050]: I1123 14:42:16.862207 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:16Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:16 crc kubenswrapper[5050]: I1123 14:42:16.872635 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:16 crc kubenswrapper[5050]: I1123 14:42:16.872668 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:16 crc kubenswrapper[5050]: I1123 14:42:16.872677 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:16 crc kubenswrapper[5050]: I1123 14:42:16.872692 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:16 crc kubenswrapper[5050]: I1123 14:42:16.872702 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:16Z","lastTransitionTime":"2025-11-23T14:42:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:16 crc kubenswrapper[5050]: I1123 14:42:16.874800 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ced068547f7e7c0807e996ab8500b30cda68a4c31363b415f66516b883221972\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:16Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:16 crc kubenswrapper[5050]: I1123 14:42:16.885370 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-7btqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"963992d0-f69e-4327-9789-4571451c1838\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c4b1570d42f2b86c042b9abcb595bf36b35789f24d12573f94503c7383849647\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8vphk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:05Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-7btqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:16Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:16 crc kubenswrapper[5050]: I1123 14:42:16.903333 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6573c043-542c-47ae-a2ba-f70b8baf60c2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d06dfc9e93891495a387aa8a8e24b4029173b25c953dff4d2e80dbb30f4b03e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd601391a5bb150716ee97b765dd442d4bf9929eb01dfd17fdce39f327b4698f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9de298aa47ed7038c4f4ed742abb7a13f112d7f417637eb11d31a1f1accf0b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfa445fb3677a09126d124eea1348466a338713c6728a7ef138d9ffa43bec9eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://af3da6e6807f5d42cb8607efbabab0c5d472ddb5c02dd1c33f9f0f3ea8eba9c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://34b2a56f4af5df28c458b7f7d4588f81564c734a117bbf4d08e1fccfa43fd387\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://df06d11d5998c885ff8bd3dd1fd314bc3157a4177aafe218655e8cf5b3ea64d9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://df06d11d5998c885ff8bd3dd1fd314bc3157a4177aafe218655e8cf5b3ea64d9\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-23T14:42:16Z\\\",\\\"message\\\":\\\"oval\\\\nI1123 14:42:16.467386 6369 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1123 14:42:16.467393 6369 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1123 14:42:16.467423 6369 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1123 14:42:16.467460 6369 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1123 14:42:16.467464 6369 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1123 14:42:16.467460 6369 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1123 14:42:16.467511 6369 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1123 14:42:16.467567 6369 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1123 14:42:16.467610 6369 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1123 14:42:16.467658 6369 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1123 14:42:16.467674 6369 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1123 14:42:16.467691 6369 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1123 14:42:16.467704 6369 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1123 14:42:16.467719 6369 factory.go:656] Stopping watch factory\\\\nI1123 14:42:16.467736 6369 ovnkube.go:599] Stopped ovnkube\\\\nI1123 14:42:16.467764 6369 metrics.go:553] Stopping metrics server at address 
\\\\\\\"\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7009f69906eef1a76bebe08606ec9a9b7e61ce6217caaf0b784d0943db4bd51f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c68e73aa649285bedc9716826bdf5e41580614fba6de910e2dc1fa5bb524dcc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef
0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6c68e73aa649285bedc9716826bdf5e41580614fba6de910e2dc1fa5bb524dcc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-j8fzz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:16Z is after 2025-08-24T17:21:41Z"
Nov 23 14:42:16 crc kubenswrapper[5050]: I1123 14:42:16.916980 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:16Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:16 crc kubenswrapper[5050]: I1123 14:42:16.928862 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-qvjn2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"abdac21e-18fc-460d-bd3b-73bed66b8ab9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d239bbc61595516c6e76763dd66ee795750adb765c56bb5d9c967c1e5897fcf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mo
untPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bggpq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-qvjn2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:16Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:16 crc kubenswrapper[5050]: I1123 14:42:16.938990 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7cjg9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7f65245a-e7ea-4b29-9534-5dbe5a7ee271\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf73369e7515e9d61bf8fc0b0bd940f15e49a8e84b3e181dad104bd56c2d9b2d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-586ds\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.
126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:08Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7cjg9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:16Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:16 crc kubenswrapper[5050]: I1123 14:42:16.950315 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:16Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:16 crc kubenswrapper[5050]: I1123 14:42:16.963608 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://719b29f3f0e7ff3aa3d105c9f4d9fb8d180d13494fcef94b204dece23c0f763d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:16Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:16 crc kubenswrapper[5050]: I1123 14:42:16.975588 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 23 14:42:16 crc kubenswrapper[5050]: I1123 14:42:16.975632 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:16 crc kubenswrapper[5050]: I1123 14:42:16.975645 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:16 crc kubenswrapper[5050]: I1123 14:42:16.975664 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:16 crc kubenswrapper[5050]: I1123 14:42:16.975679 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:16Z","lastTransitionTime":"2025-11-23T14:42:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:16 crc kubenswrapper[5050]: I1123 14:42:16.978500 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac91134c1368426cb845f20e6c777b0b613d93b1761d097051a7e63dae4203a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ef730e87b3ef157558a92f93526e4dec23e71b48f5cb08a30242f143182a5d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"nam
e\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:16Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:16 crc kubenswrapper[5050]: I1123 14:42:16.990803 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1d998909-9470-47ef-87e8-b34f0473682f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5bbc40aad4e1fff72cfeaf901bbe2edde01b909690cec06e0828aee8a4c93e8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jx4bd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b97978773311507f492064330c0ebbaf7862255a02347599c52ce6bbd13d8922\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountP
ath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jx4bd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-hlrlq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:16Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:17 crc kubenswrapper[5050]: I1123 14:42:17.007607 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-km979" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1d7b7de2-b361-44b9-ba10-5d4f889abc9b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a024193010036bae37aa1c744cd832dced7ca4893acc76efe5c2b9db65834a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://32b4eece06b72ca080adc2e1fab7f1db357c926b47433f2d374afaa45dad6b38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminat
ed\\\":{\\\"containerID\\\":\\\"cri-o://32b4eece06b72ca080adc2e1fab7f1db357c926b47433f2d374afaa45dad6b38\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86867dd85be2aec28e680e55a6fb723964f26082eff07f656c00bd79cd2d3cd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://86867dd85be2aec28e680e55a6fb723964f26082eff07f656c00bd79cd2d3cd2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c72b02501d9b33ae056e91b10f13e50327984b1cabcd69b5066780aa4fde2682\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c72b02501d9b33ae056e91b10f13e50327984b1cabcd69b5066780aa4fde2682\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba558b2
174a35efd061ce97a84b1b785916ea34d481d58cf4582a5bf29d6b24a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba558b2174a35efd061ce97a84b1b785916ea34d481d58cf4582a5bf29d6b24a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9cd2247c7d101274e0f7a09b531266720489fb8ed77f89e7ccd1ee74882508d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c9cd2247c7d101274e0f7a09b531266720489fb8ed77f89e7ccd1ee74882508d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0873c53f2aa7003b92b7460595f79f52b70796f9e38c133da799b5c49785d857\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0873c53f2aa7003b92b7460595f79f52b70796f9e38c133da799b5c49785d857\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\
\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-km979\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:17Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:17 crc kubenswrapper[5050]: I1123 14:42:17.020704 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8861091e-df98-4ea6-a853-e76e7dec48c5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://374a569595342d2e7684dc020e82bd80b35965aff751215c6c926005ea040798\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://63b17cb181aa6b22312393ce35127c8e06ae80cd1feaaa001d7cb34cdc111456\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3e232fb0da11b5caaec81c7b6ae7185d121508d99fce67c77e1d5d52289dad8\\\",\\
\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7b8be59b2e91f123c3d88b93237121402f84e9f54aeeaa0fae57baaa6238140\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:41:45Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:17Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:17 crc kubenswrapper[5050]: I1123 14:42:17.077562 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:17 crc kubenswrapper[5050]: I1123 14:42:17.077598 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:17 crc kubenswrapper[5050]: I1123 14:42:17.077607 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:17 crc kubenswrapper[5050]: I1123 14:42:17.077620 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:17 crc kubenswrapper[5050]: I1123 14:42:17.077632 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:17Z","lastTransitionTime":"2025-11-23T14:42:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:17 crc kubenswrapper[5050]: I1123 14:42:17.181291 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:17 crc kubenswrapper[5050]: I1123 14:42:17.181342 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:17 crc kubenswrapper[5050]: I1123 14:42:17.181352 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:17 crc kubenswrapper[5050]: I1123 14:42:17.181370 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:17 crc kubenswrapper[5050]: I1123 14:42:17.181384 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:17Z","lastTransitionTime":"2025-11-23T14:42:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:17 crc kubenswrapper[5050]: I1123 14:42:17.284901 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:17 crc kubenswrapper[5050]: I1123 14:42:17.284976 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:17 crc kubenswrapper[5050]: I1123 14:42:17.284991 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:17 crc kubenswrapper[5050]: I1123 14:42:17.285017 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:17 crc kubenswrapper[5050]: I1123 14:42:17.285034 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:17Z","lastTransitionTime":"2025-11-23T14:42:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:17 crc kubenswrapper[5050]: I1123 14:42:17.388305 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:17 crc kubenswrapper[5050]: I1123 14:42:17.388375 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:17 crc kubenswrapper[5050]: I1123 14:42:17.388388 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:17 crc kubenswrapper[5050]: I1123 14:42:17.388409 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:17 crc kubenswrapper[5050]: I1123 14:42:17.388427 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:17Z","lastTransitionTime":"2025-11-23T14:42:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:17 crc kubenswrapper[5050]: I1123 14:42:17.491817 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:17 crc kubenswrapper[5050]: I1123 14:42:17.492253 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:17 crc kubenswrapper[5050]: I1123 14:42:17.492373 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:17 crc kubenswrapper[5050]: I1123 14:42:17.492511 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:17 crc kubenswrapper[5050]: I1123 14:42:17.492627 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:17Z","lastTransitionTime":"2025-11-23T14:42:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:17 crc kubenswrapper[5050]: I1123 14:42:17.548736 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 23 14:42:17 crc kubenswrapper[5050]: E1123 14:42:17.549300 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 23 14:42:17 crc kubenswrapper[5050]: I1123 14:42:17.596041 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:17 crc kubenswrapper[5050]: I1123 14:42:17.596088 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:17 crc kubenswrapper[5050]: I1123 14:42:17.596098 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:17 crc kubenswrapper[5050]: I1123 14:42:17.596114 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:17 crc kubenswrapper[5050]: I1123 14:42:17.596125 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:17Z","lastTransitionTime":"2025-11-23T14:42:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:17 crc kubenswrapper[5050]: I1123 14:42:17.699764 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:17 crc kubenswrapper[5050]: I1123 14:42:17.699843 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:17 crc kubenswrapper[5050]: I1123 14:42:17.699855 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:17 crc kubenswrapper[5050]: I1123 14:42:17.699875 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:17 crc kubenswrapper[5050]: I1123 14:42:17.699887 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:17Z","lastTransitionTime":"2025-11-23T14:42:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:17 crc kubenswrapper[5050]: I1123 14:42:17.803759 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:17 crc kubenswrapper[5050]: I1123 14:42:17.803822 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:17 crc kubenswrapper[5050]: I1123 14:42:17.803837 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:17 crc kubenswrapper[5050]: I1123 14:42:17.803863 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:17 crc kubenswrapper[5050]: I1123 14:42:17.803926 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:17Z","lastTransitionTime":"2025-11-23T14:42:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:17 crc kubenswrapper[5050]: I1123 14:42:17.843304 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-j8fzz_6573c043-542c-47ae-a2ba-f70b8baf60c2/ovnkube-controller/0.log" Nov 23 14:42:17 crc kubenswrapper[5050]: I1123 14:42:17.847196 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" event={"ID":"6573c043-542c-47ae-a2ba-f70b8baf60c2","Type":"ContainerStarted","Data":"e23fb2eb8cab90b4a0352a351bc63c590a96ec40331e9db29b2f39fe76cbf0f8"} Nov 23 14:42:17 crc kubenswrapper[5050]: I1123 14:42:17.847332 5050 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 23 14:42:17 crc kubenswrapper[5050]: I1123 14:42:17.869843 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8861091e-df98-4ea6-a853-e76e7dec48c5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://374a569595342d2e7684dc020e82bd80b35965aff751215c6c926005ea040798\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://63b17cb181aa6b22312393ce35127c8e06ae80cd1feaaa001d7cb34cdc111456\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3e232fb0da11b5caaec81c7b6ae7185d121508d99fce67c77e1d5d52289
dad8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7b8be59b2e91f123c3d88b93237121402f84e9f54aeeaa0fae57baaa6238140\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:41:45Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:17Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:17 crc kubenswrapper[5050]: I1123 14:42:17.885591 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://719b29f3f0e7ff3aa3d105c9f4d9fb8d180d13494fcef94b204dece23c0f763d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:17Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:17 crc kubenswrapper[5050]: I1123 14:42:17.900188 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac91134c1368426cb845f20e6c777b0b613d93b1761d097051a7e63dae4203a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ef730e87b3ef157558a92f93526e4dec23e71b48f5cb08a30242f143182a5d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:17Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:17 crc kubenswrapper[5050]: I1123 14:42:17.905706 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:17 crc kubenswrapper[5050]: I1123 14:42:17.905777 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:17 crc kubenswrapper[5050]: I1123 14:42:17.905793 5050 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 23 14:42:17 crc kubenswrapper[5050]: I1123 14:42:17.905815 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:17 crc kubenswrapper[5050]: I1123 14:42:17.905830 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:17Z","lastTransitionTime":"2025-11-23T14:42:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:17 crc kubenswrapper[5050]: I1123 14:42:17.914890 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1d998909-9470-47ef-87e8-b34f0473682f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5bbc40aad4e1fff72cfeaf901bbe2edde01b909690cec06e0828aee8a4c93e8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jx4bd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b97978773311507f492064330c0ebbaf7862255a02347599c52ce6bbd13d8922\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\
\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jx4bd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-hlrlq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:17Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:17 crc kubenswrapper[5050]: I1123 14:42:17.929367 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-km979" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1d7b7de2-b361-44b9-ba10-5d4f889abc9b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a024193010036bae37aa1c744cd832dced7ca4893acc76efe5c2b9db65834a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://32b4eece06b72ca080adc2e1fab7f1db357c926b47433f2d374afaa45dad6b38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://32b4eece06b72ca080adc2e1fab7f1db3
57c926b47433f2d374afaa45dad6b38\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86867dd85be2aec28e680e55a6fb723964f26082eff07f656c00bd79cd2d3cd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://86867dd85be2aec28e680e55a6fb723964f26082eff07f656c00bd79cd2d3cd2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c72b02501d9b33ae056e91b10f13e50327984b1cabcd69b5066780aa4fde2682\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c72b02501d9b33ae056e91b10f13e50327984b1cabcd69b5066780aa4fde2682\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba558b2174a35efd061ce97a84b1b785916ea34d481d58cf4582a5bf29d6b24a\\\",\\\"image\\
\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba558b2174a35efd061ce97a84b1b785916ea34d481d58cf4582a5bf29d6b24a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9cd2247c7d101274e0f7a09b531266720489fb8ed77f89e7ccd1ee74882508d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c9cd2247c7d101274e0f7a09b531266720489fb8ed77f89e7ccd1ee74882508d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0873c53f2aa7003b92b7460595f79f52b70796f9e38c133da799b5c49785d857\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0873c53f2aa7003b92b7460595f79f52b70796f9e38c133da799b5c49785d857\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kub
e-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-km979\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:17Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:17 crc kubenswrapper[5050]: I1123 14:42:17.954461 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0a2166ba-b7f4-4eb1-95bc-4f44d90e72ea\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://358800bef7b7e16e58003aa5e2411212334a5b1a33c80d223eb7d40ad13867f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://456093b33927a232192a5fa7ee1a4228e9f37da58e5e09cb33018ae6f5b14b2c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}},\\\"volumeMounts\\\":[{\\
\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ccfc14c36ecee0d8502415582be6053b096ea77504bf5de67f77bf6f74c0936b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://10cf72fab955e4ec81917127043d9e4a2f9d427a03edc22120ab2ae711fdc5a4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://10cf72fab955e4ec81917127043d9e4a2f9d427a03edc22120ab2ae711fdc5a4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"ed_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1763908925\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1763908925\\\\\\\\\\\\\\\" (2025-11-23 13:42:05 +0000 UTC to 2026-11-23 13:42:05 +0000 UTC (now=2025-11-23 14:42:05.669138051 +0000 UTC))\\\\\\\"\\\\nI1123 14:42:05.669209 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1123 14:42:05.669265 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1123 14:42:05.668647 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1123 14:42:05.669361 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1123 14:42:05.669413 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1123 14:42:05.668681 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3465722692/tls.crt::/tmp/serving-cert-3465722692/tls.key\\\\\\\"\\\\nI1123 14:42:05.669629 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1123 14:42:05.669660 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1123 14:42:05.668685 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1123 14:42:05.669884 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1123 14:42:05.668719 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" 
name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1123 14:42:05.669899 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1123 14:42:05.668776 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-23T14:41:59Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://beaf638703b4b64fabd10eb14bad058dc0dae74261a462e7f2fddd8aa3811cf7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a63464a6e2f79d8e5a7e0eed9f72b684e0071cad60d3a801754762be4a2735c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a63464a6e2f79d8e5a7e0eed9f72b684e0071cad60d3a801754762be4a2735c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:41:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:41:45Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:17Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:17 crc kubenswrapper[5050]: I1123 14:42:17.977788 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:17Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:17 crc kubenswrapper[5050]: I1123 14:42:17.998451 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:17Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:18 crc kubenswrapper[5050]: I1123 14:42:18.008275 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:18 crc kubenswrapper[5050]: I1123 14:42:18.008323 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:18 crc kubenswrapper[5050]: I1123 14:42:18.008333 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:18 crc kubenswrapper[5050]: I1123 14:42:18.008349 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:18 crc kubenswrapper[5050]: I1123 14:42:18.008362 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:18Z","lastTransitionTime":"2025-11-23T14:42:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:18 crc kubenswrapper[5050]: I1123 14:42:18.029067 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ced068547f7e7c0807e996ab8500b30cda68a4c31363b415f66516b883221972\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:18Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:18 crc kubenswrapper[5050]: I1123 14:42:18.040184 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-7btqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"963992d0-f69e-4327-9789-4571451c1838\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c4b1570d42f2b86c042b9abcb595bf36b35789f24d12573f94503c7383849647\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8vphk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:05Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-7btqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:18Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:18 crc kubenswrapper[5050]: I1123 14:42:18.057706 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6573c043-542c-47ae-a2ba-f70b8baf60c2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d06dfc9e93891495a387aa8a8e24b4029173b25c953dff4d2e80dbb30f4b03e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd601391a5bb150716ee97b765dd442d4bf9929eb01dfd17fdce39f327b4698f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9de298aa47ed7038c4f4ed742abb7a13f112d7f417637eb11d31a1f1accf0b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfa445fb3677a09126d124eea1348466a338713c6728a7ef138d9ffa43bec9eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://af3da6e6807f5d42cb8607efbabab0c5d472ddb5c02dd1c33f9f0f3ea8eba9c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://34b2a56f4af5df28c458b7f7d4588f81564c734a117bbf4d08e1fccfa43fd387\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e23fb2eb8cab90b4a0352a351bc63c590a96ec40331e9db29b2f39fe76cbf0f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://df06d11d5998c885ff8bd3dd1fd314bc3157a4177aafe218655e8cf5b3ea64d9\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-23T14:42:16Z\\\",\\\"message\\\":\\\"oval\\\\nI1123 14:42:16.467386 6369 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1123 14:42:16.467393 6369 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1123 14:42:16.467423 6369 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1123 14:42:16.467460 6369 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1123 14:42:16.467464 6369 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1123 14:42:16.467460 6369 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1123 14:42:16.467511 6369 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1123 14:42:16.467567 6369 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1123 14:42:16.467610 6369 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1123 14:42:16.467658 6369 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1123 14:42:16.467674 6369 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1123 14:42:16.467691 6369 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1123 14:42:16.467704 6369 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1123 14:42:16.467719 6369 factory.go:656] Stopping watch factory\\\\nI1123 14:42:16.467736 6369 ovnkube.go:599] Stopped ovnkube\\\\nI1123 14:42:16.467764 6369 metrics.go:553] Stopping metrics server at address 
\\\\\\\"\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:13Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7009f69906eef1a76bebe08606ec9a9b7e61ce6217caaf0b784d0943db4bd51f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\
\\"containerID\\\":\\\"cri-o://6c68e73aa649285bedc9716826bdf5e41580614fba6de910e2dc1fa5bb524dcc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6c68e73aa649285bedc9716826bdf5e41580614fba6de910e2dc1fa5bb524dcc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-j8fzz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:18Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:18 crc kubenswrapper[5050]: I1123 14:42:18.069431 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:18Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:18 crc kubenswrapper[5050]: I1123 14:42:18.082625 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-qvjn2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"abdac21e-18fc-460d-bd3b-73bed66b8ab9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d239bbc61595516c6e76763dd66ee795750adb765c56bb5d9c967c1e5897fcf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bggpq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-qvjn2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:18Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:18 crc kubenswrapper[5050]: I1123 14:42:18.093085 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7cjg9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7f65245a-e7ea-4b29-9534-5dbe5a7ee271\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf73369e7515e9d61bf8fc0b0bd940f15e49a8e84b3e181dad104bd56c2d9b2d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-586ds\\\",\\\"readOnly\\\":true
,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:08Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7cjg9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:18Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:18 crc kubenswrapper[5050]: I1123 14:42:18.111575 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:18 crc kubenswrapper[5050]: I1123 14:42:18.111643 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:18 crc kubenswrapper[5050]: I1123 14:42:18.111655 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:18 crc kubenswrapper[5050]: I1123 14:42:18.111672 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:18 crc kubenswrapper[5050]: I1123 14:42:18.111685 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:18Z","lastTransitionTime":"2025-11-23T14:42:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:18 crc kubenswrapper[5050]: I1123 14:42:18.214618 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:18 crc kubenswrapper[5050]: I1123 14:42:18.214686 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:18 crc kubenswrapper[5050]: I1123 14:42:18.214696 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:18 crc kubenswrapper[5050]: I1123 14:42:18.214712 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:18 crc kubenswrapper[5050]: I1123 14:42:18.214722 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:18Z","lastTransitionTime":"2025-11-23T14:42:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:18 crc kubenswrapper[5050]: I1123 14:42:18.318097 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:18 crc kubenswrapper[5050]: I1123 14:42:18.318147 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:18 crc kubenswrapper[5050]: I1123 14:42:18.318159 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:18 crc kubenswrapper[5050]: I1123 14:42:18.318175 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:18 crc kubenswrapper[5050]: I1123 14:42:18.318185 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:18Z","lastTransitionTime":"2025-11-23T14:42:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:18 crc kubenswrapper[5050]: I1123 14:42:18.421127 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:18 crc kubenswrapper[5050]: I1123 14:42:18.421159 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:18 crc kubenswrapper[5050]: I1123 14:42:18.421169 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:18 crc kubenswrapper[5050]: I1123 14:42:18.421183 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:18 crc kubenswrapper[5050]: I1123 14:42:18.421192 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:18Z","lastTransitionTime":"2025-11-23T14:42:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:18 crc kubenswrapper[5050]: I1123 14:42:18.523700 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:18 crc kubenswrapper[5050]: I1123 14:42:18.523763 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:18 crc kubenswrapper[5050]: I1123 14:42:18.523785 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:18 crc kubenswrapper[5050]: I1123 14:42:18.523811 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:18 crc kubenswrapper[5050]: I1123 14:42:18.523831 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:18Z","lastTransitionTime":"2025-11-23T14:42:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:18 crc kubenswrapper[5050]: I1123 14:42:18.547973 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 23 14:42:18 crc kubenswrapper[5050]: I1123 14:42:18.548010 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 23 14:42:18 crc kubenswrapper[5050]: E1123 14:42:18.548144 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 23 14:42:18 crc kubenswrapper[5050]: E1123 14:42:18.548252 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 23 14:42:18 crc kubenswrapper[5050]: I1123 14:42:18.627184 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:18 crc kubenswrapper[5050]: I1123 14:42:18.627249 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:18 crc kubenswrapper[5050]: I1123 14:42:18.627268 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:18 crc kubenswrapper[5050]: I1123 14:42:18.627293 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:18 crc kubenswrapper[5050]: I1123 14:42:18.627312 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:18Z","lastTransitionTime":"2025-11-23T14:42:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:18 crc kubenswrapper[5050]: I1123 14:42:18.730349 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:18 crc kubenswrapper[5050]: I1123 14:42:18.730390 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:18 crc kubenswrapper[5050]: I1123 14:42:18.730402 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:18 crc kubenswrapper[5050]: I1123 14:42:18.730420 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:18 crc kubenswrapper[5050]: I1123 14:42:18.730436 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:18Z","lastTransitionTime":"2025-11-23T14:42:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:18 crc kubenswrapper[5050]: I1123 14:42:18.836021 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:18 crc kubenswrapper[5050]: I1123 14:42:18.836080 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:18 crc kubenswrapper[5050]: I1123 14:42:18.836163 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:18 crc kubenswrapper[5050]: I1123 14:42:18.836238 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:18 crc kubenswrapper[5050]: I1123 14:42:18.836256 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:18Z","lastTransitionTime":"2025-11-23T14:42:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:18 crc kubenswrapper[5050]: I1123 14:42:18.852424 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-j8fzz_6573c043-542c-47ae-a2ba-f70b8baf60c2/ovnkube-controller/1.log" Nov 23 14:42:18 crc kubenswrapper[5050]: I1123 14:42:18.853017 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-j8fzz_6573c043-542c-47ae-a2ba-f70b8baf60c2/ovnkube-controller/0.log" Nov 23 14:42:18 crc kubenswrapper[5050]: I1123 14:42:18.855863 5050 generic.go:334] "Generic (PLEG): container finished" podID="6573c043-542c-47ae-a2ba-f70b8baf60c2" containerID="e23fb2eb8cab90b4a0352a351bc63c590a96ec40331e9db29b2f39fe76cbf0f8" exitCode=1 Nov 23 14:42:18 crc kubenswrapper[5050]: I1123 14:42:18.855902 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" event={"ID":"6573c043-542c-47ae-a2ba-f70b8baf60c2","Type":"ContainerDied","Data":"e23fb2eb8cab90b4a0352a351bc63c590a96ec40331e9db29b2f39fe76cbf0f8"} Nov 23 14:42:18 crc kubenswrapper[5050]: I1123 14:42:18.855937 5050 scope.go:117] "RemoveContainer" containerID="df06d11d5998c885ff8bd3dd1fd314bc3157a4177aafe218655e8cf5b3ea64d9" Nov 23 14:42:18 crc kubenswrapper[5050]: I1123 14:42:18.857283 5050 scope.go:117] "RemoveContainer" containerID="e23fb2eb8cab90b4a0352a351bc63c590a96ec40331e9db29b2f39fe76cbf0f8" Nov 23 14:42:18 crc kubenswrapper[5050]: E1123 14:42:18.857546 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-j8fzz_openshift-ovn-kubernetes(6573c043-542c-47ae-a2ba-f70b8baf60c2)\"" pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" podUID="6573c043-542c-47ae-a2ba-f70b8baf60c2" Nov 23 14:42:18 crc kubenswrapper[5050]: I1123 14:42:18.873825 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:18Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:18 crc kubenswrapper[5050]: I1123 14:42:18.886731 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ced068547f7e7c0807e996ab8500b30cda68a4c31363b415f66516b883221972\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:18Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:18 crc kubenswrapper[5050]: I1123 14:42:18.897394 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-7btqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"963992d0-f69e-4327-9789-4571451c1838\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c4b1570d42f2b86c042b9abcb595bf36b35789f24d12573f94503c7383849647\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8vphk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:05Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-7btqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:18Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:18 crc kubenswrapper[5050]: I1123 14:42:18.914418 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6573c043-542c-47ae-a2ba-f70b8baf60c2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d06dfc9e93891495a387aa8a8e24b4029173b25c953dff4d2e80dbb30f4b03e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd601391a5bb150716ee97b765dd442d4bf9929eb01dfd17fdce39f327b4698f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9de298aa47ed7038c4f4ed742abb7a13f112d7f417637eb11d31a1f1accf0b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfa445fb3677a09126d124eea1348466a338713c6728a7ef138d9ffa43bec9eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://af3da6e6807f5d42cb8607efbabab0c5d472ddb5c02dd1c33f9f0f3ea8eba9c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://34b2a56f4af5df28c458b7f7d4588f81564c734a117bbf4d08e1fccfa43fd387\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e23fb2eb8cab90b4a0352a351bc63c590a96ec40331e9db29b2f39fe76cbf0f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://df06d11d5998c885ff8bd3dd1fd314bc3157a4177aafe218655e8cf5b3ea64d9\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-23T14:42:16Z\\\",\\\"message\\\":\\\"oval\\\\nI1123 14:42:16.467386 6369 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1123 14:42:16.467393 6369 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1123 14:42:16.467423 6369 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1123 14:42:16.467460 6369 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1123 14:42:16.467464 6369 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1123 14:42:16.467460 6369 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1123 14:42:16.467511 6369 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1123 14:42:16.467567 6369 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1123 14:42:16.467610 6369 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1123 14:42:16.467658 6369 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1123 14:42:16.467674 6369 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1123 14:42:16.467691 6369 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1123 14:42:16.467704 6369 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1123 14:42:16.467719 6369 factory.go:656] Stopping watch factory\\\\nI1123 14:42:16.467736 6369 ovnkube.go:599] Stopped ovnkube\\\\nI1123 14:42:16.467764 6369 metrics.go:553] Stopping metrics server at address \\\\\\\"\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:13Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e23fb2eb8cab90b4a0352a351bc63c590a96ec40331e9db29b2f39fe76cbf0f8\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-23T14:42:17Z\\\",\\\"message\\\":\\\"3 14:42:17.882505 6496 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1123 14:42:17.882633 6496 reflector.go:311] Stopping reflector *v1.EgressService (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI1123 14:42:17.882768 6496 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1123 14:42:17.882928 6496 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1123 14:42:17.883130 6496 reflector.go:311] Stopping reflector *v1.EgressQoS (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/factory.go:140\\\\nI1123 14:42:17.883305 6496 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1123 14:42:17.883429 6496 
reflector.go:311] Stopping reflector *v1.NetworkAttachmentDefinition (0s) from github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/factory.go:117\\\\nI1123 14:42:17.883712 6496 reflector.go:311] Stopping reflector *v1.AdminPolicyBasedExternalRoute (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/informers/externalversions/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7009f69906eef1a76bebe08606ec9a9b7e61ce6217caaf0b784d0943db4bd51f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\
\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c68e73aa649285bedc9716826bdf5e41580614fba6de910e2dc1fa5bb524dcc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6c68e73aa649285bedc9716826bdf5e41580614fba6de910e2dc1fa5bb524dcc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-j8fzz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:18Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:18 crc kubenswrapper[5050]: I1123 14:42:18.926135 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:18Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:18 crc kubenswrapper[5050]: I1123 14:42:18.937730 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-qvjn2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"abdac21e-18fc-460d-bd3b-73bed66b8ab9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d239bbc61595516c6e76763dd66ee795750adb765c56bb5d9c967c1e5897fcf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bggpq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-qvjn2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:18Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:18 crc kubenswrapper[5050]: I1123 14:42:18.938898 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:18 crc kubenswrapper[5050]: I1123 14:42:18.938928 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:18 crc kubenswrapper[5050]: I1123 14:42:18.938939 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:18 crc kubenswrapper[5050]: I1123 14:42:18.938956 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:18 crc kubenswrapper[5050]: I1123 14:42:18.938967 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:18Z","lastTransitionTime":"2025-11-23T14:42:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:18 crc kubenswrapper[5050]: I1123 14:42:18.947952 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7cjg9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7f65245a-e7ea-4b29-9534-5dbe5a7ee271\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf73369e7515e9d61bf8fc0b0bd940f15e49a8e84b3e181dad104bd56c2d9b2d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-586ds\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:08Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7cjg9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:18Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:18 crc kubenswrapper[5050]: I1123 14:42:18.959355 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8861091e-df98-4ea6-a853-e76e7dec48c5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://374a569595342d2e7684dc020e82bd80b35965aff751215c6c926005ea040798\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://63b17cb181aa6b22312393ce35127c8e06ae80cd1feaaa001d7cb34cdc111456\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3e232fb0da11b5caaec81c7b6ae7185d121508d99fce67c77e1d5d52289dad8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7b8be59b2e91f123c3d88b93237121402f84e9f54aeeaa0fae57baaa6238140\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:41:45Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:18Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:18 crc kubenswrapper[5050]: I1123 14:42:18.972831 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://719b29f3f0e7ff3aa3d105c9f4d9fb8d180d13494fcef94b204dece23c0f763d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:18Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:18 crc kubenswrapper[5050]: I1123 14:42:18.987344 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac91134c1368426cb845f20e6c777b0b613d93b1761d097051a7e63dae4203a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ef730e87b3ef157558a92f93526e4dec23e71b48f5cb08a30242f143182a5d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:18Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:19 crc kubenswrapper[5050]: I1123 14:42:19.003262 5050 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1d998909-9470-47ef-87e8-b34f0473682f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5bbc40aad4e1fff72cfeaf901bbe2edde01b909690cec06e0828aee8a4c93e8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jx4bd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b97978773311507f492064330c0ebbaf7862255a02347599c52ce6bbd13d8922\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jx4bd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-hlrlq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:19Z is after 2025-08-24T17:21:41Z" Nov 23 
14:42:19 crc kubenswrapper[5050]: I1123 14:42:19.017944 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-km979" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1d7b7de2-b361-44b9-ba10-5d4f889abc9b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a024193010036bae37aa1c744cd832dced7ca4893acc76efe5c2b9db65834a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://32b4eece06b72ca080adc2e1fab7f1db357c926b47433f2d374afaa45dad6b38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://32b4eece06b72ca080adc2e1fab7f1db357c926b47433f2d374afaa45dad6b38\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86867dd85be2aec28e680e55a6fb723964f26082eff07f656c00bd79cd2d3cd2\\\",\\\"image\\\":\\\"quay.io/openshift-rele
ase-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://86867dd85be2aec28e680e55a6fb723964f26082eff07f656c00bd79cd2d3cd2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c72b02501d9b33ae056e91b10f13e50327984b1cabcd69b5066780aa4fde2682\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c72b02501d9b33ae056e91b10f13e50327984b1cabcd69b5066780aa4fde2682\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba558b2174a35efd061ce97a84b1b785916ea34d481d58cf4582a5bf29d6b24a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba558b2174a35efd061ce97a84b1b785916ea34d481d58cf4582a5bf29d6b24a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"
name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9cd2247c7d101274e0f7a09b531266720489fb8ed77f89e7ccd1ee74882508d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c9cd2247c7d101274e0f7a09b531266720489fb8ed77f89e7ccd1ee74882508d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0873c53f2aa7003b92b7460595f79f52b70796f9e38c133da799b5c49785d857\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0873c53f2aa7003b92b7460595f79f52b70796f9e38c133da799b5c49785d857\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-km979\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:19Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:19 crc kubenswrapper[5050]: I1123 14:42:19.032298 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" 
err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0a2166ba-b7f4-4eb1-95bc-4f44d90e72ea\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://358800bef7b7e16e58003aa5e2411212334a5b1a33c80d223eb7d40ad13867f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://456093b33927a232192a5fa7ee1a4228e9f37da58e5e09cb33018ae6f5b14b2c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ccfc14c36ecee0d8502415582be6053b096ea77504bf5de67f77bf6f74c0936b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":
\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://10cf72fab955e4ec81917127043d9e4a2f9d427a03edc22120ab2ae711fdc5a4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://10cf72fab955e4ec81917127043d9e4a2f9d427a03edc22120ab2ae711fdc5a4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"ed_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1763908925\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1763908925\\\\\\\\\\\\\\\" (2025-11-23 13:42:05 +0000 UTC to 2026-11-23 13:42:05 +0000 UTC (now=2025-11-23 14:42:05.669138051 +0000 UTC))\\\\\\\"\\\\nI1123 14:42:05.669209 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1123 14:42:05.669265 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1123 14:42:05.668647 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1123 14:42:05.669361 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1123 14:42:05.669413 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1123 14:42:05.668681 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3465722692/tls.crt::/tmp/serving-cert-3465722692/tls.key\\\\\\\"\\\\nI1123 14:42:05.669629 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1123 14:42:05.669660 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1123 14:42:05.668685 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1123 14:42:05.669884 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1123 14:42:05.668719 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1123 14:42:05.669899 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1123 14:42:05.668776 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-23T14:41:59Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://beaf638703b4b64fabd10eb14bad058dc0dae74261a462e7f2fddd8aa3811cf7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a63464a6e2f79d8e5a7e0eed9f72b684e0071cad60d3a801754762be4a2735c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a63464a6e2f79d8e5a7e0eed9f72b684e0071cad60d3a801754762be4a2735c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:41:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:41:45Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:19Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:19 crc kubenswrapper[5050]: I1123 14:42:19.041937 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:19 crc kubenswrapper[5050]: I1123 14:42:19.041971 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:19 crc kubenswrapper[5050]: I1123 14:42:19.041982 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:19 crc kubenswrapper[5050]: I1123 14:42:19.041997 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:19 crc kubenswrapper[5050]: I1123 14:42:19.042007 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:19Z","lastTransitionTime":"2025-11-23T14:42:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:19 crc kubenswrapper[5050]: I1123 14:42:19.046064 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:19Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:19 crc kubenswrapper[5050]: I1123 14:42:19.144427 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:19 crc kubenswrapper[5050]: I1123 14:42:19.144491 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:19 crc kubenswrapper[5050]: I1123 14:42:19.144502 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:19 crc kubenswrapper[5050]: I1123 14:42:19.144517 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:19 crc kubenswrapper[5050]: I1123 14:42:19.144529 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:19Z","lastTransitionTime":"2025-11-23T14:42:19Z","reason":"KubeletNotReady","message":"container runtime 
network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:19 crc kubenswrapper[5050]: I1123 14:42:19.247352 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:19 crc kubenswrapper[5050]: I1123 14:42:19.247433 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:19 crc kubenswrapper[5050]: I1123 14:42:19.247467 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:19 crc kubenswrapper[5050]: I1123 14:42:19.247488 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:19 crc kubenswrapper[5050]: I1123 14:42:19.247502 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:19Z","lastTransitionTime":"2025-11-23T14:42:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:19 crc kubenswrapper[5050]: I1123 14:42:19.313565 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-9q4mg"] Nov 23 14:42:19 crc kubenswrapper[5050]: I1123 14:42:19.314029 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-9q4mg" Nov 23 14:42:19 crc kubenswrapper[5050]: I1123 14:42:19.317047 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Nov 23 14:42:19 crc kubenswrapper[5050]: I1123 14:42:19.317634 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Nov 23 14:42:19 crc kubenswrapper[5050]: I1123 14:42:19.331966 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:19Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:19 crc kubenswrapper[5050]: I1123 14:42:19.347094 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-qvjn2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"abdac21e-18fc-460d-bd3b-73bed66b8ab9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d239bbc61595516c6e76763dd66ee795750adb765c56bb5d9c967c1e5897fcf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"syste
m-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bggpq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-qvjn2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:19Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:19 crc kubenswrapper[5050]: I1123 14:42:19.349655 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:19 crc kubenswrapper[5050]: I1123 14:42:19.349795 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:19 crc kubenswrapper[5050]: I1123 14:42:19.349857 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:19 crc kubenswrapper[5050]: I1123 14:42:19.349924 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:19 crc kubenswrapper[5050]: I1123 14:42:19.350003 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:19Z","lastTransitionTime":"2025-11-23T14:42:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:19 crc kubenswrapper[5050]: I1123 14:42:19.355227 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/0b75c970-bec5-4155-89c1-3fcba8733f70-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-9q4mg\" (UID: \"0b75c970-bec5-4155-89c1-3fcba8733f70\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-9q4mg" Nov 23 14:42:19 crc kubenswrapper[5050]: I1123 14:42:19.355339 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/0b75c970-bec5-4155-89c1-3fcba8733f70-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-9q4mg\" (UID: \"0b75c970-bec5-4155-89c1-3fcba8733f70\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-9q4mg" Nov 23 14:42:19 crc kubenswrapper[5050]: I1123 14:42:19.355398 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lsct2\" (UniqueName: \"kubernetes.io/projected/0b75c970-bec5-4155-89c1-3fcba8733f70-kube-api-access-lsct2\") pod \"ovnkube-control-plane-749d76644c-9q4mg\" (UID: \"0b75c970-bec5-4155-89c1-3fcba8733f70\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-9q4mg" Nov 23 14:42:19 crc kubenswrapper[5050]: I1123 14:42:19.355445 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/0b75c970-bec5-4155-89c1-3fcba8733f70-env-overrides\") pod \"ovnkube-control-plane-749d76644c-9q4mg\" (UID: \"0b75c970-bec5-4155-89c1-3fcba8733f70\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-9q4mg" Nov 23 14:42:19 crc kubenswrapper[5050]: I1123 14:42:19.356962 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7cjg9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7f65245a-e7ea-4b29-9534-5dbe5a7ee271\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf73369e7515e9d61bf8fc0b0bd940f15e49a8e84b3e181dad104bd56c2d9b2d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-586ds\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:08Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7cjg9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:19Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:19 crc kubenswrapper[5050]: I1123 14:42:19.372919 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-km979" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1d7b7de2-b361-44b9-ba10-5d4f889abc9b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a024193010036bae37aa1c744cd832dced7ca4893acc76efe5c2b9db65834a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://32b4eece06b72ca080adc2e1fab7f1db357c926b47433f2d374afaa45dad6b38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://32b4eece06b72ca080adc2e1fab7f1db357c926b47433f2d374afaa45dad6b38\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86867dd85be2aec28e680e55a6fb723964f26082eff07f656c00bd79cd2d3cd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://86867dd85be2aec28e680e55a6fb723964f26082eff07f656c00bd79cd2d3cd2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c72b02501d9b33ae056e91b10f13e50327984b1cabcd69b5066780aa4fde2682\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c72b02501d9b33ae056e91b10f13e50327984b1cabcd69b5066780aa4fde2682\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba558b2174a35efd061ce97a84b1b785916ea34d481d58cf4582a5bf29d6b24a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba558b2174a35efd061ce97a84b1b785916ea34d481d58cf4582a5bf29d6b24a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9cd2247c7d101274e0f7a09b531266720489fb8ed77f89e7ccd1ee74882508d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c9cd2247c7d101274e0f7a09b531266720489fb8ed77f89e7ccd1ee74882508d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0873c53f2aa7003b92b7460595f79f52b70796f9e38c133da799b5c49785d857\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0873c53f2aa7003b92b7460595f79f52b70796f9e38c133da799b5c49785d857\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-km979\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:19Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:19 crc kubenswrapper[5050]: I1123 14:42:19.386931 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8861091e-df98-4ea6-a853-e76e7dec48c5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://374a569595342d2e7684dc020e82bd80b35965aff751215c6c926005ea040798\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://63b17cb181aa6b22312393ce35127c8e06ae80cd1feaaa001d7cb34cdc111456\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3e232fb0da11b5caaec81c7b6ae7185d121508d99fce67c77e1d5d52289dad8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7b8be59b2e91f123c3d88b93237121402f84e9f54aeeaa0fae57baaa6238140\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:41:45Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:19Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:19 crc kubenswrapper[5050]: I1123 14:42:19.402535 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://719b29f3f0e7ff3aa3d105c9f4d9fb8d180d13494fcef94b204dece23c0f763d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:19Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:19 crc kubenswrapper[5050]: I1123 14:42:19.414869 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac91134c1368426cb845f20e6c777b0b613d93b1761d097051a7e63dae4203a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ef730e87b3ef157558a92f93526e4dec23e71b48f5cb08a30242f143182a5d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:19Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:19 crc kubenswrapper[5050]: I1123 14:42:19.426197 5050 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1d998909-9470-47ef-87e8-b34f0473682f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5bbc40aad4e1fff72cfeaf901bbe2edde01b909690cec06e0828aee8a4c93e8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jx4bd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b97978773311507f492064330c0ebbaf7862255a02347599c52ce6bbd13d8922\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jx4bd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-hlrlq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:19Z is after 2025-08-24T17:21:41Z" Nov 23 
14:42:19 crc kubenswrapper[5050]: I1123 14:42:19.447658 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0a2166ba-b7f4-4eb1-95bc-4f44d90e72ea\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://358800bef7b7e16e58003aa5e2411212334a5b1a33c80d223eb7d40ad13867f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://456093b33927a232192a5fa7ee1a4228e9f37da58e5e09cb33018ae6f5b14b2c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ccfc14c36ecee0d8502415582be6053b096ea77504bf5de67f77bf6f74c0936b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\
":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://10cf72fab955e4ec81917127043d9e4a2f9d427a03edc22120ab2ae711fdc5a4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://10cf72fab955e4ec81917127043d9e4a2f9d427a03edc22120ab2ae711fdc5a4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"ed_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1763908925\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1763908925\\\\\\\\\\\\\\\" (2025-11-23 13:42:05 +0000 UTC to 2026-11-23 13:42:05 +0000 UTC (now=2025-11-23 14:42:05.669138051 +0000 UTC))\\\\\\\"\\\\nI1123 14:42:05.669209 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1123 14:42:05.669265 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1123 14:42:05.668647 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1123 14:42:05.669361 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1123 14:42:05.669413 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1123 14:42:05.668681 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3465722692/tls.crt::/tmp/serving-cert-3465722692/tls.key\\\\\\\"\\\\nI1123 14:42:05.669629 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1123 14:42:05.669660 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1123 14:42:05.668685 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1123 14:42:05.669884 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1123 14:42:05.668719 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1123 14:42:05.669899 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1123 14:42:05.668776 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-23T14:41:59Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://beaf638703b4b64fabd10eb14bad058dc0dae74261a462e7f2fddd8aa3811cf7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a63464a6e2f79d8e5a7e0eed9f72b684e0071cad60d3a801754762be4a2735c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a63464a6e2f79d8e5a7e0eed9f72b684e0071cad60d3a801754762be4a2735c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:41:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:41:45Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:19Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:19 crc kubenswrapper[5050]: I1123 14:42:19.452412 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:19 crc kubenswrapper[5050]: I1123 14:42:19.452464 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:19 crc kubenswrapper[5050]: I1123 14:42:19.452478 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:19 crc kubenswrapper[5050]: I1123 14:42:19.452498 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:19 crc kubenswrapper[5050]: I1123 14:42:19.452510 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:19Z","lastTransitionTime":"2025-11-23T14:42:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:19 crc kubenswrapper[5050]: I1123 14:42:19.456891 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/0b75c970-bec5-4155-89c1-3fcba8733f70-env-overrides\") pod \"ovnkube-control-plane-749d76644c-9q4mg\" (UID: \"0b75c970-bec5-4155-89c1-3fcba8733f70\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-9q4mg" Nov 23 14:42:19 crc kubenswrapper[5050]: I1123 14:42:19.456943 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/0b75c970-bec5-4155-89c1-3fcba8733f70-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-9q4mg\" (UID: \"0b75c970-bec5-4155-89c1-3fcba8733f70\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-9q4mg" Nov 23 14:42:19 crc kubenswrapper[5050]: I1123 14:42:19.457016 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/0b75c970-bec5-4155-89c1-3fcba8733f70-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-9q4mg\" (UID: \"0b75c970-bec5-4155-89c1-3fcba8733f70\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-9q4mg" Nov 23 14:42:19 crc kubenswrapper[5050]: I1123 14:42:19.457052 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lsct2\" (UniqueName: \"kubernetes.io/projected/0b75c970-bec5-4155-89c1-3fcba8733f70-kube-api-access-lsct2\") pod \"ovnkube-control-plane-749d76644c-9q4mg\" (UID: \"0b75c970-bec5-4155-89c1-3fcba8733f70\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-9q4mg" Nov 23 14:42:19 crc kubenswrapper[5050]: I1123 14:42:19.457508 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/0b75c970-bec5-4155-89c1-3fcba8733f70-env-overrides\") pod \"ovnkube-control-plane-749d76644c-9q4mg\" (UID: \"0b75c970-bec5-4155-89c1-3fcba8733f70\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-9q4mg" Nov 23 14:42:19 crc kubenswrapper[5050]: I1123 14:42:19.457768 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/0b75c970-bec5-4155-89c1-3fcba8733f70-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-9q4mg\" (UID: \"0b75c970-bec5-4155-89c1-3fcba8733f70\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-9q4mg" Nov 23 14:42:19 crc kubenswrapper[5050]: I1123 14:42:19.463293 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/0b75c970-bec5-4155-89c1-3fcba8733f70-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-9q4mg\" (UID: \"0b75c970-bec5-4155-89c1-3fcba8733f70\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-9q4mg" Nov 23 14:42:19 crc kubenswrapper[5050]: I1123 14:42:19.464938 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:19Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:19 crc kubenswrapper[5050]: I1123 14:42:19.472232 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lsct2\" (UniqueName: \"kubernetes.io/projected/0b75c970-bec5-4155-89c1-3fcba8733f70-kube-api-access-lsct2\") pod \"ovnkube-control-plane-749d76644c-9q4mg\" (UID: \"0b75c970-bec5-4155-89c1-3fcba8733f70\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-9q4mg" Nov 23 14:42:19 crc kubenswrapper[5050]: I1123 14:42:19.478978 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-9q4mg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b75c970-bec5-4155-89c1-3fcba8733f70\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lsct2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lsct2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:19Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-9q4mg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:19Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:19 crc kubenswrapper[5050]: I1123 14:42:19.491053 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed 
to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:19Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:19 crc kubenswrapper[5050]: I1123 14:42:19.506759 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ced068547f7e7c0807e996ab8500b30cda68a4c31363b415f66516b883221972\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:19Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:19 crc kubenswrapper[5050]: I1123 14:42:19.520331 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-7btqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"963992d0-f69e-4327-9789-4571451c1838\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c4b1570d42f2b86c042b9abcb595bf36b35789f24d12573f94503c7383849647\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8vphk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:05Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-7btqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:19Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:19 crc kubenswrapper[5050]: I1123 14:42:19.537418 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6573c043-542c-47ae-a2ba-f70b8baf60c2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d06dfc9e93891495a387aa8a8e24b4029173b25c953dff4d2e80dbb30f4b03e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd601391a5bb150716ee97b765dd442d4bf9929eb01dfd17fdce39f327b4698f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9de298aa47ed7038c4f4ed742abb7a13f112d7f417637eb11d31a1f1accf0b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfa445fb3677a09126d124eea1348466a338713c6728a7ef138d9ffa43bec9eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://af3da6e6807f5d42cb8607efbabab0c5d472ddb5c02dd1c33f9f0f3ea8eba9c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://34b2a56f4af5df28c458b7f7d4588f81564c734a117bbf4d08e1fccfa43fd387\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e23fb2eb8cab90b4a0352a351bc63c590a96ec40331e9db29b2f39fe76cbf0f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://df06d11d5998c885ff8bd3dd1fd314bc3157a4177aafe218655e8cf5b3ea64d9\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-23T14:42:16Z\\\",\\\"message\\\":\\\"oval\\\\nI1123 14:42:16.467386 6369 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1123 14:42:16.467393 6369 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1123 14:42:16.467423 6369 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1123 14:42:16.467460 6369 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1123 14:42:16.467464 6369 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1123 14:42:16.467460 6369 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1123 14:42:16.467511 6369 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1123 14:42:16.467567 6369 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1123 14:42:16.467610 6369 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1123 14:42:16.467658 6369 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1123 14:42:16.467674 6369 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1123 14:42:16.467691 6369 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1123 14:42:16.467704 6369 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1123 14:42:16.467719 6369 factory.go:656] Stopping watch factory\\\\nI1123 14:42:16.467736 6369 ovnkube.go:599] Stopped ovnkube\\\\nI1123 14:42:16.467764 6369 metrics.go:553] Stopping metrics server at address \\\\\\\"\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:13Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e23fb2eb8cab90b4a0352a351bc63c590a96ec40331e9db29b2f39fe76cbf0f8\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-23T14:42:17Z\\\",\\\"message\\\":\\\"3 14:42:17.882505 6496 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1123 14:42:17.882633 6496 reflector.go:311] Stopping reflector *v1.EgressService (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI1123 14:42:17.882768 6496 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1123 14:42:17.882928 6496 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1123 14:42:17.883130 6496 reflector.go:311] Stopping reflector *v1.EgressQoS (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/factory.go:140\\\\nI1123 14:42:17.883305 6496 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1123 14:42:17.883429 6496 
reflector.go:311] Stopping reflector *v1.NetworkAttachmentDefinition (0s) from github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/factory.go:117\\\\nI1123 14:42:17.883712 6496 reflector.go:311] Stopping reflector *v1.AdminPolicyBasedExternalRoute (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/informers/externalversions/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7009f69906eef1a76bebe08606ec9a9b7e61ce6217caaf0b784d0943db4bd51f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\
\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c68e73aa649285bedc9716826bdf5e41580614fba6de910e2dc1fa5bb524dcc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6c68e73aa649285bedc9716826bdf5e41580614fba6de910e2dc1fa5bb524dcc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-j8fzz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:19Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:19 crc kubenswrapper[5050]: I1123 14:42:19.548513 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 23 14:42:19 crc kubenswrapper[5050]: E1123 14:42:19.548731 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 23 14:42:19 crc kubenswrapper[5050]: I1123 14:42:19.554509 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:19 crc kubenswrapper[5050]: I1123 14:42:19.554568 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:19 crc kubenswrapper[5050]: I1123 14:42:19.554582 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:19 crc kubenswrapper[5050]: I1123 14:42:19.554598 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:19 crc kubenswrapper[5050]: I1123 14:42:19.554632 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:19Z","lastTransitionTime":"2025-11-23T14:42:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:19 crc kubenswrapper[5050]: I1123 14:42:19.629617 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-9q4mg" Nov 23 14:42:19 crc kubenswrapper[5050]: W1123 14:42:19.649245 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0b75c970_bec5_4155_89c1_3fcba8733f70.slice/crio-9ccf01ecef6467f6a9696506fd55797f21e54a22ee3f43bfb7eb63a7bde3c7df WatchSource:0}: Error finding container 9ccf01ecef6467f6a9696506fd55797f21e54a22ee3f43bfb7eb63a7bde3c7df: Status 404 returned error can't find the container with id 9ccf01ecef6467f6a9696506fd55797f21e54a22ee3f43bfb7eb63a7bde3c7df Nov 23 14:42:19 crc kubenswrapper[5050]: I1123 14:42:19.657291 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:19 crc kubenswrapper[5050]: I1123 14:42:19.657325 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:19 crc kubenswrapper[5050]: I1123 14:42:19.657334 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:19 crc kubenswrapper[5050]: I1123 14:42:19.657350 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:19 crc kubenswrapper[5050]: I1123 14:42:19.657374 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:19Z","lastTransitionTime":"2025-11-23T14:42:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:19 crc kubenswrapper[5050]: I1123 14:42:19.761136 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:19 crc kubenswrapper[5050]: I1123 14:42:19.761260 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:19 crc kubenswrapper[5050]: I1123 14:42:19.761582 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:19 crc kubenswrapper[5050]: I1123 14:42:19.761657 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:19 crc kubenswrapper[5050]: I1123 14:42:19.761688 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:19Z","lastTransitionTime":"2025-11-23T14:42:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:19 crc kubenswrapper[5050]: I1123 14:42:19.863190 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-9q4mg" event={"ID":"0b75c970-bec5-4155-89c1-3fcba8733f70","Type":"ContainerStarted","Data":"9ccf01ecef6467f6a9696506fd55797f21e54a22ee3f43bfb7eb63a7bde3c7df"} Nov 23 14:42:19 crc kubenswrapper[5050]: I1123 14:42:19.863379 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:19 crc kubenswrapper[5050]: I1123 14:42:19.863393 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:19 crc kubenswrapper[5050]: I1123 14:42:19.863402 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:19 crc kubenswrapper[5050]: I1123 14:42:19.863415 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:19 crc kubenswrapper[5050]: I1123 14:42:19.863426 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:19Z","lastTransitionTime":"2025-11-23T14:42:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:19 crc kubenswrapper[5050]: I1123 14:42:19.865370 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-j8fzz_6573c043-542c-47ae-a2ba-f70b8baf60c2/ovnkube-controller/1.log" Nov 23 14:42:19 crc kubenswrapper[5050]: I1123 14:42:19.965555 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:19 crc kubenswrapper[5050]: I1123 14:42:19.965614 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:19 crc kubenswrapper[5050]: I1123 14:42:19.965633 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:19 crc kubenswrapper[5050]: I1123 14:42:19.965657 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:19 crc kubenswrapper[5050]: I1123 14:42:19.965675 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:19Z","lastTransitionTime":"2025-11-23T14:42:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:20 crc kubenswrapper[5050]: I1123 14:42:20.068958 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:20 crc kubenswrapper[5050]: I1123 14:42:20.069020 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:20 crc kubenswrapper[5050]: I1123 14:42:20.069036 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:20 crc kubenswrapper[5050]: I1123 14:42:20.069062 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:20 crc kubenswrapper[5050]: I1123 14:42:20.069081 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:20Z","lastTransitionTime":"2025-11-23T14:42:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:20 crc kubenswrapper[5050]: I1123 14:42:20.172657 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:20 crc kubenswrapper[5050]: I1123 14:42:20.172708 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:20 crc kubenswrapper[5050]: I1123 14:42:20.172723 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:20 crc kubenswrapper[5050]: I1123 14:42:20.172744 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:20 crc kubenswrapper[5050]: I1123 14:42:20.172759 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:20Z","lastTransitionTime":"2025-11-23T14:42:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:20 crc kubenswrapper[5050]: I1123 14:42:20.275707 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:20 crc kubenswrapper[5050]: I1123 14:42:20.275985 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:20 crc kubenswrapper[5050]: I1123 14:42:20.276105 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:20 crc kubenswrapper[5050]: I1123 14:42:20.276227 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:20 crc kubenswrapper[5050]: I1123 14:42:20.276335 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:20Z","lastTransitionTime":"2025-11-23T14:42:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:20 crc kubenswrapper[5050]: I1123 14:42:20.378690 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:20 crc kubenswrapper[5050]: I1123 14:42:20.378732 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:20 crc kubenswrapper[5050]: I1123 14:42:20.378744 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:20 crc kubenswrapper[5050]: I1123 14:42:20.378763 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:20 crc kubenswrapper[5050]: I1123 14:42:20.378777 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:20Z","lastTransitionTime":"2025-11-23T14:42:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:20 crc kubenswrapper[5050]: I1123 14:42:20.439741 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/network-metrics-daemon-gtj96"] Nov 23 14:42:20 crc kubenswrapper[5050]: I1123 14:42:20.440593 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gtj96" Nov 23 14:42:20 crc kubenswrapper[5050]: E1123 14:42:20.440729 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gtj96" podUID="cc69bd19-1f49-486e-a510-d5b8461fb172" Nov 23 14:42:20 crc kubenswrapper[5050]: I1123 14:42:20.462818 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0a2166ba-b7f4-4eb1-95bc-4f44d90e72ea\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://358800bef7b7e16e58003aa5e2411212334a5b1a33c80d223eb7d40ad13867f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://456093b33927a232192a5fa7ee1a4228e9f37da58e5e09cb33018ae6f5b14b2c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ccfc14c36ecee0d8502415582be6053b096ea77504bf5de67f77bf6f74c0936b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://10cf72fab955e4ec81917127043d9e4a2f9d427a03edc22120ab2ae711fdc5a4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://10cf72fab955e4ec81917127043d9e4a2f9d427a03edc22120ab2ae711fdc5a4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"ed_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed 
loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1763908925\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1763908925\\\\\\\\\\\\\\\" (2025-11-23 13:42:05 +0000 UTC to 2026-11-23 13:42:05 +0000 UTC (now=2025-11-23 14:42:05.669138051 +0000 UTC))\\\\\\\"\\\\nI1123 14:42:05.669209 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1123 14:42:05.669265 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1123 14:42:05.668647 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1123 14:42:05.669361 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1123 14:42:05.669413 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1123 14:42:05.668681 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3465722692/tls.crt::/tmp/serving-cert-3465722692/tls.key\\\\\\\"\\\\nI1123 14:42:05.669629 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1123 14:42:05.669660 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1123 14:42:05.668685 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1123 14:42:05.669884 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1123 14:42:05.668719 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1123 14:42:05.669899 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1123 14:42:05.668776 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-23T14:41:59Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://beaf638703b4b64fabd10eb14bad058dc0dae74261a462e7f2fddd8aa3811cf7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a63464a6e2f79d8e5a7e0eed9f72b684e0071cad60d3a801754762be4a2735c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a63464a6e2f79d8e5a7e0eed9f72b684e0071cad60d3a801754762be4a2735c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:41:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:41:45Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:20Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:20 crc kubenswrapper[5050]: I1123 14:42:20.468484 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ltjqr\" (UniqueName: \"kubernetes.io/projected/cc69bd19-1f49-486e-a510-d5b8461fb172-kube-api-access-ltjqr\") pod \"network-metrics-daemon-gtj96\" (UID: \"cc69bd19-1f49-486e-a510-d5b8461fb172\") " pod="openshift-multus/network-metrics-daemon-gtj96" Nov 23 14:42:20 crc kubenswrapper[5050]: I1123 14:42:20.468548 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/cc69bd19-1f49-486e-a510-d5b8461fb172-metrics-certs\") pod \"network-metrics-daemon-gtj96\" (UID: \"cc69bd19-1f49-486e-a510-d5b8461fb172\") " pod="openshift-multus/network-metrics-daemon-gtj96" Nov 23 14:42:20 crc kubenswrapper[5050]: I1123 14:42:20.481166 5050 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:20Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:20 crc kubenswrapper[5050]: I1123 14:42:20.481277 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:20 crc kubenswrapper[5050]: I1123 14:42:20.481329 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:20 crc kubenswrapper[5050]: I1123 14:42:20.481345 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:20 crc kubenswrapper[5050]: I1123 14:42:20.481372 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:20 crc kubenswrapper[5050]: I1123 14:42:20.481390 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:20Z","lastTransitionTime":"2025-11-23T14:42:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:20 crc kubenswrapper[5050]: I1123 14:42:20.493925 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-gtj96" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc69bd19-1f49-486e-a510-d5b8461fb172\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ltjqr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ltjqr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:20Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-gtj96\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:20Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:20 crc kubenswrapper[5050]: I1123 14:42:20.520194 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6573c043-542c-47ae-a2ba-f70b8baf60c2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d06dfc9e93891495a387aa8a8e24b4029173b25c953dff4d2e80dbb30f4b03e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd601391a5bb150716ee97b765dd442d4bf9929eb01dfd17fdce39f327b4698f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9de298aa47ed7038c4f4ed742abb7a13f112d7f417637eb11d31a1f1accf0b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfa445fb3677a09126d124eea1348466a338713c6728a7ef138d9ffa43bec9eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://af3da6e6807f5d42cb8607efbabab0c5d472ddb5c02dd1c33f9f0f3ea8eba9c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://34b2a56f4af5df28c458b7f7d4588f81564c734a117bbf4d08e1fccfa43fd387\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e23fb2eb8cab90b4a0352a351bc63c590a96ec40331e9db29b2f39fe76cbf0f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://df06d11d5998c885ff8bd3dd1fd314bc3157a4177aafe218655e8cf5b3ea64d9\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-23T14:42:16Z\\\",\\\"message\\\":\\\"oval\\\\nI1123 14:42:16.467386 6369 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1123 14:42:16.467393 6369 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1123 14:42:16.467423 6369 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1123 14:42:16.467460 6369 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1123 14:42:16.467464 6369 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1123 14:42:16.467460 6369 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1123 14:42:16.467511 6369 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1123 14:42:16.467567 6369 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1123 14:42:16.467610 6369 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1123 14:42:16.467658 6369 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1123 14:42:16.467674 6369 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1123 14:42:16.467691 6369 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1123 14:42:16.467704 6369 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1123 14:42:16.467719 6369 factory.go:656] Stopping watch factory\\\\nI1123 14:42:16.467736 6369 ovnkube.go:599] Stopped ovnkube\\\\nI1123 14:42:16.467764 6369 metrics.go:553] Stopping metrics server at address \\\\\\\"\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:13Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e23fb2eb8cab90b4a0352a351bc63c590a96ec40331e9db29b2f39fe76cbf0f8\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-23T14:42:17Z\\\",\\\"message\\\":\\\"3 14:42:17.882505 6496 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1123 14:42:17.882633 6496 reflector.go:311] Stopping reflector *v1.EgressService (0s) from 
github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI1123 14:42:17.882768 6496 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1123 14:42:17.882928 6496 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1123 14:42:17.883130 6496 reflector.go:311] Stopping reflector *v1.EgressQoS (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/factory.go:140\\\\nI1123 14:42:17.883305 6496 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1123 14:42:17.883429 6496 reflector.go:311] Stopping reflector *v1.NetworkAttachmentDefinition (0s) from github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/factory.go:117\\\\nI1123 14:42:17.883712 6496 reflector.go:311] Stopping reflector *v1.AdminPolicyBasedExternalRoute (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/informers/externalversions/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7009f69906eef1a76bebe08606ec9a9b7e61ce6217caaf0b784d0943db4bd51f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.
io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c68e73aa649285bedc9716826bdf5e41580614fba6de910e2dc1fa5bb524dcc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6c68e73aa649285bedc9716826bdf5e41580614fba6de910e2dc1fa5bb524dcc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-j8fzz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:20Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:20 crc kubenswrapper[5050]: I1123 14:42:20.539051 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-9q4mg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b75c970-bec5-4155-89c1-3fcba8733f70\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lsct2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lsct2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:19Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-9q4mg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:20Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:20 crc kubenswrapper[5050]: I1123 14:42:20.548343 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 23 14:42:20 crc kubenswrapper[5050]: I1123 14:42:20.548521 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 23 14:42:20 crc kubenswrapper[5050]: E1123 14:42:20.548630 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 23 14:42:20 crc kubenswrapper[5050]: E1123 14:42:20.548765 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 23 14:42:20 crc kubenswrapper[5050]: I1123 14:42:20.556839 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:20Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:20 crc kubenswrapper[5050]: I1123 14:42:20.569686 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ltjqr\" (UniqueName: \"kubernetes.io/projected/cc69bd19-1f49-486e-a510-d5b8461fb172-kube-api-access-ltjqr\") pod \"network-metrics-daemon-gtj96\" (UID: \"cc69bd19-1f49-486e-a510-d5b8461fb172\") " pod="openshift-multus/network-metrics-daemon-gtj96" Nov 23 14:42:20 crc kubenswrapper[5050]: I1123 14:42:20.569799 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/cc69bd19-1f49-486e-a510-d5b8461fb172-metrics-certs\") pod \"network-metrics-daemon-gtj96\" (UID: \"cc69bd19-1f49-486e-a510-d5b8461fb172\") " pod="openshift-multus/network-metrics-daemon-gtj96" Nov 23 14:42:20 crc kubenswrapper[5050]: E1123 14:42:20.570006 5050 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 23 14:42:20 crc kubenswrapper[5050]: E1123 14:42:20.570096 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/cc69bd19-1f49-486e-a510-d5b8461fb172-metrics-certs podName:cc69bd19-1f49-486e-a510-d5b8461fb172 nodeName:}" failed. No retries permitted until 2025-11-23 14:42:21.07006728 +0000 UTC m=+36.237063795 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/cc69bd19-1f49-486e-a510-d5b8461fb172-metrics-certs") pod "network-metrics-daemon-gtj96" (UID: "cc69bd19-1f49-486e-a510-d5b8461fb172") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 23 14:42:20 crc kubenswrapper[5050]: I1123 14:42:20.570288 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ced068547f7e7c0807e996ab8500b30cda68a4c31363b415f66516b883221972\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:20Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:20 crc kubenswrapper[5050]: I1123 14:42:20.583594 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:20 crc kubenswrapper[5050]: I1123 14:42:20.583633 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:20 crc kubenswrapper[5050]: I1123 14:42:20.583647 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:20 crc kubenswrapper[5050]: I1123 14:42:20.583664 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:20 crc kubenswrapper[5050]: I1123 14:42:20.583676 5050 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:20Z","lastTransitionTime":"2025-11-23T14:42:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:20 crc kubenswrapper[5050]: I1123 14:42:20.584404 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-7btqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"963992d0-f69e-4327-9789-4571451c1838\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c4b1570d42f2b86c042b9abcb595bf36b35789f24d12573f94503c7383849647\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8vphk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:05Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-7btqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:20Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:20 crc kubenswrapper[5050]: I1123 14:42:20.594868 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ltjqr\" (UniqueName: \"kubernetes.io/projected/cc69bd19-1f49-486e-a510-d5b8461fb172-kube-api-access-ltjqr\") pod \"network-metrics-daemon-gtj96\" (UID: \"cc69bd19-1f49-486e-a510-d5b8461fb172\") " pod="openshift-multus/network-metrics-daemon-gtj96" Nov 23 14:42:20 crc kubenswrapper[5050]: I1123 14:42:20.603988 5050 status_manager.go:875] "Failed to update 
status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:20Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:20 crc kubenswrapper[5050]: I1123 14:42:20.621090 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-qvjn2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"abdac21e-18fc-460d-bd3b-73bed66b8ab9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d239bbc61595516c6e76763dd66ee795750adb765c56bb5d9c967c1e5897fcf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bggpq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-qvjn2\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:20Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:20 crc kubenswrapper[5050]: I1123 14:42:20.635566 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7cjg9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7f65245a-e7ea-4b29-9534-5dbe5a7ee271\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf73369e7515e9d61bf8fc0b0bd940f15e49a8e84b3e181dad104bd56c2d9b2d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-586ds\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:08Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7cjg9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:20Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:20 crc kubenswrapper[5050]: I1123 14:42:20.650686 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1d998909-9470-47ef-87e8-b34f0473682f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5bbc40aad4e1fff72cfeaf901bbe2edde01b909690cec06e0828aee8a4c93e8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jx4bd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b97978773311507f492064330c0ebbaf7862255a02347599c52ce6bbd13d8922\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jx4bd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-hlrlq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:20Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:20 crc kubenswrapper[5050]: I1123 14:42:20.672564 5050 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-km979" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1d7b7de2-b361-44b9-ba10-5d4f889abc9b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a024193010036bae37aa1c744cd832dced7ca4893acc76efe5c2b9db65834a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://32b4eece06b72ca080adc2e1fab7f1db357c926b47433f2d374afaa45dad6b38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://32b4eece06b72ca080adc2e1fab7f1db357c926b47433f2d374afaa45dad6b38\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86867dd85be2aec28e680e55a6fb723964f26082eff07f656c00bd79cd2d3cd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2c
c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://86867dd85be2aec28e680e55a6fb723964f26082eff07f656c00bd79cd2d3cd2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c72b02501d9b33ae056e91b10f13e50327984b1cabcd69b5066780aa4fde2682\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c72b02501d9b33ae056e91b10f13e50327984b1cabcd69b5066780aa4fde2682\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba558b2174a35efd061ce97a84b1b785916ea34d481d58cf4582a5bf29d6b24a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba558b2174a35efd061ce97a84b1b785916ea34d481d58cf4582a5bf29d6b24a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-re
lease\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9cd2247c7d101274e0f7a09b531266720489fb8ed77f89e7ccd1ee74882508d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c9cd2247c7d101274e0f7a09b531266720489fb8ed77f89e7ccd1ee74882508d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0873c53f2aa7003b92b7460595f79f52b70796f9e38c133da799b5c49785d857\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0873c53f2aa7003b92b7460595f79f52b70796f9e38c133da799b5c49785d857\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-km979\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:20Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:20 crc kubenswrapper[5050]: I1123 14:42:20.686704 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:20 crc kubenswrapper[5050]: I1123 14:42:20.686769 5050 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:20 crc kubenswrapper[5050]: I1123 14:42:20.686782 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:20 crc kubenswrapper[5050]: I1123 14:42:20.686802 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:20 crc kubenswrapper[5050]: I1123 14:42:20.686815 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:20Z","lastTransitionTime":"2025-11-23T14:42:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:20 crc kubenswrapper[5050]: I1123 14:42:20.687482 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8861091e-df98-4ea6-a853-e76e7dec48c5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://374a569595342d2e7684dc020e82bd80b35965aff751215c6c926005ea040798\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://63b17cb181aa6b22312393ce35127c8e06ae80cd1feaaa001d7cb34cdc111456\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"
mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3e232fb0da11b5caaec81c7b6ae7185d121508d99fce67c77e1d5d52289dad8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7b8be59b2e91f123c3d88b93237121402f84e9f54aeeaa0fae57baaa6238140\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:41:45Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:20Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:20 crc kubenswrapper[5050]: I1123 14:42:20.704513 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://719b29f3f0e7ff3aa3d105c9f4d9fb8d180d13494fcef94b204dece23c0f763d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:20Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:20 crc kubenswrapper[5050]: I1123 14:42:20.719992 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac91134c1368426cb845f20e6c777b0b613d93b1761d097051a7e63dae4203a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ef730e87b3ef157558a92f93526e4dec23e71b48f5cb08a30242f143182a5d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:20Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:20 crc kubenswrapper[5050]: I1123 14:42:20.789721 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:20 crc kubenswrapper[5050]: I1123 14:42:20.789780 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:20 crc kubenswrapper[5050]: I1123 14:42:20.789797 5050 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 23 14:42:20 crc kubenswrapper[5050]: I1123 14:42:20.789820 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:20 crc kubenswrapper[5050]: I1123 14:42:20.789837 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:20Z","lastTransitionTime":"2025-11-23T14:42:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:20 crc kubenswrapper[5050]: I1123 14:42:20.878946 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-9q4mg" event={"ID":"0b75c970-bec5-4155-89c1-3fcba8733f70","Type":"ContainerStarted","Data":"2cc3ecb845a59d933cd05a64b4c258b18d167098d933d7b0741d7dbb43bfb328"} Nov 23 14:42:20 crc kubenswrapper[5050]: I1123 14:42:20.879019 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-9q4mg" event={"ID":"0b75c970-bec5-4155-89c1-3fcba8733f70","Type":"ContainerStarted","Data":"b4f4f3e4f2e058d94d8ff4dc299e19a23318e04263a4324efa766daf5f7268f2"} Nov 23 14:42:20 crc kubenswrapper[5050]: I1123 14:42:20.892915 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:20 crc kubenswrapper[5050]: I1123 14:42:20.892966 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:20 crc kubenswrapper[5050]: I1123 14:42:20.892981 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:20 crc kubenswrapper[5050]: I1123 14:42:20.893005 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:20 crc kubenswrapper[5050]: I1123 14:42:20.893019 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:20Z","lastTransitionTime":"2025-11-23T14:42:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:20 crc kubenswrapper[5050]: I1123 14:42:20.901350 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0a2166ba-b7f4-4eb1-95bc-4f44d90e72ea\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://358800bef7b7e16e58003aa5e2411212334a5b1a33c80d223eb7d40ad13867f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://456093b33927a232192a5fa7ee1a4228e9f37da58e5e09cb33018ae6f5b14b2c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ccfc14c36ecee0d8502415582be6053b096ea77504bf5de67f77bf6f74c0936b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://10cf72fab955e4ec81917127043d9e4a2f9d427a03edc22120ab2ae711fdc5a4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://10cf72fab955e4ec81917127043d9e4a2f9d427a03edc22120ab2ae711fdc5a4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"ed_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1763908925\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1763908925\\\\\\\\\\\\\\\" (2025-11-23 13:42:05 +0000 UTC to 2026-11-23 13:42:05 +0000 UTC (now=2025-11-23 14:42:05.669138051 +0000 UTC))\\\\\\\"\\\\nI1123 14:42:05.669209 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1123 14:42:05.669265 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1123 14:42:05.668647 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1123 14:42:05.669361 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1123 14:42:05.669413 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1123 14:42:05.668681 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3465722692/tls.crt::/tmp/serving-cert-3465722692/tls.key\\\\\\\"\\\\nI1123 14:42:05.669629 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1123 14:42:05.669660 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1123 14:42:05.668685 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1123 14:42:05.669884 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1123 14:42:05.668719 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1123 14:42:05.669899 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1123 14:42:05.668776 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-23T14:41:59Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed 
container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://beaf638703b4b64fabd10eb14bad058dc0dae74261a462e7f2fddd8aa3811cf7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a63464a6e2f79d8e5a7e0eed9f72b684e0071cad60d3a801754762be4a2735c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a63464a6e2f79d8e5a7e0eed9f72b684e0071cad60d3a801754762be4a2735c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:41:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:41:45Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:20Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:20 crc kubenswrapper[5050]: I1123 14:42:20.916615 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:20Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:20 crc kubenswrapper[5050]: I1123 14:42:20.928854 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-gtj96" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc69bd19-1f49-486e-a510-d5b8461fb172\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ltjqr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ltjqr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:20Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-gtj96\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:20Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:20 crc kubenswrapper[5050]: I1123 14:42:20.942104 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-7btqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"963992d0-f69e-4327-9789-4571451c1838\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c4b1570d42f2b86c042b9abcb595bf36b35789f24d12573f94503c7383849647\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8vphk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:05Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-7btqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:20Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:20 crc kubenswrapper[5050]: I1123 14:42:20.971138 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6573c043-542c-47ae-a2ba-f70b8baf60c2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d06dfc9e93891495a387aa8a8e24b4029173b25c953dff4d2e80dbb30f4b03e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd601391a5bb150716ee97b765dd442d4bf9929eb01dfd17fdce39f327b4698f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9de298aa47ed7038c4f4ed742abb7a13f112d7f417637eb11d31a1f1accf0b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfa445fb3677a09126d124eea1348466a338713c6728a7ef138d9ffa43bec9eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://af3da6e6807f5d42cb8607efbabab0c5d472ddb5c02dd1c33f9f0f3ea8eba9c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://34b2a56f4af5df28c458b7f7d4588f81564c734a117bbf4d08e1fccfa43fd387\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e23fb2eb8cab90b4a0352a351bc63c590a96ec40331e9db29b2f39fe76cbf0f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://df06d11d5998c885ff8bd3dd1fd314bc3157a4177aafe218655e8cf5b3ea64d9\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-23T14:42:16Z\\\",\\\"message\\\":\\\"oval\\\\nI1123 14:42:16.467386 6369 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1123 14:42:16.467393 6369 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1123 14:42:16.467423 6369 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1123 14:42:16.467460 6369 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1123 14:42:16.467464 6369 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1123 14:42:16.467460 6369 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1123 14:42:16.467511 6369 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1123 14:42:16.467567 6369 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1123 14:42:16.467610 6369 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1123 14:42:16.467658 6369 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1123 14:42:16.467674 6369 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1123 14:42:16.467691 6369 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1123 14:42:16.467704 6369 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1123 14:42:16.467719 6369 factory.go:656] Stopping watch factory\\\\nI1123 14:42:16.467736 6369 ovnkube.go:599] Stopped ovnkube\\\\nI1123 14:42:16.467764 6369 metrics.go:553] Stopping metrics server at address \\\\\\\"\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:13Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e23fb2eb8cab90b4a0352a351bc63c590a96ec40331e9db29b2f39fe76cbf0f8\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-23T14:42:17Z\\\",\\\"message\\\":\\\"3 14:42:17.882505 6496 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1123 14:42:17.882633 6496 reflector.go:311] Stopping reflector *v1.EgressService (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI1123 14:42:17.882768 6496 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1123 14:42:17.882928 6496 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1123 14:42:17.883130 6496 reflector.go:311] Stopping reflector *v1.EgressQoS (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/factory.go:140\\\\nI1123 14:42:17.883305 6496 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1123 14:42:17.883429 6496 
reflector.go:311] Stopping reflector *v1.NetworkAttachmentDefinition (0s) from github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/factory.go:117\\\\nI1123 14:42:17.883712 6496 reflector.go:311] Stopping reflector *v1.AdminPolicyBasedExternalRoute (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/informers/externalversions/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7009f69906eef1a76bebe08606ec9a9b7e61ce6217caaf0b784d0943db4bd51f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\
\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c68e73aa649285bedc9716826bdf5e41580614fba6de910e2dc1fa5bb524dcc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6c68e73aa649285bedc9716826bdf5e41580614fba6de910e2dc1fa5bb524dcc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-j8fzz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:20Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:20 crc kubenswrapper[5050]: I1123 14:42:20.986131 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-9q4mg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b75c970-bec5-4155-89c1-3fcba8733f70\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4f4f3e4f2e058d94d8ff4dc299e19a23318e04263a4324efa766daf5f7268f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lsct2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2cc3ecb845a59d933cd05a64b4c258b18d167098d933d7b0741d7dbb43bfb328\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lsct2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:19Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-9q4mg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:20Z is after 2025-08-24T17:21:41Z" Nov 23 
14:42:20 crc kubenswrapper[5050]: I1123 14:42:20.995685 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:20 crc kubenswrapper[5050]: I1123 14:42:20.995739 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:20 crc kubenswrapper[5050]: I1123 14:42:20.995752 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:20 crc kubenswrapper[5050]: I1123 14:42:20.995775 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:20 crc kubenswrapper[5050]: I1123 14:42:20.995787 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:20Z","lastTransitionTime":"2025-11-23T14:42:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:21 crc kubenswrapper[5050]: I1123 14:42:21.002130 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:21Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:21 crc kubenswrapper[5050]: I1123 14:42:21.016740 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ced068547f7e7c0807e996ab8500b30cda68a4c31363b415f66516b883221972\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:21Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:21 crc kubenswrapper[5050]: I1123 14:42:21.027548 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7cjg9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7f65245a-e7ea-4b29-9534-5dbe5a7ee271\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf73369e7515e9d61bf8fc0b0bd940f15e49a8e84b3e181dad104bd56c2d9b2d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-586ds\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:08Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7cjg9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:21Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:21 crc kubenswrapper[5050]: I1123 14:42:21.042138 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:21Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:21 crc kubenswrapper[5050]: I1123 14:42:21.058156 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-qvjn2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"abdac21e-18fc-460d-bd3b-73bed66b8ab9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d239bbc61595516c6e76763dd66ee795750adb765c56bb5d9c967c1e5897fcf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"syste
m-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bggpq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-qvjn2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:21Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:21 crc kubenswrapper[5050]: I1123 14:42:21.076308 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/cc69bd19-1f49-486e-a510-d5b8461fb172-metrics-certs\") pod \"network-metrics-daemon-gtj96\" (UID: \"cc69bd19-1f49-486e-a510-d5b8461fb172\") " pod="openshift-multus/network-metrics-daemon-gtj96" Nov 23 14:42:21 crc kubenswrapper[5050]: E1123 14:42:21.076562 5050 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 23 14:42:21 crc kubenswrapper[5050]: E1123 14:42:21.076685 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/cc69bd19-1f49-486e-a510-d5b8461fb172-metrics-certs podName:cc69bd19-1f49-486e-a510-d5b8461fb172 nodeName:}" failed. No retries permitted until 2025-11-23 14:42:22.076659193 +0000 UTC m=+37.243655698 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/cc69bd19-1f49-486e-a510-d5b8461fb172-metrics-certs") pod "network-metrics-daemon-gtj96" (UID: "cc69bd19-1f49-486e-a510-d5b8461fb172") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 23 14:42:21 crc kubenswrapper[5050]: I1123 14:42:21.077394 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac91134c1368426cb845f20e6c777b0b613d93b1761d097051a7e63dae4203a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ef730e87b3ef157558a92f93526e4dec23e71b48f5cb08a30242f143182a5d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-23T14:42:21Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:21 crc kubenswrapper[5050]: I1123 14:42:21.094393 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1d998909-9470-47ef-87e8-b34f0473682f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5bbc40aad4e1fff72cfeaf901bbe2edde01b909690cec06e0828aee8a4c93e8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jx4bd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b97978773311507f492064330c0ebbaf7862255a02347599c52ce6bbd13d8922\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jx4bd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-hlrlq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:21Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:21 crc kubenswrapper[5050]: I1123 14:42:21.099145 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:21 crc kubenswrapper[5050]: I1123 14:42:21.099188 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:21 crc kubenswrapper[5050]: I1123 14:42:21.099203 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:21 crc kubenswrapper[5050]: I1123 14:42:21.099220 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:21 crc kubenswrapper[5050]: I1123 14:42:21.099236 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:21Z","lastTransitionTime":"2025-11-23T14:42:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:21 crc kubenswrapper[5050]: I1123 14:42:21.115058 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-km979" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1d7b7de2-b361-44b9-ba10-5d4f889abc9b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a024193010036bae37aa1c744cd832dced7ca4893acc76efe5c2b9db65834a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://32b4eece06b72ca080adc2e1fab7f
1db357c926b47433f2d374afaa45dad6b38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://32b4eece06b72ca080adc2e1fab7f1db357c926b47433f2d374afaa45dad6b38\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86867dd85be2aec28e680e55a6fb723964f26082eff07f656c00bd79cd2d3cd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://86867dd85be2aec28e680e55a6fb723964f26082eff07f656c00bd79cd2d3cd2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c72b02501d9b33ae056e91b10f13e50327984b1cabcd69b5066780aa4fde2682\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c72b02501d9b33ae056e91b10f13e50327984b1cabcd69b5066780aa4fde2682\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\"
,\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba558b2174a35efd061ce97a84b1b785916ea34d481d58cf4582a5bf29d6b24a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba558b2174a35efd061ce97a84b1b785916ea34d481d58cf4582a5bf29d6b24a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9cd2247c7d101274e0f7a09b531266720489fb8ed77f89e7ccd1ee74882508d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c9cd2247c7d101274e0f7a09b531266720489fb8ed77f89e7ccd1ee74882508d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0873c53f2aa7003b92b7460595f79f52b70796f9e38c133da799b5c49785d857\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\
\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0873c53f2aa7003b92b7460595f79f52b70796f9e38c133da799b5c49785d857\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-km979\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:21Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:21 crc kubenswrapper[5050]: I1123 14:42:21.129288 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8861091e-df98-4ea6-a853-e76e7dec48c5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://374a569595342d2e7684dc020e82bd80b35965aff751215c6c926005ea040798\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://63b17cb181aa6b22312393ce35127c8e06ae80cd1feaaa001d7cb34cdc111456\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-co
ntroller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3e232fb0da11b5caaec81c7b6ae7185d121508d99fce67c77e1d5d52289dad8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7b8be59b2e91f123c3d88b93237121402f84e9f54aeeaa0fae57baaa6238140\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:41:45Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:21Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:21 crc kubenswrapper[5050]: I1123 14:42:21.146848 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://719b29f3f0e7ff3aa3d105c9f4d9fb8d180d13494fcef94b204dece23c0f763d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:21Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:21 crc kubenswrapper[5050]: I1123 14:42:21.202114 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:21 crc kubenswrapper[5050]: I1123 14:42:21.202174 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:21 crc kubenswrapper[5050]: I1123 14:42:21.202191 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:21 crc kubenswrapper[5050]: I1123 14:42:21.202217 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:21 crc kubenswrapper[5050]: I1123 14:42:21.202242 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:21Z","lastTransitionTime":"2025-11-23T14:42:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 23 14:42:21 crc kubenswrapper[5050]: I1123 14:42:21.305399 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 23 14:42:21 crc kubenswrapper[5050]: I1123 14:42:21.305467 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 23 14:42:21 crc kubenswrapper[5050]: I1123 14:42:21.305483 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 23 14:42:21 crc kubenswrapper[5050]: I1123 14:42:21.305507 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 23 14:42:21 crc kubenswrapper[5050]: I1123 14:42:21.305523 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:21Z","lastTransitionTime":"2025-11-23T14:42:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 23 14:42:21 crc kubenswrapper[5050]: I1123 14:42:21.379932 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 23 14:42:21 crc kubenswrapper[5050]: E1123 14:42:21.380168 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-23 14:42:37.380122656 +0000 UTC m=+52.547119151 (durationBeforeRetry 16s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 23 14:42:21 crc kubenswrapper[5050]: I1123 14:42:21.380238 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 23 14:42:21 crc kubenswrapper[5050]: I1123 14:42:21.380310 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 23 14:42:21 crc kubenswrapper[5050]: E1123 14:42:21.380608 5050 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered
Nov 23 14:42:21 crc kubenswrapper[5050]: E1123 14:42:21.380656 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-23 14:42:37.38064614 +0000 UTC m=+52.547642635 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered
Nov 23 14:42:21 crc kubenswrapper[5050]: E1123 14:42:21.380887 5050 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered
Nov 23 14:42:21 crc kubenswrapper[5050]: E1123 14:42:21.381019 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-23 14:42:37.380982559 +0000 UTC m=+52.547979234 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered
Nov 23 14:42:21 crc kubenswrapper[5050]: I1123 14:42:21.408343 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 23 14:42:21 crc kubenswrapper[5050]: I1123 14:42:21.408417 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 23 14:42:21 crc kubenswrapper[5050]: I1123 14:42:21.408439 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 23 14:42:21 crc kubenswrapper[5050]: I1123 14:42:21.408529 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 23 14:42:21 crc kubenswrapper[5050]: I1123 14:42:21.408552 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:21Z","lastTransitionTime":"2025-11-23T14:42:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 23 14:42:21 crc kubenswrapper[5050]: I1123 14:42:21.481048 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 23 14:42:21 crc kubenswrapper[5050]: I1123 14:42:21.481111 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 23 14:42:21 crc kubenswrapper[5050]: E1123 14:42:21.481257 5050 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Nov 23 14:42:21 crc kubenswrapper[5050]: E1123 14:42:21.481279 5050 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Nov 23 14:42:21 crc kubenswrapper[5050]: E1123 14:42:21.481278 5050 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Nov 23 14:42:21 crc kubenswrapper[5050]: E1123 14:42:21.481336 5050 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Nov 23 14:42:21 crc kubenswrapper[5050]: E1123 14:42:21.481359 5050 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 23 14:42:21 crc kubenswrapper[5050]: E1123 14:42:21.481294 5050 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 23 14:42:21 crc kubenswrapper[5050]: E1123 14:42:21.481491 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-23 14:42:37.481410714 +0000 UTC m=+52.648407239 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 23 14:42:21 crc kubenswrapper[5050]: E1123 14:42:21.481529 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-23 14:42:37.481513277 +0000 UTC m=+52.648510012 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 23 14:42:21 crc kubenswrapper[5050]: I1123 14:42:21.511970 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 23 14:42:21 crc kubenswrapper[5050]: I1123 14:42:21.512025 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 23 14:42:21 crc kubenswrapper[5050]: I1123 14:42:21.512046 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 23 14:42:21 crc kubenswrapper[5050]: I1123 14:42:21.512066 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 23 14:42:21 crc kubenswrapper[5050]: I1123 14:42:21.512080 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:21Z","lastTransitionTime":"2025-11-23T14:42:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 23 14:42:21 crc kubenswrapper[5050]: I1123 14:42:21.548185 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 23 14:42:21 crc kubenswrapper[5050]: E1123 14:42:21.548300 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 23 14:42:21 crc kubenswrapper[5050]: I1123 14:42:21.615996 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 23 14:42:21 crc kubenswrapper[5050]: I1123 14:42:21.616057 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 23 14:42:21 crc kubenswrapper[5050]: I1123 14:42:21.616070 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 23 14:42:21 crc kubenswrapper[5050]: I1123 14:42:21.616089 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 23 14:42:21 crc kubenswrapper[5050]: I1123 14:42:21.616101 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:21Z","lastTransitionTime":"2025-11-23T14:42:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 23 14:42:21 crc kubenswrapper[5050]: I1123 14:42:21.719984 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 23 14:42:21 crc kubenswrapper[5050]: I1123 14:42:21.720094 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 23 14:42:21 crc kubenswrapper[5050]: I1123 14:42:21.720591 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 23 14:42:21 crc kubenswrapper[5050]: I1123 14:42:21.720684 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 23 14:42:21 crc kubenswrapper[5050]: I1123 14:42:21.720942 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:21Z","lastTransitionTime":"2025-11-23T14:42:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 23 14:42:21 crc kubenswrapper[5050]: I1123 14:42:21.825311 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 23 14:42:21 crc kubenswrapper[5050]: I1123 14:42:21.825389 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 23 14:42:21 crc kubenswrapper[5050]: I1123 14:42:21.825408 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 23 14:42:21 crc kubenswrapper[5050]: I1123 14:42:21.825479 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 23 14:42:21 crc kubenswrapper[5050]: I1123 14:42:21.825513 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:21Z","lastTransitionTime":"2025-11-23T14:42:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 23 14:42:21 crc kubenswrapper[5050]: I1123 14:42:21.928927 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 23 14:42:21 crc kubenswrapper[5050]: I1123 14:42:21.928972 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 23 14:42:21 crc kubenswrapper[5050]: I1123 14:42:21.928984 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 23 14:42:21 crc kubenswrapper[5050]: I1123 14:42:21.929003 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 23 14:42:21 crc kubenswrapper[5050]: I1123 14:42:21.929015 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:21Z","lastTransitionTime":"2025-11-23T14:42:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 23 14:42:21 crc kubenswrapper[5050]: I1123 14:42:21.995033 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 23 14:42:21 crc kubenswrapper[5050]: I1123 14:42:21.995092 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 23 14:42:21 crc kubenswrapper[5050]: I1123 14:42:21.995110 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 23 14:42:21 crc kubenswrapper[5050]: I1123 14:42:21.995137 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 23 14:42:21 crc kubenswrapper[5050]: I1123 14:42:21.995155 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:21Z","lastTransitionTime":"2025-11-23T14:42:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:22 crc kubenswrapper[5050]: E1123 14:42:22.016365 5050 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:42:21Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:21Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:42:21Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:21Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:42:21Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:21Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:42:21Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:21Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bf8d1c9f-d59a-4fb5-8c40-1d0a144375f2\\\",\\\"systemUUID\\\":\\\"6e38a339-19e4-4f40-bd49-1fbe05dbb3f1\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:22Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:22 crc kubenswrapper[5050]: I1123 14:42:22.022108 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:22 crc kubenswrapper[5050]: I1123 14:42:22.022186 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 23 14:42:22 crc kubenswrapper[5050]: I1123 14:42:22.022198 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:22 crc kubenswrapper[5050]: I1123 14:42:22.022216 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:22 crc kubenswrapper[5050]: I1123 14:42:22.022231 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:22Z","lastTransitionTime":"2025-11-23T14:42:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:22 crc kubenswrapper[5050]: E1123 14:42:22.039231 5050 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:42:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:42:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:22Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:42:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:42:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:22Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bf8d1c9f-d59a-4fb5-8c40-1d0a144375f2\\\",\\\"systemUUID\\\":\\\"6e38a339-19e4-4f40-bd49-1fbe05dbb3f1\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:22Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:22 crc kubenswrapper[5050]: I1123 14:42:22.043381 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:22 crc kubenswrapper[5050]: I1123 14:42:22.043441 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 23 14:42:22 crc kubenswrapper[5050]: I1123 14:42:22.043500 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:22 crc kubenswrapper[5050]: I1123 14:42:22.043533 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:22 crc kubenswrapper[5050]: I1123 14:42:22.043558 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:22Z","lastTransitionTime":"2025-11-23T14:42:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:22 crc kubenswrapper[5050]: E1123 14:42:22.061029 5050 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:42:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:42:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:22Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:42:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:42:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:22Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bf8d1c9f-d59a-4fb5-8c40-1d0a144375f2\\\",\\\"systemUUID\\\":\\\"6e38a339-19e4-4f40-bd49-1fbe05dbb3f1\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:22Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:22 crc kubenswrapper[5050]: I1123 14:42:22.065803 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:22 crc kubenswrapper[5050]: I1123 14:42:22.065871 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 23 14:42:22 crc kubenswrapper[5050]: I1123 14:42:22.065891 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:22 crc kubenswrapper[5050]: I1123 14:42:22.065918 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:22 crc kubenswrapper[5050]: I1123 14:42:22.065937 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:22Z","lastTransitionTime":"2025-11-23T14:42:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:22 crc kubenswrapper[5050]: I1123 14:42:22.087585 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/cc69bd19-1f49-486e-a510-d5b8461fb172-metrics-certs\") pod \"network-metrics-daemon-gtj96\" (UID: \"cc69bd19-1f49-486e-a510-d5b8461fb172\") " pod="openshift-multus/network-metrics-daemon-gtj96" Nov 23 14:42:22 crc kubenswrapper[5050]: E1123 14:42:22.087805 5050 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 23 14:42:22 crc kubenswrapper[5050]: E1123 14:42:22.087887 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/cc69bd19-1f49-486e-a510-d5b8461fb172-metrics-certs podName:cc69bd19-1f49-486e-a510-d5b8461fb172 nodeName:}" failed. No retries permitted until 2025-11-23 14:42:24.087862918 +0000 UTC m=+39.254859413 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/cc69bd19-1f49-486e-a510-d5b8461fb172-metrics-certs") pod "network-metrics-daemon-gtj96" (UID: "cc69bd19-1f49-486e-a510-d5b8461fb172") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 23 14:42:22 crc kubenswrapper[5050]: E1123 14:42:22.087746 5050 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:42:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:42:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:22Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:42:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:42:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:22Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bf8d1c9f-d59a-4fb5-8c40-1d0a144375f2\\\",\\\"systemUUID\\\":\\\"6e38a339-19e4-4f40-bd49-1fbe05dbb3f1\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:22Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:22 crc kubenswrapper[5050]: I1123 14:42:22.092787 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:22 crc kubenswrapper[5050]: I1123 14:42:22.092859 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 23 14:42:22 crc kubenswrapper[5050]: I1123 14:42:22.092924 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:22 crc kubenswrapper[5050]: I1123 14:42:22.092957 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:22 crc kubenswrapper[5050]: I1123 14:42:22.092981 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:22Z","lastTransitionTime":"2025-11-23T14:42:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:22 crc kubenswrapper[5050]: E1123 14:42:22.109195 5050 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:42:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:42:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:22Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:42:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:42:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:22Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bf8d1c9f-d59a-4fb5-8c40-1d0a144375f2\\\",\\\"systemUUID\\\":\\\"6e38a339-19e4-4f40-bd49-1fbe05dbb3f1\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:22Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:22 crc kubenswrapper[5050]: E1123 14:42:22.109424 5050 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 23 14:42:22 crc kubenswrapper[5050]: I1123 14:42:22.111354 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
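Every "Error updating node status" failure above has the same root cause: the serving certificate of the node.network-node-identity.openshift.io webhook on 127.0.0.1:9743 expired on 2025-08-24T17:21:41Z, while the node clock reads 2025-11-23. A minimal sketch for confirming this from the node itself (assumes Python 3 with the third-party cryptography package available; verification is deliberately disabled, since verifying is exactly what fails):

    import socket
    import ssl
    from cryptography import x509

    # Fetch the webhook's serving certificate without verifying it;
    # normal verification would fail exactly as the kubelet reports.
    ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    ctx.check_hostname = False
    ctx.verify_mode = ssl.CERT_NONE

    with socket.create_connection(("127.0.0.1", 9743), timeout=5) as sock:
        with ctx.wrap_socket(sock, server_hostname="localhost") as tls:
            der = tls.getpeercert(binary_form=True)

    cert = x509.load_der_x509_certificate(der)
    print("notBefore:", cert.not_valid_before)
    print("notAfter: ", cert.not_valid_after)  # expect 2025-08-24 17:21:41 per the error above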
event="NodeHasSufficientMemory" Nov 23 14:42:22 crc kubenswrapper[5050]: I1123 14:42:22.111407 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:22 crc kubenswrapper[5050]: I1123 14:42:22.111425 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:22 crc kubenswrapper[5050]: I1123 14:42:22.111475 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:22 crc kubenswrapper[5050]: I1123 14:42:22.111494 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:22Z","lastTransitionTime":"2025-11-23T14:42:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:22 crc kubenswrapper[5050]: I1123 14:42:22.213876 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:22 crc kubenswrapper[5050]: I1123 14:42:22.213967 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:22 crc kubenswrapper[5050]: I1123 14:42:22.213988 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:22 crc kubenswrapper[5050]: I1123 14:42:22.214022 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:22 crc kubenswrapper[5050]: I1123 14:42:22.214074 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:22Z","lastTransitionTime":"2025-11-23T14:42:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:22 crc kubenswrapper[5050]: I1123 14:42:22.317636 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:22 crc kubenswrapper[5050]: I1123 14:42:22.317715 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:22 crc kubenswrapper[5050]: I1123 14:42:22.317741 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:22 crc kubenswrapper[5050]: I1123 14:42:22.317777 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:22 crc kubenswrapper[5050]: I1123 14:42:22.317801 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:22Z","lastTransitionTime":"2025-11-23T14:42:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:22 crc kubenswrapper[5050]: I1123 14:42:22.420705 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:22 crc kubenswrapper[5050]: I1123 14:42:22.420746 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:22 crc kubenswrapper[5050]: I1123 14:42:22.420759 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:22 crc kubenswrapper[5050]: I1123 14:42:22.420776 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:22 crc kubenswrapper[5050]: I1123 14:42:22.420789 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:22Z","lastTransitionTime":"2025-11-23T14:42:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:22 crc kubenswrapper[5050]: I1123 14:42:22.524020 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:22 crc kubenswrapper[5050]: I1123 14:42:22.524105 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:22 crc kubenswrapper[5050]: I1123 14:42:22.524158 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:22 crc kubenswrapper[5050]: I1123 14:42:22.524186 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:22 crc kubenswrapper[5050]: I1123 14:42:22.524204 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:22Z","lastTransitionTime":"2025-11-23T14:42:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:22 crc kubenswrapper[5050]: I1123 14:42:22.547665 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 23 14:42:22 crc kubenswrapper[5050]: I1123 14:42:22.547765 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gtj96" Nov 23 14:42:22 crc kubenswrapper[5050]: I1123 14:42:22.547801 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 23 14:42:22 crc kubenswrapper[5050]: E1123 14:42:22.547882 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 23 14:42:22 crc kubenswrapper[5050]: E1123 14:42:22.548282 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gtj96" podUID="cc69bd19-1f49-486e-a510-d5b8461fb172" Nov 23 14:42:22 crc kubenswrapper[5050]: E1123 14:42:22.548771 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 23 14:42:22 crc kubenswrapper[5050]: I1123 14:42:22.548900 5050 scope.go:117] "RemoveContainer" containerID="10cf72fab955e4ec81917127043d9e4a2f9d427a03edc22120ab2ae711fdc5a4" Nov 23 14:42:22 crc kubenswrapper[5050]: I1123 14:42:22.630435 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:22 crc kubenswrapper[5050]: I1123 14:42:22.630504 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:22 crc kubenswrapper[5050]: I1123 14:42:22.630520 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:22 crc kubenswrapper[5050]: I1123 14:42:22.630545 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:22 crc kubenswrapper[5050]: I1123 14:42:22.630562 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:22Z","lastTransitionTime":"2025-11-23T14:42:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:22 crc kubenswrapper[5050]: I1123 14:42:22.733106 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:22 crc kubenswrapper[5050]: I1123 14:42:22.733399 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:22 crc kubenswrapper[5050]: I1123 14:42:22.733413 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:22 crc kubenswrapper[5050]: I1123 14:42:22.733431 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:22 crc kubenswrapper[5050]: I1123 14:42:22.733447 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:22Z","lastTransitionTime":"2025-11-23T14:42:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:22 crc kubenswrapper[5050]: I1123 14:42:22.836040 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:22 crc kubenswrapper[5050]: I1123 14:42:22.836075 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:22 crc kubenswrapper[5050]: I1123 14:42:22.836086 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:22 crc kubenswrapper[5050]: I1123 14:42:22.836105 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:22 crc kubenswrapper[5050]: I1123 14:42:22.836119 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:22Z","lastTransitionTime":"2025-11-23T14:42:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:22 crc kubenswrapper[5050]: I1123 14:42:22.939928 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:22 crc kubenswrapper[5050]: I1123 14:42:22.939976 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:22 crc kubenswrapper[5050]: I1123 14:42:22.939987 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:22 crc kubenswrapper[5050]: I1123 14:42:22.940007 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:22 crc kubenswrapper[5050]: I1123 14:42:22.940021 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:22Z","lastTransitionTime":"2025-11-23T14:42:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:23 crc kubenswrapper[5050]: I1123 14:42:23.042986 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:23 crc kubenswrapper[5050]: I1123 14:42:23.043041 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:23 crc kubenswrapper[5050]: I1123 14:42:23.043054 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:23 crc kubenswrapper[5050]: I1123 14:42:23.043077 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:23 crc kubenswrapper[5050]: I1123 14:42:23.043092 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:23Z","lastTransitionTime":"2025-11-23T14:42:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:23 crc kubenswrapper[5050]: I1123 14:42:23.146853 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:23 crc kubenswrapper[5050]: I1123 14:42:23.146911 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:23 crc kubenswrapper[5050]: I1123 14:42:23.146925 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:23 crc kubenswrapper[5050]: I1123 14:42:23.146947 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:23 crc kubenswrapper[5050]: I1123 14:42:23.146962 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:23Z","lastTransitionTime":"2025-11-23T14:42:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:23 crc kubenswrapper[5050]: I1123 14:42:23.250647 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:23 crc kubenswrapper[5050]: I1123 14:42:23.250706 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:23 crc kubenswrapper[5050]: I1123 14:42:23.250715 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:23 crc kubenswrapper[5050]: I1123 14:42:23.250734 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:23 crc kubenswrapper[5050]: I1123 14:42:23.250744 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:23Z","lastTransitionTime":"2025-11-23T14:42:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:23 crc kubenswrapper[5050]: I1123 14:42:23.354750 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:23 crc kubenswrapper[5050]: I1123 14:42:23.354806 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:23 crc kubenswrapper[5050]: I1123 14:42:23.354817 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:23 crc kubenswrapper[5050]: I1123 14:42:23.354837 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:23 crc kubenswrapper[5050]: I1123 14:42:23.354852 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:23Z","lastTransitionTime":"2025-11-23T14:42:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:23 crc kubenswrapper[5050]: I1123 14:42:23.458691 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:23 crc kubenswrapper[5050]: I1123 14:42:23.458776 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:23 crc kubenswrapper[5050]: I1123 14:42:23.458801 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:23 crc kubenswrapper[5050]: I1123 14:42:23.458835 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:23 crc kubenswrapper[5050]: I1123 14:42:23.458859 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:23Z","lastTransitionTime":"2025-11-23T14:42:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:23 crc kubenswrapper[5050]: I1123 14:42:23.548217 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 23 14:42:23 crc kubenswrapper[5050]: E1123 14:42:23.548518 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 23 14:42:23 crc kubenswrapper[5050]: I1123 14:42:23.562032 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:23 crc kubenswrapper[5050]: I1123 14:42:23.562112 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:23 crc kubenswrapper[5050]: I1123 14:42:23.562132 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:23 crc kubenswrapper[5050]: I1123 14:42:23.562163 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:23 crc kubenswrapper[5050]: I1123 14:42:23.562184 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:23Z","lastTransitionTime":"2025-11-23T14:42:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:23 crc kubenswrapper[5050]: I1123 14:42:23.665301 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:23 crc kubenswrapper[5050]: I1123 14:42:23.665360 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:23 crc kubenswrapper[5050]: I1123 14:42:23.665373 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:23 crc kubenswrapper[5050]: I1123 14:42:23.665394 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:23 crc kubenswrapper[5050]: I1123 14:42:23.665408 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:23Z","lastTransitionTime":"2025-11-23T14:42:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:23 crc kubenswrapper[5050]: I1123 14:42:23.768777 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:23 crc kubenswrapper[5050]: I1123 14:42:23.768849 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:23 crc kubenswrapper[5050]: I1123 14:42:23.768868 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:23 crc kubenswrapper[5050]: I1123 14:42:23.768898 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:23 crc kubenswrapper[5050]: I1123 14:42:23.768925 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:23Z","lastTransitionTime":"2025-11-23T14:42:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:23 crc kubenswrapper[5050]: I1123 14:42:23.872559 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:23 crc kubenswrapper[5050]: I1123 14:42:23.872649 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:23 crc kubenswrapper[5050]: I1123 14:42:23.872667 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:23 crc kubenswrapper[5050]: I1123 14:42:23.872700 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:23 crc kubenswrapper[5050]: I1123 14:42:23.872721 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:23Z","lastTransitionTime":"2025-11-23T14:42:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:23 crc kubenswrapper[5050]: I1123 14:42:23.893219 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log" Nov 23 14:42:23 crc kubenswrapper[5050]: I1123 14:42:23.896494 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"0c0214fb48430e8263a76a8412ebe7aaeaa85c1b476c22f8bbe9a5279f35aa5d"} Nov 23 14:42:23 crc kubenswrapper[5050]: I1123 14:42:23.897058 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 23 14:42:23 crc kubenswrapper[5050]: I1123 14:42:23.921070 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0a2166ba-b7f4-4eb1-95bc-4f44d90e72ea\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://358800bef7b7e16e58003aa5e2411212334a5b1a33c80d223eb7d40ad13867f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://456093b33927a232192a5fa7ee1a4228e9f37da58e5e09cb33018ae6f5b14b2c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ccfc14c36ecee0d8502415582be6053b096ea77504bf5de67f77bf6f74c0936b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0c0214fb48430e8263a76a8412ebe7aaeaa85c1b476c22f8bbe9a5279f35aa5d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://10cf72fab955e4ec81917127043d9e4a2f9d427a03edc22120ab2ae711fdc5a4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"ed_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed 
loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1763908925\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1763908925\\\\\\\\\\\\\\\" (2025-11-23 13:42:05 +0000 UTC to 2026-11-23 13:42:05 +0000 UTC (now=2025-11-23 14:42:05.669138051 +0000 UTC))\\\\\\\"\\\\nI1123 14:42:05.669209 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1123 14:42:05.669265 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1123 14:42:05.668647 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1123 14:42:05.669361 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1123 14:42:05.669413 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1123 14:42:05.668681 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3465722692/tls.crt::/tmp/serving-cert-3465722692/tls.key\\\\\\\"\\\\nI1123 14:42:05.669629 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1123 14:42:05.669660 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1123 14:42:05.668685 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1123 14:42:05.669884 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1123 14:42:05.668719 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1123 14:42:05.669899 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1123 14:42:05.668776 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-23T14:41:59Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://beaf638703b4b64fabd10eb14bad058dc0dae74261a462e7f2fddd8aa3811cf7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a63464a6e2f79d8e5a7e0eed9f72b684e0071cad60d3a801754762be4a2735c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a63464a6e2f79d8e5a7e0eed9f72b684e0071cad60d3a801754762be4a2735c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:41:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:41:45Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:23Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:23 crc kubenswrapper[5050]: I1123 14:42:23.940693 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:23Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:23 crc kubenswrapper[5050]: I1123 14:42:23.956163 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-gtj96" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc69bd19-1f49-486e-a510-d5b8461fb172\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ltjqr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ltjqr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:20Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-gtj96\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:23Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:23 crc kubenswrapper[5050]: I1123 14:42:23.970804 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-7btqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"963992d0-f69e-4327-9789-4571451c1838\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c4b1570d42f2b86c042b9abcb595bf36b35789f24d12573f94503c7383849647\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8vphk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:05Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-7btqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:23Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:23 crc kubenswrapper[5050]: I1123 14:42:23.975471 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:23 crc kubenswrapper[5050]: I1123 14:42:23.975536 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:23 crc kubenswrapper[5050]: I1123 14:42:23.975554 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:23 crc kubenswrapper[5050]: I1123 14:42:23.975584 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:23 crc kubenswrapper[5050]: I1123 14:42:23.975604 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:23Z","lastTransitionTime":"2025-11-23T14:42:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:23 crc kubenswrapper[5050]: I1123 14:42:23.995385 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6573c043-542c-47ae-a2ba-f70b8baf60c2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d06dfc9e93891495a387aa8a8e24b4029173b25c953dff4d2e80dbb30f4b03e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd601391a5bb150716ee97b765dd442d4bf9929eb01dfd17fdce39f327b4698f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recu
rsiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9de298aa47ed7038c4f4ed742abb7a13f112d7f417637eb11d31a1f1accf0b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfa445fb3677a09126d124eea1348466a338713c6728a7ef138d9ffa43bec9eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://af3da6e6807f5d42cb8607efbabab0c5d472ddb5c02dd1c33f9f0f3ea8eba9c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://34b2a56f4af5df28c458b7f7d4588f81564c734a117bbf4d08e1fccfa43fd387\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d7732574532
65a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e23fb2eb8cab90b4a0352a351bc63c590a96ec40331e9db29b2f39fe76cbf0f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://df06d11d5998c885ff8bd3dd1fd314bc3157a4177aafe218655e8cf5b3ea64d9\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-23T14:42:16Z\\\",\\\"message\\\":\\\"oval\\\\nI1123 14:42:16.467386 6369 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1123 14:42:16.467393 6369 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1123 14:42:16.467423 6369 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1123 14:42:16.467460 6369 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1123 14:42:16.467464 6369 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1123 14:42:16.467460 6369 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1123 14:42:16.467511 6369 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1123 14:42:16.467567 6369 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1123 14:42:16.467610 6369 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1123 14:42:16.467658 6369 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1123 14:42:16.467674 6369 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1123 14:42:16.467691 6369 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1123 14:42:16.467704 6369 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1123 14:42:16.467719 6369 factory.go:656] Stopping watch factory\\\\nI1123 14:42:16.467736 6369 ovnkube.go:599] Stopped ovnkube\\\\nI1123 14:42:16.467764 6369 metrics.go:553] Stopping metrics server at address 
\\\\\\\"\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:13Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e23fb2eb8cab90b4a0352a351bc63c590a96ec40331e9db29b2f39fe76cbf0f8\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-23T14:42:17Z\\\",\\\"message\\\":\\\"3 14:42:17.882505 6496 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1123 14:42:17.882633 6496 reflector.go:311] Stopping reflector *v1.EgressService (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI1123 14:42:17.882768 6496 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1123 14:42:17.882928 6496 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1123 14:42:17.883130 6496 reflector.go:311] Stopping reflector *v1.EgressQoS (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/factory.go:140\\\\nI1123 14:42:17.883305 6496 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1123 14:42:17.883429 6496 reflector.go:311] Stopping reflector *v1.NetworkAttachmentDefinition (0s) from github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/factory.go:117\\\\nI1123 14:42:17.883712 6496 reflector.go:311] Stopping reflector *v1.AdminPolicyBasedExternalRoute (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/informers/externalversions/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPat
h\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7009f69906eef1a76bebe08606ec9a9b7e61ce6217caaf0b784d0943db4bd51f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c68e73aa649285bedc9716826bdf5e41580614fba6de910e2dc1fa5bb524dcc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6c68e73aa649285bedc9716826bdf5e41580614fba6de910e2dc1fa5bb524dcc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-j8fzz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:23Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:24 crc kubenswrapper[5050]: I1123 14:42:24.010895 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-9q4mg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b75c970-bec5-4155-89c1-3fcba8733f70\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4f4f3e4f2e058d94d8ff4dc299e19a23318e04263a4324efa766daf5f7268f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lsct2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2cc3ecb845a59d933cd05a64b4c258b18d167098d933d7b0741d7dbb43bfb328\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lsct2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:19Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-9q4mg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:24Z is after 2025-08-24T17:21:41Z" Nov 23 
14:42:24 crc kubenswrapper[5050]: I1123 14:42:24.032226 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:24Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:24 crc kubenswrapper[5050]: I1123 14:42:24.050728 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ced068547f7e7c0807e996ab8500b30cda68a4c31363b415f66516b883221972\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:24Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:24 crc kubenswrapper[5050]: I1123 14:42:24.064919 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7cjg9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7f65245a-e7ea-4b29-9534-5dbe5a7ee271\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf73369e7515e9d61bf8fc0b0bd940f15e49a8e84b3e181dad104bd56c2d9b2d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-586ds\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:08Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7cjg9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:24Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:24 crc kubenswrapper[5050]: I1123 14:42:24.078673 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:24 crc kubenswrapper[5050]: I1123 14:42:24.078731 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:24 crc kubenswrapper[5050]: I1123 14:42:24.078752 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:24 crc kubenswrapper[5050]: I1123 14:42:24.078779 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:24 crc kubenswrapper[5050]: I1123 14:42:24.078802 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:24Z","lastTransitionTime":"2025-11-23T14:42:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:24 crc kubenswrapper[5050]: I1123 14:42:24.082191 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:24Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:24 crc kubenswrapper[5050]: I1123 14:42:24.110879 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-qvjn2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"abdac21e-18fc-460d-bd3b-73bed66b8ab9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d239bbc61595516c6e76763dd66ee795750adb765c56bb5d9c967c1e5897fcf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bggpq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-qvjn2\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:24Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:24 crc kubenswrapper[5050]: I1123 14:42:24.114676 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/cc69bd19-1f49-486e-a510-d5b8461fb172-metrics-certs\") pod \"network-metrics-daemon-gtj96\" (UID: \"cc69bd19-1f49-486e-a510-d5b8461fb172\") " pod="openshift-multus/network-metrics-daemon-gtj96" Nov 23 14:42:24 crc kubenswrapper[5050]: E1123 14:42:24.115027 5050 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 23 14:42:24 crc kubenswrapper[5050]: E1123 14:42:24.115186 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/cc69bd19-1f49-486e-a510-d5b8461fb172-metrics-certs podName:cc69bd19-1f49-486e-a510-d5b8461fb172 nodeName:}" failed. No retries permitted until 2025-11-23 14:42:28.115150184 +0000 UTC m=+43.282146709 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/cc69bd19-1f49-486e-a510-d5b8461fb172-metrics-certs") pod "network-metrics-daemon-gtj96" (UID: "cc69bd19-1f49-486e-a510-d5b8461fb172") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 23 14:42:24 crc kubenswrapper[5050]: I1123 14:42:24.144775 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac91134c1368426cb845f20e6c777b0b613d93b1761d097051a7e63dae4203a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ef730e87b3ef157558a92f93526e4dec23e71b48f5cb08a30242f143182a5d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art
-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:24Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:24 crc kubenswrapper[5050]: I1123 14:42:24.181867 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:24 crc kubenswrapper[5050]: I1123 14:42:24.181976 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:24 crc kubenswrapper[5050]: I1123 14:42:24.181991 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:24 crc kubenswrapper[5050]: I1123 14:42:24.182012 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:24 crc kubenswrapper[5050]: I1123 14:42:24.182024 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:24Z","lastTransitionTime":"2025-11-23T14:42:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:24 crc kubenswrapper[5050]: I1123 14:42:24.192617 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1d998909-9470-47ef-87e8-b34f0473682f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5bbc40aad4e1fff72cfeaf901bbe2edde01b909690cec06e0828aee8a4c93e8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jx4bd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b97978773311507f492064330c0ebbaf7862255a02347599c52ce6bbd13d8922\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jx4bd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-hlrlq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:24Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:24 crc kubenswrapper[5050]: I1123 14:42:24.209720 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-km979" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1d7b7de2-b361-44b9-ba10-5d4f889abc9b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a024193010036bae37aa1c744cd832dced7ca4893acc76efe5c2b9db65834a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://32b4eece06b72ca080adc2e1fab7f1db357c926b47433f2d374afaa45dad6b38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://32b4eece06b72ca080adc2e1fab7f1db357c926b47433f2d374afaa45dad6b38\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"read
Only\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86867dd85be2aec28e680e55a6fb723964f26082eff07f656c00bd79cd2d3cd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://86867dd85be2aec28e680e55a6fb723964f26082eff07f656c00bd79cd2d3cd2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c72b02501d9b33ae056e91b10f13e50327984b1cabcd69b5066780aa4fde2682\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c72b02501d9b33ae056e91b10f13e50327984b1cabcd69b5066780aa4fde2682\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba558b2174a35efd061ce97a84b1b785916ea34d481d58cf4582a5bf29d6b24a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba558b2174a35efd061ce97a84b1b785916ea34d481d58cf4582a5bf29d6b24a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:11Z\\\",\\\"reason\\\":\\\"Completed
\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9cd2247c7d101274e0f7a09b531266720489fb8ed77f89e7ccd1ee74882508d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c9cd2247c7d101274e0f7a09b531266720489fb8ed77f89e7ccd1ee74882508d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0873c53f2aa7003b92b7460595f79f52b70796f9e38c133da799b5c49785d857\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0873c53f2aa7003b92b7460595f79f52b70796f9e38c133da799b5c49785d857\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-km979\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:24Z is after 
2025-08-24T17:21:41Z" Nov 23 14:42:24 crc kubenswrapper[5050]: I1123 14:42:24.223531 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8861091e-df98-4ea6-a853-e76e7dec48c5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://374a569595342d2e7684dc020e82bd80b35965aff751215c6c926005ea040798\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://63b17cb181aa6b22312393ce35127c8e06ae80cd1feaaa001d7cb34cdc111456\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3e232fb0da11b5caaec81c7b6ae7185d121508d99fce67c77e1d5d52289dad8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/k
ubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7b8be59b2e91f123c3d88b93237121402f84e9f54aeeaa0fae57baaa6238140\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:41:45Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:24Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:24 crc kubenswrapper[5050]: I1123 14:42:24.238565 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://719b29f3f0e7ff3aa3d105c9f4d9fb8d180d13494fcef94b204dece23c0f763d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:24Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:24 crc kubenswrapper[5050]: I1123 14:42:24.284816 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:24 crc kubenswrapper[5050]: I1123 14:42:24.284888 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:24 crc kubenswrapper[5050]: I1123 14:42:24.284908 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:24 crc kubenswrapper[5050]: I1123 14:42:24.284939 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:24 crc kubenswrapper[5050]: I1123 14:42:24.284965 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:24Z","lastTransitionTime":"2025-11-23T14:42:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:24 crc kubenswrapper[5050]: I1123 14:42:24.388504 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:24 crc kubenswrapper[5050]: I1123 14:42:24.388581 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:24 crc kubenswrapper[5050]: I1123 14:42:24.388608 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:24 crc kubenswrapper[5050]: I1123 14:42:24.388654 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:24 crc kubenswrapper[5050]: I1123 14:42:24.388685 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:24Z","lastTransitionTime":"2025-11-23T14:42:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:24 crc kubenswrapper[5050]: I1123 14:42:24.492916 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:24 crc kubenswrapper[5050]: I1123 14:42:24.493001 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:24 crc kubenswrapper[5050]: I1123 14:42:24.493020 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:24 crc kubenswrapper[5050]: I1123 14:42:24.493052 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:24 crc kubenswrapper[5050]: I1123 14:42:24.493073 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:24Z","lastTransitionTime":"2025-11-23T14:42:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:24 crc kubenswrapper[5050]: I1123 14:42:24.547758 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 23 14:42:24 crc kubenswrapper[5050]: I1123 14:42:24.547791 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 23 14:42:24 crc kubenswrapper[5050]: E1123 14:42:24.547965 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 23 14:42:24 crc kubenswrapper[5050]: I1123 14:42:24.548058 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gtj96" Nov 23 14:42:24 crc kubenswrapper[5050]: E1123 14:42:24.548278 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 23 14:42:24 crc kubenswrapper[5050]: E1123 14:42:24.548397 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-gtj96" podUID="cc69bd19-1f49-486e-a510-d5b8461fb172" Nov 23 14:42:24 crc kubenswrapper[5050]: I1123 14:42:24.595805 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:24 crc kubenswrapper[5050]: I1123 14:42:24.595846 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:24 crc kubenswrapper[5050]: I1123 14:42:24.595860 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:24 crc kubenswrapper[5050]: I1123 14:42:24.595877 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:24 crc kubenswrapper[5050]: I1123 14:42:24.595887 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:24Z","lastTransitionTime":"2025-11-23T14:42:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:24 crc kubenswrapper[5050]: I1123 14:42:24.698744 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:24 crc kubenswrapper[5050]: I1123 14:42:24.698819 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:24 crc kubenswrapper[5050]: I1123 14:42:24.698844 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:24 crc kubenswrapper[5050]: I1123 14:42:24.698885 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:24 crc kubenswrapper[5050]: I1123 14:42:24.698912 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:24Z","lastTransitionTime":"2025-11-23T14:42:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:24 crc kubenswrapper[5050]: I1123 14:42:24.802428 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:24 crc kubenswrapper[5050]: I1123 14:42:24.802543 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:24 crc kubenswrapper[5050]: I1123 14:42:24.802569 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:24 crc kubenswrapper[5050]: I1123 14:42:24.802603 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:24 crc kubenswrapper[5050]: I1123 14:42:24.802629 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:24Z","lastTransitionTime":"2025-11-23T14:42:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:24 crc kubenswrapper[5050]: I1123 14:42:24.906150 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:24 crc kubenswrapper[5050]: I1123 14:42:24.906243 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:24 crc kubenswrapper[5050]: I1123 14:42:24.906265 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:24 crc kubenswrapper[5050]: I1123 14:42:24.906295 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:24 crc kubenswrapper[5050]: I1123 14:42:24.906318 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:24Z","lastTransitionTime":"2025-11-23T14:42:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:25 crc kubenswrapper[5050]: I1123 14:42:25.009581 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:25 crc kubenswrapper[5050]: I1123 14:42:25.009684 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:25 crc kubenswrapper[5050]: I1123 14:42:25.009722 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:25 crc kubenswrapper[5050]: I1123 14:42:25.009761 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:25 crc kubenswrapper[5050]: I1123 14:42:25.009787 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:25Z","lastTransitionTime":"2025-11-23T14:42:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:25 crc kubenswrapper[5050]: I1123 14:42:25.113371 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:25 crc kubenswrapper[5050]: I1123 14:42:25.113487 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:25 crc kubenswrapper[5050]: I1123 14:42:25.113512 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:25 crc kubenswrapper[5050]: I1123 14:42:25.113545 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:25 crc kubenswrapper[5050]: I1123 14:42:25.113565 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:25Z","lastTransitionTime":"2025-11-23T14:42:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:25 crc kubenswrapper[5050]: I1123 14:42:25.217367 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:25 crc kubenswrapper[5050]: I1123 14:42:25.217425 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:25 crc kubenswrapper[5050]: I1123 14:42:25.217438 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:25 crc kubenswrapper[5050]: I1123 14:42:25.217481 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:25 crc kubenswrapper[5050]: I1123 14:42:25.217497 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:25Z","lastTransitionTime":"2025-11-23T14:42:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:25 crc kubenswrapper[5050]: I1123 14:42:25.320597 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:25 crc kubenswrapper[5050]: I1123 14:42:25.320656 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:25 crc kubenswrapper[5050]: I1123 14:42:25.320674 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:25 crc kubenswrapper[5050]: I1123 14:42:25.320700 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:25 crc kubenswrapper[5050]: I1123 14:42:25.320728 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:25Z","lastTransitionTime":"2025-11-23T14:42:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:25 crc kubenswrapper[5050]: I1123 14:42:25.423870 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:25 crc kubenswrapper[5050]: I1123 14:42:25.423979 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:25 crc kubenswrapper[5050]: I1123 14:42:25.424005 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:25 crc kubenswrapper[5050]: I1123 14:42:25.424041 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:25 crc kubenswrapper[5050]: I1123 14:42:25.424066 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:25Z","lastTransitionTime":"2025-11-23T14:42:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:25 crc kubenswrapper[5050]: I1123 14:42:25.528175 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:25 crc kubenswrapper[5050]: I1123 14:42:25.528218 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:25 crc kubenswrapper[5050]: I1123 14:42:25.528227 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:25 crc kubenswrapper[5050]: I1123 14:42:25.528247 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:25 crc kubenswrapper[5050]: I1123 14:42:25.528257 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:25Z","lastTransitionTime":"2025-11-23T14:42:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:25 crc kubenswrapper[5050]: I1123 14:42:25.547954 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 23 14:42:25 crc kubenswrapper[5050]: E1123 14:42:25.548054 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 23 14:42:25 crc kubenswrapper[5050]: I1123 14:42:25.575378 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6573c043-542c-47ae-a2ba-f70b8baf60c2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d06dfc9e93891495a387aa8a8e24b4029173b25c953dff4d2e80dbb30f4b03e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd601391a5bb150716ee97b765dd442d4bf9929eb01dfd17fdce39f327b4698f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"
readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9de298aa47ed7038c4f4ed742abb7a13f112d7f417637eb11d31a1f1accf0b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfa445fb3677a09126d124eea1348466a338713c6728a7ef138d9ffa43bec9eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://af3da6e6807f5d42cb8607efbabab0c5d472ddb5c02dd1c33f9f0f3ea8eba9c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://34b2a56f4af5df28c458b7f7d4588f81564c734a117bbf4d08e1fccfa43fd387\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47e
f0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e23fb2eb8cab90b4a0352a351bc63c590a96ec40331e9db29b2f39fe76cbf0f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://df06d11d5998c885ff8bd3dd1fd314bc3157a4177aafe218655e8cf5b3ea64d9\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-23T14:42:16Z\\\",\\\"message\\\":\\\"oval\\\\nI1123 14:42:16.467386 6369 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1123 14:42:16.467393 6369 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1123 14:42:16.467423 6369 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1123 14:42:16.467460 6369 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1123 14:42:16.467464 6369 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1123 14:42:16.467460 6369 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1123 14:42:16.467511 6369 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1123 14:42:16.467567 6369 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1123 14:42:16.467610 6369 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1123 14:42:16.467658 6369 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1123 14:42:16.467674 6369 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1123 14:42:16.467691 6369 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1123 14:42:16.467704 6369 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1123 14:42:16.467719 6369 factory.go:656] Stopping watch factory\\\\nI1123 14:42:16.467736 6369 ovnkube.go:599] Stopped ovnkube\\\\nI1123 14:42:16.467764 6369 metrics.go:553] Stopping metrics server at address 
\\\\\\\"\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:13Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e23fb2eb8cab90b4a0352a351bc63c590a96ec40331e9db29b2f39fe76cbf0f8\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-23T14:42:17Z\\\",\\\"message\\\":\\\"3 14:42:17.882505 6496 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1123 14:42:17.882633 6496 reflector.go:311] Stopping reflector *v1.EgressService (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI1123 14:42:17.882768 6496 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1123 14:42:17.882928 6496 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1123 14:42:17.883130 6496 reflector.go:311] Stopping reflector *v1.EgressQoS (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/factory.go:140\\\\nI1123 14:42:17.883305 6496 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1123 14:42:17.883429 6496 reflector.go:311] Stopping reflector *v1.NetworkAttachmentDefinition (0s) from github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/factory.go:117\\\\nI1123 14:42:17.883712 6496 reflector.go:311] Stopping reflector *v1.AdminPolicyBasedExternalRoute (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/informers/externalversions/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPat
h\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7009f69906eef1a76bebe08606ec9a9b7e61ce6217caaf0b784d0943db4bd51f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c68e73aa649285bedc9716826bdf5e41580614fba6de910e2dc1fa5bb524dcc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6c68e73aa649285bedc9716826bdf5e41580614fba6de910e2dc1fa5bb524dcc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-j8fzz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:25Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:25 crc kubenswrapper[5050]: I1123 14:42:25.590194 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-9q4mg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b75c970-bec5-4155-89c1-3fcba8733f70\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4f4f3e4f2e058d94d8ff4dc299e19a23318e04263a4324efa766daf5f7268f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lsct2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2cc3ecb845a59d933cd05a64b4c258b18d167098d933d7b0741d7dbb43bfb328\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lsct2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:19Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-9q4mg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:25Z is after 2025-08-24T17:21:41Z" Nov 23 
14:42:25 crc kubenswrapper[5050]: I1123 14:42:25.611584 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:25Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:25 crc kubenswrapper[5050]: I1123 14:42:25.630428 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ced068547f7e7c0807e996ab8500b30cda68a4c31363b415f66516b883221972\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:25Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:25 crc kubenswrapper[5050]: I1123 14:42:25.631487 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:25 crc kubenswrapper[5050]: I1123 14:42:25.631572 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:25 crc kubenswrapper[5050]: I1123 14:42:25.631595 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:25 crc kubenswrapper[5050]: I1123 14:42:25.631629 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:25 crc kubenswrapper[5050]: I1123 14:42:25.631648 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:25Z","lastTransitionTime":"2025-11-23T14:42:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:25 crc kubenswrapper[5050]: I1123 14:42:25.643401 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-7btqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"963992d0-f69e-4327-9789-4571451c1838\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c4b1570d42f2b86c042b9abcb595bf36b35789f24d12573f94503c7383849647\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8vphk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:05Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-7btqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:25Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:25 crc kubenswrapper[5050]: I1123 14:42:25.663762 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:25Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:25 crc kubenswrapper[5050]: I1123 14:42:25.682028 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-qvjn2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"abdac21e-18fc-460d-bd3b-73bed66b8ab9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d239bbc61595516c6e76763dd66ee795750adb765c56bb5d9c967c1e5897fcf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bggpq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-qvjn2\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:25Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:25 crc kubenswrapper[5050]: I1123 14:42:25.695262 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7cjg9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7f65245a-e7ea-4b29-9534-5dbe5a7ee271\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf73369e7515e9d61bf8fc0b0bd940f15e49a8e84b3e181dad104bd56c2d9b2d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-586ds\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:08Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7cjg9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:25Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:25 crc kubenswrapper[5050]: I1123 14:42:25.708654 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1d998909-9470-47ef-87e8-b34f0473682f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5bbc40aad4e1fff72cfeaf901bbe2edde01b909690cec06e0828aee8a4c93e8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jx4bd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b97978773311507f492064330c0ebbaf7862255a02347599c52ce6bbd13d8922\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jx4bd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-hlrlq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:25Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:25 crc kubenswrapper[5050]: I1123 14:42:25.727581 5050 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-km979" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1d7b7de2-b361-44b9-ba10-5d4f889abc9b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a024193010036bae37aa1c744cd832dced7ca4893acc76efe5c2b9db65834a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://32b4eece06b72ca080adc2e1fab7f1db357c926b47433f2d374afaa45dad6b38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://32b4eece06b72ca080adc2e1fab7f1db357c926b47433f2d374afaa45dad6b38\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86867dd85be2aec28e680e55a6fb723964f26082eff07f656c00bd79cd2d3cd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2c
c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://86867dd85be2aec28e680e55a6fb723964f26082eff07f656c00bd79cd2d3cd2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c72b02501d9b33ae056e91b10f13e50327984b1cabcd69b5066780aa4fde2682\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c72b02501d9b33ae056e91b10f13e50327984b1cabcd69b5066780aa4fde2682\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba558b2174a35efd061ce97a84b1b785916ea34d481d58cf4582a5bf29d6b24a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba558b2174a35efd061ce97a84b1b785916ea34d481d58cf4582a5bf29d6b24a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-re
lease\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9cd2247c7d101274e0f7a09b531266720489fb8ed77f89e7ccd1ee74882508d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c9cd2247c7d101274e0f7a09b531266720489fb8ed77f89e7ccd1ee74882508d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0873c53f2aa7003b92b7460595f79f52b70796f9e38c133da799b5c49785d857\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0873c53f2aa7003b92b7460595f79f52b70796f9e38c133da799b5c49785d857\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-km979\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:25Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:25 crc kubenswrapper[5050]: I1123 14:42:25.734240 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:25 crc kubenswrapper[5050]: I1123 14:42:25.734272 5050 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:25 crc kubenswrapper[5050]: I1123 14:42:25.734282 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:25 crc kubenswrapper[5050]: I1123 14:42:25.734298 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:25 crc kubenswrapper[5050]: I1123 14:42:25.734309 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:25Z","lastTransitionTime":"2025-11-23T14:42:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:25 crc kubenswrapper[5050]: I1123 14:42:25.742113 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8861091e-df98-4ea6-a853-e76e7dec48c5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://374a569595342d2e7684dc020e82bd80b35965aff751215c6c926005ea040798\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://63b17cb181aa6b22312393ce35127c8e06ae80cd1feaaa001d7cb34cdc111456\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"
mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3e232fb0da11b5caaec81c7b6ae7185d121508d99fce67c77e1d5d52289dad8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7b8be59b2e91f123c3d88b93237121402f84e9f54aeeaa0fae57baaa6238140\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:41:45Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:25Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:25 crc kubenswrapper[5050]: I1123 14:42:25.756636 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://719b29f3f0e7ff3aa3d105c9f4d9fb8d180d13494fcef94b204dece23c0f763d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:25Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:25 crc kubenswrapper[5050]: I1123 14:42:25.770906 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac91134c1368426cb845f20e6c777b0b613d93b1761d097051a7e63dae4203a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ef730e87b3ef157558a92f93526e4dec23e71b48f5cb08a30242f143182a5d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:25Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:25 crc kubenswrapper[5050]: I1123 14:42:25.790394 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0a2166ba-b7f4-4eb1-95bc-4f44d90e72ea\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://358800bef7b7e16e58003aa5e2411212334a5b1a33c80d223eb7d40ad13867f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://456093b33927a232192a5fa7ee1a4228e9f37da58e5e09cb33018ae6f5b14b2c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ccfc14c36ecee0d8502415582be6053b096ea77504bf5de67f77bf6f74c0936b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0c0214fb48430e8263a76a8412ebe7aaeaa85c1b476c22f8bbe9a5279f35aa5d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://10cf72fab955e4ec81917127043d9e4a2f9d427a03edc22120ab2ae711fdc5a4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"ed_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1763908925\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1763908925\\\\\\\\\\\\\\\" (2025-11-23 13:42:05 +0000 UTC to 2026-11-23 13:42:05 +0000 UTC (now=2025-11-23 14:42:05.669138051 +0000 UTC))\\\\\\\"\\\\nI1123 14:42:05.669209 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1123 14:42:05.669265 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1123 14:42:05.668647 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1123 14:42:05.669361 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1123 14:42:05.669413 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1123 14:42:05.668681 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3465722692/tls.crt::/tmp/serving-cert-3465722692/tls.key\\\\\\\"\\\\nI1123 14:42:05.669629 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1123 14:42:05.669660 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1123 14:42:05.668685 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1123 14:42:05.669884 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1123 14:42:05.668719 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1123 14:42:05.669899 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1123 14:42:05.668776 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-23T14:41:59Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://beaf638703b4b64fabd10eb14bad058dc0dae74261a462e7f2fddd8aa3811cf7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a63464a6e2f79d8e5a7e0eed9f72b684e0071cad60d3a801754762be4a2735c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a63464a6e2f79d8e5a7e0eed9f72b684e0071cad60d3a801754762be4a2735c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:41:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:41:45Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:25Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:25 crc kubenswrapper[5050]: I1123 14:42:25.806891 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:25Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:25 crc kubenswrapper[5050]: I1123 14:42:25.828723 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-gtj96" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc69bd19-1f49-486e-a510-d5b8461fb172\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ltjqr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ltjqr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:20Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-gtj96\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:25Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:25 crc kubenswrapper[5050]: I1123 14:42:25.837773 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:25 crc kubenswrapper[5050]: I1123 14:42:25.837877 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:25 crc kubenswrapper[5050]: I1123 14:42:25.837906 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:25 crc kubenswrapper[5050]: I1123 14:42:25.837943 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:25 crc kubenswrapper[5050]: I1123 14:42:25.837971 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:25Z","lastTransitionTime":"2025-11-23T14:42:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:25 crc kubenswrapper[5050]: I1123 14:42:25.941081 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:25 crc kubenswrapper[5050]: I1123 14:42:25.941144 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:25 crc kubenswrapper[5050]: I1123 14:42:25.941176 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:25 crc kubenswrapper[5050]: I1123 14:42:25.941234 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:25 crc kubenswrapper[5050]: I1123 14:42:25.941255 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:25Z","lastTransitionTime":"2025-11-23T14:42:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:26 crc kubenswrapper[5050]: I1123 14:42:26.044979 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:26 crc kubenswrapper[5050]: I1123 14:42:26.045028 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:26 crc kubenswrapper[5050]: I1123 14:42:26.045038 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:26 crc kubenswrapper[5050]: I1123 14:42:26.045057 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:26 crc kubenswrapper[5050]: I1123 14:42:26.045069 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:26Z","lastTransitionTime":"2025-11-23T14:42:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:26 crc kubenswrapper[5050]: I1123 14:42:26.148150 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:26 crc kubenswrapper[5050]: I1123 14:42:26.148211 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:26 crc kubenswrapper[5050]: I1123 14:42:26.148233 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:26 crc kubenswrapper[5050]: I1123 14:42:26.148262 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:26 crc kubenswrapper[5050]: I1123 14:42:26.148286 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:26Z","lastTransitionTime":"2025-11-23T14:42:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:26 crc kubenswrapper[5050]: I1123 14:42:26.252050 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:26 crc kubenswrapper[5050]: I1123 14:42:26.252127 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:26 crc kubenswrapper[5050]: I1123 14:42:26.252146 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:26 crc kubenswrapper[5050]: I1123 14:42:26.252174 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:26 crc kubenswrapper[5050]: I1123 14:42:26.252198 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:26Z","lastTransitionTime":"2025-11-23T14:42:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:26 crc kubenswrapper[5050]: I1123 14:42:26.355738 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:26 crc kubenswrapper[5050]: I1123 14:42:26.355866 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:26 crc kubenswrapper[5050]: I1123 14:42:26.355884 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:26 crc kubenswrapper[5050]: I1123 14:42:26.355909 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:26 crc kubenswrapper[5050]: I1123 14:42:26.355928 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:26Z","lastTransitionTime":"2025-11-23T14:42:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:26 crc kubenswrapper[5050]: I1123 14:42:26.459314 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:26 crc kubenswrapper[5050]: I1123 14:42:26.459477 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:26 crc kubenswrapper[5050]: I1123 14:42:26.459501 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:26 crc kubenswrapper[5050]: I1123 14:42:26.459527 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:26 crc kubenswrapper[5050]: I1123 14:42:26.459546 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:26Z","lastTransitionTime":"2025-11-23T14:42:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:26 crc kubenswrapper[5050]: I1123 14:42:26.548485 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gtj96" Nov 23 14:42:26 crc kubenswrapper[5050]: I1123 14:42:26.548539 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 23 14:42:26 crc kubenswrapper[5050]: I1123 14:42:26.548642 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 23 14:42:26 crc kubenswrapper[5050]: E1123 14:42:26.548809 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gtj96" podUID="cc69bd19-1f49-486e-a510-d5b8461fb172" Nov 23 14:42:26 crc kubenswrapper[5050]: E1123 14:42:26.549001 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 23 14:42:26 crc kubenswrapper[5050]: E1123 14:42:26.549252 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 23 14:42:26 crc kubenswrapper[5050]: I1123 14:42:26.567305 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:26 crc kubenswrapper[5050]: I1123 14:42:26.567357 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:26 crc kubenswrapper[5050]: I1123 14:42:26.567369 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:26 crc kubenswrapper[5050]: I1123 14:42:26.567520 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:26 crc kubenswrapper[5050]: I1123 14:42:26.567549 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:26Z","lastTransitionTime":"2025-11-23T14:42:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:26 crc kubenswrapper[5050]: I1123 14:42:26.672229 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:26 crc kubenswrapper[5050]: I1123 14:42:26.672296 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:26 crc kubenswrapper[5050]: I1123 14:42:26.672314 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:26 crc kubenswrapper[5050]: I1123 14:42:26.672339 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:26 crc kubenswrapper[5050]: I1123 14:42:26.672360 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:26Z","lastTransitionTime":"2025-11-23T14:42:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:26 crc kubenswrapper[5050]: I1123 14:42:26.775864 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:26 crc kubenswrapper[5050]: I1123 14:42:26.775908 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:26 crc kubenswrapper[5050]: I1123 14:42:26.775920 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:26 crc kubenswrapper[5050]: I1123 14:42:26.775938 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:26 crc kubenswrapper[5050]: I1123 14:42:26.775950 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:26Z","lastTransitionTime":"2025-11-23T14:42:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:26 crc kubenswrapper[5050]: I1123 14:42:26.880101 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:26 crc kubenswrapper[5050]: I1123 14:42:26.880175 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:26 crc kubenswrapper[5050]: I1123 14:42:26.880198 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:26 crc kubenswrapper[5050]: I1123 14:42:26.880231 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:26 crc kubenswrapper[5050]: I1123 14:42:26.880259 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:26Z","lastTransitionTime":"2025-11-23T14:42:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:26 crc kubenswrapper[5050]: I1123 14:42:26.983803 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:26 crc kubenswrapper[5050]: I1123 14:42:26.983888 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:26 crc kubenswrapper[5050]: I1123 14:42:26.983904 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:26 crc kubenswrapper[5050]: I1123 14:42:26.983929 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:26 crc kubenswrapper[5050]: I1123 14:42:26.983948 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:26Z","lastTransitionTime":"2025-11-23T14:42:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:27 crc kubenswrapper[5050]: I1123 14:42:27.087700 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:27 crc kubenswrapper[5050]: I1123 14:42:27.088312 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:27 crc kubenswrapper[5050]: I1123 14:42:27.088335 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:27 crc kubenswrapper[5050]: I1123 14:42:27.088369 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:27 crc kubenswrapper[5050]: I1123 14:42:27.088389 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:27Z","lastTransitionTime":"2025-11-23T14:42:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:27 crc kubenswrapper[5050]: I1123 14:42:27.191630 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:27 crc kubenswrapper[5050]: I1123 14:42:27.191716 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:27 crc kubenswrapper[5050]: I1123 14:42:27.191750 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:27 crc kubenswrapper[5050]: I1123 14:42:27.191824 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:27 crc kubenswrapper[5050]: I1123 14:42:27.191862 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:27Z","lastTransitionTime":"2025-11-23T14:42:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:27 crc kubenswrapper[5050]: I1123 14:42:27.295938 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:27 crc kubenswrapper[5050]: I1123 14:42:27.296009 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:27 crc kubenswrapper[5050]: I1123 14:42:27.296027 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:27 crc kubenswrapper[5050]: I1123 14:42:27.296058 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:27 crc kubenswrapper[5050]: I1123 14:42:27.296077 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:27Z","lastTransitionTime":"2025-11-23T14:42:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:27 crc kubenswrapper[5050]: I1123 14:42:27.399965 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:27 crc kubenswrapper[5050]: I1123 14:42:27.400032 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:27 crc kubenswrapper[5050]: I1123 14:42:27.400051 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:27 crc kubenswrapper[5050]: I1123 14:42:27.400082 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:27 crc kubenswrapper[5050]: I1123 14:42:27.400101 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:27Z","lastTransitionTime":"2025-11-23T14:42:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:27 crc kubenswrapper[5050]: I1123 14:42:27.503618 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:27 crc kubenswrapper[5050]: I1123 14:42:27.503697 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:27 crc kubenswrapper[5050]: I1123 14:42:27.503715 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:27 crc kubenswrapper[5050]: I1123 14:42:27.503743 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:27 crc kubenswrapper[5050]: I1123 14:42:27.503763 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:27Z","lastTransitionTime":"2025-11-23T14:42:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:27 crc kubenswrapper[5050]: I1123 14:42:27.548570 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 23 14:42:27 crc kubenswrapper[5050]: E1123 14:42:27.548782 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 23 14:42:27 crc kubenswrapper[5050]: I1123 14:42:27.607071 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:27 crc kubenswrapper[5050]: I1123 14:42:27.607146 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:27 crc kubenswrapper[5050]: I1123 14:42:27.607170 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:27 crc kubenswrapper[5050]: I1123 14:42:27.607201 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:27 crc kubenswrapper[5050]: I1123 14:42:27.607221 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:27Z","lastTransitionTime":"2025-11-23T14:42:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:27 crc kubenswrapper[5050]: I1123 14:42:27.710544 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:27 crc kubenswrapper[5050]: I1123 14:42:27.710617 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:27 crc kubenswrapper[5050]: I1123 14:42:27.710635 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:27 crc kubenswrapper[5050]: I1123 14:42:27.710666 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:27 crc kubenswrapper[5050]: I1123 14:42:27.710688 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:27Z","lastTransitionTime":"2025-11-23T14:42:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:27 crc kubenswrapper[5050]: I1123 14:42:27.813840 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:27 crc kubenswrapper[5050]: I1123 14:42:27.813903 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:27 crc kubenswrapper[5050]: I1123 14:42:27.813925 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:27 crc kubenswrapper[5050]: I1123 14:42:27.813956 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:27 crc kubenswrapper[5050]: I1123 14:42:27.813982 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:27Z","lastTransitionTime":"2025-11-23T14:42:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:27 crc kubenswrapper[5050]: I1123 14:42:27.916966 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:27 crc kubenswrapper[5050]: I1123 14:42:27.917064 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:27 crc kubenswrapper[5050]: I1123 14:42:27.917083 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:27 crc kubenswrapper[5050]: I1123 14:42:27.917112 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:27 crc kubenswrapper[5050]: I1123 14:42:27.917134 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:27Z","lastTransitionTime":"2025-11-23T14:42:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:28 crc kubenswrapper[5050]: I1123 14:42:28.020521 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:28 crc kubenswrapper[5050]: I1123 14:42:28.020585 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:28 crc kubenswrapper[5050]: I1123 14:42:28.020594 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:28 crc kubenswrapper[5050]: I1123 14:42:28.020612 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:28 crc kubenswrapper[5050]: I1123 14:42:28.020627 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:28Z","lastTransitionTime":"2025-11-23T14:42:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:28 crc kubenswrapper[5050]: I1123 14:42:28.124244 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:28 crc kubenswrapper[5050]: I1123 14:42:28.124304 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:28 crc kubenswrapper[5050]: I1123 14:42:28.124377 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:28 crc kubenswrapper[5050]: I1123 14:42:28.124397 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:28 crc kubenswrapper[5050]: I1123 14:42:28.124407 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:28Z","lastTransitionTime":"2025-11-23T14:42:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:28 crc kubenswrapper[5050]: I1123 14:42:28.165050 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/cc69bd19-1f49-486e-a510-d5b8461fb172-metrics-certs\") pod \"network-metrics-daemon-gtj96\" (UID: \"cc69bd19-1f49-486e-a510-d5b8461fb172\") " pod="openshift-multus/network-metrics-daemon-gtj96" Nov 23 14:42:28 crc kubenswrapper[5050]: E1123 14:42:28.165226 5050 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 23 14:42:28 crc kubenswrapper[5050]: E1123 14:42:28.165315 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/cc69bd19-1f49-486e-a510-d5b8461fb172-metrics-certs podName:cc69bd19-1f49-486e-a510-d5b8461fb172 nodeName:}" failed. No retries permitted until 2025-11-23 14:42:36.165290102 +0000 UTC m=+51.332286587 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/cc69bd19-1f49-486e-a510-d5b8461fb172-metrics-certs") pod "network-metrics-daemon-gtj96" (UID: "cc69bd19-1f49-486e-a510-d5b8461fb172") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 23 14:42:28 crc kubenswrapper[5050]: I1123 14:42:28.227316 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:28 crc kubenswrapper[5050]: I1123 14:42:28.227382 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:28 crc kubenswrapper[5050]: I1123 14:42:28.227398 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:28 crc kubenswrapper[5050]: I1123 14:42:28.227425 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:28 crc kubenswrapper[5050]: I1123 14:42:28.227476 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:28Z","lastTransitionTime":"2025-11-23T14:42:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:28 crc kubenswrapper[5050]: I1123 14:42:28.331019 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:28 crc kubenswrapper[5050]: I1123 14:42:28.331111 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:28 crc kubenswrapper[5050]: I1123 14:42:28.331138 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:28 crc kubenswrapper[5050]: I1123 14:42:28.331173 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:28 crc kubenswrapper[5050]: I1123 14:42:28.331195 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:28Z","lastTransitionTime":"2025-11-23T14:42:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:28 crc kubenswrapper[5050]: I1123 14:42:28.435305 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:28 crc kubenswrapper[5050]: I1123 14:42:28.435373 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:28 crc kubenswrapper[5050]: I1123 14:42:28.435390 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:28 crc kubenswrapper[5050]: I1123 14:42:28.435419 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:28 crc kubenswrapper[5050]: I1123 14:42:28.435437 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:28Z","lastTransitionTime":"2025-11-23T14:42:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:28 crc kubenswrapper[5050]: I1123 14:42:28.538748 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:28 crc kubenswrapper[5050]: I1123 14:42:28.538805 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:28 crc kubenswrapper[5050]: I1123 14:42:28.538827 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:28 crc kubenswrapper[5050]: I1123 14:42:28.538852 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:28 crc kubenswrapper[5050]: I1123 14:42:28.538871 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:28Z","lastTransitionTime":"2025-11-23T14:42:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:28 crc kubenswrapper[5050]: I1123 14:42:28.547516 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gtj96" Nov 23 14:42:28 crc kubenswrapper[5050]: I1123 14:42:28.547594 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 23 14:42:28 crc kubenswrapper[5050]: I1123 14:42:28.547619 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 23 14:42:28 crc kubenswrapper[5050]: E1123 14:42:28.547696 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-gtj96" podUID="cc69bd19-1f49-486e-a510-d5b8461fb172" Nov 23 14:42:28 crc kubenswrapper[5050]: E1123 14:42:28.547861 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 23 14:42:28 crc kubenswrapper[5050]: E1123 14:42:28.548124 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 23 14:42:28 crc kubenswrapper[5050]: I1123 14:42:28.622685 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" Nov 23 14:42:28 crc kubenswrapper[5050]: I1123 14:42:28.624198 5050 scope.go:117] "RemoveContainer" containerID="e23fb2eb8cab90b4a0352a351bc63c590a96ec40331e9db29b2f39fe76cbf0f8" Nov 23 14:42:28 crc kubenswrapper[5050]: I1123 14:42:28.642784 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:28 crc kubenswrapper[5050]: I1123 14:42:28.642850 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:28 crc kubenswrapper[5050]: I1123 14:42:28.642867 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:28 crc kubenswrapper[5050]: I1123 14:42:28.642904 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:28 crc kubenswrapper[5050]: I1123 14:42:28.642925 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:28Z","lastTransitionTime":"2025-11-23T14:42:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:28 crc kubenswrapper[5050]: I1123 14:42:28.655115 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:28Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:28 crc kubenswrapper[5050]: I1123 14:42:28.674558 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-gtj96" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc69bd19-1f49-486e-a510-d5b8461fb172\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ltjqr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ltjqr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:20Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-gtj96\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:28Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:28 crc kubenswrapper[5050]: I1123 14:42:28.692153 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0a2166ba-b7f4-4eb1-95bc-4f44d90e72ea\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://358800bef7b7e16e58003aa5e2411212334a5b1a33c80d223eb7d40ad13867f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://456093b33927a232192a5fa7ee1a4228e9f37da58e5e09cb33018ae6f5b14b2c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ccfc14c36ecee0d8502415582be6053b096ea77504bf5de67f77bf6f74c0936b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0c0214fb48430e8263a76a8412ebe7aaeaa85c1b476c22f8bbe9a5279f35aa5d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://10cf72fab955e4ec81917127043d9e4a2f9d427a03edc22120ab2ae711fdc5a4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"ed_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed 
loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1763908925\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1763908925\\\\\\\\\\\\\\\" (2025-11-23 13:42:05 +0000 UTC to 2026-11-23 13:42:05 +0000 UTC (now=2025-11-23 14:42:05.669138051 +0000 UTC))\\\\\\\"\\\\nI1123 14:42:05.669209 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1123 14:42:05.669265 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1123 14:42:05.668647 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1123 14:42:05.669361 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1123 14:42:05.669413 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1123 14:42:05.668681 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3465722692/tls.crt::/tmp/serving-cert-3465722692/tls.key\\\\\\\"\\\\nI1123 14:42:05.669629 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1123 14:42:05.669660 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1123 14:42:05.668685 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1123 14:42:05.669884 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1123 14:42:05.668719 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1123 14:42:05.669899 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1123 14:42:05.668776 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-23T14:41:59Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://beaf638703b4b64fabd10eb14bad058dc0dae74261a462e7f2fddd8aa3811cf7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a63464a6e2f79d8e5a7e0eed9f72b684e0071cad60d3a801754762be4a2735c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a63464a6e2f79d8e5a7e0eed9f72b684e0071cad60d3a801754762be4a2735c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:41:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:41:45Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:28Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:28 crc kubenswrapper[5050]: I1123 14:42:28.713929 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:28Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:28 crc kubenswrapper[5050]: I1123 14:42:28.733290 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ced068547f7e7c0807e996ab8500b30cda68a4c31363b415f66516b883221972\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:28Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:28 crc kubenswrapper[5050]: I1123 14:42:28.747220 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:28 crc kubenswrapper[5050]: I1123 14:42:28.747255 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:28 crc kubenswrapper[5050]: I1123 14:42:28.747264 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:28 crc kubenswrapper[5050]: I1123 14:42:28.747279 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:28 crc kubenswrapper[5050]: I1123 14:42:28.747289 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:28Z","lastTransitionTime":"2025-11-23T14:42:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:28 crc kubenswrapper[5050]: I1123 14:42:28.753015 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-7btqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"963992d0-f69e-4327-9789-4571451c1838\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c4b1570d42f2b86c042b9abcb595bf36b35789f24d12573f94503c7383849647\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8vphk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:05Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-7btqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:28Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:28 crc kubenswrapper[5050]: I1123 14:42:28.776715 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6573c043-542c-47ae-a2ba-f70b8baf60c2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d06dfc9e93891495a387aa8a8e24b4029173b25c953dff4d2e80dbb30f4b03e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd601391a5bb150716ee97b765dd442d4bf9929eb01dfd17fdce39f327b4698f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9de298aa47ed7038c4f4ed742abb7a13f112d7f417637eb11d31a1f1accf0b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfa445fb3677a09126d124eea1348466a338713c6728a7ef138d9ffa43bec9eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://af3da6e6807f5d42cb8607efbabab0c5d472ddb5c02dd1c33f9f0f3ea8eba9c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://34b2a56f4af5df28c458b7f7d4588f81564c734a117bbf4d08e1fccfa43fd387\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e23fb2eb8cab90b4a0352a351bc63c590a96ec40331e9db29b2f39fe76cbf0f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e23fb2eb8cab90b4a0352a351bc63c590a96ec40331e9db29b2f39fe76cbf0f8\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-23T14:42:17Z\\\",\\\"message\\\":\\\"3 14:42:17.882505 6496 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1123 14:42:17.882633 6496 reflector.go:311] Stopping reflector *v1.EgressService (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI1123 14:42:17.882768 6496 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1123 14:42:17.882928 6496 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1123 14:42:17.883130 6496 reflector.go:311] Stopping reflector *v1.EgressQoS (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/factory.go:140\\\\nI1123 14:42:17.883305 6496 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1123 14:42:17.883429 6496 reflector.go:311] Stopping reflector *v1.NetworkAttachmentDefinition (0s) from github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/factory.go:117\\\\nI1123 14:42:17.883712 6496 reflector.go:311] Stopping reflector *v1.AdminPolicyBasedExternalRoute (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/informers/externalversions/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:16Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-j8fzz_openshift-ovn-kubernetes(6573c043-542c-47ae-a2ba-f70b8baf60c2)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7009f69906eef1a76bebe08606ec9a9b7e61ce6217caaf0b784d0943db4bd51f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c68e73aa649285bedc9716826bdf5e41580614fba6de910e2dc1fa5bb524dcc\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6c68e73aa649285bedc9716826bdf5e41580614fba6de910e2dc1fa5bb524dcc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-j8fzz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:28Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:28 crc kubenswrapper[5050]: I1123 14:42:28.790192 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-9q4mg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b75c970-bec5-4155-89c1-3fcba8733f70\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4f4f3e4f2e058d94d8ff4dc299e19a23318e04263a4324efa766daf5f7268f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lsct2
\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2cc3ecb845a59d933cd05a64b4c258b18d167098d933d7b0741d7dbb43bfb328\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lsct2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:19Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-9q4mg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:28Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:28 crc kubenswrapper[5050]: I1123 14:42:28.803740 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:28Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:28 crc kubenswrapper[5050]: I1123 14:42:28.818127 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-qvjn2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"abdac21e-18fc-460d-bd3b-73bed66b8ab9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d239bbc61595516c6e76763dd66ee795750adb765c56bb5d9c967c1e5897fcf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bggpq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-qvjn2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:28Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:28 crc kubenswrapper[5050]: I1123 14:42:28.829640 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7cjg9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7f65245a-e7ea-4b29-9534-5dbe5a7ee271\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf73369e7515e9d61bf8fc0b0bd940f15e49a8e84b3e181dad104bd56c2d9b2d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-586ds\\\",\\\"readOnly\\\":true
,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:08Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7cjg9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:28Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:28 crc kubenswrapper[5050]: I1123 14:42:28.850877 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://719b29f3f0e7ff3aa3d105c9f4d9fb8d180d13494fcef94b204dece23c0f763d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:28Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:28 crc kubenswrapper[5050]: I1123 14:42:28.851221 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:28 crc kubenswrapper[5050]: I1123 14:42:28.851254 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:28 crc kubenswrapper[5050]: I1123 14:42:28.851266 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:28 crc 
kubenswrapper[5050]: I1123 14:42:28.851285 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:28 crc kubenswrapper[5050]: I1123 14:42:28.851298 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:28Z","lastTransitionTime":"2025-11-23T14:42:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:28 crc kubenswrapper[5050]: I1123 14:42:28.866719 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac91134c1368426cb845f20e6c777b0b613d93b1761d097051a7e63dae4203a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ef730e87b3ef157558a92f93526e4dec23e71b48f5cb08a30242f143182a5d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:28Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:28 crc kubenswrapper[5050]: I1123 14:42:28.879313 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1d998909-9470-47ef-87e8-b34f0473682f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5bbc40aad4e1fff72cfeaf901bbe2edde01b909690cec06e0828aee8a4c93e8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jx4bd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b97978773311507f492064330c0ebbaf7862255a02347599c52ce6bbd13d8922\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jx4bd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\
",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-hlrlq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:28Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:28 crc kubenswrapper[5050]: I1123 14:42:28.892639 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-km979" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1d7b7de2-b361-44b9-ba10-5d4f889abc9b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a024193010036bae37aa1c744cd832dced7ca4893acc76efe5c2b9db65834a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://32b4eece06b72ca080adc2e1fab7f1db357c926b47433f2d374afaa45dad6b38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://32b4eece06b72ca080adc2e1fab7f1db357c926b47433f2d374afaa45dad6b38\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/b
in\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86867dd85be2aec28e680e55a6fb723964f26082eff07f656c00bd79cd2d3cd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://86867dd85be2aec28e680e55a6fb723964f26082eff07f656c00bd79cd2d3cd2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c72b02501d9b33ae056e91b10f13e50327984b1cabcd69b5066780aa4fde2682\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c72b02501d9b33ae056e91b10f13e50327984b1cabcd69b5066780aa4fde2682\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba558b2174a35efd061ce97a84b1b785916ea34d481d58cf4582a5bf29d6b24a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverri
de-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba558b2174a35efd061ce97a84b1b785916ea34d481d58cf4582a5bf29d6b24a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9cd2247c7d101274e0f7a09b531266720489fb8ed77f89e7ccd1ee74882508d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c9cd2247c7d101274e0f7a09b531266720489fb8ed77f89e7ccd1ee74882508d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0873c53f2aa7003b92b7460595f79f52b70796f9e38c133da799b5c49785d857\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0873c53f2aa7003b92b7460595f79f52b70796f9e38c133da799b5c49785d857\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-additional-cni-plugins-km979\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:28Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:28 crc kubenswrapper[5050]: I1123 14:42:28.904010 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8861091e-df98-4ea6-a853-e76e7dec48c5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://374a569595342d2e7684dc020e82bd80b35965aff751215c6c926005ea040798\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://63b17cb181aa6b22312393ce35127c8e06ae80cd1feaaa001d7cb34cdc111456\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3e232fb0da11b5caaec81c7b6ae7185d121508d99fce67c77e1d5d52289dad8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\
":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7b8be59b2e91f123c3d88b93237121402f84e9f54aeeaa0fae57baaa6238140\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:41:45Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:28Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:28 crc kubenswrapper[5050]: I1123 14:42:28.920484 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-j8fzz_6573c043-542c-47ae-a2ba-f70b8baf60c2/ovnkube-controller/1.log" Nov 23 14:42:28 crc kubenswrapper[5050]: I1123 14:42:28.923146 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" event={"ID":"6573c043-542c-47ae-a2ba-f70b8baf60c2","Type":"ContainerStarted","Data":"8563b3e8d8f18133f64756e8dfd9f30cdcdad52ed6e13c8edc39fbdee3f71e4a"} Nov 23 14:42:28 crc kubenswrapper[5050]: I1123 14:42:28.923658 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" Nov 23 14:42:28 crc kubenswrapper[5050]: I1123 14:42:28.938828 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0a2166ba-b7f4-4eb1-95bc-4f44d90e72ea\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://358800bef7b7e16e58003aa5e2411212334a5b1a33c80d223eb7d40ad13867f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://456093b33927a232192a5fa7ee1a4228e9f37da58e5e09cb33018ae6f5b14b2c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ccfc14c36ecee0d8502415582be6053b096ea77504bf5de67f77bf6f74c0936b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0c0214fb48430e8263a76a8412ebe7aaeaa85c1b476c22f8bbe9a5279f35aa5d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\
\":\\\"cri-o://10cf72fab955e4ec81917127043d9e4a2f9d427a03edc22120ab2ae711fdc5a4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"ed_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1763908925\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1763908925\\\\\\\\\\\\\\\" (2025-11-23 13:42:05 +0000 UTC to 2026-11-23 13:42:05 +0000 UTC (now=2025-11-23 14:42:05.669138051 +0000 UTC))\\\\\\\"\\\\nI1123 14:42:05.669209 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1123 14:42:05.669265 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1123 14:42:05.668647 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1123 14:42:05.669361 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1123 14:42:05.669413 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1123 14:42:05.668681 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3465722692/tls.crt::/tmp/serving-cert-3465722692/tls.key\\\\\\\"\\\\nI1123 14:42:05.669629 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1123 14:42:05.669660 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1123 14:42:05.668685 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1123 14:42:05.669884 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1123 14:42:05.668719 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1123 14:42:05.669899 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1123 14:42:05.668776 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-23T14:41:59Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://beaf638703b4b64fabd10eb14bad058dc0dae74261a462e7f2fddd8aa3811cf7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a63464a6e2f79d8e5a7e0eed9f72b684e0071cad60d3a801754762be4a2735c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a63464a6e2f79d8e5a7e0eed9f72b684e0071cad60d3a801754762be4a2735c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:41:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:41:45Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:28Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:28 crc kubenswrapper[5050]: I1123 14:42:28.950098 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:28Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:28 crc kubenswrapper[5050]: I1123 14:42:28.954227 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:28 crc kubenswrapper[5050]: I1123 14:42:28.954253 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:28 crc kubenswrapper[5050]: I1123 14:42:28.954262 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:28 crc kubenswrapper[5050]: I1123 14:42:28.954276 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:28 crc kubenswrapper[5050]: I1123 14:42:28.954286 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:28Z","lastTransitionTime":"2025-11-23T14:42:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:28 crc kubenswrapper[5050]: I1123 14:42:28.959730 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-gtj96" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc69bd19-1f49-486e-a510-d5b8461fb172\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ltjqr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ltjqr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:20Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-gtj96\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:28Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:28 crc kubenswrapper[5050]: I1123 14:42:28.970903 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:28Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:28 crc kubenswrapper[5050]: I1123 14:42:28.983770 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ced068547f7e7c0807e996ab8500b30cda68a4c31363b415f66516b883221972\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:28Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:28 crc kubenswrapper[5050]: I1123 14:42:28.995137 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-7btqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"963992d0-f69e-4327-9789-4571451c1838\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c4b1570d42f2b86c042b9abcb595bf36b35789f24d12573f94503c7383849647\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8vphk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:05Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-7btqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:28Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:29 crc kubenswrapper[5050]: I1123 14:42:29.018830 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6573c043-542c-47ae-a2ba-f70b8baf60c2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d06dfc9e93891495a387aa8a8e24b4029173b25c953dff4d2e80dbb30f4b03e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd601391a5bb150716ee97b765dd442d4bf9929eb01dfd17fdce39f327b4698f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9de298aa47ed7038c4f4ed742abb7a13f112d7f417637eb11d31a1f1accf0b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfa445fb3677a09126d124eea1348466a338713c6728a7ef138d9ffa43bec9eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://af3da6e6807f5d42cb8607efbabab0c5d472ddb5c02dd1c33f9f0f3ea8eba9c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://34b2a56f4af5df28c458b7f7d4588f81564c734a117bbf4d08e1fccfa43fd387\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8563b3e8d8f18133f64756e8dfd9f30cdcdad52ed6e13c8edc39fbdee3f71e4a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e23fb2eb8cab90b4a0352a351bc63c590a96ec40331e9db29b2f39fe76cbf0f8\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-23T14:42:17Z\\\",\\\"message\\\":\\\"3 14:42:17.882505 6496 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1123 14:42:17.882633 6496 reflector.go:311] Stopping reflector *v1.EgressService (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI1123 14:42:17.882768 6496 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1123 14:42:17.882928 6496 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1123 14:42:17.883130 6496 reflector.go:311] Stopping reflector *v1.EgressQoS (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/factory.go:140\\\\nI1123 14:42:17.883305 6496 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1123 14:42:17.883429 6496 reflector.go:311] Stopping reflector *v1.NetworkAttachmentDefinition (0s) from github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/factory.go:117\\\\nI1123 14:42:17.883712 6496 reflector.go:311] Stopping reflector *v1.AdminPolicyBasedExternalRoute (0s) from 
github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/informers/externalversions/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:16Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7009f69906eef1a76bebe08606ec9a9b7e61ce6217caaf0b784d0943db4bd51f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\
\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c68e73aa649285bedc9716826bdf5e41580614fba6de910e2dc1fa5bb524dcc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6c68e73aa649285bedc9716826bdf5e41580614fba6de910e2dc1fa5bb524dcc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-j8fzz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:29Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:29 crc kubenswrapper[5050]: I1123 14:42:29.038298 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-9q4mg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b75c970-bec5-4155-89c1-3fcba8733f70\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4f4f3e4f2e058d94d8ff4dc299e19a23318e04263a4324efa766daf5f7268f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lsct2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2cc3ecb845a59d933cd05a64b4c258b18d167098d933d7b0741d7dbb43bfb328\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lsct2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:19Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-9q4mg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:29Z is after 2025-08-24T17:21:41Z" Nov 23 
14:42:29 crc kubenswrapper[5050]: I1123 14:42:29.052261 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:29Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:29 crc kubenswrapper[5050]: I1123 14:42:29.056814 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:29 crc kubenswrapper[5050]: I1123 14:42:29.056871 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:29 crc kubenswrapper[5050]: I1123 14:42:29.056883 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:29 crc kubenswrapper[5050]: I1123 14:42:29.056906 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:29 crc kubenswrapper[5050]: I1123 14:42:29.056922 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:29Z","lastTransitionTime":"2025-11-23T14:42:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false 
reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:29 crc kubenswrapper[5050]: I1123 14:42:29.072771 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-qvjn2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"abdac21e-18fc-460d-bd3b-73bed66b8ab9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d239bbc61595516c6e76763dd66ee795750adb765c56bb5d9c967c1e5897fcf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bggpq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.
126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-qvjn2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:29Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:29 crc kubenswrapper[5050]: I1123 14:42:29.092178 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7cjg9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7f65245a-e7ea-4b29-9534-5dbe5a7ee271\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf73369e7515e9d61bf8fc0b0bd940f15e49a8e84b3e181dad104bd56c2d9b2d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-586ds\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:08Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7cjg9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:29Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:29 crc kubenswrapper[5050]: I1123 14:42:29.105461 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8861091e-df98-4ea6-a853-e76e7dec48c5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://374a569595342d2e7684dc020e82bd80b35965aff751215c6c926005ea040798\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://63b17cb181aa6b22312393ce35127c8e06ae80cd1feaaa001d7cb34cdc111456\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3e232fb0da11b5caaec81c7b6ae7185d121508d99fce67c77e1d5d52289dad8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7b8be59b2e91f123c3d88b93237121402f84e9f54aeeaa0fae57baaa6238140\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:41:45Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:29Z is after 2025-08-24T17:21:41Z"
Nov 23 14:42:29 crc kubenswrapper[5050]: I1123 14:42:29.119097 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://719b29f3f0e7ff3aa3d105c9f4d9fb8d180d13494fcef94b204dece23c0f763d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:29Z is after 2025-08-24T17:21:41Z"
Nov 23 14:42:29 crc kubenswrapper[5050]: I1123 14:42:29.132206 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac91134c1368426cb845f20e6c777b0b613d93b1761d097051a7e63dae4203a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ef730e87b3ef157558a92f93526e4dec23e71b48f5cb08a30242f143182a5d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:29Z is after 2025-08-24T17:21:41Z"
Nov 23 14:42:29 crc kubenswrapper[5050]: I1123 14:42:29.144409 5050 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1d998909-9470-47ef-87e8-b34f0473682f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5bbc40aad4e1fff72cfeaf901bbe2edde01b909690cec06e0828aee8a4c93e8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jx4bd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b97978773311507f492064330c0ebbaf7862255a02347599c52ce6bbd13d8922\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jx4bd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-hlrlq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:29Z is after 2025-08-24T17:21:41Z"
Nov 23 14:42:29 crc kubenswrapper[5050]: I1123 14:42:29.157857 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-km979" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1d7b7de2-b361-44b9-ba10-5d4f889abc9b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a024193010036bae37aa1c744cd832dced7ca4893acc76efe5c2b9db65834a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://32b4eece06b72ca080adc2e1fab7f1db357c926b47433f2d374afaa45dad6b38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://32b4eece06b72ca080adc2e1fab7f1db357c926b47433f2d374afaa45dad6b38\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86867dd85be2aec28e680e55a6fb723964f26082eff07f656c00bd79cd2d3cd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://86867dd85be2aec28e680e55a6fb723964f26082eff07f656c00bd79cd2d3cd2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c72b02501d9b33ae056e91b10f13e50327984b1cabcd69b5066780aa4fde2682\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c72b02501d9b33ae056e91b10f13e50327984b1cabcd69b5066780aa4fde2682\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba558b2174a35efd061ce97a84b1b785916ea34d481d58cf4582a5bf29d6b24a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba558b2174a35efd061ce97a84b1b785916ea34d481d58cf4582a5bf29d6b24a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9cd2247c7d101274e0f7a09b531266720489fb8ed77f89e7ccd1ee74882508d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c9cd2247c7d101274e0f7a09b531266720489fb8ed77f89e7ccd1ee74882508d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0873c53f2aa7003b92b7460595f79f52b70796f9e38c133da799b5c49785d857\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0873c53f2aa7003b92b7460595f79f52b70796f9e38c133da799b5c49785d857\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-km979\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:29Z is after 2025-08-24T17:21:41Z"
Nov 23 14:42:29 crc kubenswrapper[5050]: I1123 14:42:29.159309 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 23 14:42:29 crc kubenswrapper[5050]: I1123 14:42:29.159351 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 23 14:42:29 crc kubenswrapper[5050]: I1123 14:42:29.159362 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 23 14:42:29 crc kubenswrapper[5050]: I1123 14:42:29.159378 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 23 14:42:29 crc kubenswrapper[5050]: I1123 14:42:29.159387 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:29Z","lastTransitionTime":"2025-11-23T14:42:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 23 14:42:29 crc kubenswrapper[5050]: I1123 14:42:29.266372 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 23 14:42:29 crc kubenswrapper[5050]: I1123 14:42:29.266456 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 23 14:42:29 crc kubenswrapper[5050]: I1123 14:42:29.266471 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 23 14:42:29 crc kubenswrapper[5050]: I1123 14:42:29.266489 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 23 14:42:29 crc kubenswrapper[5050]: I1123 14:42:29.266499 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:29Z","lastTransitionTime":"2025-11-23T14:42:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 23 14:42:29 crc kubenswrapper[5050]: I1123 14:42:29.370766 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 23 14:42:29 crc kubenswrapper[5050]: I1123 14:42:29.370817 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 23 14:42:29 crc kubenswrapper[5050]: I1123 14:42:29.370828 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 23 14:42:29 crc kubenswrapper[5050]: I1123 14:42:29.370847 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 23 14:42:29 crc kubenswrapper[5050]: I1123 14:42:29.370858 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:29Z","lastTransitionTime":"2025-11-23T14:42:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 23 14:42:29 crc kubenswrapper[5050]: I1123 14:42:29.473612 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 23 14:42:29 crc kubenswrapper[5050]: I1123 14:42:29.473653 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 23 14:42:29 crc kubenswrapper[5050]: I1123 14:42:29.473665 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 23 14:42:29 crc kubenswrapper[5050]: I1123 14:42:29.473681 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 23 14:42:29 crc kubenswrapper[5050]: I1123 14:42:29.473693 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:29Z","lastTransitionTime":"2025-11-23T14:42:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 23 14:42:29 crc kubenswrapper[5050]: I1123 14:42:29.548546 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 23 14:42:29 crc kubenswrapper[5050]: E1123 14:42:29.548714 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 23 14:42:29 crc kubenswrapper[5050]: I1123 14:42:29.576939 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 23 14:42:29 crc kubenswrapper[5050]: I1123 14:42:29.576983 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 23 14:42:29 crc kubenswrapper[5050]: I1123 14:42:29.576995 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 23 14:42:29 crc kubenswrapper[5050]: I1123 14:42:29.577012 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 23 14:42:29 crc kubenswrapper[5050]: I1123 14:42:29.577024 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:29Z","lastTransitionTime":"2025-11-23T14:42:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 23 14:42:29 crc kubenswrapper[5050]: I1123 14:42:29.679714 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 23 14:42:29 crc kubenswrapper[5050]: I1123 14:42:29.679789 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 23 14:42:29 crc kubenswrapper[5050]: I1123 14:42:29.679808 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 23 14:42:29 crc kubenswrapper[5050]: I1123 14:42:29.679863 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 23 14:42:29 crc kubenswrapper[5050]: I1123 14:42:29.679892 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:29Z","lastTransitionTime":"2025-11-23T14:42:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 23 14:42:29 crc kubenswrapper[5050]: I1123 14:42:29.783254 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 23 14:42:29 crc kubenswrapper[5050]: I1123 14:42:29.783294 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 23 14:42:29 crc kubenswrapper[5050]: I1123 14:42:29.783303 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 23 14:42:29 crc kubenswrapper[5050]: I1123 14:42:29.783318 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 23 14:42:29 crc kubenswrapper[5050]: I1123 14:42:29.783331 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:29Z","lastTransitionTime":"2025-11-23T14:42:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 23 14:42:29 crc kubenswrapper[5050]: I1123 14:42:29.886957 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 23 14:42:29 crc kubenswrapper[5050]: I1123 14:42:29.887010 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 23 14:42:29 crc kubenswrapper[5050]: I1123 14:42:29.887027 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 23 14:42:29 crc kubenswrapper[5050]: I1123 14:42:29.887055 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 23 14:42:29 crc kubenswrapper[5050]: I1123 14:42:29.887074 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:29Z","lastTransitionTime":"2025-11-23T14:42:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 23 14:42:29 crc kubenswrapper[5050]: I1123 14:42:29.928565 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-j8fzz_6573c043-542c-47ae-a2ba-f70b8baf60c2/ovnkube-controller/2.log"
Nov 23 14:42:29 crc kubenswrapper[5050]: I1123 14:42:29.929639 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-j8fzz_6573c043-542c-47ae-a2ba-f70b8baf60c2/ovnkube-controller/1.log"
Nov 23 14:42:29 crc kubenswrapper[5050]: I1123 14:42:29.933136 5050 generic.go:334] "Generic (PLEG): container finished" podID="6573c043-542c-47ae-a2ba-f70b8baf60c2" containerID="8563b3e8d8f18133f64756e8dfd9f30cdcdad52ed6e13c8edc39fbdee3f71e4a" exitCode=1
Nov 23 14:42:29 crc kubenswrapper[5050]: I1123 14:42:29.933178 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" event={"ID":"6573c043-542c-47ae-a2ba-f70b8baf60c2","Type":"ContainerDied","Data":"8563b3e8d8f18133f64756e8dfd9f30cdcdad52ed6e13c8edc39fbdee3f71e4a"}
Nov 23 14:42:29 crc kubenswrapper[5050]: I1123 14:42:29.933229 5050 scope.go:117] "RemoveContainer" containerID="e23fb2eb8cab90b4a0352a351bc63c590a96ec40331e9db29b2f39fe76cbf0f8"
Nov 23 14:42:29 crc kubenswrapper[5050]: I1123 14:42:29.934209 5050 scope.go:117] "RemoveContainer" containerID="8563b3e8d8f18133f64756e8dfd9f30cdcdad52ed6e13c8edc39fbdee3f71e4a"
Nov 23 14:42:29 crc kubenswrapper[5050]: E1123 14:42:29.934475 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-j8fzz_openshift-ovn-kubernetes(6573c043-542c-47ae-a2ba-f70b8baf60c2)\"" pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" podUID="6573c043-542c-47ae-a2ba-f70b8baf60c2"
Nov 23 14:42:29 crc kubenswrapper[5050]: I1123 14:42:29.951244 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://719b29f3f0e7ff3aa3d105c9f4d9fb8d180d13494fcef94b204dece23c0f763d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:29Z is after 2025-08-24T17:21:41Z"
Nov 23 14:42:29 crc kubenswrapper[5050]: I1123 14:42:29.965337 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac91134c1368426cb845f20e6c777b0b613d93b1761d097051a7e63dae4203a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ef730e87b3ef157558a92f93526e4dec23e71b48f5cb08a30242f143182a5d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:29Z is after 2025-08-24T17:21:41Z"
Nov 23 14:42:29 crc kubenswrapper[5050]: I1123 14:42:29.978364 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1d998909-9470-47ef-87e8-b34f0473682f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5bbc40aad4e1fff72cfeaf901bbe2edde01b909690cec06e0828aee8a4c93e8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jx4bd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b97978773311507f492064330c0ebbaf7862255a02347599c52ce6bbd13d8922\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jx4bd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-hlrlq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:29Z is after 2025-08-24T17:21:41Z"
Nov 23 14:42:29 crc kubenswrapper[5050]: I1123 14:42:29.989717 5050 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:29 crc kubenswrapper[5050]: I1123 14:42:29.989915 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:29 crc kubenswrapper[5050]: I1123 14:42:29.990051 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:29 crc kubenswrapper[5050]: I1123 14:42:29.990218 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:29 crc kubenswrapper[5050]: I1123 14:42:29.990356 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:29Z","lastTransitionTime":"2025-11-23T14:42:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:29 crc kubenswrapper[5050]: I1123 14:42:29.997674 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-km979" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1d7b7de2-b361-44b9-ba10-5d4f889abc9b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a024193010036bae37aa1c744cd832dced7ca4893acc76efe5c2b9db65834a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://32b4eece06b72ca080adc2e1fab7f1db357c926b47433f2d374afaa45dad6b38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2
c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://32b4eece06b72ca080adc2e1fab7f1db357c926b47433f2d374afaa45dad6b38\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86867dd85be2aec28e680e55a6fb723964f26082eff07f656c00bd79cd2d3cd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://86867dd85be2aec28e680e55a6fb723964f26082eff07f656c00bd79cd2d3cd2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c72b02501d9b33ae056e91b10f13e50327984b1cabcd69b5066780aa4fde2682\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c72b02501d9b33ae056e91b10f13e50327984b1cabcd69b5066780aa4fde2682\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/
secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba558b2174a35efd061ce97a84b1b785916ea34d481d58cf4582a5bf29d6b24a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba558b2174a35efd061ce97a84b1b785916ea34d481d58cf4582a5bf29d6b24a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9cd2247c7d101274e0f7a09b531266720489fb8ed77f89e7ccd1ee74882508d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c9cd2247c7d101274e0f7a09b531266720489fb8ed77f89e7ccd1ee74882508d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0873c53f2aa7003b92b7460595f79f52b70796f9e38c133da799b5c49785d857\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0873c53f2aa7003b92b7460595f79f52b70796f9e38c133da799b5c49785d857\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:13Z\\\"}},\\\"volu
meMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-km979\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:29Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:30 crc kubenswrapper[5050]: I1123 14:42:30.016683 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8861091e-df98-4ea6-a853-e76e7dec48c5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://374a569595342d2e7684dc020e82bd80b35965aff751215c6c926005ea040798\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://63b17cb181aa6b22312393ce35127c8e06ae80cd1feaaa001d7cb34cdc111456\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"
mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3e232fb0da11b5caaec81c7b6ae7185d121508d99fce67c77e1d5d52289dad8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7b8be59b2e91f123c3d88b93237121402f84e9f54aeeaa0fae57baaa6238140\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:41:45Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:30Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:30 crc kubenswrapper[5050]: I1123 14:42:30.030914 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-gtj96" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc69bd19-1f49-486e-a510-d5b8461fb172\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ltjqr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ltjqr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:20Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-gtj96\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:30Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:30 crc kubenswrapper[5050]: I1123 14:42:30.051060 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0a2166ba-b7f4-4eb1-95bc-4f44d90e72ea\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://358800bef7b7e16e58003aa5e2411212334a5b1a33c80d223eb7d40ad13867f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://456093b33927a232192a5fa7ee1a4228e9f37da58e5e09cb33018ae6f5b14b2c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ccfc14c36ecee0d8502415582be6053b096ea77504bf5de67f77bf6f74c0936b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0c0214fb48430e8263a76a8412ebe7aaeaa85c1b476c22f8bbe9a5279f35aa5d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://10cf72fab955e4ec81917127043d9e4a2f9d427a03edc22120ab2ae711fdc5a4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"ed_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed 
loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1763908925\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1763908925\\\\\\\\\\\\\\\" (2025-11-23 13:42:05 +0000 UTC to 2026-11-23 13:42:05 +0000 UTC (now=2025-11-23 14:42:05.669138051 +0000 UTC))\\\\\\\"\\\\nI1123 14:42:05.669209 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1123 14:42:05.669265 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1123 14:42:05.668647 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1123 14:42:05.669361 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1123 14:42:05.669413 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1123 14:42:05.668681 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3465722692/tls.crt::/tmp/serving-cert-3465722692/tls.key\\\\\\\"\\\\nI1123 14:42:05.669629 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1123 14:42:05.669660 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1123 14:42:05.668685 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1123 14:42:05.669884 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1123 14:42:05.668719 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1123 14:42:05.669899 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1123 14:42:05.668776 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-23T14:41:59Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://beaf638703b4b64fabd10eb14bad058dc0dae74261a462e7f2fddd8aa3811cf7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a63464a6e2f79d8e5a7e0eed9f72b684e0071cad60d3a801754762be4a2735c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a63464a6e2f79d8e5a7e0eed9f72b684e0071cad60d3a801754762be4a2735c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:41:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:41:45Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:30Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:30 crc kubenswrapper[5050]: I1123 14:42:30.071096 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:30Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:30 crc kubenswrapper[5050]: I1123 14:42:30.090595 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ced068547f7e7c0807e996ab8500b30cda68a4c31363b415f66516b883221972\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:30Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:30 crc kubenswrapper[5050]: I1123 14:42:30.094421 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:30 crc kubenswrapper[5050]: I1123 14:42:30.094656 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:30 crc kubenswrapper[5050]: I1123 14:42:30.094774 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:30 crc kubenswrapper[5050]: I1123 14:42:30.094897 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:30 crc kubenswrapper[5050]: I1123 14:42:30.095010 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:30Z","lastTransitionTime":"2025-11-23T14:42:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:30 crc kubenswrapper[5050]: I1123 14:42:30.109160 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-7btqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"963992d0-f69e-4327-9789-4571451c1838\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c4b1570d42f2b86c042b9abcb595bf36b35789f24d12573f94503c7383849647\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8vphk\\\",\\\"readOnly\\\":true,\\\"recursiveRead
Only\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:05Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-7btqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:30Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:30 crc kubenswrapper[5050]: I1123 14:42:30.143869 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6573c043-542c-47ae-a2ba-f70b8baf60c2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d06dfc9e93891495a387aa8a8e24b4029173b25c953dff4d2e80dbb30f4b03e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd601391a5bb150716ee97b765dd442d4bf9929eb01dfd17fdce39f327b4698f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9de298aa47ed7038c4f4ed742abb7a13f112d7f417637eb11d31a1f1accf0b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfa445fb3677a09126d124eea1348466a338713c6728a7ef138d9ffa43bec9eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://af3da6e6807f5d42cb8607efbabab0c5d472ddb5c02dd1c33f9f0f3ea8eba9c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://34b2a56f4af5df28c458b7f7d4588f81564c734a117bbf4d08e1fccfa43fd387\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8563b3e8d8f18133f64756e8dfd9f30cdcdad52e
d6e13c8edc39fbdee3f71e4a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e23fb2eb8cab90b4a0352a351bc63c590a96ec40331e9db29b2f39fe76cbf0f8\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-23T14:42:17Z\\\",\\\"message\\\":\\\"3 14:42:17.882505 6496 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1123 14:42:17.882633 6496 reflector.go:311] Stopping reflector *v1.EgressService (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI1123 14:42:17.882768 6496 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1123 14:42:17.882928 6496 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1123 14:42:17.883130 6496 reflector.go:311] Stopping reflector *v1.EgressQoS (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/factory.go:140\\\\nI1123 14:42:17.883305 6496 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1123 14:42:17.883429 6496 reflector.go:311] Stopping reflector *v1.NetworkAttachmentDefinition (0s) from github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/factory.go:117\\\\nI1123 14:42:17.883712 6496 reflector.go:311] Stopping reflector *v1.AdminPolicyBasedExternalRoute (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/informers/externalversions/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:16Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8563b3e8d8f18133f64756e8dfd9f30cdcdad52ed6e13c8edc39fbdee3f71e4a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-23T14:42:29Z\\\",\\\"message\\\":\\\"tus/network-metrics-daemon-gtj96: failed to update pod openshift-multus/network-metrics-daemon-gtj96: Internal error occurred: failed calling webhook \\\\\\\"pod.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/pod?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:29Z is after 2025-08-24T17:21:41Z\\\\nI1123 14:42:29.428594 6728 ovnkube.go:599] Stopped ovnkube\\\\nI1123 14:42:29.428608 6728 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1123 14:42:29.428611 6728 obj_retry.go:303] Retry object setup: *v1.Pod openshift-network-diagnostics/network-check-source-55646444c4-trplf\\\\nI1123 14:42:29.428624 6728 obj_retry.go:365] Adding new object: *v1.Pod openshift-network-diagnostics/network-check-source-55646444c4-trplf\\\\nI1123 14:42:29.428635 6728 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1123 14:42:29.428578 6728 obj_retry.go:303] Retry object setup: *v1.Pod openshift-dns/node-resolver-7btqb\\\\nF1123 14:42:29.428723 6728 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default 
network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7009f69906eef1a76bebe08606ec9a9b7e61ce6217caaf0b784d0943db4bd51f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"cont
ainerID\\\":\\\"cri-o://6c68e73aa649285bedc9716826bdf5e41580614fba6de910e2dc1fa5bb524dcc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6c68e73aa649285bedc9716826bdf5e41580614fba6de910e2dc1fa5bb524dcc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-j8fzz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:30Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:30 crc kubenswrapper[5050]: I1123 14:42:30.162536 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-9q4mg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b75c970-bec5-4155-89c1-3fcba8733f70\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4f4f3e4f2e058d94d8ff4dc299e19a23318e04263a4324efa766daf5f7268f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lsct2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2cc3ecb845a59d933cd05a64b4c258b18d167098d933d7b0741d7dbb43bfb328\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lsct2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:19Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-9q4mg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:30Z is after 2025-08-24T17:21:41Z" Nov 23 
14:42:30 crc kubenswrapper[5050]: I1123 14:42:30.184423 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:30Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:30 crc kubenswrapper[5050]: I1123 14:42:30.198751 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:30 crc kubenswrapper[5050]: I1123 14:42:30.198797 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:30 crc kubenswrapper[5050]: I1123 14:42:30.198808 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:30 crc kubenswrapper[5050]: I1123 14:42:30.198831 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:30 crc kubenswrapper[5050]: I1123 14:42:30.198847 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:30Z","lastTransitionTime":"2025-11-23T14:42:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:30 crc kubenswrapper[5050]: I1123 14:42:30.206638 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-qvjn2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"abdac21e-18fc-460d-bd3b-73bed66b8ab9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d239bbc61595516c6e76763dd66ee795750adb765c56bb5d9c967c1e5897fcf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bggpq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-qvjn2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:30Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:30 crc kubenswrapper[5050]: I1123 14:42:30.221954 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7cjg9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7f65245a-e7ea-4b29-9534-5dbe5a7ee271\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf73369e7515e9d61bf8fc0b0bd940f15e49a8e84b3e181dad104bd56c2d9b2d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-586ds\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:08Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7cjg9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:30Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:30 crc kubenswrapper[5050]: I1123 14:42:30.238218 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:30Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:30 crc kubenswrapper[5050]: I1123 14:42:30.301868 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:30 crc kubenswrapper[5050]: I1123 14:42:30.301922 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:30 crc kubenswrapper[5050]: I1123 14:42:30.301941 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:30 crc kubenswrapper[5050]: I1123 14:42:30.301966 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:30 crc kubenswrapper[5050]: I1123 14:42:30.301984 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:30Z","lastTransitionTime":"2025-11-23T14:42:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:30 crc kubenswrapper[5050]: I1123 14:42:30.407280 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:30 crc kubenswrapper[5050]: I1123 14:42:30.407383 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:30 crc kubenswrapper[5050]: I1123 14:42:30.407410 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:30 crc kubenswrapper[5050]: I1123 14:42:30.407483 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:30 crc kubenswrapper[5050]: I1123 14:42:30.407567 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:30Z","lastTransitionTime":"2025-11-23T14:42:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:30 crc kubenswrapper[5050]: I1123 14:42:30.511932 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:30 crc kubenswrapper[5050]: I1123 14:42:30.512010 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:30 crc kubenswrapper[5050]: I1123 14:42:30.512029 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:30 crc kubenswrapper[5050]: I1123 14:42:30.512058 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:30 crc kubenswrapper[5050]: I1123 14:42:30.512078 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:30Z","lastTransitionTime":"2025-11-23T14:42:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:30 crc kubenswrapper[5050]: I1123 14:42:30.547746 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 23 14:42:30 crc kubenswrapper[5050]: I1123 14:42:30.547868 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 23 14:42:30 crc kubenswrapper[5050]: I1123 14:42:30.547774 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gtj96" Nov 23 14:42:30 crc kubenswrapper[5050]: E1123 14:42:30.547969 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 23 14:42:30 crc kubenswrapper[5050]: E1123 14:42:30.548114 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 23 14:42:30 crc kubenswrapper[5050]: E1123 14:42:30.548260 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gtj96" podUID="cc69bd19-1f49-486e-a510-d5b8461fb172" Nov 23 14:42:30 crc kubenswrapper[5050]: I1123 14:42:30.615335 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:30 crc kubenswrapper[5050]: I1123 14:42:30.615403 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:30 crc kubenswrapper[5050]: I1123 14:42:30.615422 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:30 crc kubenswrapper[5050]: I1123 14:42:30.615482 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:30 crc kubenswrapper[5050]: I1123 14:42:30.615536 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:30Z","lastTransitionTime":"2025-11-23T14:42:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:30 crc kubenswrapper[5050]: I1123 14:42:30.719690 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:30 crc kubenswrapper[5050]: I1123 14:42:30.719779 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:30 crc kubenswrapper[5050]: I1123 14:42:30.719799 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:30 crc kubenswrapper[5050]: I1123 14:42:30.719826 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:30 crc kubenswrapper[5050]: I1123 14:42:30.719846 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:30Z","lastTransitionTime":"2025-11-23T14:42:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:30 crc kubenswrapper[5050]: I1123 14:42:30.823609 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:30 crc kubenswrapper[5050]: I1123 14:42:30.823668 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:30 crc kubenswrapper[5050]: I1123 14:42:30.823689 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:30 crc kubenswrapper[5050]: I1123 14:42:30.823720 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:30 crc kubenswrapper[5050]: I1123 14:42:30.823741 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:30Z","lastTransitionTime":"2025-11-23T14:42:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:30 crc kubenswrapper[5050]: I1123 14:42:30.927353 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:30 crc kubenswrapper[5050]: I1123 14:42:30.927411 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:30 crc kubenswrapper[5050]: I1123 14:42:30.927423 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:30 crc kubenswrapper[5050]: I1123 14:42:30.927470 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:30 crc kubenswrapper[5050]: I1123 14:42:30.927485 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:30Z","lastTransitionTime":"2025-11-23T14:42:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:30 crc kubenswrapper[5050]: I1123 14:42:30.940321 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-j8fzz_6573c043-542c-47ae-a2ba-f70b8baf60c2/ovnkube-controller/2.log" Nov 23 14:42:30 crc kubenswrapper[5050]: I1123 14:42:30.946212 5050 scope.go:117] "RemoveContainer" containerID="8563b3e8d8f18133f64756e8dfd9f30cdcdad52ed6e13c8edc39fbdee3f71e4a" Nov 23 14:42:30 crc kubenswrapper[5050]: E1123 14:42:30.946525 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-j8fzz_openshift-ovn-kubernetes(6573c043-542c-47ae-a2ba-f70b8baf60c2)\"" pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" podUID="6573c043-542c-47ae-a2ba-f70b8baf60c2" Nov 23 14:42:30 crc kubenswrapper[5050]: I1123 14:42:30.971180 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0a2166ba-b7f4-4eb1-95bc-4f44d90e72ea\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://358800bef7b7e16e58003aa5e2411212334a5b1a33c80d223eb7d40ad13867f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://456093b33927a232192a5fa7ee1a4228e9f37da58e5e09cb33018ae6f5b14b2c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ccfc14c36ecee0d8502415582be6053b096ea77504bf5de67f77bf6f74c0936b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0c0214fb48430e8263a76a8412ebe7aaeaa85c1b476c22f8bbe9a5279f35aa5d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://10cf72fab955e4ec81917127043d9e4a2f9d427a03edc22120ab2ae711fdc5a4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"ed_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed 
loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1763908925\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1763908925\\\\\\\\\\\\\\\" (2025-11-23 13:42:05 +0000 UTC to 2026-11-23 13:42:05 +0000 UTC (now=2025-11-23 14:42:05.669138051 +0000 UTC))\\\\\\\"\\\\nI1123 14:42:05.669209 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1123 14:42:05.669265 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1123 14:42:05.668647 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1123 14:42:05.669361 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1123 14:42:05.669413 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1123 14:42:05.668681 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3465722692/tls.crt::/tmp/serving-cert-3465722692/tls.key\\\\\\\"\\\\nI1123 14:42:05.669629 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1123 14:42:05.669660 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1123 14:42:05.668685 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1123 14:42:05.669884 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1123 14:42:05.668719 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1123 14:42:05.669899 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1123 14:42:05.668776 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-23T14:41:59Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://beaf638703b4b64fabd10eb14bad058dc0dae74261a462e7f2fddd8aa3811cf7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a63464a6e2f79d8e5a7e0eed9f72b684e0071cad60d3a801754762be4a2735c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a63464a6e2f79d8e5a7e0eed9f72b684e0071cad60d3a801754762be4a2735c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:41:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:41:45Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:30Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:30 crc kubenswrapper[5050]: I1123 14:42:30.994424 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:30Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:31 crc kubenswrapper[5050]: I1123 14:42:31.012241 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-gtj96" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc69bd19-1f49-486e-a510-d5b8461fb172\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ltjqr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ltjqr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:20Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-gtj96\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:31Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:31 crc kubenswrapper[5050]: I1123 14:42:31.031851 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:31 crc kubenswrapper[5050]: I1123 14:42:31.031930 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:31 crc kubenswrapper[5050]: I1123 14:42:31.031944 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:31 crc kubenswrapper[5050]: I1123 14:42:31.031967 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:31 crc kubenswrapper[5050]: I1123 14:42:31.031985 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:31Z","lastTransitionTime":"2025-11-23T14:42:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:31 crc kubenswrapper[5050]: I1123 14:42:31.032959 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-7btqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"963992d0-f69e-4327-9789-4571451c1838\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c4b1570d42f2b86c042b9abcb595bf36b35789f24d12573f94503c7383849647\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8vphk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:05Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-7btqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:31Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:31 crc kubenswrapper[5050]: I1123 14:42:31.069781 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6573c043-542c-47ae-a2ba-f70b8baf60c2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d06dfc9e93891495a387aa8a8e24b4029173b25c953dff4d2e80dbb30f4b03e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd601391a5bb150716ee97b765dd442d4bf9929eb01dfd17fdce39f327b4698f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9de298aa47ed7038c4f4ed742abb7a13f112d7f417637eb11d31a1f1accf0b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfa445fb3677a09126d124eea1348466a338713c6728a7ef138d9ffa43bec9eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://af3da6e6807f5d42cb8607efbabab0c5d472ddb5c02dd1c33f9f0f3ea8eba9c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://34b2a56f4af5df28c458b7f7d4588f81564c734a117bbf4d08e1fccfa43fd387\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8563b3e8d8f18133f64756e8dfd9f30cdcdad52ed6e13c8edc39fbdee3f71e4a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8563b3e8d8f18133f64756e8dfd9f30cdcdad52ed6e13c8edc39fbdee3f71e4a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-23T14:42:29Z\\\",\\\"message\\\":\\\"tus/network-metrics-daemon-gtj96: failed to update pod openshift-multus/network-metrics-daemon-gtj96: Internal error occurred: failed calling webhook \\\\\\\"pod.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/pod?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:29Z is after 2025-08-24T17:21:41Z\\\\nI1123 14:42:29.428594 6728 ovnkube.go:599] Stopped ovnkube\\\\nI1123 14:42:29.428608 6728 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1123 14:42:29.428611 6728 obj_retry.go:303] Retry object setup: *v1.Pod openshift-network-diagnostics/network-check-source-55646444c4-trplf\\\\nI1123 14:42:29.428624 6728 obj_retry.go:365] Adding new object: *v1.Pod openshift-network-diagnostics/network-check-source-55646444c4-trplf\\\\nI1123 14:42:29.428635 6728 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1123 14:42:29.428578 6728 obj_retry.go:303] Retry object setup: *v1.Pod openshift-dns/node-resolver-7btqb\\\\nF1123 14:42:29.428723 6728 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:28Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-j8fzz_openshift-ovn-kubernetes(6573c043-542c-47ae-a2ba-f70b8baf60c2)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7009f69906eef1a76bebe08606ec9a9b7e61ce6217caaf0b784d0943db4bd51f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c68e73aa649285bedc9716826bdf5e41580614fba6de910e2dc1fa5bb524dcc\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6c68e73aa649285bedc9716826bdf5e41580614fba6de910e2dc1fa5bb524dcc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-j8fzz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:31Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:31 crc kubenswrapper[5050]: I1123 14:42:31.086422 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-9q4mg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b75c970-bec5-4155-89c1-3fcba8733f70\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4f4f3e4f2e058d94d8ff4dc299e19a23318e04263a4324efa766daf5f7268f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lsct2
\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2cc3ecb845a59d933cd05a64b4c258b18d167098d933d7b0741d7dbb43bfb328\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lsct2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:19Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-9q4mg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:31Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:31 crc kubenswrapper[5050]: I1123 14:42:31.103039 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:31Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:31 crc kubenswrapper[5050]: I1123 14:42:31.120542 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ced068547f7e7c0807e996ab8500b30cda68a4c31363b415f66516b883221972\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:31Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:31 crc kubenswrapper[5050]: I1123 14:42:31.135292 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:31 crc kubenswrapper[5050]: I1123 14:42:31.135378 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 23 14:42:31 crc kubenswrapper[5050]: I1123 14:42:31.135397 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:31 crc kubenswrapper[5050]: I1123 14:42:31.135429 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:31 crc kubenswrapper[5050]: I1123 14:42:31.135479 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:31Z","lastTransitionTime":"2025-11-23T14:42:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:31 crc kubenswrapper[5050]: I1123 14:42:31.137089 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7cjg9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7f65245a-e7ea-4b29-9534-5dbe5a7ee271\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf73369e7515e9d61bf8fc0b0bd940f15e49a8e84b3e181dad104bd56c2d9b2d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-586ds\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:08Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7cjg9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-23T14:42:31Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:31 crc kubenswrapper[5050]: I1123 14:42:31.154762 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:31Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:31 crc kubenswrapper[5050]: I1123 14:42:31.174254 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-qvjn2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"abdac21e-18fc-460d-bd3b-73bed66b8ab9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d239bbc61595516c6e76763dd66ee795750adb765c56bb5d9c967c1e5897fcf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bggpq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-qvjn2\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:31Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:31 crc kubenswrapper[5050]: I1123 14:42:31.196559 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac91134c1368426cb845f20e6c777b0b613d93b1761d097051a7e63dae4203a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ef730e87b3ef157558a92f93526e4dec23e71b48f5cb08a30242f143182a5d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:31Z is after 
2025-08-24T17:21:41Z" Nov 23 14:42:31 crc kubenswrapper[5050]: I1123 14:42:31.216263 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1d998909-9470-47ef-87e8-b34f0473682f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5bbc40aad4e1fff72cfeaf901bbe2edde01b909690cec06e0828aee8a4c93e8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jx4bd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b97978773311507f492064330c0ebbaf7862255a02347599c52ce6bbd13d8922\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jx4bd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-hlrlq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed 
to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:31Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:31 crc kubenswrapper[5050]: I1123 14:42:31.239222 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:31 crc kubenswrapper[5050]: I1123 14:42:31.239291 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:31 crc kubenswrapper[5050]: I1123 14:42:31.239308 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:31 crc kubenswrapper[5050]: I1123 14:42:31.239331 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:31 crc kubenswrapper[5050]: I1123 14:42:31.239345 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:31Z","lastTransitionTime":"2025-11-23T14:42:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:31 crc kubenswrapper[5050]: I1123 14:42:31.243020 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-km979" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1d7b7de2-b361-44b9-ba10-5d4f889abc9b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a024193010036bae37aa1c744cd832dced7ca4893acc76efe5c2b9db65834a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://32b4eece06b72ca080adc2e1fab7f1db357c926b47433f2d374afaa45dad6b38\\\",\\\"image\\\":\\
\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://32b4eece06b72ca080adc2e1fab7f1db357c926b47433f2d374afaa45dad6b38\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86867dd85be2aec28e680e55a6fb723964f26082eff07f656c00bd79cd2d3cd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://86867dd85be2aec28e680e55a6fb723964f26082eff07f656c00bd79cd2d3cd2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c72b02501d9b33ae056e91b10f13e50327984b1cabcd69b5066780aa4fde2682\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c72b02501d9b33ae056e91b10f13e50327984b1cabcd69b5066780aa4fde2682\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\
":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba558b2174a35efd061ce97a84b1b785916ea34d481d58cf4582a5bf29d6b24a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba558b2174a35efd061ce97a84b1b785916ea34d481d58cf4582a5bf29d6b24a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9cd2247c7d101274e0f7a09b531266720489fb8ed77f89e7ccd1ee74882508d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c9cd2247c7d101274e0f7a09b531266720489fb8ed77f89e7ccd1ee74882508d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0873c53f2aa7003b92b7460595f79f52b70796f9e38c133da799b5c49785d857\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\
"cri-o://0873c53f2aa7003b92b7460595f79f52b70796f9e38c133da799b5c49785d857\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-km979\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:31Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:31 crc kubenswrapper[5050]: I1123 14:42:31.268778 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8861091e-df98-4ea6-a853-e76e7dec48c5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://374a569595342d2e7684dc020e82bd80b35965aff751215c6c926005ea040798\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://63b17cb181aa6b22312393ce35127c8e06ae80cd1feaaa001d7cb34cdc111456\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount
\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3e232fb0da11b5caaec81c7b6ae7185d121508d99fce67c77e1d5d52289dad8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7b8be59b2e91f123c3d88b93237121402f84e9f54aeeaa0fae57baaa6238140\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:41:45Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:31Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:31 crc kubenswrapper[5050]: I1123 14:42:31.296134 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://719b29f3f0e7ff3aa3d105c9f4d9fb8d180d13494fcef94b204dece23c0f763d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:31Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:31 crc kubenswrapper[5050]: I1123 14:42:31.343935 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:31 crc kubenswrapper[5050]: I1123 14:42:31.344013 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:31 crc kubenswrapper[5050]: I1123 14:42:31.344025 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:31 crc kubenswrapper[5050]: I1123 14:42:31.344046 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:31 crc kubenswrapper[5050]: I1123 14:42:31.344061 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:31Z","lastTransitionTime":"2025-11-23T14:42:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 23 14:42:31 crc kubenswrapper[5050]: I1123 14:42:31.446929 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 23 14:42:31 crc kubenswrapper[5050]: I1123 14:42:31.447017 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 23 14:42:31 crc kubenswrapper[5050]: I1123 14:42:31.447076 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 23 14:42:31 crc kubenswrapper[5050]: I1123 14:42:31.447099 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 23 14:42:31 crc kubenswrapper[5050]: I1123 14:42:31.447117 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:31Z","lastTransitionTime":"2025-11-23T14:42:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 23 14:42:31 crc kubenswrapper[5050]: I1123 14:42:31.548367 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 23 14:42:31 crc kubenswrapper[5050]: E1123 14:42:31.548661 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 23 14:42:31 crc kubenswrapper[5050]: I1123 14:42:31.550908 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 23 14:42:31 crc kubenswrapper[5050]: I1123 14:42:31.551003 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 23 14:42:31 crc kubenswrapper[5050]: I1123 14:42:31.551022 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 23 14:42:31 crc kubenswrapper[5050]: I1123 14:42:31.551051 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 23 14:42:31 crc kubenswrapper[5050]: I1123 14:42:31.551073 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:31Z","lastTransitionTime":"2025-11-23T14:42:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 23 14:42:31 crc kubenswrapper[5050]: I1123 14:42:31.653678 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 23 14:42:31 crc kubenswrapper[5050]: I1123 14:42:31.653961 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 23 14:42:31 crc kubenswrapper[5050]: I1123 14:42:31.654028 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 23 14:42:31 crc kubenswrapper[5050]: I1123 14:42:31.654139 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 23 14:42:31 crc kubenswrapper[5050]: I1123 14:42:31.654212 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:31Z","lastTransitionTime":"2025-11-23T14:42:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 23 14:42:31 crc kubenswrapper[5050]: I1123 14:42:31.757647 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 23 14:42:31 crc kubenswrapper[5050]: I1123 14:42:31.757728 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 23 14:42:31 crc kubenswrapper[5050]: I1123 14:42:31.757749 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 23 14:42:31 crc kubenswrapper[5050]: I1123 14:42:31.757783 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 23 14:42:31 crc kubenswrapper[5050]: I1123 14:42:31.757805 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:31Z","lastTransitionTime":"2025-11-23T14:42:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 23 14:42:31 crc kubenswrapper[5050]: I1123 14:42:31.860630 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 23 14:42:31 crc kubenswrapper[5050]: I1123 14:42:31.860694 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 23 14:42:31 crc kubenswrapper[5050]: I1123 14:42:31.860712 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 23 14:42:31 crc kubenswrapper[5050]: I1123 14:42:31.860739 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 23 14:42:31 crc kubenswrapper[5050]: I1123 14:42:31.860757 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:31Z","lastTransitionTime":"2025-11-23T14:42:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 23 14:42:31 crc kubenswrapper[5050]: I1123 14:42:31.963778 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 23 14:42:31 crc kubenswrapper[5050]: I1123 14:42:31.963864 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 23 14:42:31 crc kubenswrapper[5050]: I1123 14:42:31.963884 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 23 14:42:31 crc kubenswrapper[5050]: I1123 14:42:31.963916 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 23 14:42:31 crc kubenswrapper[5050]: I1123 14:42:31.963941 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:31Z","lastTransitionTime":"2025-11-23T14:42:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 23 14:42:32 crc kubenswrapper[5050]: I1123 14:42:32.068036 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 23 14:42:32 crc kubenswrapper[5050]: I1123 14:42:32.068108 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 23 14:42:32 crc kubenswrapper[5050]: I1123 14:42:32.068122 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 23 14:42:32 crc kubenswrapper[5050]: I1123 14:42:32.068143 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 23 14:42:32 crc kubenswrapper[5050]: I1123 14:42:32.068160 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:32Z","lastTransitionTime":"2025-11-23T14:42:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 23 14:42:32 crc kubenswrapper[5050]: I1123 14:42:32.172097 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 23 14:42:32 crc kubenswrapper[5050]: I1123 14:42:32.172189 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 23 14:42:32 crc kubenswrapper[5050]: I1123 14:42:32.172209 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 23 14:42:32 crc kubenswrapper[5050]: I1123 14:42:32.172238 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 23 14:42:32 crc kubenswrapper[5050]: I1123 14:42:32.172266 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:32Z","lastTransitionTime":"2025-11-23T14:42:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 23 14:42:32 crc kubenswrapper[5050]: I1123 14:42:32.251678 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 23 14:42:32 crc kubenswrapper[5050]: I1123 14:42:32.251753 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 23 14:42:32 crc kubenswrapper[5050]: I1123 14:42:32.251771 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 23 14:42:32 crc kubenswrapper[5050]: I1123 14:42:32.251799 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 23 14:42:32 crc kubenswrapper[5050]: I1123 14:42:32.251819 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:32Z","lastTransitionTime":"2025-11-23T14:42:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 23 14:42:32 crc kubenswrapper[5050]: E1123 14:42:32.275227 5050 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:42:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:32Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:42:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:32Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:42:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:32Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:42:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:32Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bf8d1c9f-d59a-4fb5-8c40-1d0a144375f2\\\",\\\"systemUUID\\\":\\\"6e38a339-19e4-4f40-bd49-1fbe05dbb3f1\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:32Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:32 crc kubenswrapper[5050]: I1123 14:42:32.281578 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:32 crc kubenswrapper[5050]: I1123 14:42:32.281665 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 23 14:42:32 crc kubenswrapper[5050]: I1123 14:42:32.281687 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:32 crc kubenswrapper[5050]: I1123 14:42:32.281724 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:32 crc kubenswrapper[5050]: I1123 14:42:32.281750 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:32Z","lastTransitionTime":"2025-11-23T14:42:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:32 crc kubenswrapper[5050]: E1123 14:42:32.307213 5050 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:42:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:32Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:42:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:32Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:42:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:32Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:42:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:32Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bf8d1c9f-d59a-4fb5-8c40-1d0a144375f2\\\",\\\"systemUUID\\\":\\\"6e38a339-19e4-4f40-bd49-1fbe05dbb3f1\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:32Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:32 crc kubenswrapper[5050]: I1123 14:42:32.314684 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:32 crc kubenswrapper[5050]: I1123 14:42:32.314948 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 23 14:42:32 crc kubenswrapper[5050]: I1123 14:42:32.315088 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:32 crc kubenswrapper[5050]: I1123 14:42:32.315269 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:32 crc kubenswrapper[5050]: I1123 14:42:32.315424 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:32Z","lastTransitionTime":"2025-11-23T14:42:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:32 crc kubenswrapper[5050]: E1123 14:42:32.340370 5050 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:42:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:32Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:42:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:32Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:42:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:32Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:42:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:32Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bf8d1c9f-d59a-4fb5-8c40-1d0a144375f2\\\",\\\"systemUUID\\\":\\\"6e38a339-19e4-4f40-bd49-1fbe05dbb3f1\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:32Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:32 crc kubenswrapper[5050]: I1123 14:42:32.346422 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:32 crc kubenswrapper[5050]: I1123 14:42:32.346664 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 23 14:42:32 crc kubenswrapper[5050]: I1123 14:42:32.346801 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:32 crc kubenswrapper[5050]: I1123 14:42:32.346979 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:32 crc kubenswrapper[5050]: I1123 14:42:32.347129 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:32Z","lastTransitionTime":"2025-11-23T14:42:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:32 crc kubenswrapper[5050]: E1123 14:42:32.371646 5050 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:42:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:32Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:42:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:32Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:42:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:32Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:42:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:32Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bf8d1c9f-d59a-4fb5-8c40-1d0a144375f2\\\",\\\"systemUUID\\\":\\\"6e38a339-19e4-4f40-bd49-1fbe05dbb3f1\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:32Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:32 crc kubenswrapper[5050]: I1123 14:42:32.378516 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:32 crc kubenswrapper[5050]: I1123 14:42:32.378591 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 23 14:42:32 crc kubenswrapper[5050]: I1123 14:42:32.378611 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:32 crc kubenswrapper[5050]: I1123 14:42:32.378645 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:32 crc kubenswrapper[5050]: I1123 14:42:32.378670 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:32Z","lastTransitionTime":"2025-11-23T14:42:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:32 crc kubenswrapper[5050]: E1123 14:42:32.400881 5050 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:42:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:32Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:42:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:32Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:42:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:32Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:42:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:32Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bf8d1c9f-d59a-4fb5-8c40-1d0a144375f2\\\",\\\"systemUUID\\\":\\\"6e38a339-19e4-4f40-bd49-1fbe05dbb3f1\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:32Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:32 crc kubenswrapper[5050]: E1123 14:42:32.401123 5050 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 23 14:42:32 crc kubenswrapper[5050]: I1123 14:42:32.404123 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
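[editor's note: the retry loop above fails for the single reason stated in the kubelet's own error text: the serving certificate of the "node.network-node-identity.openshift.io" webhook at https://127.0.0.1:9743 expired on 2025-08-24T17:21:41Z, while the node clock reads 2025-11-23T14:42:32Z. A minimal Python sketch (stdlib only, run on the node itself) that reproduces the same verification failure, assuming the webhook is still listening on that address:

    import socket
    import ssl

    HOST, PORT = "127.0.0.1", 9743  # taken from the failed webhook POST above

    ctx = ssl.create_default_context()
    try:
        with socket.create_connection((HOST, PORT), timeout=10) as sock:
            # A verifying TLS handshake fails the same way the kubelet's client does.
            with ctx.wrap_socket(sock, server_hostname=HOST) as tls:
                print("handshake OK, certificate notAfter:", tls.getpeercert()["notAfter"])
    except ssl.SSLCertVerificationError as err:
        # Prints whichever check fails first: for the certificate above that is
        # the expiry ("certificate has expired"), or an untrusted-CA error if the
        # webhook's signing CA is not in the node's trust store.
        print("verification failed:", err.verify_message)

The sketch only confirms the diagnosis; on CRC the usual remedy is to let the cluster's certificate rotation re-issue the certs on startup, or to recreate the instance.]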
event="NodeHasSufficientMemory" Nov 23 14:42:32 crc kubenswrapper[5050]: I1123 14:42:32.404180 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:32 crc kubenswrapper[5050]: I1123 14:42:32.404198 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:32 crc kubenswrapper[5050]: I1123 14:42:32.404227 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:32 crc kubenswrapper[5050]: I1123 14:42:32.404247 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:32Z","lastTransitionTime":"2025-11-23T14:42:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:32 crc kubenswrapper[5050]: I1123 14:42:32.507434 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:32 crc kubenswrapper[5050]: I1123 14:42:32.507555 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:32 crc kubenswrapper[5050]: I1123 14:42:32.507579 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:32 crc kubenswrapper[5050]: I1123 14:42:32.507612 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:32 crc kubenswrapper[5050]: I1123 14:42:32.507642 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:32Z","lastTransitionTime":"2025-11-23T14:42:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:32 crc kubenswrapper[5050]: I1123 14:42:32.548581 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gtj96" Nov 23 14:42:32 crc kubenswrapper[5050]: I1123 14:42:32.548726 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 23 14:42:32 crc kubenswrapper[5050]: E1123 14:42:32.548823 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gtj96" podUID="cc69bd19-1f49-486e-a510-d5b8461fb172" Nov 23 14:42:32 crc kubenswrapper[5050]: I1123 14:42:32.548581 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 23 14:42:32 crc kubenswrapper[5050]: E1123 14:42:32.548986 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 23 14:42:32 crc kubenswrapper[5050]: E1123 14:42:32.549065 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 23 14:42:32 crc kubenswrapper[5050]: I1123 14:42:32.610865 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:32 crc kubenswrapper[5050]: I1123 14:42:32.610936 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:32 crc kubenswrapper[5050]: I1123 14:42:32.610960 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:32 crc kubenswrapper[5050]: I1123 14:42:32.610991 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:32 crc kubenswrapper[5050]: I1123 14:42:32.611012 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:32Z","lastTransitionTime":"2025-11-23T14:42:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:32 crc kubenswrapper[5050]: I1123 14:42:32.714748 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:32 crc kubenswrapper[5050]: I1123 14:42:32.714805 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:32 crc kubenswrapper[5050]: I1123 14:42:32.714826 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:32 crc kubenswrapper[5050]: I1123 14:42:32.714857 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:32 crc kubenswrapper[5050]: I1123 14:42:32.714879 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:32Z","lastTransitionTime":"2025-11-23T14:42:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:32 crc kubenswrapper[5050]: I1123 14:42:32.818327 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:32 crc kubenswrapper[5050]: I1123 14:42:32.818383 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:32 crc kubenswrapper[5050]: I1123 14:42:32.818401 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:32 crc kubenswrapper[5050]: I1123 14:42:32.818426 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:32 crc kubenswrapper[5050]: I1123 14:42:32.818493 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:32Z","lastTransitionTime":"2025-11-23T14:42:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:32 crc kubenswrapper[5050]: I1123 14:42:32.922438 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:32 crc kubenswrapper[5050]: I1123 14:42:32.922538 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:32 crc kubenswrapper[5050]: I1123 14:42:32.922556 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:32 crc kubenswrapper[5050]: I1123 14:42:32.922581 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:32 crc kubenswrapper[5050]: I1123 14:42:32.922602 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:32Z","lastTransitionTime":"2025-11-23T14:42:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:33 crc kubenswrapper[5050]: I1123 14:42:33.025883 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:33 crc kubenswrapper[5050]: I1123 14:42:33.025949 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:33 crc kubenswrapper[5050]: I1123 14:42:33.025967 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:33 crc kubenswrapper[5050]: I1123 14:42:33.025995 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:33 crc kubenswrapper[5050]: I1123 14:42:33.026016 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:33Z","lastTransitionTime":"2025-11-23T14:42:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:33 crc kubenswrapper[5050]: I1123 14:42:33.130397 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:33 crc kubenswrapper[5050]: I1123 14:42:33.130520 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:33 crc kubenswrapper[5050]: I1123 14:42:33.130558 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:33 crc kubenswrapper[5050]: I1123 14:42:33.130599 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:33 crc kubenswrapper[5050]: I1123 14:42:33.130623 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:33Z","lastTransitionTime":"2025-11-23T14:42:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:33 crc kubenswrapper[5050]: I1123 14:42:33.234070 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:33 crc kubenswrapper[5050]: I1123 14:42:33.234138 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:33 crc kubenswrapper[5050]: I1123 14:42:33.234159 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:33 crc kubenswrapper[5050]: I1123 14:42:33.234192 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:33 crc kubenswrapper[5050]: I1123 14:42:33.234213 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:33Z","lastTransitionTime":"2025-11-23T14:42:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:33 crc kubenswrapper[5050]: I1123 14:42:33.337758 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:33 crc kubenswrapper[5050]: I1123 14:42:33.337874 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:33 crc kubenswrapper[5050]: I1123 14:42:33.337895 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:33 crc kubenswrapper[5050]: I1123 14:42:33.337920 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:33 crc kubenswrapper[5050]: I1123 14:42:33.337937 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:33Z","lastTransitionTime":"2025-11-23T14:42:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:33 crc kubenswrapper[5050]: I1123 14:42:33.441372 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:33 crc kubenswrapper[5050]: I1123 14:42:33.441488 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:33 crc kubenswrapper[5050]: I1123 14:42:33.441513 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:33 crc kubenswrapper[5050]: I1123 14:42:33.441542 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:33 crc kubenswrapper[5050]: I1123 14:42:33.441566 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:33Z","lastTransitionTime":"2025-11-23T14:42:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:33 crc kubenswrapper[5050]: I1123 14:42:33.545499 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:33 crc kubenswrapper[5050]: I1123 14:42:33.545562 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:33 crc kubenswrapper[5050]: I1123 14:42:33.545581 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:33 crc kubenswrapper[5050]: I1123 14:42:33.545607 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:33 crc kubenswrapper[5050]: I1123 14:42:33.545627 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:33Z","lastTransitionTime":"2025-11-23T14:42:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:33 crc kubenswrapper[5050]: I1123 14:42:33.548374 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 23 14:42:33 crc kubenswrapper[5050]: E1123 14:42:33.548634 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 23 14:42:33 crc kubenswrapper[5050]: I1123 14:42:33.649514 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:33 crc kubenswrapper[5050]: I1123 14:42:33.649584 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:33 crc kubenswrapper[5050]: I1123 14:42:33.649600 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:33 crc kubenswrapper[5050]: I1123 14:42:33.649629 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:33 crc kubenswrapper[5050]: I1123 14:42:33.649648 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:33Z","lastTransitionTime":"2025-11-23T14:42:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:33 crc kubenswrapper[5050]: I1123 14:42:33.753380 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:33 crc kubenswrapper[5050]: I1123 14:42:33.753514 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:33 crc kubenswrapper[5050]: I1123 14:42:33.753544 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:33 crc kubenswrapper[5050]: I1123 14:42:33.753578 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:33 crc kubenswrapper[5050]: I1123 14:42:33.753597 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:33Z","lastTransitionTime":"2025-11-23T14:42:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:33 crc kubenswrapper[5050]: I1123 14:42:33.857281 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:33 crc kubenswrapper[5050]: I1123 14:42:33.857359 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:33 crc kubenswrapper[5050]: I1123 14:42:33.857383 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:33 crc kubenswrapper[5050]: I1123 14:42:33.857416 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:33 crc kubenswrapper[5050]: I1123 14:42:33.857475 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:33Z","lastTransitionTime":"2025-11-23T14:42:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:33 crc kubenswrapper[5050]: I1123 14:42:33.961279 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:33 crc kubenswrapper[5050]: I1123 14:42:33.961344 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:33 crc kubenswrapper[5050]: I1123 14:42:33.961362 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:33 crc kubenswrapper[5050]: I1123 14:42:33.961386 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:33 crc kubenswrapper[5050]: I1123 14:42:33.961404 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:33Z","lastTransitionTime":"2025-11-23T14:42:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:34 crc kubenswrapper[5050]: I1123 14:42:34.064559 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:34 crc kubenswrapper[5050]: I1123 14:42:34.064609 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:34 crc kubenswrapper[5050]: I1123 14:42:34.064621 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:34 crc kubenswrapper[5050]: I1123 14:42:34.064639 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:34 crc kubenswrapper[5050]: I1123 14:42:34.064652 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:34Z","lastTransitionTime":"2025-11-23T14:42:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:34 crc kubenswrapper[5050]: I1123 14:42:34.168215 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:34 crc kubenswrapper[5050]: I1123 14:42:34.168286 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:34 crc kubenswrapper[5050]: I1123 14:42:34.168306 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:34 crc kubenswrapper[5050]: I1123 14:42:34.168337 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:34 crc kubenswrapper[5050]: I1123 14:42:34.168357 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:34Z","lastTransitionTime":"2025-11-23T14:42:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:34 crc kubenswrapper[5050]: I1123 14:42:34.271434 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:34 crc kubenswrapper[5050]: I1123 14:42:34.271525 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:34 crc kubenswrapper[5050]: I1123 14:42:34.271548 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:34 crc kubenswrapper[5050]: I1123 14:42:34.271578 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:34 crc kubenswrapper[5050]: I1123 14:42:34.271596 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:34Z","lastTransitionTime":"2025-11-23T14:42:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:34 crc kubenswrapper[5050]: I1123 14:42:34.374844 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:34 crc kubenswrapper[5050]: I1123 14:42:34.374928 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:34 crc kubenswrapper[5050]: I1123 14:42:34.374952 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:34 crc kubenswrapper[5050]: I1123 14:42:34.374984 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:34 crc kubenswrapper[5050]: I1123 14:42:34.375006 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:34Z","lastTransitionTime":"2025-11-23T14:42:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:34 crc kubenswrapper[5050]: I1123 14:42:34.477989 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:34 crc kubenswrapper[5050]: I1123 14:42:34.478056 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:34 crc kubenswrapper[5050]: I1123 14:42:34.478073 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:34 crc kubenswrapper[5050]: I1123 14:42:34.478098 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:34 crc kubenswrapper[5050]: I1123 14:42:34.478116 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:34Z","lastTransitionTime":"2025-11-23T14:42:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:34 crc kubenswrapper[5050]: I1123 14:42:34.548190 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 23 14:42:34 crc kubenswrapper[5050]: I1123 14:42:34.548278 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gtj96" Nov 23 14:42:34 crc kubenswrapper[5050]: E1123 14:42:34.548388 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 23 14:42:34 crc kubenswrapper[5050]: I1123 14:42:34.548421 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 23 14:42:34 crc kubenswrapper[5050]: E1123 14:42:34.548615 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 23 14:42:34 crc kubenswrapper[5050]: E1123 14:42:34.548846 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-gtj96" podUID="cc69bd19-1f49-486e-a510-d5b8461fb172" Nov 23 14:42:34 crc kubenswrapper[5050]: I1123 14:42:34.581527 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:34 crc kubenswrapper[5050]: I1123 14:42:34.581602 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:34 crc kubenswrapper[5050]: I1123 14:42:34.581629 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:34 crc kubenswrapper[5050]: I1123 14:42:34.581659 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:34 crc kubenswrapper[5050]: I1123 14:42:34.581685 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:34Z","lastTransitionTime":"2025-11-23T14:42:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:34 crc kubenswrapper[5050]: I1123 14:42:34.684710 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:34 crc kubenswrapper[5050]: I1123 14:42:34.684773 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:34 crc kubenswrapper[5050]: I1123 14:42:34.684792 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:34 crc kubenswrapper[5050]: I1123 14:42:34.684818 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:34 crc kubenswrapper[5050]: I1123 14:42:34.684839 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:34Z","lastTransitionTime":"2025-11-23T14:42:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:34 crc kubenswrapper[5050]: I1123 14:42:34.788000 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:34 crc kubenswrapper[5050]: I1123 14:42:34.788058 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:34 crc kubenswrapper[5050]: I1123 14:42:34.788074 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:34 crc kubenswrapper[5050]: I1123 14:42:34.788104 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:34 crc kubenswrapper[5050]: I1123 14:42:34.788128 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:34Z","lastTransitionTime":"2025-11-23T14:42:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:34 crc kubenswrapper[5050]: I1123 14:42:34.891992 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:34 crc kubenswrapper[5050]: I1123 14:42:34.892040 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:34 crc kubenswrapper[5050]: I1123 14:42:34.892055 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:34 crc kubenswrapper[5050]: I1123 14:42:34.892075 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:34 crc kubenswrapper[5050]: I1123 14:42:34.892090 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:34Z","lastTransitionTime":"2025-11-23T14:42:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:34 crc kubenswrapper[5050]: I1123 14:42:34.995597 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:34 crc kubenswrapper[5050]: I1123 14:42:34.995679 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:34 crc kubenswrapper[5050]: I1123 14:42:34.995705 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:34 crc kubenswrapper[5050]: I1123 14:42:34.995758 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:34 crc kubenswrapper[5050]: I1123 14:42:34.995784 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:34Z","lastTransitionTime":"2025-11-23T14:42:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:35 crc kubenswrapper[5050]: I1123 14:42:35.099130 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:35 crc kubenswrapper[5050]: I1123 14:42:35.099212 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:35 crc kubenswrapper[5050]: I1123 14:42:35.099230 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:35 crc kubenswrapper[5050]: I1123 14:42:35.099257 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:35 crc kubenswrapper[5050]: I1123 14:42:35.099278 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:35Z","lastTransitionTime":"2025-11-23T14:42:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:35 crc kubenswrapper[5050]: I1123 14:42:35.201917 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:35 crc kubenswrapper[5050]: I1123 14:42:35.201960 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:35 crc kubenswrapper[5050]: I1123 14:42:35.201969 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:35 crc kubenswrapper[5050]: I1123 14:42:35.201984 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:35 crc kubenswrapper[5050]: I1123 14:42:35.201995 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:35Z","lastTransitionTime":"2025-11-23T14:42:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:35 crc kubenswrapper[5050]: I1123 14:42:35.305415 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:35 crc kubenswrapper[5050]: I1123 14:42:35.305489 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:35 crc kubenswrapper[5050]: I1123 14:42:35.305502 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:35 crc kubenswrapper[5050]: I1123 14:42:35.305522 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:35 crc kubenswrapper[5050]: I1123 14:42:35.305535 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:35Z","lastTransitionTime":"2025-11-23T14:42:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:35 crc kubenswrapper[5050]: I1123 14:42:35.408610 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:35 crc kubenswrapper[5050]: I1123 14:42:35.408683 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:35 crc kubenswrapper[5050]: I1123 14:42:35.408701 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:35 crc kubenswrapper[5050]: I1123 14:42:35.408728 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:35 crc kubenswrapper[5050]: I1123 14:42:35.408748 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:35Z","lastTransitionTime":"2025-11-23T14:42:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:35 crc kubenswrapper[5050]: I1123 14:42:35.511740 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:35 crc kubenswrapper[5050]: I1123 14:42:35.511825 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:35 crc kubenswrapper[5050]: I1123 14:42:35.511846 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:35 crc kubenswrapper[5050]: I1123 14:42:35.511875 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:35 crc kubenswrapper[5050]: I1123 14:42:35.511896 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:35Z","lastTransitionTime":"2025-11-23T14:42:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:35 crc kubenswrapper[5050]: I1123 14:42:35.548508 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 23 14:42:35 crc kubenswrapper[5050]: E1123 14:42:35.548762 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 23 14:42:35 crc kubenswrapper[5050]: I1123 14:42:35.572392 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0a2166ba-b7f4-4eb1-95bc-4f44d90e72ea\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://358800bef7b7e16e58003aa5e2411212334a5b1a33c80d223eb7d40ad13867f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://456093b33927a232192a5fa7ee1a4228e9f37da58e5e09cb33018ae6f5b14b2c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ccfc14c36ecee0d8502415582be6053b096ea77504bf5de67f77bf6f74c0936b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\"
:{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0c0214fb48430e8263a76a8412ebe7aaeaa85c1b476c22f8bbe9a5279f35aa5d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://10cf72fab955e4ec81917127043d9e4a2f9d427a03edc22120ab2ae711fdc5a4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"ed_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1763908925\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1763908925\\\\\\\\\\\\\\\" (2025-11-23 13:42:05 +0000 UTC to 2026-11-23 13:42:05 +0000 UTC (now=2025-11-23 14:42:05.669138051 +0000 UTC))\\\\\\\"\\\\nI1123 14:42:05.669209 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1123 14:42:05.669265 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1123 14:42:05.668647 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1123 14:42:05.669361 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1123 14:42:05.669413 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1123 14:42:05.668681 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3465722692/tls.crt::/tmp/serving-cert-3465722692/tls.key\\\\\\\"\\\\nI1123 14:42:05.669629 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1123 14:42:05.669660 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1123 14:42:05.668685 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1123 14:42:05.669884 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1123 14:42:05.668719 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1123 14:42:05.669899 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1123 14:42:05.668776 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-23T14:41:59Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://beaf638703b4b64fabd10eb14bad058dc0dae74261a462e7f2fddd8aa3811cf7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a63464a6e2f79d8e5a7e0eed9f72b684e0071cad60d3a801754762be4a2735c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a63464a6e2f79d8e5a7e0eed9f72b684e0071cad60d3a801754762be4a2735c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:41:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:41:45Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:35Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:35 crc kubenswrapper[5050]: I1123 14:42:35.595830 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:35Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:35 crc kubenswrapper[5050]: I1123 14:42:35.614100 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-gtj96" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc69bd19-1f49-486e-a510-d5b8461fb172\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ltjqr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ltjqr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:20Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-gtj96\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:35Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:35 crc kubenswrapper[5050]: I1123 14:42:35.615068 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:35 crc kubenswrapper[5050]: I1123 14:42:35.615131 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:35 crc kubenswrapper[5050]: I1123 14:42:35.615152 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:35 crc kubenswrapper[5050]: I1123 14:42:35.615184 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:35 crc kubenswrapper[5050]: I1123 14:42:35.615205 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:35Z","lastTransitionTime":"2025-11-23T14:42:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:35 crc kubenswrapper[5050]: I1123 14:42:35.635545 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:35Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:35 crc kubenswrapper[5050]: I1123 14:42:35.651756 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ced068547f7e7c0807e996ab8500b30cda68a4c31363b415f66516b883221972\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:35Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:35 crc kubenswrapper[5050]: I1123 14:42:35.677537 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-7btqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"963992d0-f69e-4327-9789-4571451c1838\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c4b1570d42f2b86c042b9abcb595bf36b35789f24d12573f94503c7383849647\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8vphk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:05Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-7btqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:35Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:35 crc kubenswrapper[5050]: I1123 14:42:35.711982 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6573c043-542c-47ae-a2ba-f70b8baf60c2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d06dfc9e93891495a387aa8a8e24b4029173b25c953dff4d2e80dbb30f4b03e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd601391a5bb150716ee97b765dd442d4bf9929eb01dfd17fdce39f327b4698f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9de298aa47ed7038c4f4ed742abb7a13f112d7f417637eb11d31a1f1accf0b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfa445fb3677a09126d124eea1348466a338713c6728a7ef138d9ffa43bec9eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://af3da6e6807f5d42cb8607efbabab0c5d472ddb5c02dd1c33f9f0f3ea8eba9c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://34b2a56f4af5df28c458b7f7d4588f81564c734a117bbf4d08e1fccfa43fd387\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8563b3e8d8f18133f64756e8dfd9f30cdcdad52ed6e13c8edc39fbdee3f71e4a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8563b3e8d8f18133f64756e8dfd9f30cdcdad52ed6e13c8edc39fbdee3f71e4a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-23T14:42:29Z\\\",\\\"message\\\":\\\"tus/network-metrics-daemon-gtj96: failed to update pod openshift-multus/network-metrics-daemon-gtj96: Internal error occurred: failed calling webhook \\\\\\\"pod.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/pod?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:29Z is after 2025-08-24T17:21:41Z\\\\nI1123 14:42:29.428594 6728 ovnkube.go:599] Stopped ovnkube\\\\nI1123 14:42:29.428608 6728 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1123 14:42:29.428611 6728 obj_retry.go:303] Retry object setup: *v1.Pod openshift-network-diagnostics/network-check-source-55646444c4-trplf\\\\nI1123 14:42:29.428624 6728 obj_retry.go:365] Adding new object: *v1.Pod openshift-network-diagnostics/network-check-source-55646444c4-trplf\\\\nI1123 14:42:29.428635 6728 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1123 14:42:29.428578 6728 obj_retry.go:303] Retry object setup: *v1.Pod openshift-dns/node-resolver-7btqb\\\\nF1123 14:42:29.428723 6728 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:28Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-j8fzz_openshift-ovn-kubernetes(6573c043-542c-47ae-a2ba-f70b8baf60c2)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7009f69906eef1a76bebe08606ec9a9b7e61ce6217caaf0b784d0943db4bd51f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c68e73aa649285bedc9716826bdf5e41580614fba6de910e2dc1fa5bb524dcc\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6c68e73aa649285bedc9716826bdf5e41580614fba6de910e2dc1fa5bb524dcc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-j8fzz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:35Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:35 crc kubenswrapper[5050]: I1123 14:42:35.717800 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:35 crc kubenswrapper[5050]: I1123 14:42:35.717851 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:35 crc kubenswrapper[5050]: I1123 14:42:35.717862 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:35 crc kubenswrapper[5050]: I1123 14:42:35.717881 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:35 crc kubenswrapper[5050]: I1123 14:42:35.717891 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:35Z","lastTransitionTime":"2025-11-23T14:42:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:35 crc kubenswrapper[5050]: I1123 14:42:35.730000 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-9q4mg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b75c970-bec5-4155-89c1-3fcba8733f70\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4f4f3e4f2e058d94d8ff4dc299e19a23318e04263a4324efa766daf5f7268f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lsct2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2cc3ecb845a59d933cd05a64b4c258b18d167098d933d7b0741d7dbb43bfb328\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lsct2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:19Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-9q4mg\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:35Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:35 crc kubenswrapper[5050]: I1123 14:42:35.745651 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:35Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:35 crc kubenswrapper[5050]: I1123 14:42:35.773653 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-qvjn2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"abdac21e-18fc-460d-bd3b-73bed66b8ab9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d239bbc61595516c6e76763dd66ee795750adb765c56bb5d9c967c1e5897fcf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bggpq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-qvjn2\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:35Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:35 crc kubenswrapper[5050]: I1123 14:42:35.790630 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7cjg9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7f65245a-e7ea-4b29-9534-5dbe5a7ee271\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf73369e7515e9d61bf8fc0b0bd940f15e49a8e84b3e181dad104bd56c2d9b2d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-586ds\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:08Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7cjg9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:35Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:35 crc kubenswrapper[5050]: I1123 14:42:35.812020 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8861091e-df98-4ea6-a853-e76e7dec48c5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://374a569595342d2e7684dc020e82bd80b35965aff751215c6c926005ea040798\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://63b17cb181aa6b22312393ce35127c8e06ae80cd1feaaa001d7cb34cdc111456\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3e232fb0da11b5caaec81c7b6ae7185d121508d99fce67c77e1d5d52289dad8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7b8be59b2e91f123c3d88b93237121402f84e9f54aeeaa0fae57baaa6238140\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:41:45Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:35Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:35 crc kubenswrapper[5050]: I1123 14:42:35.820818 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:35 crc kubenswrapper[5050]: I1123 14:42:35.821039 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:35 crc kubenswrapper[5050]: I1123 14:42:35.821158 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:35 crc kubenswrapper[5050]: I1123 14:42:35.821282 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:35 crc kubenswrapper[5050]: I1123 14:42:35.821389 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:35Z","lastTransitionTime":"2025-11-23T14:42:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:35 crc kubenswrapper[5050]: I1123 14:42:35.832728 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://719b29f3f0e7ff3aa3d105c9f4d9fb8d180d13494fcef94b204dece23c0f763d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:35Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:35 crc kubenswrapper[5050]: I1123 14:42:35.859147 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac91134c1368426cb845f20e6c777b0b613d93b1761d097051a7e63dae4203a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ef730e87b3ef157558a92f93526e4dec23e71b48f5cb08a30242f143182a5d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:35Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:35 crc kubenswrapper[5050]: I1123 14:42:35.885599 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1d998909-9470-47ef-87e8-b34f0473682f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5bbc40aad4e1fff72cfeaf901bbe2edde01b909690cec06e0828aee8a4c93e8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jx4bd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b97978773311507f492064330c0ebbaf7862255a02347599c52ce6bbd13d8922\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jx4bd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-hlrlq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:35Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:35 crc kubenswrapper[5050]: I1123 14:42:35.912595 5050 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-km979" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1d7b7de2-b361-44b9-ba10-5d4f889abc9b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a024193010036bae37aa1c744cd832dced7ca4893acc76efe5c2b9db65834a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://32b4eece06b72ca080adc2e1fab7f1db357c926b47433f2d374afaa45dad6b38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://32b4eece06b72ca080adc2e1fab7f1db357c926b47433f2d374afaa45dad6b38\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86867dd85be2aec28e680e55a6fb723964f26082eff07f656c00bd79cd2d3cd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2c
c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://86867dd85be2aec28e680e55a6fb723964f26082eff07f656c00bd79cd2d3cd2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c72b02501d9b33ae056e91b10f13e50327984b1cabcd69b5066780aa4fde2682\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c72b02501d9b33ae056e91b10f13e50327984b1cabcd69b5066780aa4fde2682\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba558b2174a35efd061ce97a84b1b785916ea34d481d58cf4582a5bf29d6b24a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba558b2174a35efd061ce97a84b1b785916ea34d481d58cf4582a5bf29d6b24a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-re
lease\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9cd2247c7d101274e0f7a09b531266720489fb8ed77f89e7ccd1ee74882508d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c9cd2247c7d101274e0f7a09b531266720489fb8ed77f89e7ccd1ee74882508d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0873c53f2aa7003b92b7460595f79f52b70796f9e38c133da799b5c49785d857\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0873c53f2aa7003b92b7460595f79f52b70796f9e38c133da799b5c49785d857\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-km979\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:35Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:35 crc kubenswrapper[5050]: I1123 14:42:35.924801 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:35 crc kubenswrapper[5050]: I1123 14:42:35.924854 5050 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:35 crc kubenswrapper[5050]: I1123 14:42:35.924867 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:35 crc kubenswrapper[5050]: I1123 14:42:35.924893 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:35 crc kubenswrapper[5050]: I1123 14:42:35.924906 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:35Z","lastTransitionTime":"2025-11-23T14:42:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:36 crc kubenswrapper[5050]: I1123 14:42:36.027553 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:36 crc kubenswrapper[5050]: I1123 14:42:36.027601 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:36 crc kubenswrapper[5050]: I1123 14:42:36.027613 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:36 crc kubenswrapper[5050]: I1123 14:42:36.027635 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:36 crc kubenswrapper[5050]: I1123 14:42:36.027648 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:36Z","lastTransitionTime":"2025-11-23T14:42:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:36 crc kubenswrapper[5050]: I1123 14:42:36.130854 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:36 crc kubenswrapper[5050]: I1123 14:42:36.130902 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:36 crc kubenswrapper[5050]: I1123 14:42:36.130915 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:36 crc kubenswrapper[5050]: I1123 14:42:36.130936 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:36 crc kubenswrapper[5050]: I1123 14:42:36.130949 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:36Z","lastTransitionTime":"2025-11-23T14:42:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:36 crc kubenswrapper[5050]: I1123 14:42:36.234526 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:36 crc kubenswrapper[5050]: I1123 14:42:36.234581 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:36 crc kubenswrapper[5050]: I1123 14:42:36.234600 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:36 crc kubenswrapper[5050]: I1123 14:42:36.234624 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:36 crc kubenswrapper[5050]: I1123 14:42:36.234639 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:36Z","lastTransitionTime":"2025-11-23T14:42:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:36 crc kubenswrapper[5050]: I1123 14:42:36.266541 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/cc69bd19-1f49-486e-a510-d5b8461fb172-metrics-certs\") pod \"network-metrics-daemon-gtj96\" (UID: \"cc69bd19-1f49-486e-a510-d5b8461fb172\") " pod="openshift-multus/network-metrics-daemon-gtj96" Nov 23 14:42:36 crc kubenswrapper[5050]: E1123 14:42:36.266808 5050 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 23 14:42:36 crc kubenswrapper[5050]: E1123 14:42:36.266927 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/cc69bd19-1f49-486e-a510-d5b8461fb172-metrics-certs podName:cc69bd19-1f49-486e-a510-d5b8461fb172 nodeName:}" failed. No retries permitted until 2025-11-23 14:42:52.266901202 +0000 UTC m=+67.433897687 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/cc69bd19-1f49-486e-a510-d5b8461fb172-metrics-certs") pod "network-metrics-daemon-gtj96" (UID: "cc69bd19-1f49-486e-a510-d5b8461fb172") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 23 14:42:36 crc kubenswrapper[5050]: I1123 14:42:36.338185 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:36 crc kubenswrapper[5050]: I1123 14:42:36.338256 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:36 crc kubenswrapper[5050]: I1123 14:42:36.338278 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:36 crc kubenswrapper[5050]: I1123 14:42:36.338308 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:36 crc kubenswrapper[5050]: I1123 14:42:36.338326 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:36Z","lastTransitionTime":"2025-11-23T14:42:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:36 crc kubenswrapper[5050]: I1123 14:42:36.447238 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:36 crc kubenswrapper[5050]: I1123 14:42:36.448170 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:36 crc kubenswrapper[5050]: I1123 14:42:36.448842 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:36 crc kubenswrapper[5050]: I1123 14:42:36.448994 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:36 crc kubenswrapper[5050]: I1123 14:42:36.449145 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:36Z","lastTransitionTime":"2025-11-23T14:42:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:36 crc kubenswrapper[5050]: I1123 14:42:36.548732 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 23 14:42:36 crc kubenswrapper[5050]: I1123 14:42:36.548745 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gtj96" Nov 23 14:42:36 crc kubenswrapper[5050]: E1123 14:42:36.549006 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 23 14:42:36 crc kubenswrapper[5050]: E1123 14:42:36.549131 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gtj96" podUID="cc69bd19-1f49-486e-a510-d5b8461fb172" Nov 23 14:42:36 crc kubenswrapper[5050]: I1123 14:42:36.549330 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 23 14:42:36 crc kubenswrapper[5050]: E1123 14:42:36.549731 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 23 14:42:36 crc kubenswrapper[5050]: I1123 14:42:36.552264 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:36 crc kubenswrapper[5050]: I1123 14:42:36.552357 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:36 crc kubenswrapper[5050]: I1123 14:42:36.552379 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:36 crc kubenswrapper[5050]: I1123 14:42:36.552412 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:36 crc kubenswrapper[5050]: I1123 14:42:36.552433 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:36Z","lastTransitionTime":"2025-11-23T14:42:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:36 crc kubenswrapper[5050]: I1123 14:42:36.655967 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:36 crc kubenswrapper[5050]: I1123 14:42:36.656038 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:36 crc kubenswrapper[5050]: I1123 14:42:36.656056 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:36 crc kubenswrapper[5050]: I1123 14:42:36.656082 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:36 crc kubenswrapper[5050]: I1123 14:42:36.656103 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:36Z","lastTransitionTime":"2025-11-23T14:42:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:36 crc kubenswrapper[5050]: I1123 14:42:36.759914 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:36 crc kubenswrapper[5050]: I1123 14:42:36.759978 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:36 crc kubenswrapper[5050]: I1123 14:42:36.759995 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:36 crc kubenswrapper[5050]: I1123 14:42:36.760018 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:36 crc kubenswrapper[5050]: I1123 14:42:36.760036 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:36Z","lastTransitionTime":"2025-11-23T14:42:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:36 crc kubenswrapper[5050]: I1123 14:42:36.863972 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:36 crc kubenswrapper[5050]: I1123 14:42:36.864042 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:36 crc kubenswrapper[5050]: I1123 14:42:36.864055 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:36 crc kubenswrapper[5050]: I1123 14:42:36.864078 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:36 crc kubenswrapper[5050]: I1123 14:42:36.864093 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:36Z","lastTransitionTime":"2025-11-23T14:42:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:36 crc kubenswrapper[5050]: I1123 14:42:36.967402 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:36 crc kubenswrapper[5050]: I1123 14:42:36.967490 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:36 crc kubenswrapper[5050]: I1123 14:42:36.967510 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:36 crc kubenswrapper[5050]: I1123 14:42:36.967539 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:36 crc kubenswrapper[5050]: I1123 14:42:36.967561 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:36Z","lastTransitionTime":"2025-11-23T14:42:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:37 crc kubenswrapper[5050]: I1123 14:42:37.032504 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 23 14:42:37 crc kubenswrapper[5050]: I1123 14:42:37.054627 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0a2166ba-b7f4-4eb1-95bc-4f44d90e72ea\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://358800bef7b7e16e58003aa5e2411212334a5b1a33c80d223eb7d40ad13867f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://456093b33927a232192a5fa7ee1a4228e9f37da58e5e09cb33018ae6f5b14b2c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-op
erator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ccfc14c36ecee0d8502415582be6053b096ea77504bf5de67f77bf6f74c0936b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0c0214fb48430e8263a76a8412ebe7aaeaa85c1b476c22f8bbe9a5279f35aa5d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://10cf72fab955e4ec81917127043d9e4a2f9d427a03edc22120ab2ae711fdc5a4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"ed_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1763908925\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1763908925\\\\\\\\\\\\\\\" (2025-11-23 13:42:05 +0000 UTC to 2026-11-23 13:42:05 +0000 UTC (now=2025-11-23 14:42:05.669138051 +0000 UTC))\\\\\\\"\\\\nI1123 14:42:05.669209 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1123 14:42:05.669265 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1123 14:42:05.668647 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1123 14:42:05.669361 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1123 14:42:05.669413 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1123 14:42:05.668681 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3465722692/tls.crt::/tmp/serving-cert-3465722692/tls.key\\\\\\\"\\\\nI1123 14:42:05.669629 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1123 14:42:05.669660 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1123 
14:42:05.668685 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1123 14:42:05.669884 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1123 14:42:05.668719 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1123 14:42:05.669899 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1123 14:42:05.668776 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-23T14:41:59Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://beaf638703b4b64fabd10eb14bad058dc0dae74261a462e7f2fddd8aa3811cf7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a63464a6e2f79d8e5a7e0eed9f72b684e0071cad60d3a801754762be4a2735c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a63464a6e2f79d8e5a7e0eed9f72b684e0071cad60d3a801754762be4a2735c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:41:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:41:45Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:37Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:37 crc kubenswrapper[5050]: I1123 
14:42:37.070704 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:37 crc kubenswrapper[5050]: I1123 14:42:37.070797 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:37 crc kubenswrapper[5050]: I1123 14:42:37.070830 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:37 crc kubenswrapper[5050]: I1123 14:42:37.070866 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:37 crc kubenswrapper[5050]: I1123 14:42:37.070892 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:37Z","lastTransitionTime":"2025-11-23T14:42:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:37 crc kubenswrapper[5050]: I1123 14:42:37.074918 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:37Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:37 crc kubenswrapper[5050]: I1123 14:42:37.095722 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-gtj96" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc69bd19-1f49-486e-a510-d5b8461fb172\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ltjqr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ltjqr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:20Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-gtj96\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:37Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:37 crc kubenswrapper[5050]: I1123 14:42:37.113497 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-7btqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"963992d0-f69e-4327-9789-4571451c1838\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c4b1570d42f2b86c042b9abcb595bf36b35789f24d12573f94503c7383849647\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8vphk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:05Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-7btqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:37Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:37 crc kubenswrapper[5050]: I1123 14:42:37.145784 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6573c043-542c-47ae-a2ba-f70b8baf60c2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d06dfc9e93891495a387aa8a8e24b4029173b25c953dff4d2e80dbb30f4b03e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd601391a5bb150716ee97b765dd442d4bf9929eb01dfd17fdce39f327b4698f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9de298aa47ed7038c4f4ed742abb7a13f112d7f417637eb11d31a1f1accf0b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfa445fb3677a09126d124eea1348466a338713c6728a7ef138d9ffa43bec9eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://af3da6e6807f5d42cb8607efbabab0c5d472ddb5c02dd1c33f9f0f3ea8eba9c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://34b2a56f4af5df28c458b7f7d4588f81564c734a117bbf4d08e1fccfa43fd387\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8563b3e8d8f18133f64756e8dfd9f30cdcdad52ed6e13c8edc39fbdee3f71e4a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8563b3e8d8f18133f64756e8dfd9f30cdcdad52ed6e13c8edc39fbdee3f71e4a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-23T14:42:29Z\\\",\\\"message\\\":\\\"tus/network-metrics-daemon-gtj96: failed to update pod openshift-multus/network-metrics-daemon-gtj96: Internal error occurred: failed calling webhook \\\\\\\"pod.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/pod?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:29Z is after 2025-08-24T17:21:41Z\\\\nI1123 14:42:29.428594 6728 ovnkube.go:599] Stopped ovnkube\\\\nI1123 14:42:29.428608 6728 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1123 14:42:29.428611 6728 obj_retry.go:303] Retry object setup: *v1.Pod openshift-network-diagnostics/network-check-source-55646444c4-trplf\\\\nI1123 14:42:29.428624 6728 obj_retry.go:365] Adding new object: *v1.Pod openshift-network-diagnostics/network-check-source-55646444c4-trplf\\\\nI1123 14:42:29.428635 6728 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1123 14:42:29.428578 6728 obj_retry.go:303] Retry object setup: *v1.Pod openshift-dns/node-resolver-7btqb\\\\nF1123 14:42:29.428723 6728 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:28Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-j8fzz_openshift-ovn-kubernetes(6573c043-542c-47ae-a2ba-f70b8baf60c2)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7009f69906eef1a76bebe08606ec9a9b7e61ce6217caaf0b784d0943db4bd51f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c68e73aa649285bedc9716826bdf5e41580614fba6de910e2dc1fa5bb524dcc\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6c68e73aa649285bedc9716826bdf5e41580614fba6de910e2dc1fa5bb524dcc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-j8fzz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:37Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:37 crc kubenswrapper[5050]: I1123 14:42:37.162863 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-9q4mg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b75c970-bec5-4155-89c1-3fcba8733f70\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4f4f3e4f2e058d94d8ff4dc299e19a23318e04263a4324efa766daf5f7268f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lsct2
\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2cc3ecb845a59d933cd05a64b4c258b18d167098d933d7b0741d7dbb43bfb328\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lsct2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:19Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-9q4mg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:37Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:37 crc kubenswrapper[5050]: I1123 14:42:37.174248 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:37 crc kubenswrapper[5050]: I1123 14:42:37.174299 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:37 crc kubenswrapper[5050]: I1123 14:42:37.174511 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:37 crc kubenswrapper[5050]: I1123 14:42:37.174561 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:37 crc kubenswrapper[5050]: I1123 14:42:37.174592 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:37Z","lastTransitionTime":"2025-11-23T14:42:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:37 crc kubenswrapper[5050]: I1123 14:42:37.182198 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:37Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:37 crc kubenswrapper[5050]: I1123 14:42:37.198084 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ced068547f7e7c0807e996ab8500b30cda68a4c31363b415f66516b883221972\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:37Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:37 crc kubenswrapper[5050]: I1123 14:42:37.211207 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7cjg9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7f65245a-e7ea-4b29-9534-5dbe5a7ee271\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf73369e7515e9d61bf8fc0b0bd940f15e49a8e84b3e181dad104bd56c2d9b2d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-586ds\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:08Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7cjg9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:37Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:37 crc kubenswrapper[5050]: I1123 14:42:37.229144 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:37Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:37 crc kubenswrapper[5050]: I1123 14:42:37.250218 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-qvjn2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"abdac21e-18fc-460d-bd3b-73bed66b8ab9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d239bbc61595516c6e76763dd66ee795750adb765c56bb5d9c967c1e5897fcf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"syste
m-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bggpq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-qvjn2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:37Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:37 crc kubenswrapper[5050]: I1123 14:42:37.273612 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac91134c1368426cb845f20e6c777b0b613d93b1761d097051a7e63dae4203a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ef730e87b3ef157558a92f93526e4dec23e71b48f5cb08a30242f143182a5d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:37Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:37 crc kubenswrapper[5050]: I1123 14:42:37.279025 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:37 crc kubenswrapper[5050]: I1123 14:42:37.279068 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:37 crc kubenswrapper[5050]: I1123 14:42:37.279082 5050 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 23 14:42:37 crc kubenswrapper[5050]: I1123 14:42:37.279102 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:37 crc kubenswrapper[5050]: I1123 14:42:37.279119 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:37Z","lastTransitionTime":"2025-11-23T14:42:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:37 crc kubenswrapper[5050]: I1123 14:42:37.296812 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1d998909-9470-47ef-87e8-b34f0473682f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5bbc40aad4e1fff72cfeaf901bbe2edde01b909690cec06e0828aee8a4c93e8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jx4bd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b97978773311507f492064330c0ebbaf7862255a02347599c52ce6bbd13d8922\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\
\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jx4bd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-hlrlq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:37Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:37 crc kubenswrapper[5050]: I1123 14:42:37.322256 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-km979" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1d7b7de2-b361-44b9-ba10-5d4f889abc9b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a024193010036bae37aa1c744cd832dced7ca4893acc76efe5c2b9db65834a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://32b4eece06b72ca080adc2e1fab7f1db357c926b47433f2d374afaa45dad6b38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://32b4eece06b72ca080adc2e1fab7f1db3
57c926b47433f2d374afaa45dad6b38\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86867dd85be2aec28e680e55a6fb723964f26082eff07f656c00bd79cd2d3cd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://86867dd85be2aec28e680e55a6fb723964f26082eff07f656c00bd79cd2d3cd2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c72b02501d9b33ae056e91b10f13e50327984b1cabcd69b5066780aa4fde2682\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c72b02501d9b33ae056e91b10f13e50327984b1cabcd69b5066780aa4fde2682\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba558b2174a35efd061ce97a84b1b785916ea34d481d58cf4582a5bf29d6b24a\\\",\\\"image\\
\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba558b2174a35efd061ce97a84b1b785916ea34d481d58cf4582a5bf29d6b24a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9cd2247c7d101274e0f7a09b531266720489fb8ed77f89e7ccd1ee74882508d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c9cd2247c7d101274e0f7a09b531266720489fb8ed77f89e7ccd1ee74882508d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0873c53f2aa7003b92b7460595f79f52b70796f9e38c133da799b5c49785d857\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0873c53f2aa7003b92b7460595f79f52b70796f9e38c133da799b5c49785d857\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kub
e-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-km979\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:37Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:37 crc kubenswrapper[5050]: I1123 14:42:37.344069 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8861091e-df98-4ea6-a853-e76e7dec48c5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://374a569595342d2e7684dc020e82bd80b35965aff751215c6c926005ea040798\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://63b17cb181aa6b22312393ce35127c8e06ae80cd1feaaa001d7cb34cdc111456\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3e232fb0da11b5caaec81c7b6ae7185d121508d99fce67c77e1d5d52289dad8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-mana
ger-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7b8be59b2e91f123c3d88b93237121402f84e9f54aeeaa0fae57baaa6238140\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:41:45Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:37Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:37 crc kubenswrapper[5050]: I1123 14:42:37.366962 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://719b29f3f0e7ff3aa3d105c9f4d9fb8d180d13494fcef94b204dece23c0f763d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:37Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:37 crc kubenswrapper[5050]: I1123 14:42:37.381771 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:37 crc kubenswrapper[5050]: I1123 14:42:37.381817 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:37 crc kubenswrapper[5050]: I1123 14:42:37.381830 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:37 crc kubenswrapper[5050]: I1123 14:42:37.381849 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:37 crc kubenswrapper[5050]: I1123 14:42:37.381867 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:37Z","lastTransitionTime":"2025-11-23T14:42:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:37 crc kubenswrapper[5050]: I1123 14:42:37.455564 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 23 14:42:37 crc kubenswrapper[5050]: I1123 14:42:37.471550 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler/openshift-kube-scheduler-crc"] Nov 23 14:42:37 crc kubenswrapper[5050]: I1123 14:42:37.473768 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:37Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:37 crc kubenswrapper[5050]: I1123 14:42:37.479805 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 23 14:42:37 crc kubenswrapper[5050]: I1123 14:42:37.479948 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 23 14:42:37 crc kubenswrapper[5050]: I1123 14:42:37.479983 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 23 14:42:37 crc kubenswrapper[5050]: E1123 14:42:37.480050 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-23 14:43:09.479999643 +0000 UTC m=+84.646996158 (durationBeforeRetry 32s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 23 14:42:37 crc kubenswrapper[5050]: E1123 14:42:37.480080 5050 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 23 14:42:37 crc kubenswrapper[5050]: E1123 14:42:37.480169 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-23 14:43:09.480149817 +0000 UTC m=+84.647146302 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 23 14:42:37 crc kubenswrapper[5050]: E1123 14:42:37.480262 5050 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 23 14:42:37 crc kubenswrapper[5050]: E1123 14:42:37.480396 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-23 14:43:09.480358933 +0000 UTC m=+84.647355458 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 23 14:42:37 crc kubenswrapper[5050]: I1123 14:42:37.484606 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:37 crc kubenswrapper[5050]: I1123 14:42:37.484647 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:37 crc kubenswrapper[5050]: I1123 14:42:37.484658 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:37 crc kubenswrapper[5050]: I1123 14:42:37.484676 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:37 crc kubenswrapper[5050]: I1123 14:42:37.484688 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:37Z","lastTransitionTime":"2025-11-23T14:42:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:37 crc kubenswrapper[5050]: I1123 14:42:37.495305 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-qvjn2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"abdac21e-18fc-460d-bd3b-73bed66b8ab9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d239bbc61595516c6e76763dd66ee795750adb765c56bb5d9c967c1e5897fcf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bggpq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-qvjn2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:37Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:37 crc kubenswrapper[5050]: I1123 14:42:37.515726 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7cjg9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7f65245a-e7ea-4b29-9534-5dbe5a7ee271\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf73369e7515e9d61bf8fc0b0bd940f15e49a8e84b3e181dad104bd56c2d9b2d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-586ds\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:08Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7cjg9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:37Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:37 crc kubenswrapper[5050]: I1123 14:42:37.534863 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-km979" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1d7b7de2-b361-44b9-ba10-5d4f889abc9b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a024193010036bae37aa1c744cd832dced7ca4893acc76efe5c2b9db65834a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://32b4eece06b72ca080adc2e1fab7f1db357c926b47433f2d374afaa45dad6b38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://32b4eece06b72ca080adc2e1fab7f1db357c926b47433f2d374afaa45dad6b38\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86867dd85be2aec28e680e55a6fb723964f26082eff07f656c00bd79cd2d3cd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://86867dd85be2aec28e680e55a6fb723964f26082eff07f656c00bd79cd2d3cd2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c72b02501d9b33ae056e91b10f13e50327984b1cabcd69b5066780aa4fde2682\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c72b02501d9b33ae056e91b10f13e50327984b1cabcd69b5066780aa4fde2682\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba558b2174a35efd061ce97a84b1b785916ea34d481d58cf4582a5bf29d6b24a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba558b2174a35efd061ce97a84b1b785916ea34d481d58cf4582a5bf29d6b24a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9cd2247c7d101274e0f7a09b531266720489fb8ed77f89e7ccd1ee74882508d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c9cd2247c7d101274e0f7a09b531266720489fb8ed77f89e7ccd1ee74882508d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0873c53f2aa7003b92b7460595f79f52b70796f9e38c133da799b5c49785d857\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0873c53f2aa7003b92b7460595f79f52b70796f9e38c133da799b5c49785d857\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-km979\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:37Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:37 crc kubenswrapper[5050]: I1123 14:42:37.548065 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 23 14:42:37 crc kubenswrapper[5050]: E1123 14:42:37.548243 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 23 14:42:37 crc kubenswrapper[5050]: I1123 14:42:37.555873 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8861091e-df98-4ea6-a853-e76e7dec48c5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://374a569595342d2e7684dc020e82bd80b35965aff751215c6c926005ea040798\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://63b17cb181aa6b22312393ce35127c8e06ae80cd1feaaa001d7cb34cdc111456\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3e232fb0da11b5caaec81c7b6ae7185d121508d99fce67c77e1d5d52289dad8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287fa
af92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7b8be59b2e91f123c3d88b93237121402f84e9f54aeeaa0fae57baaa6238140\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:41:45Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:37Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:37 crc kubenswrapper[5050]: I1123 14:42:37.581138 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 23 14:42:37 crc kubenswrapper[5050]: I1123 14:42:37.581248 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 23 14:42:37 crc kubenswrapper[5050]: E1123 14:42:37.581470 5050 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 23 14:42:37 crc kubenswrapper[5050]: E1123 14:42:37.581497 5050 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 23 14:42:37 crc kubenswrapper[5050]: E1123 
14:42:37.581514 5050 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 23 14:42:37 crc kubenswrapper[5050]: E1123 14:42:37.581583 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-23 14:43:09.581561869 +0000 UTC m=+84.748558374 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 23 14:42:37 crc kubenswrapper[5050]: E1123 14:42:37.581583 5050 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 23 14:42:37 crc kubenswrapper[5050]: E1123 14:42:37.581653 5050 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 23 14:42:37 crc kubenswrapper[5050]: E1123 14:42:37.581688 5050 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 23 14:42:37 crc kubenswrapper[5050]: E1123 14:42:37.581816 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-23 14:43:09.581777034 +0000 UTC m=+84.748773559 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 23 14:42:37 crc kubenswrapper[5050]: I1123 14:42:37.593073 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://719b29f3f0e7ff3aa3d105c9f4d9fb8d180d13494fcef94b204dece23c0f763d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:37Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:37 crc kubenswrapper[5050]: I1123 14:42:37.595469 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:37 crc kubenswrapper[5050]: I1123 14:42:37.595522 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:37 crc kubenswrapper[5050]: I1123 14:42:37.595533 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:37 crc kubenswrapper[5050]: I1123 14:42:37.595561 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:37 crc kubenswrapper[5050]: I1123 14:42:37.595583 5050 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:37Z","lastTransitionTime":"2025-11-23T14:42:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:37 crc kubenswrapper[5050]: I1123 14:42:37.614628 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac91134c1368426cb845f20e6c777b0b613d93b1761d097051a7e63dae4203a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ef730e87b3ef157558a92f93526e4dec23e71b48f5cb08a30242f143182a5d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:37Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:37 crc kubenswrapper[5050]: I1123 14:42:37.632290 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1d998909-9470-47ef-87e8-b34f0473682f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5bbc40aad4e1fff72cfeaf901bbe2edde01b909690cec06e0828aee8a4c93e8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jx4bd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b97978773311507f492064330c0ebbaf7862255a02347599c52ce6bbd13d8922\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jx4bd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-hlrlq\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:37Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:37 crc kubenswrapper[5050]: I1123 14:42:37.649937 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0a2166ba-b7f4-4eb1-95bc-4f44d90e72ea\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://358800bef7b7e16e58003aa5e2411212334a5b1a33c80d223eb7d40ad13867f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://456093b33927a232192a5fa7ee1a4228e9f37da58e5e09cb33018ae6f5b14b2c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ccfc14c36ecee0d8502415582be6053b096ea77504bf5de67f77bf6f74c0936b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\
\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0c0214fb48430e8263a76a8412ebe7aaeaa85c1b476c22f8bbe9a5279f35aa5d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://10cf72fab955e4ec81917127043d9e4a2f9d427a03edc22120ab2ae711fdc5a4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"ed_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1763908925\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1763908925\\\\\\\\\\\\\\\" (2025-11-23 13:42:05 +0000 UTC to 2026-11-23 13:42:05 +0000 UTC (now=2025-11-23 14:42:05.669138051 +0000 UTC))\\\\\\\"\\\\nI1123 14:42:05.669209 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1123 14:42:05.669265 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1123 14:42:05.668647 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1123 14:42:05.669361 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1123 14:42:05.669413 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1123 14:42:05.668681 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3465722692/tls.crt::/tmp/serving-cert-3465722692/tls.key\\\\\\\"\\\\nI1123 14:42:05.669629 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1123 14:42:05.669660 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1123 14:42:05.668685 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1123 14:42:05.669884 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1123 14:42:05.668719 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1123 14:42:05.669899 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1123 14:42:05.668776 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-23T14:41:59Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://beaf638703b4b64fabd10eb14bad058dc0dae74261a462e7f2fddd8aa3811cf7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a63464a6e2f79d8e5a7e0eed9f72b684e0071cad60d3a801754762be4a2735c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a63464a6e2f79d8e5a7e0eed9f72b684e0071cad60d3a801754762be4a2735c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:41:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:41:45Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:37Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:37 crc kubenswrapper[5050]: I1123 14:42:37.668562 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:37Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:37 crc kubenswrapper[5050]: I1123 14:42:37.686909 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-gtj96" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc69bd19-1f49-486e-a510-d5b8461fb172\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ltjqr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ltjqr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:20Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-gtj96\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:37Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:37 crc kubenswrapper[5050]: I1123 14:42:37.699028 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:37 crc kubenswrapper[5050]: I1123 14:42:37.699076 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:37 crc kubenswrapper[5050]: I1123 14:42:37.699092 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:37 crc kubenswrapper[5050]: I1123 14:42:37.699115 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:37 crc kubenswrapper[5050]: I1123 14:42:37.699130 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:37Z","lastTransitionTime":"2025-11-23T14:42:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:37 crc kubenswrapper[5050]: I1123 14:42:37.704772 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-9q4mg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b75c970-bec5-4155-89c1-3fcba8733f70\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4f4f3e4f2e058d94d8ff4dc299e19a23318e04263a4324efa766daf5f7268f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lsct2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2cc3ecb845a59d933cd05a64b4c258b18d167098d933d7b0741d7dbb43bfb328\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lsct2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:19Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-9q4mg\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:37Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:37 crc kubenswrapper[5050]: I1123 14:42:37.744624 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:37Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:37 crc kubenswrapper[5050]: I1123 14:42:37.767853 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ced068547f7e7c0807e996ab8500b30cda68a4c31363b415f66516b883221972\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:37Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:37 crc kubenswrapper[5050]: I1123 14:42:37.789478 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-7btqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"963992d0-f69e-4327-9789-4571451c1838\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c4b1570d42f2b86c042b9abcb595bf36b35789f24d12573f94503c7383849647\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8vphk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:05Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-7btqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:37Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:37 crc kubenswrapper[5050]: I1123 14:42:37.802032 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:37 crc kubenswrapper[5050]: I1123 14:42:37.802227 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:37 crc kubenswrapper[5050]: I1123 14:42:37.802334 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:37 crc kubenswrapper[5050]: I1123 14:42:37.802466 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:37 crc kubenswrapper[5050]: I1123 14:42:37.802568 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:37Z","lastTransitionTime":"2025-11-23T14:42:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:37 crc kubenswrapper[5050]: I1123 14:42:37.814532 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6573c043-542c-47ae-a2ba-f70b8baf60c2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d06dfc9e93891495a387aa8a8e24b4029173b25c953dff4d2e80dbb30f4b03e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd601391a5bb150716ee97b765dd442d4bf9929eb01dfd17fdce39f327b4698f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recu
rsiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9de298aa47ed7038c4f4ed742abb7a13f112d7f417637eb11d31a1f1accf0b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfa445fb3677a09126d124eea1348466a338713c6728a7ef138d9ffa43bec9eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://af3da6e6807f5d42cb8607efbabab0c5d472ddb5c02dd1c33f9f0f3ea8eba9c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://34b2a56f4af5df28c458b7f7d4588f81564c734a117bbf4d08e1fccfa43fd387\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d7732574532
65a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8563b3e8d8f18133f64756e8dfd9f30cdcdad52ed6e13c8edc39fbdee3f71e4a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8563b3e8d8f18133f64756e8dfd9f30cdcdad52ed6e13c8edc39fbdee3f71e4a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-23T14:42:29Z\\\",\\\"message\\\":\\\"tus/network-metrics-daemon-gtj96: failed to update pod openshift-multus/network-metrics-daemon-gtj96: Internal error occurred: failed calling webhook \\\\\\\"pod.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/pod?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:29Z is after 2025-08-24T17:21:41Z\\\\nI1123 14:42:29.428594 6728 ovnkube.go:599] Stopped ovnkube\\\\nI1123 14:42:29.428608 6728 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1123 14:42:29.428611 6728 obj_retry.go:303] Retry object setup: *v1.Pod openshift-network-diagnostics/network-check-source-55646444c4-trplf\\\\nI1123 14:42:29.428624 6728 obj_retry.go:365] Adding new object: *v1.Pod openshift-network-diagnostics/network-check-source-55646444c4-trplf\\\\nI1123 14:42:29.428635 6728 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1123 14:42:29.428578 6728 obj_retry.go:303] Retry object setup: *v1.Pod openshift-dns/node-resolver-7btqb\\\\nF1123 14:42:29.428723 6728 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:28Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s 
restarting failed container=ovnkube-controller pod=ovnkube-node-j8fzz_openshift-ovn-kubernetes(6573c043-542c-47ae-a2ba-f70b8baf60c2)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7009f69906eef1a76bebe08606ec9a9b7e61ce6217caaf0b784d0943db4bd51f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c68e73aa649285bedc9716826bdf5e41580614fba6de910e2dc1fa5bb524dcc\\
\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6c68e73aa649285bedc9716826bdf5e41580614fba6de910e2dc1fa5bb524dcc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-j8fzz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:37Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:37 crc kubenswrapper[5050]: I1123 14:42:37.905169 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:37 crc kubenswrapper[5050]: I1123 14:42:37.905430 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:37 crc kubenswrapper[5050]: I1123 14:42:37.905588 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:37 crc kubenswrapper[5050]: I1123 14:42:37.905696 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:37 crc kubenswrapper[5050]: I1123 14:42:37.905787 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:37Z","lastTransitionTime":"2025-11-23T14:42:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:38 crc kubenswrapper[5050]: I1123 14:42:38.008820 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:38 crc kubenswrapper[5050]: I1123 14:42:38.008863 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:38 crc kubenswrapper[5050]: I1123 14:42:38.008875 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:38 crc kubenswrapper[5050]: I1123 14:42:38.008893 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:38 crc kubenswrapper[5050]: I1123 14:42:38.008905 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:38Z","lastTransitionTime":"2025-11-23T14:42:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:38 crc kubenswrapper[5050]: I1123 14:42:38.112655 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:38 crc kubenswrapper[5050]: I1123 14:42:38.112982 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:38 crc kubenswrapper[5050]: I1123 14:42:38.113063 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:38 crc kubenswrapper[5050]: I1123 14:42:38.113149 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:38 crc kubenswrapper[5050]: I1123 14:42:38.113243 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:38Z","lastTransitionTime":"2025-11-23T14:42:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:38 crc kubenswrapper[5050]: I1123 14:42:38.216505 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:38 crc kubenswrapper[5050]: I1123 14:42:38.216550 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:38 crc kubenswrapper[5050]: I1123 14:42:38.216589 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:38 crc kubenswrapper[5050]: I1123 14:42:38.216606 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:38 crc kubenswrapper[5050]: I1123 14:42:38.216618 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:38Z","lastTransitionTime":"2025-11-23T14:42:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:38 crc kubenswrapper[5050]: I1123 14:42:38.319610 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:38 crc kubenswrapper[5050]: I1123 14:42:38.319675 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:38 crc kubenswrapper[5050]: I1123 14:42:38.319694 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:38 crc kubenswrapper[5050]: I1123 14:42:38.319724 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:38 crc kubenswrapper[5050]: I1123 14:42:38.319744 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:38Z","lastTransitionTime":"2025-11-23T14:42:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:38 crc kubenswrapper[5050]: I1123 14:42:38.422986 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:38 crc kubenswrapper[5050]: I1123 14:42:38.423669 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:38 crc kubenswrapper[5050]: I1123 14:42:38.423710 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:38 crc kubenswrapper[5050]: I1123 14:42:38.423769 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:38 crc kubenswrapper[5050]: I1123 14:42:38.423791 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:38Z","lastTransitionTime":"2025-11-23T14:42:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:38 crc kubenswrapper[5050]: I1123 14:42:38.527527 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:38 crc kubenswrapper[5050]: I1123 14:42:38.527611 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:38 crc kubenswrapper[5050]: I1123 14:42:38.527629 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:38 crc kubenswrapper[5050]: I1123 14:42:38.527665 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:38 crc kubenswrapper[5050]: I1123 14:42:38.527688 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:38Z","lastTransitionTime":"2025-11-23T14:42:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:38 crc kubenswrapper[5050]: I1123 14:42:38.547719 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 23 14:42:38 crc kubenswrapper[5050]: I1123 14:42:38.547754 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 23 14:42:38 crc kubenswrapper[5050]: I1123 14:42:38.547836 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gtj96" Nov 23 14:42:38 crc kubenswrapper[5050]: E1123 14:42:38.548089 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 23 14:42:38 crc kubenswrapper[5050]: E1123 14:42:38.548401 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 23 14:42:38 crc kubenswrapper[5050]: E1123 14:42:38.548484 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gtj96" podUID="cc69bd19-1f49-486e-a510-d5b8461fb172" Nov 23 14:42:38 crc kubenswrapper[5050]: I1123 14:42:38.630776 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:38 crc kubenswrapper[5050]: I1123 14:42:38.630851 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:38 crc kubenswrapper[5050]: I1123 14:42:38.630863 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:38 crc kubenswrapper[5050]: I1123 14:42:38.630884 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:38 crc kubenswrapper[5050]: I1123 14:42:38.630900 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:38Z","lastTransitionTime":"2025-11-23T14:42:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:38 crc kubenswrapper[5050]: I1123 14:42:38.734380 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:38 crc kubenswrapper[5050]: I1123 14:42:38.734503 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:38 crc kubenswrapper[5050]: I1123 14:42:38.734554 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:38 crc kubenswrapper[5050]: I1123 14:42:38.734582 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:38 crc kubenswrapper[5050]: I1123 14:42:38.734605 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:38Z","lastTransitionTime":"2025-11-23T14:42:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:38 crc kubenswrapper[5050]: I1123 14:42:38.837916 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:38 crc kubenswrapper[5050]: I1123 14:42:38.837987 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:38 crc kubenswrapper[5050]: I1123 14:42:38.838007 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:38 crc kubenswrapper[5050]: I1123 14:42:38.838037 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:38 crc kubenswrapper[5050]: I1123 14:42:38.838061 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:38Z","lastTransitionTime":"2025-11-23T14:42:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:38 crc kubenswrapper[5050]: I1123 14:42:38.941815 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:38 crc kubenswrapper[5050]: I1123 14:42:38.941891 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:38 crc kubenswrapper[5050]: I1123 14:42:38.941908 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:38 crc kubenswrapper[5050]: I1123 14:42:38.941935 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:38 crc kubenswrapper[5050]: I1123 14:42:38.941955 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:38Z","lastTransitionTime":"2025-11-23T14:42:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:39 crc kubenswrapper[5050]: I1123 14:42:39.045299 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:39 crc kubenswrapper[5050]: I1123 14:42:39.045358 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:39 crc kubenswrapper[5050]: I1123 14:42:39.045374 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:39 crc kubenswrapper[5050]: I1123 14:42:39.045401 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:39 crc kubenswrapper[5050]: I1123 14:42:39.045419 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:39Z","lastTransitionTime":"2025-11-23T14:42:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:39 crc kubenswrapper[5050]: I1123 14:42:39.149855 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:39 crc kubenswrapper[5050]: I1123 14:42:39.149925 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:39 crc kubenswrapper[5050]: I1123 14:42:39.149946 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:39 crc kubenswrapper[5050]: I1123 14:42:39.149973 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:39 crc kubenswrapper[5050]: I1123 14:42:39.149995 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:39Z","lastTransitionTime":"2025-11-23T14:42:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:39 crc kubenswrapper[5050]: I1123 14:42:39.253285 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:39 crc kubenswrapper[5050]: I1123 14:42:39.253342 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:39 crc kubenswrapper[5050]: I1123 14:42:39.253359 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:39 crc kubenswrapper[5050]: I1123 14:42:39.253385 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:39 crc kubenswrapper[5050]: I1123 14:42:39.253402 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:39Z","lastTransitionTime":"2025-11-23T14:42:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:39 crc kubenswrapper[5050]: I1123 14:42:39.356484 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:39 crc kubenswrapper[5050]: I1123 14:42:39.356612 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:39 crc kubenswrapper[5050]: I1123 14:42:39.356682 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:39 crc kubenswrapper[5050]: I1123 14:42:39.356717 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:39 crc kubenswrapper[5050]: I1123 14:42:39.356740 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:39Z","lastTransitionTime":"2025-11-23T14:42:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:39 crc kubenswrapper[5050]: I1123 14:42:39.460229 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:39 crc kubenswrapper[5050]: I1123 14:42:39.460301 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:39 crc kubenswrapper[5050]: I1123 14:42:39.460325 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:39 crc kubenswrapper[5050]: I1123 14:42:39.460354 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:39 crc kubenswrapper[5050]: I1123 14:42:39.460373 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:39Z","lastTransitionTime":"2025-11-23T14:42:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:39 crc kubenswrapper[5050]: I1123 14:42:39.548656 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 23 14:42:39 crc kubenswrapper[5050]: E1123 14:42:39.550460 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 23 14:42:39 crc kubenswrapper[5050]: I1123 14:42:39.563141 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:39 crc kubenswrapper[5050]: I1123 14:42:39.563428 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:39 crc kubenswrapper[5050]: I1123 14:42:39.563505 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:39 crc kubenswrapper[5050]: I1123 14:42:39.563538 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:39 crc kubenswrapper[5050]: I1123 14:42:39.563561 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:39Z","lastTransitionTime":"2025-11-23T14:42:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:39 crc kubenswrapper[5050]: I1123 14:42:39.667429 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:39 crc kubenswrapper[5050]: I1123 14:42:39.667553 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:39 crc kubenswrapper[5050]: I1123 14:42:39.667573 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:39 crc kubenswrapper[5050]: I1123 14:42:39.667602 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:39 crc kubenswrapper[5050]: I1123 14:42:39.667621 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:39Z","lastTransitionTime":"2025-11-23T14:42:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:39 crc kubenswrapper[5050]: I1123 14:42:39.771174 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:39 crc kubenswrapper[5050]: I1123 14:42:39.771234 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:39 crc kubenswrapper[5050]: I1123 14:42:39.771252 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:39 crc kubenswrapper[5050]: I1123 14:42:39.771282 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:39 crc kubenswrapper[5050]: I1123 14:42:39.771301 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:39Z","lastTransitionTime":"2025-11-23T14:42:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:39 crc kubenswrapper[5050]: I1123 14:42:39.874488 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:39 crc kubenswrapper[5050]: I1123 14:42:39.874552 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:39 crc kubenswrapper[5050]: I1123 14:42:39.874569 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:39 crc kubenswrapper[5050]: I1123 14:42:39.874596 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:39 crc kubenswrapper[5050]: I1123 14:42:39.874615 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:39Z","lastTransitionTime":"2025-11-23T14:42:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:39 crc kubenswrapper[5050]: I1123 14:42:39.978703 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:39 crc kubenswrapper[5050]: I1123 14:42:39.978809 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:39 crc kubenswrapper[5050]: I1123 14:42:39.978827 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:39 crc kubenswrapper[5050]: I1123 14:42:39.978856 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:39 crc kubenswrapper[5050]: I1123 14:42:39.978910 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:39Z","lastTransitionTime":"2025-11-23T14:42:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:40 crc kubenswrapper[5050]: I1123 14:42:40.083056 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:40 crc kubenswrapper[5050]: I1123 14:42:40.083108 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:40 crc kubenswrapper[5050]: I1123 14:42:40.083120 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:40 crc kubenswrapper[5050]: I1123 14:42:40.083139 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:40 crc kubenswrapper[5050]: I1123 14:42:40.083151 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:40Z","lastTransitionTime":"2025-11-23T14:42:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:40 crc kubenswrapper[5050]: I1123 14:42:40.187972 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:40 crc kubenswrapper[5050]: I1123 14:42:40.188032 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:40 crc kubenswrapper[5050]: I1123 14:42:40.188050 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:40 crc kubenswrapper[5050]: I1123 14:42:40.188074 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:40 crc kubenswrapper[5050]: I1123 14:42:40.188093 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:40Z","lastTransitionTime":"2025-11-23T14:42:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:40 crc kubenswrapper[5050]: I1123 14:42:40.291648 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:40 crc kubenswrapper[5050]: I1123 14:42:40.291718 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:40 crc kubenswrapper[5050]: I1123 14:42:40.291736 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:40 crc kubenswrapper[5050]: I1123 14:42:40.291765 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:40 crc kubenswrapper[5050]: I1123 14:42:40.291781 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:40Z","lastTransitionTime":"2025-11-23T14:42:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:40 crc kubenswrapper[5050]: I1123 14:42:40.395315 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:40 crc kubenswrapper[5050]: I1123 14:42:40.395376 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:40 crc kubenswrapper[5050]: I1123 14:42:40.395397 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:40 crc kubenswrapper[5050]: I1123 14:42:40.395430 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:40 crc kubenswrapper[5050]: I1123 14:42:40.395496 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:40Z","lastTransitionTime":"2025-11-23T14:42:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:40 crc kubenswrapper[5050]: I1123 14:42:40.499227 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:40 crc kubenswrapper[5050]: I1123 14:42:40.499330 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:40 crc kubenswrapper[5050]: I1123 14:42:40.499490 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:40 crc kubenswrapper[5050]: I1123 14:42:40.499542 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:40 crc kubenswrapper[5050]: I1123 14:42:40.499588 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:40Z","lastTransitionTime":"2025-11-23T14:42:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:40 crc kubenswrapper[5050]: I1123 14:42:40.548561 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gtj96" Nov 23 14:42:40 crc kubenswrapper[5050]: I1123 14:42:40.548561 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 23 14:42:40 crc kubenswrapper[5050]: E1123 14:42:40.548825 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gtj96" podUID="cc69bd19-1f49-486e-a510-d5b8461fb172" Nov 23 14:42:40 crc kubenswrapper[5050]: I1123 14:42:40.548732 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 23 14:42:40 crc kubenswrapper[5050]: E1123 14:42:40.549118 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 23 14:42:40 crc kubenswrapper[5050]: E1123 14:42:40.549232 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 23 14:42:40 crc kubenswrapper[5050]: I1123 14:42:40.603292 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:40 crc kubenswrapper[5050]: I1123 14:42:40.603366 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:40 crc kubenswrapper[5050]: I1123 14:42:40.603393 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:40 crc kubenswrapper[5050]: I1123 14:42:40.603423 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:40 crc kubenswrapper[5050]: I1123 14:42:40.603470 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:40Z","lastTransitionTime":"2025-11-23T14:42:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:40 crc kubenswrapper[5050]: I1123 14:42:40.707334 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:40 crc kubenswrapper[5050]: I1123 14:42:40.707401 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:40 crc kubenswrapper[5050]: I1123 14:42:40.707420 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:40 crc kubenswrapper[5050]: I1123 14:42:40.707476 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:40 crc kubenswrapper[5050]: I1123 14:42:40.707497 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:40Z","lastTransitionTime":"2025-11-23T14:42:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:40 crc kubenswrapper[5050]: I1123 14:42:40.810771 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:40 crc kubenswrapper[5050]: I1123 14:42:40.810843 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:40 crc kubenswrapper[5050]: I1123 14:42:40.810862 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:40 crc kubenswrapper[5050]: I1123 14:42:40.810891 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:40 crc kubenswrapper[5050]: I1123 14:42:40.810909 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:40Z","lastTransitionTime":"2025-11-23T14:42:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:40 crc kubenswrapper[5050]: I1123 14:42:40.914614 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:40 crc kubenswrapper[5050]: I1123 14:42:40.914669 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:40 crc kubenswrapper[5050]: I1123 14:42:40.914682 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:40 crc kubenswrapper[5050]: I1123 14:42:40.914701 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:40 crc kubenswrapper[5050]: I1123 14:42:40.914714 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:40Z","lastTransitionTime":"2025-11-23T14:42:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:41 crc kubenswrapper[5050]: I1123 14:42:41.018511 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:41 crc kubenswrapper[5050]: I1123 14:42:41.018601 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:41 crc kubenswrapper[5050]: I1123 14:42:41.018627 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:41 crc kubenswrapper[5050]: I1123 14:42:41.018667 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:41 crc kubenswrapper[5050]: I1123 14:42:41.018696 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:41Z","lastTransitionTime":"2025-11-23T14:42:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:41 crc kubenswrapper[5050]: I1123 14:42:41.122012 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:41 crc kubenswrapper[5050]: I1123 14:42:41.122083 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:41 crc kubenswrapper[5050]: I1123 14:42:41.122104 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:41 crc kubenswrapper[5050]: I1123 14:42:41.122136 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:41 crc kubenswrapper[5050]: I1123 14:42:41.122174 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:41Z","lastTransitionTime":"2025-11-23T14:42:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:41 crc kubenswrapper[5050]: I1123 14:42:41.225417 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:41 crc kubenswrapper[5050]: I1123 14:42:41.225548 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:41 crc kubenswrapper[5050]: I1123 14:42:41.225570 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:41 crc kubenswrapper[5050]: I1123 14:42:41.225598 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:41 crc kubenswrapper[5050]: I1123 14:42:41.225622 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:41Z","lastTransitionTime":"2025-11-23T14:42:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:41 crc kubenswrapper[5050]: I1123 14:42:41.329205 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:41 crc kubenswrapper[5050]: I1123 14:42:41.329296 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:41 crc kubenswrapper[5050]: I1123 14:42:41.329320 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:41 crc kubenswrapper[5050]: I1123 14:42:41.329363 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:41 crc kubenswrapper[5050]: I1123 14:42:41.329389 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:41Z","lastTransitionTime":"2025-11-23T14:42:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:41 crc kubenswrapper[5050]: I1123 14:42:41.432977 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:41 crc kubenswrapper[5050]: I1123 14:42:41.433052 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:41 crc kubenswrapper[5050]: I1123 14:42:41.433072 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:41 crc kubenswrapper[5050]: I1123 14:42:41.433099 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:41 crc kubenswrapper[5050]: I1123 14:42:41.433118 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:41Z","lastTransitionTime":"2025-11-23T14:42:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:41 crc kubenswrapper[5050]: I1123 14:42:41.536270 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:41 crc kubenswrapper[5050]: I1123 14:42:41.536338 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:41 crc kubenswrapper[5050]: I1123 14:42:41.536356 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:41 crc kubenswrapper[5050]: I1123 14:42:41.536383 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:41 crc kubenswrapper[5050]: I1123 14:42:41.536403 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:41Z","lastTransitionTime":"2025-11-23T14:42:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:41 crc kubenswrapper[5050]: I1123 14:42:41.548096 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 23 14:42:41 crc kubenswrapper[5050]: E1123 14:42:41.548321 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 23 14:42:41 crc kubenswrapper[5050]: I1123 14:42:41.639757 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:41 crc kubenswrapper[5050]: I1123 14:42:41.639828 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:41 crc kubenswrapper[5050]: I1123 14:42:41.639846 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:41 crc kubenswrapper[5050]: I1123 14:42:41.639874 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:41 crc kubenswrapper[5050]: I1123 14:42:41.639894 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:41Z","lastTransitionTime":"2025-11-23T14:42:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:41 crc kubenswrapper[5050]: I1123 14:42:41.743425 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:41 crc kubenswrapper[5050]: I1123 14:42:41.743510 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:41 crc kubenswrapper[5050]: I1123 14:42:41.743528 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:41 crc kubenswrapper[5050]: I1123 14:42:41.743555 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:41 crc kubenswrapper[5050]: I1123 14:42:41.743576 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:41Z","lastTransitionTime":"2025-11-23T14:42:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:41 crc kubenswrapper[5050]: I1123 14:42:41.846563 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:41 crc kubenswrapper[5050]: I1123 14:42:41.846614 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:41 crc kubenswrapper[5050]: I1123 14:42:41.846631 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:41 crc kubenswrapper[5050]: I1123 14:42:41.846653 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:41 crc kubenswrapper[5050]: I1123 14:42:41.846670 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:41Z","lastTransitionTime":"2025-11-23T14:42:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:41 crc kubenswrapper[5050]: I1123 14:42:41.949919 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:41 crc kubenswrapper[5050]: I1123 14:42:41.949967 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:41 crc kubenswrapper[5050]: I1123 14:42:41.949977 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:41 crc kubenswrapper[5050]: I1123 14:42:41.949993 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:41 crc kubenswrapper[5050]: I1123 14:42:41.950002 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:41Z","lastTransitionTime":"2025-11-23T14:42:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:42 crc kubenswrapper[5050]: I1123 14:42:42.054648 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:42 crc kubenswrapper[5050]: I1123 14:42:42.054754 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:42 crc kubenswrapper[5050]: I1123 14:42:42.054781 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:42 crc kubenswrapper[5050]: I1123 14:42:42.054815 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:42 crc kubenswrapper[5050]: I1123 14:42:42.054841 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:42Z","lastTransitionTime":"2025-11-23T14:42:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:42 crc kubenswrapper[5050]: I1123 14:42:42.157405 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:42 crc kubenswrapper[5050]: I1123 14:42:42.157548 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:42 crc kubenswrapper[5050]: I1123 14:42:42.157582 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:42 crc kubenswrapper[5050]: I1123 14:42:42.157616 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:42 crc kubenswrapper[5050]: I1123 14:42:42.157640 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:42Z","lastTransitionTime":"2025-11-23T14:42:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:42 crc kubenswrapper[5050]: I1123 14:42:42.261335 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:42 crc kubenswrapper[5050]: I1123 14:42:42.261407 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:42 crc kubenswrapper[5050]: I1123 14:42:42.261425 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:42 crc kubenswrapper[5050]: I1123 14:42:42.261492 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:42 crc kubenswrapper[5050]: I1123 14:42:42.261512 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:42Z","lastTransitionTime":"2025-11-23T14:42:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:42 crc kubenswrapper[5050]: I1123 14:42:42.365640 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:42 crc kubenswrapper[5050]: I1123 14:42:42.365718 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:42 crc kubenswrapper[5050]: I1123 14:42:42.365741 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:42 crc kubenswrapper[5050]: I1123 14:42:42.365775 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:42 crc kubenswrapper[5050]: I1123 14:42:42.365800 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:42Z","lastTransitionTime":"2025-11-23T14:42:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:42 crc kubenswrapper[5050]: I1123 14:42:42.469749 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:42 crc kubenswrapper[5050]: I1123 14:42:42.469831 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:42 crc kubenswrapper[5050]: I1123 14:42:42.469851 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:42 crc kubenswrapper[5050]: I1123 14:42:42.469884 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:42 crc kubenswrapper[5050]: I1123 14:42:42.469905 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:42Z","lastTransitionTime":"2025-11-23T14:42:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:42 crc kubenswrapper[5050]: I1123 14:42:42.547573 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 23 14:42:42 crc kubenswrapper[5050]: I1123 14:42:42.547648 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gtj96" Nov 23 14:42:42 crc kubenswrapper[5050]: I1123 14:42:42.547597 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 23 14:42:42 crc kubenswrapper[5050]: E1123 14:42:42.547800 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 23 14:42:42 crc kubenswrapper[5050]: E1123 14:42:42.547944 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 23 14:42:42 crc kubenswrapper[5050]: E1123 14:42:42.548084 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-gtj96" podUID="cc69bd19-1f49-486e-a510-d5b8461fb172" Nov 23 14:42:42 crc kubenswrapper[5050]: I1123 14:42:42.573612 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:42 crc kubenswrapper[5050]: I1123 14:42:42.573679 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:42 crc kubenswrapper[5050]: I1123 14:42:42.573698 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:42 crc kubenswrapper[5050]: I1123 14:42:42.573728 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:42 crc kubenswrapper[5050]: I1123 14:42:42.573748 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:42Z","lastTransitionTime":"2025-11-23T14:42:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:42 crc kubenswrapper[5050]: I1123 14:42:42.605751 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:42 crc kubenswrapper[5050]: I1123 14:42:42.605813 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:42 crc kubenswrapper[5050]: I1123 14:42:42.605831 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:42 crc kubenswrapper[5050]: I1123 14:42:42.605858 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:42 crc kubenswrapper[5050]: I1123 14:42:42.605878 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:42Z","lastTransitionTime":"2025-11-23T14:42:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 23 14:42:42 crc kubenswrapper[5050]: E1123 14:42:42.629804 5050 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:42:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:42Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:42:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:42Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:42:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:42Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:42:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:42Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bf8d1c9f-d59a-4fb5-8c40-1d0a144375f2\\\",\\\"systemUUID\\\":\\\"6e38a339-19e4-4f40-bd49-1fbe05dbb3f1\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:42Z is after 2025-08-24T17:21:41Z"
Nov 23 14:42:42 crc kubenswrapper[5050]: I1123 14:42:42.635519 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 23 14:42:42 crc kubenswrapper[5050]: I1123 14:42:42.635575 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
event="NodeHasNoDiskPressure" Nov 23 14:42:42 crc kubenswrapper[5050]: I1123 14:42:42.635594 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:42 crc kubenswrapper[5050]: I1123 14:42:42.635621 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:42 crc kubenswrapper[5050]: I1123 14:42:42.635639 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:42Z","lastTransitionTime":"2025-11-23T14:42:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:42 crc kubenswrapper[5050]: E1123 14:42:42.663863 5050 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:42:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:42Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:42:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:42Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:42:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:42Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:42:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:42Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bf8d1c9f-d59a-4fb5-8c40-1d0a144375f2\\\",\\\"systemUUID\\\":\\\"6e38a339-19e4-4f40-bd49-1fbe05dbb3f1\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:42Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:42 crc kubenswrapper[5050]: I1123 14:42:42.669806 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:42 crc kubenswrapper[5050]: I1123 14:42:42.669885 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 23 14:42:42 crc kubenswrapper[5050]: I1123 14:42:42.669906 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:42 crc kubenswrapper[5050]: I1123 14:42:42.669955 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:42 crc kubenswrapper[5050]: I1123 14:42:42.669978 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:42Z","lastTransitionTime":"2025-11-23T14:42:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:42 crc kubenswrapper[5050]: E1123 14:42:42.692703 5050 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:42:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:42Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:42:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:42Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:42:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:42Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:42:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:42Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bf8d1c9f-d59a-4fb5-8c40-1d0a144375f2\\\",\\\"systemUUID\\\":\\\"6e38a339-19e4-4f40-bd49-1fbe05dbb3f1\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:42Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:42 crc kubenswrapper[5050]: I1123 14:42:42.699179 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:42 crc kubenswrapper[5050]: I1123 14:42:42.699313 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 23 14:42:42 crc kubenswrapper[5050]: I1123 14:42:42.699335 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:42 crc kubenswrapper[5050]: I1123 14:42:42.699361 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:42 crc kubenswrapper[5050]: I1123 14:42:42.699381 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:42Z","lastTransitionTime":"2025-11-23T14:42:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:42 crc kubenswrapper[5050]: E1123 14:42:42.721410 5050 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:42:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:42Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:42:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:42Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:42:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:42Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:42:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:42Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bf8d1c9f-d59a-4fb5-8c40-1d0a144375f2\\\",\\\"systemUUID\\\":\\\"6e38a339-19e4-4f40-bd49-1fbe05dbb3f1\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:42Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:42 crc kubenswrapper[5050]: I1123 14:42:42.727199 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:42 crc kubenswrapper[5050]: I1123 14:42:42.727254 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 23 14:42:42 crc kubenswrapper[5050]: I1123 14:42:42.727272 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:42 crc kubenswrapper[5050]: I1123 14:42:42.727299 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:42 crc kubenswrapper[5050]: I1123 14:42:42.727317 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:42Z","lastTransitionTime":"2025-11-23T14:42:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:42 crc kubenswrapper[5050]: E1123 14:42:42.751667 5050 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:42:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:42Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:42:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:42Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:42:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:42Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:42:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:42Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bf8d1c9f-d59a-4fb5-8c40-1d0a144375f2\\\",\\\"systemUUID\\\":\\\"6e38a339-19e4-4f40-bd49-1fbe05dbb3f1\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:42Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:42 crc kubenswrapper[5050]: E1123 14:42:42.751897 5050 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 23 14:42:42 crc kubenswrapper[5050]: I1123 14:42:42.754390 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 23 14:42:42 crc kubenswrapper[5050]: I1123 14:42:42.754478 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:42 crc kubenswrapper[5050]: I1123 14:42:42.754508 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:42 crc kubenswrapper[5050]: I1123 14:42:42.754539 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:42 crc kubenswrapper[5050]: I1123 14:42:42.754561 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:42Z","lastTransitionTime":"2025-11-23T14:42:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:42 crc kubenswrapper[5050]: I1123 14:42:42.858199 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:42 crc kubenswrapper[5050]: I1123 14:42:42.858262 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:42 crc kubenswrapper[5050]: I1123 14:42:42.858282 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:42 crc kubenswrapper[5050]: I1123 14:42:42.858309 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:42 crc kubenswrapper[5050]: I1123 14:42:42.858334 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:42Z","lastTransitionTime":"2025-11-23T14:42:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:42 crc kubenswrapper[5050]: I1123 14:42:42.961864 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:42 crc kubenswrapper[5050]: I1123 14:42:42.961925 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:42 crc kubenswrapper[5050]: I1123 14:42:42.961946 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:42 crc kubenswrapper[5050]: I1123 14:42:42.961975 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:42 crc kubenswrapper[5050]: I1123 14:42:42.961994 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:42Z","lastTransitionTime":"2025-11-23T14:42:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:43 crc kubenswrapper[5050]: I1123 14:42:43.065278 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:43 crc kubenswrapper[5050]: I1123 14:42:43.065369 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:43 crc kubenswrapper[5050]: I1123 14:42:43.065398 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:43 crc kubenswrapper[5050]: I1123 14:42:43.065432 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:43 crc kubenswrapper[5050]: I1123 14:42:43.065491 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:43Z","lastTransitionTime":"2025-11-23T14:42:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:43 crc kubenswrapper[5050]: I1123 14:42:43.168277 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:43 crc kubenswrapper[5050]: I1123 14:42:43.168346 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:43 crc kubenswrapper[5050]: I1123 14:42:43.168364 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:43 crc kubenswrapper[5050]: I1123 14:42:43.168394 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:43 crc kubenswrapper[5050]: I1123 14:42:43.168423 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:43Z","lastTransitionTime":"2025-11-23T14:42:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:43 crc kubenswrapper[5050]: I1123 14:42:43.271932 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:43 crc kubenswrapper[5050]: I1123 14:42:43.272014 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:43 crc kubenswrapper[5050]: I1123 14:42:43.272038 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:43 crc kubenswrapper[5050]: I1123 14:42:43.272069 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:43 crc kubenswrapper[5050]: I1123 14:42:43.272092 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:43Z","lastTransitionTime":"2025-11-23T14:42:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:43 crc kubenswrapper[5050]: I1123 14:42:43.374905 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:43 crc kubenswrapper[5050]: I1123 14:42:43.374965 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:43 crc kubenswrapper[5050]: I1123 14:42:43.374986 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:43 crc kubenswrapper[5050]: I1123 14:42:43.375012 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:43 crc kubenswrapper[5050]: I1123 14:42:43.375045 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:43Z","lastTransitionTime":"2025-11-23T14:42:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:43 crc kubenswrapper[5050]: I1123 14:42:43.478788 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:43 crc kubenswrapper[5050]: I1123 14:42:43.478860 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:43 crc kubenswrapper[5050]: I1123 14:42:43.478888 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:43 crc kubenswrapper[5050]: I1123 14:42:43.478921 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:43 crc kubenswrapper[5050]: I1123 14:42:43.478945 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:43Z","lastTransitionTime":"2025-11-23T14:42:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:43 crc kubenswrapper[5050]: I1123 14:42:43.547815 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 23 14:42:43 crc kubenswrapper[5050]: E1123 14:42:43.548000 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 23 14:42:43 crc kubenswrapper[5050]: I1123 14:42:43.582260 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:43 crc kubenswrapper[5050]: I1123 14:42:43.582392 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:43 crc kubenswrapper[5050]: I1123 14:42:43.582410 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:43 crc kubenswrapper[5050]: I1123 14:42:43.582440 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:43 crc kubenswrapper[5050]: I1123 14:42:43.582502 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:43Z","lastTransitionTime":"2025-11-23T14:42:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:43 crc kubenswrapper[5050]: I1123 14:42:43.686252 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:43 crc kubenswrapper[5050]: I1123 14:42:43.686337 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:43 crc kubenswrapper[5050]: I1123 14:42:43.686355 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:43 crc kubenswrapper[5050]: I1123 14:42:43.686382 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:43 crc kubenswrapper[5050]: I1123 14:42:43.686401 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:43Z","lastTransitionTime":"2025-11-23T14:42:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:43 crc kubenswrapper[5050]: I1123 14:42:43.790223 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:43 crc kubenswrapper[5050]: I1123 14:42:43.790303 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:43 crc kubenswrapper[5050]: I1123 14:42:43.790328 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:43 crc kubenswrapper[5050]: I1123 14:42:43.790355 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:43 crc kubenswrapper[5050]: I1123 14:42:43.790401 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:43Z","lastTransitionTime":"2025-11-23T14:42:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:43 crc kubenswrapper[5050]: I1123 14:42:43.893800 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:43 crc kubenswrapper[5050]: I1123 14:42:43.893866 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:43 crc kubenswrapper[5050]: I1123 14:42:43.893883 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:43 crc kubenswrapper[5050]: I1123 14:42:43.893909 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:43 crc kubenswrapper[5050]: I1123 14:42:43.893927 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:43Z","lastTransitionTime":"2025-11-23T14:42:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:43 crc kubenswrapper[5050]: I1123 14:42:43.996565 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:43 crc kubenswrapper[5050]: I1123 14:42:43.996625 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:43 crc kubenswrapper[5050]: I1123 14:42:43.996641 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:43 crc kubenswrapper[5050]: I1123 14:42:43.996669 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:43 crc kubenswrapper[5050]: I1123 14:42:43.996692 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:43Z","lastTransitionTime":"2025-11-23T14:42:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:44 crc kubenswrapper[5050]: I1123 14:42:44.099576 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:44 crc kubenswrapper[5050]: I1123 14:42:44.099633 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:44 crc kubenswrapper[5050]: I1123 14:42:44.099645 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:44 crc kubenswrapper[5050]: I1123 14:42:44.099669 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:44 crc kubenswrapper[5050]: I1123 14:42:44.099683 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:44Z","lastTransitionTime":"2025-11-23T14:42:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:44 crc kubenswrapper[5050]: I1123 14:42:44.203159 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:44 crc kubenswrapper[5050]: I1123 14:42:44.203268 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:44 crc kubenswrapper[5050]: I1123 14:42:44.203303 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:44 crc kubenswrapper[5050]: I1123 14:42:44.203337 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:44 crc kubenswrapper[5050]: I1123 14:42:44.203360 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:44Z","lastTransitionTime":"2025-11-23T14:42:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:44 crc kubenswrapper[5050]: I1123 14:42:44.306795 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:44 crc kubenswrapper[5050]: I1123 14:42:44.306874 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:44 crc kubenswrapper[5050]: I1123 14:42:44.306892 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:44 crc kubenswrapper[5050]: I1123 14:42:44.306919 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:44 crc kubenswrapper[5050]: I1123 14:42:44.306939 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:44Z","lastTransitionTime":"2025-11-23T14:42:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:44 crc kubenswrapper[5050]: I1123 14:42:44.410527 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:44 crc kubenswrapper[5050]: I1123 14:42:44.410604 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:44 crc kubenswrapper[5050]: I1123 14:42:44.410622 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:44 crc kubenswrapper[5050]: I1123 14:42:44.410654 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:44 crc kubenswrapper[5050]: I1123 14:42:44.410676 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:44Z","lastTransitionTime":"2025-11-23T14:42:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:44 crc kubenswrapper[5050]: I1123 14:42:44.514640 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:44 crc kubenswrapper[5050]: I1123 14:42:44.514720 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:44 crc kubenswrapper[5050]: I1123 14:42:44.514738 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:44 crc kubenswrapper[5050]: I1123 14:42:44.514768 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:44 crc kubenswrapper[5050]: I1123 14:42:44.514791 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:44Z","lastTransitionTime":"2025-11-23T14:42:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:44 crc kubenswrapper[5050]: I1123 14:42:44.548510 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 23 14:42:44 crc kubenswrapper[5050]: I1123 14:42:44.548627 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gtj96" Nov 23 14:42:44 crc kubenswrapper[5050]: I1123 14:42:44.548687 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 23 14:42:44 crc kubenswrapper[5050]: E1123 14:42:44.548789 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 23 14:42:44 crc kubenswrapper[5050]: E1123 14:42:44.549042 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 23 14:42:44 crc kubenswrapper[5050]: E1123 14:42:44.549145 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gtj96" podUID="cc69bd19-1f49-486e-a510-d5b8461fb172" Nov 23 14:42:44 crc kubenswrapper[5050]: I1123 14:42:44.550535 5050 scope.go:117] "RemoveContainer" containerID="8563b3e8d8f18133f64756e8dfd9f30cdcdad52ed6e13c8edc39fbdee3f71e4a" Nov 23 14:42:44 crc kubenswrapper[5050]: E1123 14:42:44.550864 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-j8fzz_openshift-ovn-kubernetes(6573c043-542c-47ae-a2ba-f70b8baf60c2)\"" pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" podUID="6573c043-542c-47ae-a2ba-f70b8baf60c2" Nov 23 14:42:44 crc kubenswrapper[5050]: I1123 14:42:44.619399 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:44 crc kubenswrapper[5050]: I1123 14:42:44.619564 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:44 crc kubenswrapper[5050]: I1123 14:42:44.619596 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:44 crc kubenswrapper[5050]: I1123 14:42:44.619635 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:44 crc kubenswrapper[5050]: I1123 14:42:44.619658 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:44Z","lastTransitionTime":"2025-11-23T14:42:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:44 crc kubenswrapper[5050]: I1123 14:42:44.723562 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:44 crc kubenswrapper[5050]: I1123 14:42:44.723633 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:44 crc kubenswrapper[5050]: I1123 14:42:44.723649 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:44 crc kubenswrapper[5050]: I1123 14:42:44.723674 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:44 crc kubenswrapper[5050]: I1123 14:42:44.723696 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:44Z","lastTransitionTime":"2025-11-23T14:42:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:44 crc kubenswrapper[5050]: I1123 14:42:44.826500 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:44 crc kubenswrapper[5050]: I1123 14:42:44.826577 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:44 crc kubenswrapper[5050]: I1123 14:42:44.826600 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:44 crc kubenswrapper[5050]: I1123 14:42:44.826633 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:44 crc kubenswrapper[5050]: I1123 14:42:44.826656 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:44Z","lastTransitionTime":"2025-11-23T14:42:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:44 crc kubenswrapper[5050]: I1123 14:42:44.930533 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:44 crc kubenswrapper[5050]: I1123 14:42:44.930585 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:44 crc kubenswrapper[5050]: I1123 14:42:44.930597 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:44 crc kubenswrapper[5050]: I1123 14:42:44.930618 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:44 crc kubenswrapper[5050]: I1123 14:42:44.930632 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:44Z","lastTransitionTime":"2025-11-23T14:42:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:45 crc kubenswrapper[5050]: I1123 14:42:45.034755 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:45 crc kubenswrapper[5050]: I1123 14:42:45.034824 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:45 crc kubenswrapper[5050]: I1123 14:42:45.034842 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:45 crc kubenswrapper[5050]: I1123 14:42:45.034867 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:45 crc kubenswrapper[5050]: I1123 14:42:45.034884 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:45Z","lastTransitionTime":"2025-11-23T14:42:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:45 crc kubenswrapper[5050]: I1123 14:42:45.138587 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:45 crc kubenswrapper[5050]: I1123 14:42:45.138673 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:45 crc kubenswrapper[5050]: I1123 14:42:45.138692 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:45 crc kubenswrapper[5050]: I1123 14:42:45.138723 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:45 crc kubenswrapper[5050]: I1123 14:42:45.138759 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:45Z","lastTransitionTime":"2025-11-23T14:42:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:45 crc kubenswrapper[5050]: I1123 14:42:45.241838 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:45 crc kubenswrapper[5050]: I1123 14:42:45.241977 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:45 crc kubenswrapper[5050]: I1123 14:42:45.242003 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:45 crc kubenswrapper[5050]: I1123 14:42:45.242040 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:45 crc kubenswrapper[5050]: I1123 14:42:45.242065 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:45Z","lastTransitionTime":"2025-11-23T14:42:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:45 crc kubenswrapper[5050]: I1123 14:42:45.345398 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:45 crc kubenswrapper[5050]: I1123 14:42:45.345547 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:45 crc kubenswrapper[5050]: I1123 14:42:45.345581 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:45 crc kubenswrapper[5050]: I1123 14:42:45.345613 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:45 crc kubenswrapper[5050]: I1123 14:42:45.345636 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:45Z","lastTransitionTime":"2025-11-23T14:42:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:45 crc kubenswrapper[5050]: I1123 14:42:45.448438 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:45 crc kubenswrapper[5050]: I1123 14:42:45.448523 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:45 crc kubenswrapper[5050]: I1123 14:42:45.448531 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:45 crc kubenswrapper[5050]: I1123 14:42:45.448549 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:45 crc kubenswrapper[5050]: I1123 14:42:45.448561 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:45Z","lastTransitionTime":"2025-11-23T14:42:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:45 crc kubenswrapper[5050]: I1123 14:42:45.547624 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 23 14:42:45 crc kubenswrapper[5050]: E1123 14:42:45.547864 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 23 14:42:45 crc kubenswrapper[5050]: I1123 14:42:45.551846 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:45 crc kubenswrapper[5050]: I1123 14:42:45.551875 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:45 crc kubenswrapper[5050]: I1123 14:42:45.551911 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:45 crc kubenswrapper[5050]: I1123 14:42:45.551927 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:45 crc kubenswrapper[5050]: I1123 14:42:45.551937 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:45Z","lastTransitionTime":"2025-11-23T14:42:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:45 crc kubenswrapper[5050]: I1123 14:42:45.569031 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"28a3f438-bef5-4ff1-a080-80490963a204\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7eb678421bc625e01448e7eeb5914cc18880228c6d483129f5e0565dc2d00e49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://65320ae48f1453657b5ff41429b099e308d536f2b7e176941a1f258e77922eeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a93
80066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://60d5ec902c3bb132cd6e58cfbb8e7fdbaf26f34ca50654f9bd028dbb05d7aaec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ced1c27a73cf758143ad23a5852d2211c66925e2c429b91954344f6d78ad5ffd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ced1c27a73cf758143ad23a5852d2211c66925e2c429b91954344f6d78ad5ffd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:41:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:41:46Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:41:45Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:45Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:45 crc kubenswrapper[5050]: I1123 14:42:45.584039 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:45Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:45 crc kubenswrapper[5050]: I1123 14:42:45.596722 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-qvjn2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"abdac21e-18fc-460d-bd3b-73bed66b8ab9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d239bbc61595516c6e76763dd66ee795750adb765c56bb5d9c967c1e5897fcf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bggpq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-qvjn2\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:45Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:45 crc kubenswrapper[5050]: I1123 14:42:45.611301 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7cjg9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7f65245a-e7ea-4b29-9534-5dbe5a7ee271\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf73369e7515e9d61bf8fc0b0bd940f15e49a8e84b3e181dad104bd56c2d9b2d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-586ds\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:08Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7cjg9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:45Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:45 crc kubenswrapper[5050]: I1123 14:42:45.628952 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-km979" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1d7b7de2-b361-44b9-ba10-5d4f889abc9b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a024193010036bae37aa1c744cd832dced7ca4893acc76efe5c2b9db65834a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://32b4eece06b72ca080adc2e1fab7f1db357c926b47433f2d374afaa45dad6b38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://32b4eece06b72ca080adc2e1fab7f1db357c926b47433f2d374afaa45dad6b38\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86867dd85be2aec28e680e55a6fb723964f26082eff07f656c00bd79cd2d3cd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://86867dd85be2aec28e680e55a6fb723964f26082eff07f656c00bd79cd2d3cd2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c72b02501d9b33ae056e91b10f13e50327984b1cabcd69b5066780aa4fde2682\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c72b02501d9b33ae056e91b10f13e50327984b1cabcd69b5066780aa4fde2682\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba558b2174a35efd061ce97a84b1b785916ea34d481d58cf4582a5bf29d6b24a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba558b2174a35efd061ce97a84b1b785916ea34d481d58cf4582a5bf29d6b24a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9cd2247c7d101274e0f7a09b531266720489fb8ed77f89e7ccd1ee74882508d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c9cd2247c7d101274e0f7a09b531266720489fb8ed77f89e7ccd1ee74882508d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0873c53f2aa7003b92b7460595f79f52b70796f9e38c133da799b5c49785d857\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0873c53f2aa7003b92b7460595f79f52b70796f9e38c133da799b5c49785d857\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-km979\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:45Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:45 crc kubenswrapper[5050]: I1123 14:42:45.643661 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8861091e-df98-4ea6-a853-e76e7dec48c5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://374a569595342d2e7684dc020e82bd80b35965aff751215c6c926005ea040798\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://63b17cb181aa6b22312393ce35127c8e06ae80cd1feaaa001d7cb34cdc111456\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3e232fb0da11b5caaec81c7b6ae7185d121508d99fce67c77e1d5d52289dad8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7b8be59b2e91f123c3d88b93237121402f84e9f54aeeaa0fae57baaa6238140\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:41:45Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:45Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:45 crc kubenswrapper[5050]: I1123 14:42:45.655430 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:45 crc kubenswrapper[5050]: I1123 14:42:45.655502 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:45 crc kubenswrapper[5050]: I1123 14:42:45.655513 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:45 crc kubenswrapper[5050]: I1123 14:42:45.655534 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:45 crc kubenswrapper[5050]: I1123 14:42:45.655546 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:45Z","lastTransitionTime":"2025-11-23T14:42:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:45 crc kubenswrapper[5050]: I1123 14:42:45.664847 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://719b29f3f0e7ff3aa3d105c9f4d9fb8d180d13494fcef94b204dece23c0f763d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:45Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:45 crc kubenswrapper[5050]: I1123 14:42:45.683025 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac91134c1368426cb845f20e6c777b0b613d93b1761d097051a7e63dae4203a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ef730e87b3ef157558a92f93526e4dec23e71b48f5cb08a30242f143182a5d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:45Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:45 crc kubenswrapper[5050]: I1123 14:42:45.695808 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1d998909-9470-47ef-87e8-b34f0473682f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5bbc40aad4e1fff72cfeaf901bbe2edde01b909690cec06e0828aee8a4c93e8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jx4bd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b97978773311507f492064330c0ebbaf7862255a02347599c52ce6bbd13d8922\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jx4bd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-hlrlq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:45Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:45 crc kubenswrapper[5050]: I1123 14:42:45.711285 5050 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0a2166ba-b7f4-4eb1-95bc-4f44d90e72ea\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://358800bef7b7e16e58003aa5e2411212334a5b1a33c80d223eb7d40ad13867f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://456093b33927a232192a5fa7ee1a4228e9f37da58e5e09cb33018ae6f5b14b2c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ccfc14c36ecee0d8502415582be6053b096ea77504bf5de67f77bf6f74c0936b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0c0214fb48430e8263a76a8412ebe7aaeaa85c1b476
c22f8bbe9a5279f35aa5d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://10cf72fab955e4ec81917127043d9e4a2f9d427a03edc22120ab2ae711fdc5a4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"ed_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1763908925\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1763908925\\\\\\\\\\\\\\\" (2025-11-23 13:42:05 +0000 UTC to 2026-11-23 13:42:05 +0000 UTC (now=2025-11-23 14:42:05.669138051 +0000 UTC))\\\\\\\"\\\\nI1123 14:42:05.669209 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1123 14:42:05.669265 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1123 14:42:05.668647 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1123 14:42:05.669361 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1123 14:42:05.669413 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1123 14:42:05.668681 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3465722692/tls.crt::/tmp/serving-cert-3465722692/tls.key\\\\\\\"\\\\nI1123 14:42:05.669629 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1123 14:42:05.669660 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1123 14:42:05.668685 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1123 14:42:05.669884 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1123 14:42:05.668719 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1123 14:42:05.669899 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1123 14:42:05.668776 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-23T14:41:59Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://beaf638703b4b64fabd10eb14bad058dc0dae74261a462e7f2fddd8aa3811cf7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a63464a6e2f79d8e5a7e0eed9f72b684e0071cad60d3a801754762be4a2735c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a63464a6e2f79d8e5a7e0eed9f72b684e0071cad60d3a801754762be4a2735c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:41:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:41:45Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:45Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:45 crc kubenswrapper[5050]: I1123 14:42:45.726977 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:45Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:45 crc kubenswrapper[5050]: I1123 14:42:45.739626 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-gtj96" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc69bd19-1f49-486e-a510-d5b8461fb172\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ltjqr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ltjqr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:20Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-gtj96\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:45Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:45 crc kubenswrapper[5050]: I1123 14:42:45.756438 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-9q4mg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b75c970-bec5-4155-89c1-3fcba8733f70\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4f4f3e4f2e058d94d8ff4dc299e19a23318e04263a4324efa766daf5f7268f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lsct2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2cc3ecb845a59d933cd05a64b4c258b18d167098d933d7b0741d7dbb43bfb328\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lsct2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:19Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-9q4mg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:45Z is after 2025-08-24T17:21:41Z" Nov 23 
14:42:45 crc kubenswrapper[5050]: I1123 14:42:45.759255 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:45 crc kubenswrapper[5050]: I1123 14:42:45.759305 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:45 crc kubenswrapper[5050]: I1123 14:42:45.759316 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:45 crc kubenswrapper[5050]: I1123 14:42:45.759333 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:45 crc kubenswrapper[5050]: I1123 14:42:45.759346 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:45Z","lastTransitionTime":"2025-11-23T14:42:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:45 crc kubenswrapper[5050]: I1123 14:42:45.770252 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:45Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:45 crc kubenswrapper[5050]: I1123 14:42:45.783538 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ced068547f7e7c0807e996ab8500b30cda68a4c31363b415f66516b883221972\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:45Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:45 crc kubenswrapper[5050]: I1123 14:42:45.796004 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-7btqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"963992d0-f69e-4327-9789-4571451c1838\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c4b1570d42f2b86c042b9abcb595bf36b35789f24d12573f94503c7383849647\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8vphk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:05Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-7btqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:45Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:45 crc kubenswrapper[5050]: I1123 14:42:45.822378 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6573c043-542c-47ae-a2ba-f70b8baf60c2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d06dfc9e93891495a387aa8a8e24b4029173b25c953dff4d2e80dbb30f4b03e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd601391a5bb150716ee97b765dd442d4bf9929eb01dfd17fdce39f327b4698f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9de298aa47ed7038c4f4ed742abb7a13f112d7f417637eb11d31a1f1accf0b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfa445fb3677a09126d124eea1348466a338713c6728a7ef138d9ffa43bec9eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://af3da6e6807f5d42cb8607efbabab0c5d472ddb5c02dd1c33f9f0f3ea8eba9c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://34b2a56f4af5df28c458b7f7d4588f81564c734a117bbf4d08e1fccfa43fd387\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8563b3e8d8f18133f64756e8dfd9f30cdcdad52ed6e13c8edc39fbdee3f71e4a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8563b3e8d8f18133f64756e8dfd9f30cdcdad52ed6e13c8edc39fbdee3f71e4a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-23T14:42:29Z\\\",\\\"message\\\":\\\"tus/network-metrics-daemon-gtj96: failed to update pod openshift-multus/network-metrics-daemon-gtj96: Internal error occurred: failed calling webhook \\\\\\\"pod.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/pod?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:29Z is after 2025-08-24T17:21:41Z\\\\nI1123 14:42:29.428594 6728 ovnkube.go:599] Stopped ovnkube\\\\nI1123 14:42:29.428608 6728 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1123 14:42:29.428611 6728 obj_retry.go:303] Retry object setup: *v1.Pod openshift-network-diagnostics/network-check-source-55646444c4-trplf\\\\nI1123 14:42:29.428624 6728 obj_retry.go:365] Adding new object: *v1.Pod openshift-network-diagnostics/network-check-source-55646444c4-trplf\\\\nI1123 14:42:29.428635 6728 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1123 14:42:29.428578 6728 obj_retry.go:303] Retry object setup: *v1.Pod openshift-dns/node-resolver-7btqb\\\\nF1123 14:42:29.428723 6728 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:28Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-j8fzz_openshift-ovn-kubernetes(6573c043-542c-47ae-a2ba-f70b8baf60c2)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7009f69906eef1a76bebe08606ec9a9b7e61ce6217caaf0b784d0943db4bd51f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c68e73aa649285bedc9716826bdf5e41580614fba6de910e2dc1fa5bb524dcc\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6c68e73aa649285bedc9716826bdf5e41580614fba6de910e2dc1fa5bb524dcc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-j8fzz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:45Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:45 crc kubenswrapper[5050]: I1123 14:42:45.862909 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:45 crc kubenswrapper[5050]: I1123 14:42:45.862958 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:45 crc kubenswrapper[5050]: I1123 14:42:45.862967 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:45 crc kubenswrapper[5050]: I1123 14:42:45.862982 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:45 crc kubenswrapper[5050]: I1123 14:42:45.862992 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:45Z","lastTransitionTime":"2025-11-23T14:42:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 23 14:42:46 crc kubenswrapper[5050]: I1123 14:42:46.547780 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 23 14:42:46 crc kubenswrapper[5050]: I1123 14:42:46.547843 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gtj96"
Nov 23 14:42:46 crc kubenswrapper[5050]: I1123 14:42:46.547844 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 23 14:42:46 crc kubenswrapper[5050]: E1123 14:42:46.548438 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gtj96" podUID="cc69bd19-1f49-486e-a510-d5b8461fb172"
Nov 23 14:42:46 crc kubenswrapper[5050]: E1123 14:42:46.548580 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 23 14:42:46 crc kubenswrapper[5050]: E1123 14:42:46.548380 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 23 14:42:47 crc kubenswrapper[5050]: I1123 14:42:47.548200 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 23 14:42:47 crc kubenswrapper[5050]: E1123 14:42:47.548434 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 23 14:42:48 crc kubenswrapper[5050]: I1123 14:42:48.548330 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 23 14:42:48 crc kubenswrapper[5050]: E1123 14:42:48.548550 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 23 14:42:48 crc kubenswrapper[5050]: I1123 14:42:48.548761 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gtj96"
Nov 23 14:42:48 crc kubenswrapper[5050]: E1123 14:42:48.548830 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gtj96" podUID="cc69bd19-1f49-486e-a510-d5b8461fb172"
Nov 23 14:42:48 crc kubenswrapper[5050]: I1123 14:42:48.549110 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 23 14:42:48 crc kubenswrapper[5050]: E1123 14:42:48.549162 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 23 14:42:48 crc kubenswrapper[5050]: I1123 14:42:48.997839 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 23 14:42:48 crc kubenswrapper[5050]: I1123 14:42:48.997892 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 23 14:42:48 crc kubenswrapper[5050]: I1123 14:42:48.997907 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 23 14:42:48 crc kubenswrapper[5050]: I1123 14:42:48.997929 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 23 14:42:48 crc kubenswrapper[5050]: I1123 14:42:48.997942 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:48Z","lastTransitionTime":"2025-11-23T14:42:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"} Nov 23 14:42:49 crc kubenswrapper[5050]: I1123 14:42:49.102018 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:49 crc kubenswrapper[5050]: I1123 14:42:49.102088 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:49 crc kubenswrapper[5050]: I1123 14:42:49.102106 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:49 crc kubenswrapper[5050]: I1123 14:42:49.102132 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:49 crc kubenswrapper[5050]: I1123 14:42:49.102153 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:49Z","lastTransitionTime":"2025-11-23T14:42:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:49 crc kubenswrapper[5050]: I1123 14:42:49.206242 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:49 crc kubenswrapper[5050]: I1123 14:42:49.206296 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:49 crc kubenswrapper[5050]: I1123 14:42:49.206313 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:49 crc kubenswrapper[5050]: I1123 14:42:49.206399 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:49 crc kubenswrapper[5050]: I1123 14:42:49.206421 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:49Z","lastTransitionTime":"2025-11-23T14:42:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:49 crc kubenswrapper[5050]: I1123 14:42:49.309706 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:49 crc kubenswrapper[5050]: I1123 14:42:49.309759 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:49 crc kubenswrapper[5050]: I1123 14:42:49.309776 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:49 crc kubenswrapper[5050]: I1123 14:42:49.309803 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:49 crc kubenswrapper[5050]: I1123 14:42:49.309820 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:49Z","lastTransitionTime":"2025-11-23T14:42:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 23 14:42:49 crc kubenswrapper[5050]: I1123 14:42:49.547984 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 23 14:42:49 crc kubenswrapper[5050]: E1123 14:42:49.548180 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 23 14:42:50 crc kubenswrapper[5050]: I1123 14:42:50.547606 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gtj96"
Nov 23 14:42:50 crc kubenswrapper[5050]: I1123 14:42:50.547711 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 23 14:42:50 crc kubenswrapper[5050]: I1123 14:42:50.547785 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 23 14:42:50 crc kubenswrapper[5050]: E1123 14:42:50.547783 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gtj96" podUID="cc69bd19-1f49-486e-a510-d5b8461fb172"
Nov 23 14:42:50 crc kubenswrapper[5050]: E1123 14:42:50.547908 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 23 14:42:50 crc kubenswrapper[5050]: E1123 14:42:50.548167 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 23 14:42:51 crc kubenswrapper[5050]: I1123 14:42:51.548880 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 23 14:42:51 crc kubenswrapper[5050]: E1123 14:42:51.549058 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 23 14:42:51 crc kubenswrapper[5050]: I1123 14:42:51.565619 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc"]
Nov 23 14:42:52 crc kubenswrapper[5050]: I1123 14:42:52.287885 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/cc69bd19-1f49-486e-a510-d5b8461fb172-metrics-certs\") pod \"network-metrics-daemon-gtj96\" (UID: \"cc69bd19-1f49-486e-a510-d5b8461fb172\") " pod="openshift-multus/network-metrics-daemon-gtj96"
Nov 23 14:42:52 crc kubenswrapper[5050]: E1123 14:42:52.288157 5050 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Nov 23 14:42:52 crc kubenswrapper[5050]: E1123 14:42:52.288281 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/cc69bd19-1f49-486e-a510-d5b8461fb172-metrics-certs podName:cc69bd19-1f49-486e-a510-d5b8461fb172 nodeName:}" failed. No retries permitted until 2025-11-23 14:43:24.288254242 +0000 UTC m=+99.455250727 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/cc69bd19-1f49-486e-a510-d5b8461fb172-metrics-certs") pod "network-metrics-daemon-gtj96" (UID: "cc69bd19-1f49-486e-a510-d5b8461fb172") : object "openshift-multus"/"metrics-daemon-secret" not registered
Nov 23 14:42:52 crc kubenswrapper[5050]: I1123 14:42:52.548157 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 23 14:42:52 crc kubenswrapper[5050]: I1123 14:42:52.548243 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 23 14:42:52 crc kubenswrapper[5050]: I1123 14:42:52.548349 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gtj96"
Nov 23 14:42:52 crc kubenswrapper[5050]: E1123 14:42:52.548436 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 23 14:42:52 crc kubenswrapper[5050]: E1123 14:42:52.548812 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gtj96" podUID="cc69bd19-1f49-486e-a510-d5b8461fb172"
Nov 23 14:42:52 crc kubenswrapper[5050]: E1123 14:42:52.548884 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 23 14:42:52 crc kubenswrapper[5050]: I1123 14:42:52.856104 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 23 14:42:52 crc kubenswrapper[5050]: I1123 14:42:52.856183 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 23 14:42:52 crc kubenswrapper[5050]: I1123 14:42:52.856203 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 23 14:42:52 crc kubenswrapper[5050]: I1123 14:42:52.856236 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 23 14:42:52 crc kubenswrapper[5050]: I1123 14:42:52.856257 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:52Z","lastTransitionTime":"2025-11-23T14:42:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"} Nov 23 14:42:52 crc kubenswrapper[5050]: E1123 14:42:52.882659 5050 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:42:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:52Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:42:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:52Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:42:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:52Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:42:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:52Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bf8d1c9f-d59a-4fb5-8c40-1d0a144375f2\\\",\\\"systemUUID\\\":\\\"6e38a339-19e4-4f40-bd49-1fbe05dbb3f1\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:52Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:52 crc kubenswrapper[5050]: I1123 14:42:52.887295 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:52 crc kubenswrapper[5050]: I1123 14:42:52.887360 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 23 14:42:52 crc kubenswrapper[5050]: I1123 14:42:52.887374 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:52 crc kubenswrapper[5050]: I1123 14:42:52.887395 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:52 crc kubenswrapper[5050]: I1123 14:42:52.887413 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:52Z","lastTransitionTime":"2025-11-23T14:42:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:52 crc kubenswrapper[5050]: E1123 14:42:52.902372 5050 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:42:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:52Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:42:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:52Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:42:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:52Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:42:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:52Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bf8d1c9f-d59a-4fb5-8c40-1d0a144375f2\\\",\\\"systemUUID\\\":\\\"6e38a339-19e4-4f40-bd49-1fbe05dbb3f1\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:52Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:52 crc kubenswrapper[5050]: I1123 14:42:52.906320 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:52 crc kubenswrapper[5050]: I1123 14:42:52.906394 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 23 14:42:52 crc kubenswrapper[5050]: I1123 14:42:52.906419 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:52 crc kubenswrapper[5050]: I1123 14:42:52.906522 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:52 crc kubenswrapper[5050]: I1123 14:42:52.906557 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:52Z","lastTransitionTime":"2025-11-23T14:42:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:52 crc kubenswrapper[5050]: E1123 14:42:52.923850 5050 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:42:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:52Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:42:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:52Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:42:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:52Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:42:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:52Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bf8d1c9f-d59a-4fb5-8c40-1d0a144375f2\\\",\\\"systemUUID\\\":\\\"6e38a339-19e4-4f40-bd49-1fbe05dbb3f1\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:52Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:52 crc kubenswrapper[5050]: I1123 14:42:52.928390 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:52 crc kubenswrapper[5050]: I1123 14:42:52.928764 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 23 14:42:52 crc kubenswrapper[5050]: I1123 14:42:52.928828 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:52 crc kubenswrapper[5050]: I1123 14:42:52.928855 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:52 crc kubenswrapper[5050]: I1123 14:42:52.929183 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:52Z","lastTransitionTime":"2025-11-23T14:42:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:52 crc kubenswrapper[5050]: E1123 14:42:52.951787 5050 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:42:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:52Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:42:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:52Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:42:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:52Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:42:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:52Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bf8d1c9f-d59a-4fb5-8c40-1d0a144375f2\\\",\\\"systemUUID\\\":\\\"6e38a339-19e4-4f40-bd49-1fbe05dbb3f1\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:52Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:52 crc kubenswrapper[5050]: I1123 14:42:52.956849 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:52 crc kubenswrapper[5050]: I1123 14:42:52.956888 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 23 14:42:52 crc kubenswrapper[5050]: I1123 14:42:52.956900 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:52 crc kubenswrapper[5050]: I1123 14:42:52.956920 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:52 crc kubenswrapper[5050]: I1123 14:42:52.956935 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:52Z","lastTransitionTime":"2025-11-23T14:42:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:52 crc kubenswrapper[5050]: E1123 14:42:52.971750 5050 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:42:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:52Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:42:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:52Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:42:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:52Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:42:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:52Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bf8d1c9f-d59a-4fb5-8c40-1d0a144375f2\\\",\\\"systemUUID\\\":\\\"6e38a339-19e4-4f40-bd49-1fbe05dbb3f1\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:52Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:52 crc kubenswrapper[5050]: E1123 14:42:52.971974 5050 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 23 14:42:52 crc kubenswrapper[5050]: I1123 14:42:52.974235 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 23 14:42:52 crc kubenswrapper[5050]: I1123 14:42:52.974282 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:52 crc kubenswrapper[5050]: I1123 14:42:52.974301 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:52 crc kubenswrapper[5050]: I1123 14:42:52.974326 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:52 crc kubenswrapper[5050]: I1123 14:42:52.974346 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:52Z","lastTransitionTime":"2025-11-23T14:42:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:53 crc kubenswrapper[5050]: I1123 14:42:53.078475 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:53 crc kubenswrapper[5050]: I1123 14:42:53.078553 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:53 crc kubenswrapper[5050]: I1123 14:42:53.078591 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:53 crc kubenswrapper[5050]: I1123 14:42:53.078612 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:53 crc kubenswrapper[5050]: I1123 14:42:53.078625 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:53Z","lastTransitionTime":"2025-11-23T14:42:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:53 crc kubenswrapper[5050]: I1123 14:42:53.181553 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:53 crc kubenswrapper[5050]: I1123 14:42:53.181599 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:53 crc kubenswrapper[5050]: I1123 14:42:53.181610 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:53 crc kubenswrapper[5050]: I1123 14:42:53.181629 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:53 crc kubenswrapper[5050]: I1123 14:42:53.181644 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:53Z","lastTransitionTime":"2025-11-23T14:42:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:53 crc kubenswrapper[5050]: I1123 14:42:53.284828 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:53 crc kubenswrapper[5050]: I1123 14:42:53.284889 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:53 crc kubenswrapper[5050]: I1123 14:42:53.284899 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:53 crc kubenswrapper[5050]: I1123 14:42:53.284918 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:53 crc kubenswrapper[5050]: I1123 14:42:53.284933 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:53Z","lastTransitionTime":"2025-11-23T14:42:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:53 crc kubenswrapper[5050]: I1123 14:42:53.388670 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:53 crc kubenswrapper[5050]: I1123 14:42:53.388741 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:53 crc kubenswrapper[5050]: I1123 14:42:53.388766 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:53 crc kubenswrapper[5050]: I1123 14:42:53.388800 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:53 crc kubenswrapper[5050]: I1123 14:42:53.388824 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:53Z","lastTransitionTime":"2025-11-23T14:42:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:53 crc kubenswrapper[5050]: I1123 14:42:53.493001 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:53 crc kubenswrapper[5050]: I1123 14:42:53.493060 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:53 crc kubenswrapper[5050]: I1123 14:42:53.493077 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:53 crc kubenswrapper[5050]: I1123 14:42:53.493106 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:53 crc kubenswrapper[5050]: I1123 14:42:53.493124 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:53Z","lastTransitionTime":"2025-11-23T14:42:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:53 crc kubenswrapper[5050]: I1123 14:42:53.548134 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 23 14:42:53 crc kubenswrapper[5050]: E1123 14:42:53.548341 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 23 14:42:53 crc kubenswrapper[5050]: I1123 14:42:53.596093 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:53 crc kubenswrapper[5050]: I1123 14:42:53.596163 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:53 crc kubenswrapper[5050]: I1123 14:42:53.596180 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:53 crc kubenswrapper[5050]: I1123 14:42:53.596208 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:53 crc kubenswrapper[5050]: I1123 14:42:53.596230 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:53Z","lastTransitionTime":"2025-11-23T14:42:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:53 crc kubenswrapper[5050]: I1123 14:42:53.699965 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:53 crc kubenswrapper[5050]: I1123 14:42:53.700044 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:53 crc kubenswrapper[5050]: I1123 14:42:53.700072 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:53 crc kubenswrapper[5050]: I1123 14:42:53.700107 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:53 crc kubenswrapper[5050]: I1123 14:42:53.700132 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:53Z","lastTransitionTime":"2025-11-23T14:42:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:53 crc kubenswrapper[5050]: I1123 14:42:53.803296 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:53 crc kubenswrapper[5050]: I1123 14:42:53.803373 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:53 crc kubenswrapper[5050]: I1123 14:42:53.803392 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:53 crc kubenswrapper[5050]: I1123 14:42:53.803423 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:53 crc kubenswrapper[5050]: I1123 14:42:53.803472 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:53Z","lastTransitionTime":"2025-11-23T14:42:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:53 crc kubenswrapper[5050]: I1123 14:42:53.906183 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:53 crc kubenswrapper[5050]: I1123 14:42:53.906275 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:53 crc kubenswrapper[5050]: I1123 14:42:53.906294 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:53 crc kubenswrapper[5050]: I1123 14:42:53.906345 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:53 crc kubenswrapper[5050]: I1123 14:42:53.906364 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:53Z","lastTransitionTime":"2025-11-23T14:42:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:54 crc kubenswrapper[5050]: I1123 14:42:54.009101 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:54 crc kubenswrapper[5050]: I1123 14:42:54.009178 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:54 crc kubenswrapper[5050]: I1123 14:42:54.009200 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:54 crc kubenswrapper[5050]: I1123 14:42:54.009236 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:54 crc kubenswrapper[5050]: I1123 14:42:54.009261 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:54Z","lastTransitionTime":"2025-11-23T14:42:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:54 crc kubenswrapper[5050]: I1123 14:42:54.112307 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:54 crc kubenswrapper[5050]: I1123 14:42:54.112416 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:54 crc kubenswrapper[5050]: I1123 14:42:54.112436 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:54 crc kubenswrapper[5050]: I1123 14:42:54.112484 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:54 crc kubenswrapper[5050]: I1123 14:42:54.112506 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:54Z","lastTransitionTime":"2025-11-23T14:42:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:54 crc kubenswrapper[5050]: I1123 14:42:54.216340 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:54 crc kubenswrapper[5050]: I1123 14:42:54.216410 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:54 crc kubenswrapper[5050]: I1123 14:42:54.216429 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:54 crc kubenswrapper[5050]: I1123 14:42:54.216483 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:54 crc kubenswrapper[5050]: I1123 14:42:54.216504 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:54Z","lastTransitionTime":"2025-11-23T14:42:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:54 crc kubenswrapper[5050]: I1123 14:42:54.319918 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:54 crc kubenswrapper[5050]: I1123 14:42:54.319977 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:54 crc kubenswrapper[5050]: I1123 14:42:54.319994 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:54 crc kubenswrapper[5050]: I1123 14:42:54.320016 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:54 crc kubenswrapper[5050]: I1123 14:42:54.320033 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:54Z","lastTransitionTime":"2025-11-23T14:42:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:54 crc kubenswrapper[5050]: I1123 14:42:54.423516 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:54 crc kubenswrapper[5050]: I1123 14:42:54.423595 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:54 crc kubenswrapper[5050]: I1123 14:42:54.423613 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:54 crc kubenswrapper[5050]: I1123 14:42:54.423641 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:54 crc kubenswrapper[5050]: I1123 14:42:54.423660 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:54Z","lastTransitionTime":"2025-11-23T14:42:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:54 crc kubenswrapper[5050]: I1123 14:42:54.526692 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:54 crc kubenswrapper[5050]: I1123 14:42:54.527018 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:54 crc kubenswrapper[5050]: I1123 14:42:54.527112 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:54 crc kubenswrapper[5050]: I1123 14:42:54.527259 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:54 crc kubenswrapper[5050]: I1123 14:42:54.527349 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:54Z","lastTransitionTime":"2025-11-23T14:42:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:54 crc kubenswrapper[5050]: I1123 14:42:54.548097 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 23 14:42:54 crc kubenswrapper[5050]: I1123 14:42:54.548128 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 23 14:42:54 crc kubenswrapper[5050]: I1123 14:42:54.548191 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gtj96" Nov 23 14:42:54 crc kubenswrapper[5050]: E1123 14:42:54.548268 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 23 14:42:54 crc kubenswrapper[5050]: E1123 14:42:54.548394 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gtj96" podUID="cc69bd19-1f49-486e-a510-d5b8461fb172" Nov 23 14:42:54 crc kubenswrapper[5050]: E1123 14:42:54.548550 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 23 14:42:54 crc kubenswrapper[5050]: I1123 14:42:54.630297 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:54 crc kubenswrapper[5050]: I1123 14:42:54.630694 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:54 crc kubenswrapper[5050]: I1123 14:42:54.630757 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:54 crc kubenswrapper[5050]: I1123 14:42:54.630792 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:54 crc kubenswrapper[5050]: I1123 14:42:54.630815 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:54Z","lastTransitionTime":"2025-11-23T14:42:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:54 crc kubenswrapper[5050]: I1123 14:42:54.734615 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:54 crc kubenswrapper[5050]: I1123 14:42:54.734681 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:54 crc kubenswrapper[5050]: I1123 14:42:54.734699 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:54 crc kubenswrapper[5050]: I1123 14:42:54.734723 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:54 crc kubenswrapper[5050]: I1123 14:42:54.734742 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:54Z","lastTransitionTime":"2025-11-23T14:42:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:54 crc kubenswrapper[5050]: I1123 14:42:54.838142 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:54 crc kubenswrapper[5050]: I1123 14:42:54.838246 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:54 crc kubenswrapper[5050]: I1123 14:42:54.838275 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:54 crc kubenswrapper[5050]: I1123 14:42:54.838311 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:54 crc kubenswrapper[5050]: I1123 14:42:54.838335 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:54Z","lastTransitionTime":"2025-11-23T14:42:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:54 crc kubenswrapper[5050]: I1123 14:42:54.940936 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:54 crc kubenswrapper[5050]: I1123 14:42:54.941002 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:54 crc kubenswrapper[5050]: I1123 14:42:54.941021 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:54 crc kubenswrapper[5050]: I1123 14:42:54.941046 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:54 crc kubenswrapper[5050]: I1123 14:42:54.941065 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:54Z","lastTransitionTime":"2025-11-23T14:42:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:55 crc kubenswrapper[5050]: I1123 14:42:55.042030 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-qvjn2_abdac21e-18fc-460d-bd3b-73bed66b8ab9/kube-multus/0.log" Nov 23 14:42:55 crc kubenswrapper[5050]: I1123 14:42:55.042111 5050 generic.go:334] "Generic (PLEG): container finished" podID="abdac21e-18fc-460d-bd3b-73bed66b8ab9" containerID="7d239bbc61595516c6e76763dd66ee795750adb765c56bb5d9c967c1e5897fcf" exitCode=1 Nov 23 14:42:55 crc kubenswrapper[5050]: I1123 14:42:55.042162 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-qvjn2" event={"ID":"abdac21e-18fc-460d-bd3b-73bed66b8ab9","Type":"ContainerDied","Data":"7d239bbc61595516c6e76763dd66ee795750adb765c56bb5d9c967c1e5897fcf"} Nov 23 14:42:55 crc kubenswrapper[5050]: I1123 14:42:55.042984 5050 scope.go:117] "RemoveContainer" containerID="7d239bbc61595516c6e76763dd66ee795750adb765c56bb5d9c967c1e5897fcf" Nov 23 14:42:55 crc kubenswrapper[5050]: I1123 14:42:55.043285 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:55 crc kubenswrapper[5050]: I1123 14:42:55.043383 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:55 crc kubenswrapper[5050]: I1123 14:42:55.043410 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:55 crc kubenswrapper[5050]: I1123 14:42:55.043479 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:55 crc kubenswrapper[5050]: I1123 14:42:55.043595 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:55Z","lastTransitionTime":"2025-11-23T14:42:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:55 crc kubenswrapper[5050]: I1123 14:42:55.059516 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4c9762bc-6dbe-4d7f-a93a-591033e8c3b0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0e3f802f93600b6a56df2a1709f042c01893f2d619ae5d173a014975b1d33d8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3e2428cf4682b20bc4bfea576e90dcfd5de93218b167a7d27e9c6af5496a7fd8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e2428cf4682b20bc4bfea576e90dcfd5de93218b167a7d27e9c6af5496a7fd8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:41:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:41:45Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:55Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:55 crc kubenswrapper[5050]: I1123 14:42:55.074086 5050 
status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://719b29f3f0e7ff3aa3d105c9f4d9fb8d180d13494fcef94b204dece23c0f763d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:55Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:55 crc kubenswrapper[5050]: I1123 14:42:55.089363 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac91134c1368426cb845f20e6c777b0b613d93b1761d097051a7e63dae4203a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ef730e87b3ef157558a92f93526e4dec23e71b48f5cb08a30242f143182a5d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:55Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:55 crc kubenswrapper[5050]: I1123 14:42:55.101089 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1d998909-9470-47ef-87e8-b34f0473682f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5bbc40aad4e1fff72cfeaf901bbe2edde01b909690cec06e0828aee8a4c93e8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jx4bd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b97978773311507f492064330c0ebbaf7862255a02347599c52ce6bbd13d8922\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jx4bd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-hlrlq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:55Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:55 crc kubenswrapper[5050]: I1123 14:42:55.122523 5050 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-km979" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1d7b7de2-b361-44b9-ba10-5d4f889abc9b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a024193010036bae37aa1c744cd832dced7ca4893acc76efe5c2b9db65834a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://32b4eece06b72ca080adc2e1fab7f1db357c926b47433f2d374afaa45dad6b38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://32b4eece06b72ca080adc2e1fab7f1db357c926b47433f2d374afaa45dad6b38\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86867dd85be2aec28e680e55a6fb723964f26082eff07f656c00bd79cd2d3cd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2c
c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://86867dd85be2aec28e680e55a6fb723964f26082eff07f656c00bd79cd2d3cd2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c72b02501d9b33ae056e91b10f13e50327984b1cabcd69b5066780aa4fde2682\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c72b02501d9b33ae056e91b10f13e50327984b1cabcd69b5066780aa4fde2682\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba558b2174a35efd061ce97a84b1b785916ea34d481d58cf4582a5bf29d6b24a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba558b2174a35efd061ce97a84b1b785916ea34d481d58cf4582a5bf29d6b24a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-re
lease\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9cd2247c7d101274e0f7a09b531266720489fb8ed77f89e7ccd1ee74882508d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c9cd2247c7d101274e0f7a09b531266720489fb8ed77f89e7ccd1ee74882508d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0873c53f2aa7003b92b7460595f79f52b70796f9e38c133da799b5c49785d857\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0873c53f2aa7003b92b7460595f79f52b70796f9e38c133da799b5c49785d857\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-km979\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:55Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:55 crc kubenswrapper[5050]: I1123 14:42:55.138131 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8861091e-df98-4ea6-a853-e76e7dec48c5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://374a569595342d2e7684dc020e82bd80b35965aff751215c6c926005ea040798\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://63b17cb181aa6b22312393ce35127c8e06ae80cd1feaaa001d7cb34cdc111456\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3e232fb0da11b5caaec81c7b6ae7185d121508d99fce67c77e1d5d52289dad8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7b8be59b2e91f123c3d88b93237121402f84e9f54aeeaa0fae57baaa6238140\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:41:45Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:55Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:55 crc kubenswrapper[5050]: I1123 14:42:55.146805 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:55 crc kubenswrapper[5050]: I1123 14:42:55.147154 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:55 crc kubenswrapper[5050]: I1123 14:42:55.147164 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:55 crc kubenswrapper[5050]: I1123 14:42:55.147182 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:55 crc kubenswrapper[5050]: I1123 14:42:55.147195 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:55Z","lastTransitionTime":"2025-11-23T14:42:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:55 crc kubenswrapper[5050]: I1123 14:42:55.153116 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:55Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:55 crc kubenswrapper[5050]: I1123 14:42:55.165807 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-gtj96" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc69bd19-1f49-486e-a510-d5b8461fb172\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ltjqr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ltjqr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:20Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-gtj96\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:55Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:55 crc kubenswrapper[5050]: I1123 14:42:55.182478 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0a2166ba-b7f4-4eb1-95bc-4f44d90e72ea\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://358800bef7b7e16e58003aa5e2411212334a5b1a33c80d223eb7d40ad13867f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://456093b33927a232192a5fa7ee1a4228e9f37da58e5e09cb33018ae6f5b14b2c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ccfc14c36ecee0d8502415582be6053b096ea77504bf5de67f77bf6f74c0936b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0c0214fb48430e8263a76a8412ebe7aaeaa85c1b476c22f8bbe9a5279f35aa5d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://10cf72fab955e4ec81917127043d9e4a2f9d427a03edc22120ab2ae711fdc5a4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"ed_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1763908925\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1763908925\\\\\\\\\\\\\\\" (2025-11-23 13:42:05 +0000 UTC to 2026-11-23 13:42:05 +0000 UTC (now=2025-11-23 14:42:05.669138051 +0000 UTC))\\\\\\\"\\\\nI1123 14:42:05.669209 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1123 14:42:05.669265 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1123 14:42:05.668647 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1123 14:42:05.669361 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1123 14:42:05.669413 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1123 14:42:05.668681 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3465722692/tls.crt::/tmp/serving-cert-3465722692/tls.key\\\\\\\"\\\\nI1123 14:42:05.669629 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1123 14:42:05.669660 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1123 14:42:05.668685 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1123 14:42:05.669884 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1123 14:42:05.668719 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1123 14:42:05.669899 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1123 14:42:05.668776 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-23T14:41:59Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://beaf638703b4b64fabd10eb14bad058dc0dae74261a462e7f2fddd8aa3811cf7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a63464a6e2f79d8e5a7e0eed9f72b684e0071cad60d3a801754762be4a2735c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a63464a6e2f79d8e5a7e0eed9f72b684e0071cad60d3a801754762be4a2735c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:41:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:41:45Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:55Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:55 crc kubenswrapper[5050]: I1123 14:42:55.197486 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:55Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:55 crc kubenswrapper[5050]: I1123 14:42:55.207925 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ced068547f7e7c0807e996ab8500b30cda68a4c31363b415f66516b883221972\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:55Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:55 crc kubenswrapper[5050]: I1123 14:42:55.219894 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-7btqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"963992d0-f69e-4327-9789-4571451c1838\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c4b1570d42f2b86c042b9abcb595bf36b35789f24d12573f94503c7383849647\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8vphk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:05Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-7btqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:55Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:55 crc kubenswrapper[5050]: I1123 14:42:55.240196 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6573c043-542c-47ae-a2ba-f70b8baf60c2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d06dfc9e93891495a387aa8a8e24b4029173b25c953dff4d2e80dbb30f4b03e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd601391a5bb150716ee97b765dd442d4bf9929eb01dfd17fdce39f327b4698f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9de298aa47ed7038c4f4ed742abb7a13f112d7f417637eb11d31a1f1accf0b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfa445fb3677a09126d124eea1348466a338713c6728a7ef138d9ffa43bec9eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://af3da6e6807f5d42cb8607efbabab0c5d472ddb5c02dd1c33f9f0f3ea8eba9c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://34b2a56f4af5df28c458b7f7d4588f81564c734a117bbf4d08e1fccfa43fd387\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8563b3e8d8f18133f64756e8dfd9f30cdcdad52ed6e13c8edc39fbdee3f71e4a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8563b3e8d8f18133f64756e8dfd9f30cdcdad52ed6e13c8edc39fbdee3f71e4a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-23T14:42:29Z\\\",\\\"message\\\":\\\"tus/network-metrics-daemon-gtj96: failed to update pod openshift-multus/network-metrics-daemon-gtj96: Internal error occurred: failed calling webhook \\\\\\\"pod.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/pod?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:29Z is after 2025-08-24T17:21:41Z\\\\nI1123 14:42:29.428594 6728 ovnkube.go:599] Stopped ovnkube\\\\nI1123 14:42:29.428608 6728 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1123 14:42:29.428611 6728 obj_retry.go:303] Retry object setup: *v1.Pod openshift-network-diagnostics/network-check-source-55646444c4-trplf\\\\nI1123 14:42:29.428624 6728 obj_retry.go:365] Adding new object: *v1.Pod openshift-network-diagnostics/network-check-source-55646444c4-trplf\\\\nI1123 14:42:29.428635 6728 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1123 14:42:29.428578 6728 obj_retry.go:303] Retry object setup: *v1.Pod openshift-dns/node-resolver-7btqb\\\\nF1123 14:42:29.428723 6728 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:28Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-j8fzz_openshift-ovn-kubernetes(6573c043-542c-47ae-a2ba-f70b8baf60c2)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7009f69906eef1a76bebe08606ec9a9b7e61ce6217caaf0b784d0943db4bd51f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c68e73aa649285bedc9716826bdf5e41580614fba6de910e2dc1fa5bb524dcc\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6c68e73aa649285bedc9716826bdf5e41580614fba6de910e2dc1fa5bb524dcc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-j8fzz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:55Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:55 crc kubenswrapper[5050]: I1123 14:42:55.249748 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:55 crc kubenswrapper[5050]: I1123 14:42:55.249797 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:55 crc kubenswrapper[5050]: I1123 14:42:55.249809 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:55 crc kubenswrapper[5050]: I1123 14:42:55.249830 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:55 crc kubenswrapper[5050]: I1123 14:42:55.249843 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:55Z","lastTransitionTime":"2025-11-23T14:42:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:55 crc kubenswrapper[5050]: I1123 14:42:55.252893 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-9q4mg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b75c970-bec5-4155-89c1-3fcba8733f70\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4f4f3e4f2e058d94d8ff4dc299e19a23318e04263a4324efa766daf5f7268f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lsct2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2cc3ecb845a59d933cd05a64b4c258b18d167098d933d7b0741d7dbb43bfb328\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lsct2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:19Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-9q4mg\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:55Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:55 crc kubenswrapper[5050]: I1123 14:42:55.266387 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:55Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:55 crc kubenswrapper[5050]: I1123 14:42:55.277415 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-qvjn2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"abdac21e-18fc-460d-bd3b-73bed66b8ab9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:55Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:55Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d239bbc61595516c6e76763dd66ee795750adb765c56bb5d9c967c1e5897fcf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7d239bbc61595516c6e76763dd66ee795750adb765c56bb5d9c967c1e5897fcf\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-23T14:42:54Z\\\",\\\"message\\\":\\\"2025-11-23T14:42:08+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_b627f88d-4467-4bef-bc32-e51f4c0a4ffd\\\\n2025-11-23T14:42:08+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_b627f88d-4467-4bef-bc32-e51f4c0a4ffd to /host/opt/cni/bin/\\\\n2025-11-23T14:42:09Z [verbose] multus-daemon started\\\\n2025-11-23T14:42:09Z [verbose] Readiness Indicator file check\\\\n2025-11-23T14:42:54Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bggpq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-qvjn2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:55Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:55 crc kubenswrapper[5050]: I1123 14:42:55.287520 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7cjg9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7f65245a-e7ea-4b29-9534-5dbe5a7ee271\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf73369e7515e9d61bf8fc0b0bd940f15e49a8e84b3e181dad104bd56c2d9b2d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-586ds\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:08Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7cjg9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:55Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:55 crc kubenswrapper[5050]: I1123 14:42:55.298876 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"28a3f438-bef5-4ff1-a080-80490963a204\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7eb678421bc625e01448e7eeb5914cc18880228c6d483129f5e0565dc2d00e49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://65320ae48f1453657b5ff41429b099e308d536f2b7e176941a1f258e77922eeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://60d5ec902c3bb132cd6e58cfbb8e7fdbaf26f34ca50654f9bd028dbb05d7aaec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ced1c27a73cf758143ad23a5852d2211c66925e2c429b91954344f6d78ad5ffd\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ced1c27a73cf758143ad23a5852d2211c66925e2c429b91954344f6d78ad5ffd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:41:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:41:46Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:41:45Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:55Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:55 crc kubenswrapper[5050]: I1123 14:42:55.352855 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:55 crc kubenswrapper[5050]: I1123 14:42:55.352922 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:55 crc kubenswrapper[5050]: I1123 14:42:55.353131 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:55 crc kubenswrapper[5050]: I1123 14:42:55.353161 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:55 crc kubenswrapper[5050]: I1123 14:42:55.353187 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:55Z","lastTransitionTime":"2025-11-23T14:42:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:55 crc kubenswrapper[5050]: I1123 14:42:55.457085 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:55 crc kubenswrapper[5050]: I1123 14:42:55.457157 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:55 crc kubenswrapper[5050]: I1123 14:42:55.457185 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:55 crc kubenswrapper[5050]: I1123 14:42:55.457218 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:55 crc kubenswrapper[5050]: I1123 14:42:55.457244 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:55Z","lastTransitionTime":"2025-11-23T14:42:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:55 crc kubenswrapper[5050]: I1123 14:42:55.547984 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 23 14:42:55 crc kubenswrapper[5050]: E1123 14:42:55.548258 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 23 14:42:55 crc kubenswrapper[5050]: I1123 14:42:55.560680 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:55 crc kubenswrapper[5050]: I1123 14:42:55.560788 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:55 crc kubenswrapper[5050]: I1123 14:42:55.560808 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:55 crc kubenswrapper[5050]: I1123 14:42:55.560842 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:55 crc kubenswrapper[5050]: I1123 14:42:55.560866 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:55Z","lastTransitionTime":"2025-11-23T14:42:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:55 crc kubenswrapper[5050]: I1123 14:42:55.572330 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0a2166ba-b7f4-4eb1-95bc-4f44d90e72ea\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://358800bef7b7e16e58003aa5e2411212334a5b1a33c80d223eb7d40ad13867f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://456093b33927a232192a5fa7ee1a4228e9f37da58e5e09cb33018ae6f5b14b2c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ccfc14c36ecee0d8502415582be6053b096ea77504bf5de67f77bf6f74c0936b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0c0214fb48430e8263a76a8412ebe7aaeaa85c1b476c22f8bbe9a5279f35aa5d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://10cf72fab955e4ec81917127043d9e4a2f9d427a03edc22120ab2ae711fdc5a4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"ed_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1763908925\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1763908925\\\\\\\\\\\\\\\" (2025-11-23 13:42:05 +0000 UTC to 2026-11-23 13:42:05 +0000 UTC (now=2025-11-23 14:42:05.669138051 +0000 UTC))\\\\\\\"\\\\nI1123 14:42:05.669209 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1123 14:42:05.669265 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1123 14:42:05.668647 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1123 14:42:05.669361 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1123 14:42:05.669413 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1123 14:42:05.668681 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3465722692/tls.crt::/tmp/serving-cert-3465722692/tls.key\\\\\\\"\\\\nI1123 14:42:05.669629 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1123 14:42:05.669660 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1123 14:42:05.668685 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1123 14:42:05.669884 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1123 14:42:05.668719 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1123 14:42:05.669899 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1123 14:42:05.668776 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-23T14:41:59Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://beaf638703b4b64fabd10eb14bad058dc0dae74261a462e7f2fddd8aa3811cf7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a63464a6e2f79d8e5a7e0eed9f72b684e0071cad60d3a801754762be4a2735c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a63464a6e2f79d8e5a7e0eed9f72b684e0071cad60d3a801754762be4a2735c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:41:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:41:45Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:55Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:55 crc kubenswrapper[5050]: I1123 14:42:55.598823 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:55Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:55 crc kubenswrapper[5050]: I1123 14:42:55.618150 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-gtj96" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc69bd19-1f49-486e-a510-d5b8461fb172\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ltjqr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ltjqr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:20Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-gtj96\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:55Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:55 crc kubenswrapper[5050]: I1123 14:42:55.634724 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-9q4mg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b75c970-bec5-4155-89c1-3fcba8733f70\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4f4f3e4f2e058d94d8ff4dc299e19a23318e04263a4324efa766daf5f7268f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lsct2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2cc3ecb845a59d933cd05a64b4c258b18d167098d933d7b0741d7dbb43bfb328\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lsct2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:19Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-9q4mg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:55Z is after 2025-08-24T17:21:41Z" Nov 23 
14:42:55 crc kubenswrapper[5050]: I1123 14:42:55.653128 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:55Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:55 crc kubenswrapper[5050]: I1123 14:42:55.665714 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:55 crc kubenswrapper[5050]: I1123 14:42:55.665796 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:55 crc kubenswrapper[5050]: I1123 14:42:55.665825 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:55 crc kubenswrapper[5050]: I1123 14:42:55.665864 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:55 crc kubenswrapper[5050]: I1123 14:42:55.665897 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:55Z","lastTransitionTime":"2025-11-23T14:42:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:55 crc kubenswrapper[5050]: I1123 14:42:55.679263 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ced068547f7e7c0807e996ab8500b30cda68a4c31363b415f66516b883221972\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:55Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:55 crc kubenswrapper[5050]: I1123 14:42:55.698707 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-7btqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"963992d0-f69e-4327-9789-4571451c1838\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c4b1570d42f2b86c042b9abcb595bf36b35789f24d12573f94503c7383849647\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8vphk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:05Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-7btqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:55Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:55 crc kubenswrapper[5050]: I1123 14:42:55.724403 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6573c043-542c-47ae-a2ba-f70b8baf60c2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d06dfc9e93891495a387aa8a8e24b4029173b25c953dff4d2e80dbb30f4b03e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd601391a5bb150716ee97b765dd442d4bf9929eb01dfd17fdce39f327b4698f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9de298aa47ed7038c4f4ed742abb7a13f112d7f417637eb11d31a1f1accf0b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfa445fb3677a09126d124eea1348466a338713c6728a7ef138d9ffa43bec9eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://af3da6e6807f5d42cb8607efbabab0c5d472ddb5c02dd1c33f9f0f3ea8eba9c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://34b2a56f4af5df28c458b7f7d4588f81564c734a117bbf4d08e1fccfa43fd387\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8563b3e8d8f18133f64756e8dfd9f30cdcdad52ed6e13c8edc39fbdee3f71e4a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8563b3e8d8f18133f64756e8dfd9f30cdcdad52ed6e13c8edc39fbdee3f71e4a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-23T14:42:29Z\\\",\\\"message\\\":\\\"tus/network-metrics-daemon-gtj96: failed to update pod openshift-multus/network-metrics-daemon-gtj96: Internal error occurred: failed calling webhook \\\\\\\"pod.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/pod?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:29Z is after 2025-08-24T17:21:41Z\\\\nI1123 14:42:29.428594 6728 ovnkube.go:599] Stopped ovnkube\\\\nI1123 14:42:29.428608 6728 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1123 14:42:29.428611 6728 obj_retry.go:303] Retry object setup: *v1.Pod openshift-network-diagnostics/network-check-source-55646444c4-trplf\\\\nI1123 14:42:29.428624 6728 obj_retry.go:365] Adding new object: *v1.Pod openshift-network-diagnostics/network-check-source-55646444c4-trplf\\\\nI1123 14:42:29.428635 6728 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1123 14:42:29.428578 6728 obj_retry.go:303] Retry object setup: *v1.Pod openshift-dns/node-resolver-7btqb\\\\nF1123 14:42:29.428723 6728 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:28Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-j8fzz_openshift-ovn-kubernetes(6573c043-542c-47ae-a2ba-f70b8baf60c2)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7009f69906eef1a76bebe08606ec9a9b7e61ce6217caaf0b784d0943db4bd51f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c68e73aa649285bedc9716826bdf5e41580614fba6de910e2dc1fa5bb524dcc\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6c68e73aa649285bedc9716826bdf5e41580614fba6de910e2dc1fa5bb524dcc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-j8fzz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:55Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:55 crc kubenswrapper[5050]: I1123 14:42:55.746034 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"28a3f438-bef5-4ff1-a080-80490963a204\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7eb678421bc625e01448e7eeb5914cc18880228c6d483129f5e0565dc2d00e49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://65320ae48f1453657b5ff41429b099e308d536f2b7e176941a1f258e77922eeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c
97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://60d5ec902c3bb132cd6e58cfbb8e7fdbaf26f34ca50654f9bd028dbb05d7aaec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ced1c27a73cf758143ad23a5852d2211c66925e2c429b91954344f6d78ad5ffd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ced1c27a73cf758143ad23a5852d2211c66925e2c429b91954344f6d78ad5ffd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:41:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:41:46Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:41:45Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:55Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:55 crc kubenswrapper[5050]: I1123 14:42:55.762519 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:55Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:55 crc kubenswrapper[5050]: I1123 14:42:55.767758 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:55 crc kubenswrapper[5050]: I1123 14:42:55.767793 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:55 crc kubenswrapper[5050]: I1123 14:42:55.767807 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:55 crc kubenswrapper[5050]: I1123 14:42:55.767826 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:55 crc kubenswrapper[5050]: I1123 14:42:55.767839 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:55Z","lastTransitionTime":"2025-11-23T14:42:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:55 crc kubenswrapper[5050]: I1123 14:42:55.779439 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-qvjn2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"abdac21e-18fc-460d-bd3b-73bed66b8ab9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:55Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:55Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d239bbc61595516c6e76763dd66ee795750adb765c56bb5d9c967c1e5897fcf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7d239bbc61595516c6e76763dd66ee795750adb765c56bb5d9c967c1e5897fcf\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-23T14:42:54Z\\\",\\\"message\\\":\\\"2025-11-23T14:42:08+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_b627f88d-4467-4bef-bc32-e51f4c0a4ffd\\\\n2025-11-23T14:42:08+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_b627f88d-4467-4bef-bc32-e51f4c0a4ffd to /host/opt/cni/bin/\\\\n2025-11-23T14:42:09Z [verbose] multus-daemon started\\\\n2025-11-23T14:42:09Z [verbose] Readiness Indicator file check\\\\n2025-11-23T14:42:54Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bggpq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-qvjn2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:55Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:55 crc kubenswrapper[5050]: I1123 14:42:55.795783 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7cjg9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7f65245a-e7ea-4b29-9534-5dbe5a7ee271\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf73369e7515e9d61bf8fc0b0bd940f15e49a8e84b3e181dad104bd56c2d9b2d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-586ds\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:08Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7cjg9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:55Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:55 crc kubenswrapper[5050]: I1123 14:42:55.821042 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-km979" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1d7b7de2-b361-44b9-ba10-5d4f889abc9b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a024193010036bae37aa1c744cd832dced7ca4893acc76efe5c2b9db65834a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://32b4eece06b72ca080adc2e1fab7f1db357c926b47433f2d374afaa45dad6b38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://32b4eece06b72ca080adc2e1fab7f1db357c926b47433f2d374afaa45dad6b38\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86867dd85be2aec28e680e55a6fb723964f26082eff07f656c00bd79cd2d3cd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://86867dd85be2aec28e680e55a6fb723964f26082eff07f656c00bd79cd2d3cd2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c72b02501d9b33ae056e91b10f13e50327984b1cabcd69b5066780aa4fde2682\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c72b02501d9b33ae056e91b10f13e50327984b1cabcd69b5066780aa4fde2682\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba558b2174a35efd061ce97a84b1b785916ea34d481d58cf4582a5bf29d6b24a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba558b2174a35efd061ce97a84b1b785916ea34d481d58cf4582a5bf29d6b24a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9cd2247c7d101274e0f7a09b531266720489fb8ed77f89e7ccd1ee74882508d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c9cd2247c7d101274e0f7a09b531266720489fb8ed77f89e7ccd1ee74882508d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0873c53f2aa7003b92b7460595f79f52b70796f9e38c133da799b5c49785d857\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0873c53f2aa7003b92b7460595f79f52b70796f9e38c133da799b5c49785d857\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-km979\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:55Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:55 crc kubenswrapper[5050]: I1123 14:42:55.841659 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8861091e-df98-4ea6-a853-e76e7dec48c5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://374a569595342d2e7684dc020e82bd80b35965aff751215c6c926005ea040798\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://63b17cb181aa6b22312393ce35127c8e06ae80cd1feaaa001d7cb34cdc111456\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3e232fb0da11b5caaec81c7b6ae7185d121508d99fce67c77e1d5d52289dad8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7b8be59b2e91f123c3d88b93237121402f84e9f54aeeaa0fae57baaa6238140\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:41:45Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:55Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:55 crc kubenswrapper[5050]: I1123 14:42:55.860940 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4c9762bc-6dbe-4d7f-a93a-591033e8c3b0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0e3f802f93600b6a56df2a1709f042c01893f2d619ae5d173a014975b1d33d8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3e2428cf4682b20bc4bfea576e90dcfd5de93218b167a7d27e9c6af5496a7fd8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318
bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e2428cf4682b20bc4bfea576e90dcfd5de93218b167a7d27e9c6af5496a7fd8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:41:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:41:45Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:55Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:55 crc kubenswrapper[5050]: I1123 14:42:55.871247 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:55 crc kubenswrapper[5050]: I1123 14:42:55.871308 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:55 crc kubenswrapper[5050]: I1123 14:42:55.871323 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:55 crc kubenswrapper[5050]: I1123 14:42:55.871349 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:55 crc kubenswrapper[5050]: I1123 14:42:55.871369 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:55Z","lastTransitionTime":"2025-11-23T14:42:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:55 crc kubenswrapper[5050]: I1123 14:42:55.884809 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://719b29f3f0e7ff3aa3d105c9f4d9fb8d180d13494fcef94b204dece23c0f763d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:55Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:55 crc kubenswrapper[5050]: I1123 14:42:55.900732 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac91134c1368426cb845f20e6c777b0b613d93b1761d097051a7e63dae4203a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ef730e87b3ef157558a92f93526e4dec23e71b48f5cb08a30242f143182a5d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:55Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:55 crc kubenswrapper[5050]: I1123 14:42:55.914259 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1d998909-9470-47ef-87e8-b34f0473682f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5bbc40aad4e1fff72cfeaf901bbe2edde01b909690cec06e0828aee8a4c93e8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jx4bd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b97978773311507f492064330c0ebbaf7862255a02347599c52ce6bbd13d8922\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jx4bd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-hlrlq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:55Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:55 crc kubenswrapper[5050]: I1123 14:42:55.973903 5050 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:55 crc kubenswrapper[5050]: I1123 14:42:55.973942 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:55 crc kubenswrapper[5050]: I1123 14:42:55.973960 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:55 crc kubenswrapper[5050]: I1123 14:42:55.973985 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:55 crc kubenswrapper[5050]: I1123 14:42:55.974005 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:55Z","lastTransitionTime":"2025-11-23T14:42:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:56 crc kubenswrapper[5050]: I1123 14:42:56.047976 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-qvjn2_abdac21e-18fc-460d-bd3b-73bed66b8ab9/kube-multus/0.log" Nov 23 14:42:56 crc kubenswrapper[5050]: I1123 14:42:56.048075 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-qvjn2" event={"ID":"abdac21e-18fc-460d-bd3b-73bed66b8ab9","Type":"ContainerStarted","Data":"db0c98dfdc0723630b1490357e96c25cfe466caf464ee1883509dc0bd2521337"} Nov 23 14:42:56 crc kubenswrapper[5050]: I1123 14:42:56.065708 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"28a3f438-bef5-4ff1-a080-80490963a204\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7eb678421bc625e01448e7eeb5914cc18880228c6d483129f5e0565dc2d00e49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://65320ae48f1453657b5ff41429b099e308d536f2b7e176941a1f258e77922eeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://60d5ec902c3bb132cd6e58cfbb8e7fdbaf26f34ca50654f9bd028dbb05d7aaec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ced1c27a73cf758143ad23a5852d2211c66925e2c429b91954344f6d78ad5ffd\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ced1c27a73cf758143ad23a5852d2211c66925e2c429b91954344f6d78ad5ffd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:41:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:41:46Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:41:45Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:56Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:56 crc kubenswrapper[5050]: I1123 14:42:56.076829 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:56 crc kubenswrapper[5050]: I1123 14:42:56.076948 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:56 crc kubenswrapper[5050]: I1123 14:42:56.076972 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:56 crc kubenswrapper[5050]: I1123 14:42:56.077005 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:56 crc kubenswrapper[5050]: I1123 14:42:56.077029 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:56Z","lastTransitionTime":"2025-11-23T14:42:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:56 crc kubenswrapper[5050]: I1123 14:42:56.080543 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:56Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:56 crc kubenswrapper[5050]: I1123 14:42:56.096694 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-qvjn2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"abdac21e-18fc-460d-bd3b-73bed66b8ab9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://db0c98dfdc0723630b1490357e96c25cfe466caf464ee1883509dc0bd2521337\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7d239bbc61595516c6e76763dd66ee795750adb765c56bb5d9c967c1e5897fcf\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-23T14:42:54Z\\\",\\\"message\\\":\\\"2025-11-23T14:42:08+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_b627f88d-4467-4bef-bc32-e51f4c0a4ffd\\\\n2025-11-23T14:42:08+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_b627f88d-4467-4bef-bc32-e51f4c0a4ffd to /host/opt/cni/bin/\\\\n2025-11-23T14:42:09Z [verbose] multus-daemon started\\\\n2025-11-23T14:42:09Z [verbose] Readiness Indicator file check\\\\n2025-11-23T14:42:54Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bggpq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-qvjn2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:56Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:56 crc kubenswrapper[5050]: I1123 14:42:56.110819 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7cjg9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7f65245a-e7ea-4b29-9534-5dbe5a7ee271\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf73369e7515e9d61bf8fc0b0bd940f15e49a8e84b3e181dad104bd56c2d9b2d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-586ds\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:08Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7cjg9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:56Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:56 crc kubenswrapper[5050]: I1123 14:42:56.128343 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8861091e-df98-4ea6-a853-e76e7dec48c5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://374a569595342d2e7684dc020e82bd80b35965aff751215c6c926005ea040798\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://63b17cb181aa6b22312393ce35127c8e06ae80cd1feaaa001d7cb34cdc111456\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3e232fb0da11b5caaec81c7b6ae7185d121508d99fce67c77e1d5d52289dad8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7b8be59b2e91f123c3d88b93237121402f84e9f54aeeaa0fae57baaa6238140\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:41:45Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:56Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:56 crc kubenswrapper[5050]: I1123 14:42:56.142706 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4c9762bc-6dbe-4d7f-a93a-591033e8c3b0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0e3f802f93600b6a56df2a1709f042c01893f2d619ae5d173a014975b1d33d8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3e2428cf4682b20bc4bfea576e90dcfd5de93218b167a7d27e9c6af5496a7fd8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318
bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e2428cf4682b20bc4bfea576e90dcfd5de93218b167a7d27e9c6af5496a7fd8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:41:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:41:45Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:56Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:56 crc kubenswrapper[5050]: I1123 14:42:56.160310 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://719b29f3f0e7ff3aa3d105c9f4d9fb8d180d13494fcef94b204dece23c0f763d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:56Z is after 
2025-08-24T17:21:41Z" Nov 23 14:42:56 crc kubenswrapper[5050]: I1123 14:42:56.177909 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac91134c1368426cb845f20e6c777b0b613d93b1761d097051a7e63dae4203a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ef730e87b3ef157558a92f93526e4dec23e71b48f5cb08a30242f143182a5d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:56Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:56 crc kubenswrapper[5050]: I1123 14:42:56.180035 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:56 crc kubenswrapper[5050]: I1123 14:42:56.180085 
5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:56 crc kubenswrapper[5050]: I1123 14:42:56.180099 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:56 crc kubenswrapper[5050]: I1123 14:42:56.180121 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:56 crc kubenswrapper[5050]: I1123 14:42:56.180136 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:56Z","lastTransitionTime":"2025-11-23T14:42:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:56 crc kubenswrapper[5050]: I1123 14:42:56.193492 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1d998909-9470-47ef-87e8-b34f0473682f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5bbc40aad4e1fff72cfeaf901bbe2edde01b909690cec06e0828aee8a4c93e8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jx4bd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b97978773311507f492064330c0ebbaf7862255a02347599c52ce6bbd13d8922\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\
\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jx4bd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-hlrlq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:56Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:56 crc kubenswrapper[5050]: I1123 14:42:56.208544 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-km979" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1d7b7de2-b361-44b9-ba10-5d4f889abc9b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a024193010036bae37aa1c744cd832dced7ca4893acc76efe5c2b9db65834a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://32b4eece06b72ca080adc2e1fab7f1db357c926b47433f2d374afaa45dad6b38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db77
08c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://32b4eece06b72ca080adc2e1fab7f1db357c926b47433f2d374afaa45dad6b38\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86867dd85be2aec28e680e55a6fb723964f26082eff07f656c00bd79cd2d3cd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://86867dd85be2aec28e680e55a6fb723964f26082eff07f656c00bd79cd2d3cd2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c72b02501d9b33ae056e91b10f13e50327984b1cabcd69b5066780aa4fde2682\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c72b02501d9b33ae056e91b10f13e50327984b1cabcd69b5066780aa4fde2682\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/ser
viceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba558b2174a35efd061ce97a84b1b785916ea34d481d58cf4582a5bf29d6b24a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba558b2174a35efd061ce97a84b1b785916ea34d481d58cf4582a5bf29d6b24a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9cd2247c7d101274e0f7a09b531266720489fb8ed77f89e7ccd1ee74882508d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c9cd2247c7d101274e0f7a09b531266720489fb8ed77f89e7ccd1ee74882508d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0873c53f2aa7003b92b7460595f79f52b70796f9e38c133da799b5c49785d857\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0873c53f2aa7003b92b7460595f79f52b70796f9e38c133da799b5c49785d857\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountP
ath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-km979\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:56Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:56 crc kubenswrapper[5050]: I1123 14:42:56.222054 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0a2166ba-b7f4-4eb1-95bc-4f44d90e72ea\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://358800bef7b7e16e58003aa5e2411212334a5b1a33c80d223eb7d40ad13867f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://456093b33927a232192a5fa7ee1a4228e9f37da58e5e09cb33018ae6f5b14b2c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc
/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ccfc14c36ecee0d8502415582be6053b096ea77504bf5de67f77bf6f74c0936b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0c0214fb48430e8263a76a8412ebe7aaeaa85c1b476c22f8bbe9a5279f35aa5d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://10cf72fab955e4ec81917127043d9e4a2f9d427a03edc22120ab2ae711fdc5a4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"ed_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1763908925\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1763908925\\\\\\\\\\\\\\\" (2025-11-23 13:42:05 +0000 UTC to 2026-11-23 13:42:05 +0000 UTC (now=2025-11-23 14:42:05.669138051 +0000 UTC))\\\\\\\"\\\\nI1123 14:42:05.669209 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1123 14:42:05.669265 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1123 14:42:05.668647 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1123 14:42:05.669361 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1123 14:42:05.669413 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1123 14:42:05.668681 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3465722692/tls.crt::/tmp/serving-cert-3465722692/tls.key\\\\\\\"\\\\nI1123 14:42:05.669629 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1123 14:42:05.669660 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1123 14:42:05.668685 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1123 14:42:05.669884 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1123 14:42:05.668719 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" 
name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1123 14:42:05.669899 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1123 14:42:05.668776 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-23T14:41:59Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://beaf638703b4b64fabd10eb14bad058dc0dae74261a462e7f2fddd8aa3811cf7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a63464a6e2f79d8e5a7e0eed9f72b684e0071cad60d3a801754762be4a2735c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a63464a6e2f79d8e5a7e0eed9f72b684e0071cad60d3a801754762be4a2735c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:41:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:41:45Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:56Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:56 crc kubenswrapper[5050]: I1123 14:42:56.239646 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:56Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:56 crc kubenswrapper[5050]: I1123 14:42:56.253178 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-gtj96" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc69bd19-1f49-486e-a510-d5b8461fb172\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ltjqr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ltjqr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:20Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-gtj96\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:56Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:56 crc kubenswrapper[5050]: I1123 14:42:56.275372 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:56Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:56 crc kubenswrapper[5050]: I1123 14:42:56.284697 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:56 crc kubenswrapper[5050]: I1123 14:42:56.284753 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:56 crc kubenswrapper[5050]: I1123 14:42:56.284771 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:56 crc kubenswrapper[5050]: I1123 14:42:56.284799 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:56 crc kubenswrapper[5050]: I1123 14:42:56.284818 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:56Z","lastTransitionTime":"2025-11-23T14:42:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:56 crc kubenswrapper[5050]: I1123 14:42:56.291592 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ced068547f7e7c0807e996ab8500b30cda68a4c31363b415f66516b883221972\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:56Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:56 crc kubenswrapper[5050]: I1123 14:42:56.306176 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-7btqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"963992d0-f69e-4327-9789-4571451c1838\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c4b1570d42f2b86c042b9abcb595bf36b35789f24d12573f94503c7383849647\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8vphk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:05Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-7btqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:56Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:56 crc kubenswrapper[5050]: I1123 14:42:56.327432 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6573c043-542c-47ae-a2ba-f70b8baf60c2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d06dfc9e93891495a387aa8a8e24b4029173b25c953dff4d2e80dbb30f4b03e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd601391a5bb150716ee97b765dd442d4bf9929eb01dfd17fdce39f327b4698f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9de298aa47ed7038c4f4ed742abb7a13f112d7f417637eb11d31a1f1accf0b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfa445fb3677a09126d124eea1348466a338713c6728a7ef138d9ffa43bec9eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://af3da6e6807f5d42cb8607efbabab0c5d472ddb5c02dd1c33f9f0f3ea8eba9c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://34b2a56f4af5df28c458b7f7d4588f81564c734a117bbf4d08e1fccfa43fd387\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8563b3e8d8f18133f64756e8dfd9f30cdcdad52ed6e13c8edc39fbdee3f71e4a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8563b3e8d8f18133f64756e8dfd9f30cdcdad52ed6e13c8edc39fbdee3f71e4a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-23T14:42:29Z\\\",\\\"message\\\":\\\"tus/network-metrics-daemon-gtj96: failed to update pod openshift-multus/network-metrics-daemon-gtj96: Internal error occurred: failed calling webhook \\\\\\\"pod.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/pod?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:29Z is after 2025-08-24T17:21:41Z\\\\nI1123 14:42:29.428594 6728 ovnkube.go:599] Stopped ovnkube\\\\nI1123 14:42:29.428608 6728 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1123 14:42:29.428611 6728 obj_retry.go:303] Retry object setup: *v1.Pod openshift-network-diagnostics/network-check-source-55646444c4-trplf\\\\nI1123 14:42:29.428624 6728 obj_retry.go:365] Adding new object: *v1.Pod openshift-network-diagnostics/network-check-source-55646444c4-trplf\\\\nI1123 14:42:29.428635 6728 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1123 14:42:29.428578 6728 obj_retry.go:303] Retry object setup: *v1.Pod openshift-dns/node-resolver-7btqb\\\\nF1123 14:42:29.428723 6728 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:28Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-j8fzz_openshift-ovn-kubernetes(6573c043-542c-47ae-a2ba-f70b8baf60c2)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7009f69906eef1a76bebe08606ec9a9b7e61ce6217caaf0b784d0943db4bd51f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c68e73aa649285bedc9716826bdf5e41580614fba6de910e2dc1fa5bb524dcc\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6c68e73aa649285bedc9716826bdf5e41580614fba6de910e2dc1fa5bb524dcc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-j8fzz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:56Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:56 crc kubenswrapper[5050]: I1123 14:42:56.343638 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-9q4mg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b75c970-bec5-4155-89c1-3fcba8733f70\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4f4f3e4f2e058d94d8ff4dc299e19a23318e04263a4324efa766daf5f7268f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lsct2
\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2cc3ecb845a59d933cd05a64b4c258b18d167098d933d7b0741d7dbb43bfb328\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lsct2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:19Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-9q4mg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:56Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:56 crc kubenswrapper[5050]: I1123 14:42:56.388809 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:56 crc kubenswrapper[5050]: I1123 14:42:56.388860 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:56 crc kubenswrapper[5050]: I1123 14:42:56.388872 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:56 crc kubenswrapper[5050]: I1123 14:42:56.388891 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:56 crc kubenswrapper[5050]: I1123 14:42:56.388902 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:56Z","lastTransitionTime":"2025-11-23T14:42:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
[... identical node-status group at 14:42:56.492 elided: four "Recording event message for node" events (NodeHasSufficientMemory, NodeHasNoDiskPressure, NodeHasSufficientPID, NodeNotReady) plus the same KubeletNotReady "Node became not ready" condition; kubelet repeated this group roughly every 100 ms while the node stayed NotReady, and the duplicate groups below are elided with the interleaved unique entries kept ...]
Nov 23 14:42:56 crc kubenswrapper[5050]: I1123 14:42:56.547752 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 23 14:42:56 crc kubenswrapper[5050]: I1123 14:42:56.547900 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gtj96"
Nov 23 14:42:56 crc kubenswrapper[5050]: E1123 14:42:56.547941 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 23 14:42:56 crc kubenswrapper[5050]: E1123 14:42:56.548105 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gtj96" podUID="cc69bd19-1f49-486e-a510-d5b8461fb172"
Nov 23 14:42:56 crc kubenswrapper[5050]: I1123 14:42:56.548293 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 23 14:42:56 crc kubenswrapper[5050]: E1123 14:42:56.548378 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
[... 10 identical node-status groups (14:42:56.596 to 14:42:57.531) elided ...]
Nov 23 14:42:57 crc kubenswrapper[5050]: I1123 14:42:57.547774 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 23 14:42:57 crc kubenswrapper[5050]: E1123 14:42:57.547926 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
[... 9 identical node-status groups (14:42:57.635 to 14:42:58.467) elided ...]
Nov 23 14:42:58 crc kubenswrapper[5050]: I1123 14:42:58.548237 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gtj96"
Nov 23 14:42:58 crc kubenswrapper[5050]: I1123 14:42:58.548275 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 23 14:42:58 crc kubenswrapper[5050]: I1123 14:42:58.548251 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 23 14:42:58 crc kubenswrapper[5050]: E1123 14:42:58.548402 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gtj96" podUID="cc69bd19-1f49-486e-a510-d5b8461fb172"
Nov 23 14:42:58 crc kubenswrapper[5050]: E1123 14:42:58.548529 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 23 14:42:58 crc kubenswrapper[5050]: E1123 14:42:58.548756 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 23 14:42:58 crc kubenswrapper[5050]: I1123 14:42:58.549672 5050 scope.go:117] "RemoveContainer" containerID="8563b3e8d8f18133f64756e8dfd9f30cdcdad52ed6e13c8edc39fbdee3f71e4a"
[... 4 identical node-status groups (14:42:58.569 to 14:42:58.878) elided ...]
Nov 23 14:42:58 crc kubenswrapper[5050]: I1123 14:42:58.981982 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:58 crc kubenswrapper[5050]: I1123 14:42:58.982038 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:58 crc kubenswrapper[5050]: I1123 14:42:58.982049 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:58 crc kubenswrapper[5050]: I1123 14:42:58.982071 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:58 crc kubenswrapper[5050]: I1123 14:42:58.982083 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:58Z","lastTransitionTime":"2025-11-23T14:42:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:59 crc kubenswrapper[5050]: I1123 14:42:59.062885 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-j8fzz_6573c043-542c-47ae-a2ba-f70b8baf60c2/ovnkube-controller/2.log" Nov 23 14:42:59 crc kubenswrapper[5050]: I1123 14:42:59.066020 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" event={"ID":"6573c043-542c-47ae-a2ba-f70b8baf60c2","Type":"ContainerStarted","Data":"82a0b0931dae074fdea5a30d5f082d4040553acde383b476d50f275a40eaec82"} Nov 23 14:42:59 crc kubenswrapper[5050]: I1123 14:42:59.066524 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" Nov 23 14:42:59 crc kubenswrapper[5050]: I1123 14:42:59.081572 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0a2166ba-b7f4-4eb1-95bc-4f44d90e72ea\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://358800bef7b7e16e58003aa5e2411212334a5b1a33c80d223eb7d40ad13867f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://456093b33927a232192a5fa7ee1a4228e9f37da58e5e09cb33018ae6f5b14b2c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ccfc14c36
ecee0d8502415582be6053b096ea77504bf5de67f77bf6f74c0936b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0c0214fb48430e8263a76a8412ebe7aaeaa85c1b476c22f8bbe9a5279f35aa5d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://10cf72fab955e4ec81917127043d9e4a2f9d427a03edc22120ab2ae711fdc5a4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"ed_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1763908925\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1763908925\\\\\\\\\\\\\\\" (2025-11-23 13:42:05 +0000 UTC to 2026-11-23 13:42:05 +0000 UTC (now=2025-11-23 14:42:05.669138051 +0000 UTC))\\\\\\\"\\\\nI1123 14:42:05.669209 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1123 14:42:05.669265 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1123 14:42:05.668647 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1123 14:42:05.669361 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1123 14:42:05.669413 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1123 14:42:05.668681 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3465722692/tls.crt::/tmp/serving-cert-3465722692/tls.key\\\\\\\"\\\\nI1123 14:42:05.669629 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1123 14:42:05.669660 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1123 14:42:05.668685 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1123 14:42:05.669884 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1123 14:42:05.668719 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1123 14:42:05.669899 1 shared_informer.go:313] Waiting for caches 
to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1123 14:42:05.668776 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-23T14:41:59Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://beaf638703b4b64fabd10eb14bad058dc0dae74261a462e7f2fddd8aa3811cf7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a63464a6e2f79d8e5a7e0eed9f72b684e0071cad60d3a801754762be4a2735c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a63464a6e2f79d8e5a7e0eed9f72b684e0071cad60d3a801754762be4a2735c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:41:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:41:45Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:59Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:59 crc kubenswrapper[5050]: I1123 14:42:59.084362 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:59 crc kubenswrapper[5050]: I1123 14:42:59.084412 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:59 crc kubenswrapper[5050]: I1123 14:42:59.084426 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:59 crc kubenswrapper[5050]: I1123 14:42:59.084461 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 
23 14:42:59 crc kubenswrapper[5050]: I1123 14:42:59.084477 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:59Z","lastTransitionTime":"2025-11-23T14:42:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:59 crc kubenswrapper[5050]: I1123 14:42:59.098483 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:59Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:59 crc kubenswrapper[5050]: I1123 14:42:59.112828 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-gtj96" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc69bd19-1f49-486e-a510-d5b8461fb172\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ltjqr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ltjqr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:20Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-gtj96\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:59Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:59 crc kubenswrapper[5050]: I1123 14:42:59.127274 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:59Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:59 crc kubenswrapper[5050]: I1123 14:42:59.142144 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ced068547f7e7c0807e996ab8500b30cda68a4c31363b415f66516b883221972\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:59Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:59 crc kubenswrapper[5050]: I1123 14:42:59.153675 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-7btqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"963992d0-f69e-4327-9789-4571451c1838\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c4b1570d42f2b86c042b9abcb595bf36b35789f24d12573f94503c7383849647\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8vphk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:05Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-7btqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:59Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:59 crc kubenswrapper[5050]: I1123 14:42:59.180667 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6573c043-542c-47ae-a2ba-f70b8baf60c2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d06dfc9e93891495a387aa8a8e24b4029173b25c953dff4d2e80dbb30f4b03e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd601391a5bb150716ee97b765dd442d4bf9929eb01dfd17fdce39f327b4698f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9de298aa47ed7038c4f4ed742abb7a13f112d7f417637eb11d31a1f1accf0b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfa445fb3677a09126d124eea1348466a338713c6728a7ef138d9ffa43bec9eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://af3da6e6807f5d42cb8607efbabab0c5d472ddb5c02dd1c33f9f0f3ea8eba9c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://34b2a56f4af5df28c458b7f7d4588f81564c734a117bbf4d08e1fccfa43fd387\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://82a0b0931dae074fdea5a30d5f082d4040553acde383b476d50f275a40eaec82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8563b3e8d8f18133f64756e8dfd9f30cdcdad52ed6e13c8edc39fbdee3f71e4a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-23T14:42:29Z\\\",\\\"message\\\":\\\"tus/network-metrics-daemon-gtj96: failed to update pod openshift-multus/network-metrics-daemon-gtj96: Internal error occurred: failed calling webhook \\\\\\\"pod.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/pod?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:29Z is after 2025-08-24T17:21:41Z\\\\nI1123 14:42:29.428594 6728 ovnkube.go:599] Stopped ovnkube\\\\nI1123 14:42:29.428608 6728 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1123 14:42:29.428611 6728 obj_retry.go:303] Retry object setup: *v1.Pod openshift-network-diagnostics/network-check-source-55646444c4-trplf\\\\nI1123 14:42:29.428624 6728 obj_retry.go:365] Adding new object: *v1.Pod openshift-network-diagnostics/network-check-source-55646444c4-trplf\\\\nI1123 14:42:29.428635 6728 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1123 14:42:29.428578 6728 obj_retry.go:303] Retry object setup: *v1.Pod openshift-dns/node-resolver-7btqb\\\\nF1123 14:42:29.428723 6728 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler 
{0x1\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:28Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7009f69906eef1a76bebe08606ec9a9b7e61ce6217caaf0b784d0943db4bd51f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"c
ontainerID\\\":\\\"cri-o://6c68e73aa649285bedc9716826bdf5e41580614fba6de910e2dc1fa5bb524dcc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6c68e73aa649285bedc9716826bdf5e41580614fba6de910e2dc1fa5bb524dcc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-j8fzz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:59Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:59 crc kubenswrapper[5050]: I1123 14:42:59.188986 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:59 crc kubenswrapper[5050]: I1123 14:42:59.189042 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:59 crc kubenswrapper[5050]: I1123 14:42:59.189054 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:59 crc kubenswrapper[5050]: I1123 14:42:59.189074 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:59 crc kubenswrapper[5050]: I1123 14:42:59.189087 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:59Z","lastTransitionTime":"2025-11-23T14:42:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:59 crc kubenswrapper[5050]: I1123 14:42:59.194893 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-9q4mg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b75c970-bec5-4155-89c1-3fcba8733f70\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4f4f3e4f2e058d94d8ff4dc299e19a23318e04263a4324efa766daf5f7268f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lsct2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2cc3ecb845a59d933cd05a64b4c258b18d167098d933d7b0741d7dbb43bfb328\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lsct2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:19Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-9q4mg\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:59Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:59 crc kubenswrapper[5050]: I1123 14:42:59.222337 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"28a3f438-bef5-4ff1-a080-80490963a204\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7eb678421bc625e01448e7eeb5914cc18880228c6d483129f5e0565dc2d00e49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://65320ae48f1453657b5ff41429b099e308d536f2b7e176941a1f258e77922eeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://60d5ec902c3bb132cd6e58cfbb8e7fdbaf26f34ca50654f9bd028dbb05d7aaec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\
":\\\"2025-11-23T14:41:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ced1c27a73cf758143ad23a5852d2211c66925e2c429b91954344f6d78ad5ffd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ced1c27a73cf758143ad23a5852d2211c66925e2c429b91954344f6d78ad5ffd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:41:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:41:46Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:41:45Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:59Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:59 crc kubenswrapper[5050]: I1123 14:42:59.249347 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:59Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:59 crc kubenswrapper[5050]: I1123 14:42:59.285868 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-qvjn2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"abdac21e-18fc-460d-bd3b-73bed66b8ab9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://db0c98dfdc0723630b1490357e96c25cfe466caf464ee1883509dc0bd2521337\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7d239bbc61595516c6e76763dd66ee795750adb765c56bb5d9c967c1e5897fcf\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-23T14:42:54Z\\\",\\\"message\\\":\\\"2025-11-23T14:42:08+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_b627f88d-4467-4bef-bc32-e51f4c0a4ffd\\\\n2025-11-23T14:42:08+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_b627f88d-4467-4bef-bc32-e51f4c0a4ffd to /host/opt/cni/bin/\\\\n2025-11-23T14:42:09Z [verbose] multus-daemon started\\\\n2025-11-23T14:42:09Z [verbose] Readiness Indicator file check\\\\n2025-11-23T14:42:54Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bggpq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-qvjn2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:59Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:59 crc kubenswrapper[5050]: I1123 14:42:59.291806 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:59 crc kubenswrapper[5050]: I1123 14:42:59.291869 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:59 crc kubenswrapper[5050]: I1123 14:42:59.291883 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:59 crc kubenswrapper[5050]: I1123 14:42:59.291904 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:59 crc kubenswrapper[5050]: I1123 14:42:59.291918 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:59Z","lastTransitionTime":"2025-11-23T14:42:59Z","reason":"KubeletNotReady","message":"container 
runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:59 crc kubenswrapper[5050]: I1123 14:42:59.307519 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7cjg9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7f65245a-e7ea-4b29-9534-5dbe5a7ee271\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf73369e7515e9d61bf8fc0b0bd940f15e49a8e84b3e181dad104bd56c2d9b2d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-586ds\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:08Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7cjg9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:59Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:59 crc kubenswrapper[5050]: I1123 14:42:59.324807 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8861091e-df98-4ea6-a853-e76e7dec48c5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://374a569595342d2e7684dc020e82bd80b35965aff751215c6c926005ea040798\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://63b17cb181aa6b22312393ce35127c8e06ae80cd1feaaa001d7cb34cdc111456\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3e232fb0da11b5caaec81c7b6ae7185d121508d99fce67c77e1d5d52289dad8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7b8be59b2e91f123c3d88b93237121402f84e9f54aeeaa0fae57baaa6238140\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:41:45Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:59Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:59 crc kubenswrapper[5050]: I1123 14:42:59.341780 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4c9762bc-6dbe-4d7f-a93a-591033e8c3b0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0e3f802f93600b6a56df2a1709f042c01893f2d619ae5d173a014975b1d33d8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3e2428cf4682b20bc4bfea576e90dcfd5de93218b167a7d27e9c6af5496a7fd8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318
bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e2428cf4682b20bc4bfea576e90dcfd5de93218b167a7d27e9c6af5496a7fd8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:41:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:41:45Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:59Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:59 crc kubenswrapper[5050]: I1123 14:42:59.358987 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://719b29f3f0e7ff3aa3d105c9f4d9fb8d180d13494fcef94b204dece23c0f763d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:59Z is after 
2025-08-24T17:21:41Z" Nov 23 14:42:59 crc kubenswrapper[5050]: I1123 14:42:59.372897 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac91134c1368426cb845f20e6c777b0b613d93b1761d097051a7e63dae4203a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ef730e87b3ef157558a92f93526e4dec23e71b48f5cb08a30242f143182a5d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:59Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:59 crc kubenswrapper[5050]: I1123 14:42:59.388593 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1d998909-9470-47ef-87e8-b34f0473682f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5bbc40aad4e1fff72cfeaf901bbe2edde01b909690cec06e0828aee8a4c93e8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jx4bd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b97978773311507f492064330c0ebbaf7862255a02347599c52ce6bbd13d8922\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jx4bd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-hlrlq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:59Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:59 crc kubenswrapper[5050]: I1123 14:42:59.394142 5050 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:59 crc kubenswrapper[5050]: I1123 14:42:59.394218 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:59 crc kubenswrapper[5050]: I1123 14:42:59.394232 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:59 crc kubenswrapper[5050]: I1123 14:42:59.394255 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:59 crc kubenswrapper[5050]: I1123 14:42:59.394271 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:59Z","lastTransitionTime":"2025-11-23T14:42:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:59 crc kubenswrapper[5050]: I1123 14:42:59.406984 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-km979" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1d7b7de2-b361-44b9-ba10-5d4f889abc9b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a024193010036bae37aa1c744cd832dced7ca4893acc76efe5c2b9db65834a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://32b4eece06b72ca080adc2e1fab7f1db357c926b47433f2d374afaa45dad6b38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2
c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://32b4eece06b72ca080adc2e1fab7f1db357c926b47433f2d374afaa45dad6b38\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86867dd85be2aec28e680e55a6fb723964f26082eff07f656c00bd79cd2d3cd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://86867dd85be2aec28e680e55a6fb723964f26082eff07f656c00bd79cd2d3cd2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c72b02501d9b33ae056e91b10f13e50327984b1cabcd69b5066780aa4fde2682\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c72b02501d9b33ae056e91b10f13e50327984b1cabcd69b5066780aa4fde2682\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/
secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba558b2174a35efd061ce97a84b1b785916ea34d481d58cf4582a5bf29d6b24a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba558b2174a35efd061ce97a84b1b785916ea34d481d58cf4582a5bf29d6b24a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9cd2247c7d101274e0f7a09b531266720489fb8ed77f89e7ccd1ee74882508d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c9cd2247c7d101274e0f7a09b531266720489fb8ed77f89e7ccd1ee74882508d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0873c53f2aa7003b92b7460595f79f52b70796f9e38c133da799b5c49785d857\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0873c53f2aa7003b92b7460595f79f52b70796f9e38c133da799b5c49785d857\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:13Z\\\"}},\\\"volu
meMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-km979\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:59Z is after 2025-08-24T17:21:41Z" Nov 23 14:42:59 crc kubenswrapper[5050]: I1123 14:42:59.496985 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:59 crc kubenswrapper[5050]: I1123 14:42:59.497036 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:59 crc kubenswrapper[5050]: I1123 14:42:59.497049 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:59 crc kubenswrapper[5050]: I1123 14:42:59.497069 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:59 crc kubenswrapper[5050]: I1123 14:42:59.497080 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:59Z","lastTransitionTime":"2025-11-23T14:42:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:59 crc kubenswrapper[5050]: I1123 14:42:59.548269 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 23 14:42:59 crc kubenswrapper[5050]: E1123 14:42:59.548563 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 23 14:42:59 crc kubenswrapper[5050]: I1123 14:42:59.600048 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:59 crc kubenswrapper[5050]: I1123 14:42:59.600112 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:59 crc kubenswrapper[5050]: I1123 14:42:59.600130 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:59 crc kubenswrapper[5050]: I1123 14:42:59.600159 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:59 crc kubenswrapper[5050]: I1123 14:42:59.600183 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:59Z","lastTransitionTime":"2025-11-23T14:42:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:59 crc kubenswrapper[5050]: I1123 14:42:59.703919 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:59 crc kubenswrapper[5050]: I1123 14:42:59.704013 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:59 crc kubenswrapper[5050]: I1123 14:42:59.704034 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:59 crc kubenswrapper[5050]: I1123 14:42:59.704064 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:59 crc kubenswrapper[5050]: I1123 14:42:59.704085 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:59Z","lastTransitionTime":"2025-11-23T14:42:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:42:59 crc kubenswrapper[5050]: I1123 14:42:59.807782 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:59 crc kubenswrapper[5050]: I1123 14:42:59.807857 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:59 crc kubenswrapper[5050]: I1123 14:42:59.807875 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:59 crc kubenswrapper[5050]: I1123 14:42:59.807902 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:59 crc kubenswrapper[5050]: I1123 14:42:59.807921 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:59Z","lastTransitionTime":"2025-11-23T14:42:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:42:59 crc kubenswrapper[5050]: I1123 14:42:59.912803 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:42:59 crc kubenswrapper[5050]: I1123 14:42:59.912908 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:42:59 crc kubenswrapper[5050]: I1123 14:42:59.912934 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:42:59 crc kubenswrapper[5050]: I1123 14:42:59.912982 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:42:59 crc kubenswrapper[5050]: I1123 14:42:59.913008 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:42:59Z","lastTransitionTime":"2025-11-23T14:42:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:43:00 crc kubenswrapper[5050]: I1123 14:43:00.016530 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:43:00 crc kubenswrapper[5050]: I1123 14:43:00.016594 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:43:00 crc kubenswrapper[5050]: I1123 14:43:00.016614 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:43:00 crc kubenswrapper[5050]: I1123 14:43:00.016639 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:43:00 crc kubenswrapper[5050]: I1123 14:43:00.016661 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:00Z","lastTransitionTime":"2025-11-23T14:43:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:43:00 crc kubenswrapper[5050]: I1123 14:43:00.072890 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-j8fzz_6573c043-542c-47ae-a2ba-f70b8baf60c2/ovnkube-controller/3.log" Nov 23 14:43:00 crc kubenswrapper[5050]: I1123 14:43:00.074116 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-j8fzz_6573c043-542c-47ae-a2ba-f70b8baf60c2/ovnkube-controller/2.log" Nov 23 14:43:00 crc kubenswrapper[5050]: I1123 14:43:00.078463 5050 generic.go:334] "Generic (PLEG): container finished" podID="6573c043-542c-47ae-a2ba-f70b8baf60c2" containerID="82a0b0931dae074fdea5a30d5f082d4040553acde383b476d50f275a40eaec82" exitCode=1 Nov 23 14:43:00 crc kubenswrapper[5050]: I1123 14:43:00.078518 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" event={"ID":"6573c043-542c-47ae-a2ba-f70b8baf60c2","Type":"ContainerDied","Data":"82a0b0931dae074fdea5a30d5f082d4040553acde383b476d50f275a40eaec82"} Nov 23 14:43:00 crc kubenswrapper[5050]: I1123 14:43:00.078630 5050 scope.go:117] "RemoveContainer" containerID="8563b3e8d8f18133f64756e8dfd9f30cdcdad52ed6e13c8edc39fbdee3f71e4a" Nov 23 14:43:00 crc kubenswrapper[5050]: I1123 14:43:00.080332 5050 scope.go:117] "RemoveContainer" containerID="82a0b0931dae074fdea5a30d5f082d4040553acde383b476d50f275a40eaec82" Nov 23 14:43:00 crc kubenswrapper[5050]: E1123 14:43:00.080701 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-j8fzz_openshift-ovn-kubernetes(6573c043-542c-47ae-a2ba-f70b8baf60c2)\"" pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" podUID="6573c043-542c-47ae-a2ba-f70b8baf60c2" Nov 23 14:43:00 crc kubenswrapper[5050]: I1123 14:43:00.102705 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"28a3f438-bef5-4ff1-a080-80490963a204\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7eb678421bc625e01448e7eeb5914cc18880228c6d483129f5e0565dc2d00e49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://65320ae48f1453657b5ff41429b099e308d536f2b7e176941a1f258e77922eeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://60d5ec902c3bb132cd6e58cfbb8e7fdbaf26f34ca50654f9bd028dbb05d7aaec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ced1c27a73cf758143ad23a5852d2211c66925e2c429b91954344f6d78ad5ffd\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ced1c27a73cf758143ad23a5852d2211c66925e2c429b91954344f6d78ad5ffd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:41:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:41:46Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:41:45Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:43:00Z is after 2025-08-24T17:21:41Z" Nov 23 14:43:00 crc kubenswrapper[5050]: I1123 14:43:00.120194 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:43:00 crc kubenswrapper[5050]: I1123 14:43:00.120234 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:43:00 crc kubenswrapper[5050]: I1123 14:43:00.120246 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:43:00 crc kubenswrapper[5050]: I1123 14:43:00.120266 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:43:00 crc kubenswrapper[5050]: I1123 14:43:00.120279 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:00Z","lastTransitionTime":"2025-11-23T14:43:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:43:00 crc kubenswrapper[5050]: I1123 14:43:00.128249 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:43:00Z is after 2025-08-24T17:21:41Z" Nov 23 14:43:00 crc kubenswrapper[5050]: I1123 14:43:00.149150 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-qvjn2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"abdac21e-18fc-460d-bd3b-73bed66b8ab9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://db0c98dfdc0723630b1490357e96c25cfe466caf464ee1883509dc0bd2521337\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7d239bbc61595516c6e76763dd66ee795750adb765c56bb5d9c967c1e5897fcf\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-23T14:42:54Z\\\",\\\"message\\\":\\\"2025-11-23T14:42:08+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_b627f88d-4467-4bef-bc32-e51f4c0a4ffd\\\\n2025-11-23T14:42:08+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_b627f88d-4467-4bef-bc32-e51f4c0a4ffd to /host/opt/cni/bin/\\\\n2025-11-23T14:42:09Z [verbose] multus-daemon started\\\\n2025-11-23T14:42:09Z [verbose] Readiness Indicator file check\\\\n2025-11-23T14:42:54Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bggpq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-qvjn2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:43:00Z is after 2025-08-24T17:21:41Z" Nov 23 14:43:00 crc kubenswrapper[5050]: I1123 14:43:00.164277 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7cjg9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7f65245a-e7ea-4b29-9534-5dbe5a7ee271\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf73369e7515e9d61bf8fc0b0bd940f15e49a8e84b3e181dad104bd56c2d9b2d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-586ds\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:08Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7cjg9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:43:00Z is after 2025-08-24T17:21:41Z" Nov 23 14:43:00 crc kubenswrapper[5050]: I1123 14:43:00.181936 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8861091e-df98-4ea6-a853-e76e7dec48c5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://374a569595342d2e7684dc020e82bd80b35965aff751215c6c926005ea040798\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://63b17cb181aa6b22312393ce35127c8e06ae80cd1feaaa001d7cb34cdc111456\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3e232fb0da11b5caaec81c7b6ae7185d121508d99fce67c77e1d5d52289dad8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7b8be59b2e91f123c3d88b93237121402f84e9f54aeeaa0fae57baaa6238140\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:41:45Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:43:00Z is after 2025-08-24T17:21:41Z" Nov 23 14:43:00 crc kubenswrapper[5050]: I1123 14:43:00.197662 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4c9762bc-6dbe-4d7f-a93a-591033e8c3b0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0e3f802f93600b6a56df2a1709f042c01893f2d619ae5d173a014975b1d33d8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3e2428cf4682b20bc4bfea576e90dcfd5de93218b167a7d27e9c6af5496a7fd8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318
bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e2428cf4682b20bc4bfea576e90dcfd5de93218b167a7d27e9c6af5496a7fd8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:41:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:41:45Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:43:00Z is after 2025-08-24T17:21:41Z" Nov 23 14:43:00 crc kubenswrapper[5050]: I1123 14:43:00.212034 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://719b29f3f0e7ff3aa3d105c9f4d9fb8d180d13494fcef94b204dece23c0f763d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:43:00Z is after 
2025-08-24T17:21:41Z" Nov 23 14:43:00 crc kubenswrapper[5050]: I1123 14:43:00.223068 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:43:00 crc kubenswrapper[5050]: I1123 14:43:00.223128 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:43:00 crc kubenswrapper[5050]: I1123 14:43:00.223142 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:43:00 crc kubenswrapper[5050]: I1123 14:43:00.223166 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:43:00 crc kubenswrapper[5050]: I1123 14:43:00.223186 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:00Z","lastTransitionTime":"2025-11-23T14:43:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:43:00 crc kubenswrapper[5050]: I1123 14:43:00.232525 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac91134c1368426cb845f20e6c777b0b613d93b1761d097051a7e63dae4203a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ef730e87b3ef157558a92f93526e4dec23e71b48f5cb08a30242f143182a5d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\
\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:43:00Z is after 2025-08-24T17:21:41Z" Nov 23 14:43:00 crc kubenswrapper[5050]: I1123 14:43:00.249790 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1d998909-9470-47ef-87e8-b34f0473682f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5bbc40aad4e1fff72cfeaf901bbe2edde01b909690cec06e0828aee8a4c93e8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jx4bd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b97978773311507f492064330c0ebbaf7862255a02347599c52ce6bbd13d8922\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\"
:true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jx4bd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-hlrlq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:43:00Z is after 2025-08-24T17:21:41Z" Nov 23 14:43:00 crc kubenswrapper[5050]: I1123 14:43:00.272951 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-km979" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1d7b7de2-b361-44b9-ba10-5d4f889abc9b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a024193010036bae37aa1c744cd832dced7ca4893acc76efe5c2b9db65834a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://32b4eece06b72ca080adc2e1fab7f1db357c926b47433f2d374afaa45dad6b38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c
\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://32b4eece06b72ca080adc2e1fab7f1db357c926b47433f2d374afaa45dad6b38\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86867dd85be2aec28e680e55a6fb723964f26082eff07f656c00bd79cd2d3cd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://86867dd85be2aec28e680e55a6fb723964f26082eff07f656c00bd79cd2d3cd2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c72b02501d9b33ae056e91b10f13e50327984b1cabcd69b5066780aa4fde2682\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c72b02501d9b33ae056e91b10f13e50327984b1cabcd69b5066780aa4fde2682\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/servic
eaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba558b2174a35efd061ce97a84b1b785916ea34d481d58cf4582a5bf29d6b24a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba558b2174a35efd061ce97a84b1b785916ea34d481d58cf4582a5bf29d6b24a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9cd2247c7d101274e0f7a09b531266720489fb8ed77f89e7ccd1ee74882508d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c9cd2247c7d101274e0f7a09b531266720489fb8ed77f89e7ccd1ee74882508d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0873c53f2aa7003b92b7460595f79f52b70796f9e38c133da799b5c49785d857\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0873c53f2aa7003b92b7460595f79f52b70796f9e38c133da799b5c49785d857\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath
\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-km979\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:43:00Z is after 2025-08-24T17:21:41Z" Nov 23 14:43:00 crc kubenswrapper[5050]: I1123 14:43:00.296893 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0a2166ba-b7f4-4eb1-95bc-4f44d90e72ea\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://358800bef7b7e16e58003aa5e2411212334a5b1a33c80d223eb7d40ad13867f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://456093b33927a232192a5fa7ee1a4228e9f37da58e5e09cb33018ae6f5b14b2c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ccfc14c36ecee0d8502415582be6053b096ea77504bf5de67f77bf6f74c0936b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0c0214fb48430e8263a76a8412ebe7aaeaa85c1b476c22f8bbe9a5279f35aa5d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://10cf72fab955e4ec81917127043d9e4a2f9d427a03edc22120ab2ae711fdc5a4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"ed_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1763908925\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1763908925\\\\\\\\\\\\\\\" (2025-11-23 13:42:05 +0000 UTC to 2026-11-23 13:42:05 +0000 UTC (now=2025-11-23 14:42:05.669138051 +0000 UTC))\\\\\\\"\\\\nI1123 14:42:05.669209 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1123 14:42:05.669265 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1123 14:42:05.668647 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1123 14:42:05.669361 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1123 14:42:05.669413 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1123 14:42:05.668681 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3465722692/tls.crt::/tmp/serving-cert-3465722692/tls.key\\\\\\\"\\\\nI1123 14:42:05.669629 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1123 14:42:05.669660 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1123 14:42:05.668685 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1123 14:42:05.669884 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1123 14:42:05.668719 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" 
name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1123 14:42:05.669899 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1123 14:42:05.668776 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-23T14:41:59Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://beaf638703b4b64fabd10eb14bad058dc0dae74261a462e7f2fddd8aa3811cf7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a63464a6e2f79d8e5a7e0eed9f72b684e0071cad60d3a801754762be4a2735c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a63464a6e2f79d8e5a7e0eed9f72b684e0071cad60d3a801754762be4a2735c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:41:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:41:45Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:43:00Z is after 2025-08-24T17:21:41Z" Nov 23 14:43:00 crc kubenswrapper[5050]: I1123 14:43:00.315509 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:43:00Z is after 2025-08-24T17:21:41Z" Nov 23 14:43:00 crc kubenswrapper[5050]: I1123 14:43:00.329883 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:43:00 crc kubenswrapper[5050]: I1123 14:43:00.329943 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:43:00 crc kubenswrapper[5050]: I1123 14:43:00.329956 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:43:00 crc kubenswrapper[5050]: I1123 14:43:00.329977 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:43:00 crc kubenswrapper[5050]: I1123 14:43:00.329996 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:00Z","lastTransitionTime":"2025-11-23T14:43:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:43:00 crc kubenswrapper[5050]: I1123 14:43:00.335570 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-gtj96" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc69bd19-1f49-486e-a510-d5b8461fb172\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ltjqr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ltjqr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:20Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-gtj96\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:43:00Z is after 2025-08-24T17:21:41Z" Nov 23 14:43:00 crc kubenswrapper[5050]: I1123 14:43:00.357264 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:43:00Z is after 2025-08-24T17:21:41Z" Nov 23 14:43:00 crc kubenswrapper[5050]: I1123 14:43:00.378251 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ced068547f7e7c0807e996ab8500b30cda68a4c31363b415f66516b883221972\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:43:00Z is after 2025-08-24T17:21:41Z" Nov 23 14:43:00 crc kubenswrapper[5050]: I1123 14:43:00.401312 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-7btqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"963992d0-f69e-4327-9789-4571451c1838\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c4b1570d42f2b86c042b9abcb595bf36b35789f24d12573f94503c7383849647\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8vphk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:05Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-7btqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:43:00Z is after 2025-08-24T17:21:41Z" Nov 23 14:43:00 crc kubenswrapper[5050]: I1123 14:43:00.433961 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6573c043-542c-47ae-a2ba-f70b8baf60c2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d06dfc9e93891495a387aa8a8e24b4029173b25c953dff4d2e80dbb30f4b03e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd601391a5bb150716ee97b765dd442d4bf9929eb01dfd17fdce39f327b4698f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9de298aa47ed7038c4f4ed742abb7a13f112d7f417637eb11d31a1f1accf0b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfa445fb3677a09126d124eea1348466a338713c6728a7ef138d9ffa43bec9eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://af3da6e6807f5d42cb8607efbabab0c5d472ddb5c02dd1c33f9f0f3ea8eba9c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://34b2a56f4af5df28c458b7f7d4588f81564c734a117bbf4d08e1fccfa43fd387\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://82a0b0931dae074fdea5a30d5f082d4040553acde383b476d50f275a40eaec82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8563b3e8d8f18133f64756e8dfd9f30cdcdad52ed6e13c8edc39fbdee3f71e4a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-23T14:42:29Z\\\",\\\"message\\\":\\\"tus/network-metrics-daemon-gtj96: failed to update pod openshift-multus/network-metrics-daemon-gtj96: Internal error occurred: failed calling webhook \\\\\\\"pod.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/pod?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:42:29Z is after 2025-08-24T17:21:41Z\\\\nI1123 14:42:29.428594 6728 ovnkube.go:599] Stopped ovnkube\\\\nI1123 14:42:29.428608 6728 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1123 14:42:29.428611 6728 obj_retry.go:303] Retry object setup: *v1.Pod openshift-network-diagnostics/network-check-source-55646444c4-trplf\\\\nI1123 14:42:29.428624 6728 obj_retry.go:365] Adding new object: *v1.Pod openshift-network-diagnostics/network-check-source-55646444c4-trplf\\\\nI1123 14:42:29.428635 6728 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1123 14:42:29.428578 6728 obj_retry.go:303] Retry object setup: *v1.Pod openshift-dns/node-resolver-7btqb\\\\nF1123 14:42:29.428723 6728 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:28Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://82a0b0931dae074fdea5a30d5f082d4040553acde383b476d50f275a40eaec82\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-23T14:42:59Z\\\",\\\"message\\\":\\\"t-controller-manager-operator/metrics_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.58:443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {4607c9b7-15f9-4ba0-86e5-0021ba7e4488}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1123 14:42:59.492284 7071 ovnkube.go:599] Stopped ovnkube\\\\nI1123 14:42:59.492499 7071 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1123 14:42:59.492299 7071 obj_retry.go:434] periodicallyRetryResources: Retry channel got triggered: retrying failed objects of type *v1.Pod\\\\nI1123 14:42:59.492584 7071 loadbalancer.go:304] Deleted 0 stale LBs for 
map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-etcd/etcd\\\\\\\"}\\\\nI1123 14:42:59.492604 7071 services_controller.go:360] Finished syncing service etcd on namespace openshift-etcd for network=default : 1.557802ms\\\\nF1123 14:42:59.492605 7071 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, han\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7009f69906eef1a76bebe08606ec9a9b7e61ce6217caaf0b784d0943db4bd51f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\
\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c68e73aa649285bedc9716826bdf5e41580614fba6de910e2dc1fa5bb524dcc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6c68e73aa649285bedc9716826bdf5e41580614fba6de910e2dc1fa5bb524dcc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-j8fzz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:43:00Z is after 2025-08-24T17:21:41Z" Nov 23 14:43:00 crc kubenswrapper[5050]: I1123 14:43:00.439317 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:43:00 crc kubenswrapper[5050]: I1123 14:43:00.439382 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:43:00 crc kubenswrapper[5050]: I1123 14:43:00.439400 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:43:00 crc kubenswrapper[5050]: I1123 14:43:00.439433 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:43:00 crc kubenswrapper[5050]: I1123 14:43:00.439495 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:00Z","lastTransitionTime":"2025-11-23T14:43:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:43:00 crc kubenswrapper[5050]: I1123 14:43:00.454831 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-9q4mg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b75c970-bec5-4155-89c1-3fcba8733f70\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4f4f3e4f2e058d94d8ff4dc299e19a23318e04263a4324efa766daf5f7268f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lsct2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2cc3ecb845a59d933cd05a64b4c258b18d167098d933d7b0741d7dbb43bfb328\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lsct2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:19Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-9q4mg\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:43:00Z is after 2025-08-24T17:21:41Z" Nov 23 14:43:00 crc kubenswrapper[5050]: I1123 14:43:00.543194 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:43:00 crc kubenswrapper[5050]: I1123 14:43:00.543287 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:43:00 crc kubenswrapper[5050]: I1123 14:43:00.543311 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:43:00 crc kubenswrapper[5050]: I1123 14:43:00.543350 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:43:00 crc kubenswrapper[5050]: I1123 14:43:00.543374 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:00Z","lastTransitionTime":"2025-11-23T14:43:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:43:00 crc kubenswrapper[5050]: I1123 14:43:00.548562 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 23 14:43:00 crc kubenswrapper[5050]: I1123 14:43:00.548608 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 23 14:43:00 crc kubenswrapper[5050]: E1123 14:43:00.548768 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 23 14:43:00 crc kubenswrapper[5050]: E1123 14:43:00.548971 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 23 14:43:00 crc kubenswrapper[5050]: I1123 14:43:00.549149 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gtj96" Nov 23 14:43:00 crc kubenswrapper[5050]: E1123 14:43:00.549495 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-gtj96" podUID="cc69bd19-1f49-486e-a510-d5b8461fb172" Nov 23 14:43:00 crc kubenswrapper[5050]: I1123 14:43:00.646913 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:43:00 crc kubenswrapper[5050]: I1123 14:43:00.646981 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:43:00 crc kubenswrapper[5050]: I1123 14:43:00.647000 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:43:00 crc kubenswrapper[5050]: I1123 14:43:00.647032 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:43:00 crc kubenswrapper[5050]: I1123 14:43:00.647053 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:00Z","lastTransitionTime":"2025-11-23T14:43:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:43:00 crc kubenswrapper[5050]: I1123 14:43:00.751107 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:43:00 crc kubenswrapper[5050]: I1123 14:43:00.751175 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:43:00 crc kubenswrapper[5050]: I1123 14:43:00.751197 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:43:00 crc kubenswrapper[5050]: I1123 14:43:00.751226 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:43:00 crc kubenswrapper[5050]: I1123 14:43:00.751248 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:00Z","lastTransitionTime":"2025-11-23T14:43:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:43:00 crc kubenswrapper[5050]: I1123 14:43:00.854576 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:43:00 crc kubenswrapper[5050]: I1123 14:43:00.855057 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:43:00 crc kubenswrapper[5050]: I1123 14:43:00.855224 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:43:00 crc kubenswrapper[5050]: I1123 14:43:00.855368 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:43:00 crc kubenswrapper[5050]: I1123 14:43:00.855517 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:00Z","lastTransitionTime":"2025-11-23T14:43:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:43:00 crc kubenswrapper[5050]: I1123 14:43:00.959593 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:43:00 crc kubenswrapper[5050]: I1123 14:43:00.959638 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:43:00 crc kubenswrapper[5050]: I1123 14:43:00.959658 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:43:00 crc kubenswrapper[5050]: I1123 14:43:00.959684 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:43:00 crc kubenswrapper[5050]: I1123 14:43:00.959702 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:00Z","lastTransitionTime":"2025-11-23T14:43:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:43:01 crc kubenswrapper[5050]: I1123 14:43:01.062923 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:43:01 crc kubenswrapper[5050]: I1123 14:43:01.063337 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:43:01 crc kubenswrapper[5050]: I1123 14:43:01.063493 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:43:01 crc kubenswrapper[5050]: I1123 14:43:01.063860 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:43:01 crc kubenswrapper[5050]: I1123 14:43:01.064067 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:01Z","lastTransitionTime":"2025-11-23T14:43:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:43:01 crc kubenswrapper[5050]: I1123 14:43:01.085980 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-j8fzz_6573c043-542c-47ae-a2ba-f70b8baf60c2/ovnkube-controller/3.log" Nov 23 14:43:01 crc kubenswrapper[5050]: I1123 14:43:01.092369 5050 scope.go:117] "RemoveContainer" containerID="82a0b0931dae074fdea5a30d5f082d4040553acde383b476d50f275a40eaec82" Nov 23 14:43:01 crc kubenswrapper[5050]: E1123 14:43:01.092703 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-j8fzz_openshift-ovn-kubernetes(6573c043-542c-47ae-a2ba-f70b8baf60c2)\"" pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" podUID="6573c043-542c-47ae-a2ba-f70b8baf60c2" Nov 23 14:43:01 crc kubenswrapper[5050]: I1123 14:43:01.114434 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-gtj96" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc69bd19-1f49-486e-a510-d5b8461fb172\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ltjqr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ltjqr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:20Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-gtj96\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:43:01Z is after 2025-08-24T17:21:41Z" Nov 23 14:43:01 crc kubenswrapper[5050]: I1123 14:43:01.134650 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0a2166ba-b7f4-4eb1-95bc-4f44d90e72ea\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://358800bef7b7e16e58003aa5e2411212334a5b1a33c80d223eb7d40ad13867f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://456093b33927a232192a5fa7ee1a4228e9f37da58e5e09cb33018ae6f5b14b2c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ccfc14c36ecee0d8502415582be6053b096ea77504bf5de67f77bf6f74c0936b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0c0214fb48430e8263a76a8412ebe7aaeaa85c1b476c22f8bbe9a5279f35aa5d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://10cf72fab955e4ec81917127043d9e4a2f9d427a03edc22120ab2ae711fdc5a4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"ed_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1763908925\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1763908925\\\\\\\\\\\\\\\" (2025-11-23 13:42:05 +0000 UTC to 2026-11-23 13:42:05 +0000 UTC (now=2025-11-23 14:42:05.669138051 +0000 UTC))\\\\\\\"\\\\nI1123 14:42:05.669209 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1123 14:42:05.669265 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1123 14:42:05.668647 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1123 14:42:05.669361 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1123 14:42:05.669413 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1123 14:42:05.668681 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3465722692/tls.crt::/tmp/serving-cert-3465722692/tls.key\\\\\\\"\\\\nI1123 14:42:05.669629 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1123 14:42:05.669660 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1123 14:42:05.668685 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1123 14:42:05.669884 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1123 14:42:05.668719 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1123 14:42:05.669899 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1123 14:42:05.668776 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-23T14:41:59Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://beaf638703b4b64fabd10eb14bad058dc0dae74261a462e7f2fddd8aa3811cf7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a63464a6e2f79d8e5a7e0eed9f72b684e0071cad60d3a801754762be4a2735c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a63464a6e2f79d8e5a7e0eed9f72b684e0071cad60d3a801754762be4a2735c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:41:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:41:45Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:43:01Z is after 2025-08-24T17:21:41Z" Nov 23 14:43:01 crc kubenswrapper[5050]: I1123 14:43:01.159373 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:43:01Z is after 2025-08-24T17:21:41Z" Nov 23 14:43:01 crc kubenswrapper[5050]: I1123 14:43:01.168125 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:43:01 crc kubenswrapper[5050]: I1123 14:43:01.168210 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:43:01 crc kubenswrapper[5050]: I1123 14:43:01.168236 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:43:01 crc kubenswrapper[5050]: I1123 14:43:01.168271 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:43:01 crc kubenswrapper[5050]: I1123 14:43:01.168292 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:01Z","lastTransitionTime":"2025-11-23T14:43:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:43:01 crc kubenswrapper[5050]: I1123 14:43:01.185010 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ced068547f7e7c0807e996ab8500b30cda68a4c31363b415f66516b883221972\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:43:01Z is after 2025-08-24T17:21:41Z" Nov 23 14:43:01 crc kubenswrapper[5050]: I1123 14:43:01.203037 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-7btqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"963992d0-f69e-4327-9789-4571451c1838\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c4b1570d42f2b86c042b9abcb595bf36b35789f24d12573f94503c7383849647\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8vphk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:05Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-7btqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:43:01Z is after 2025-08-24T17:21:41Z" Nov 23 14:43:01 crc kubenswrapper[5050]: I1123 14:43:01.236391 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6573c043-542c-47ae-a2ba-f70b8baf60c2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d06dfc9e93891495a387aa8a8e24b4029173b25c953dff4d2e80dbb30f4b03e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd601391a5bb150716ee97b765dd442d4bf9929eb01dfd17fdce39f327b4698f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9de298aa47ed7038c4f4ed742abb7a13f112d7f417637eb11d31a1f1accf0b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfa445fb3677a09126d124eea1348466a338713c6728a7ef138d9ffa43bec9eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://af3da6e6807f5d42cb8607efbabab0c5d472ddb5c02dd1c33f9f0f3ea8eba9c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://34b2a56f4af5df28c458b7f7d4588f81564c734a117bbf4d08e1fccfa43fd387\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://82a0b0931dae074fdea5a30d5f082d4040553acde383b476d50f275a40eaec82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://82a0b0931dae074fdea5a30d5f082d4040553acde383b476d50f275a40eaec82\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-23T14:42:59Z\\\",\\\"message\\\":\\\"t-controller-manager-operator/metrics_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.58:443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {4607c9b7-15f9-4ba0-86e5-0021ba7e4488}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1123 14:42:59.492284 7071 ovnkube.go:599] Stopped ovnkube\\\\nI1123 14:42:59.492499 7071 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1123 14:42:59.492299 7071 obj_retry.go:434] periodicallyRetryResources: Retry channel got triggered: retrying failed objects of type *v1.Pod\\\\nI1123 14:42:59.492584 7071 loadbalancer.go:304] Deleted 0 stale LBs for map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-etcd/etcd\\\\\\\"}\\\\nI1123 14:42:59.492604 7071 services_controller.go:360] Finished syncing service etcd on namespace openshift-etcd for network=default : 1.557802ms\\\\nF1123 14:42:59.492605 7071 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, han\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:58Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-j8fzz_openshift-ovn-kubernetes(6573c043-542c-47ae-a2ba-f70b8baf60c2)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7009f69906eef1a76bebe08606ec9a9b7e61ce6217caaf0b784d0943db4bd51f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c68e73aa649285bedc9716826bdf5e41580614fba6de910e2dc1fa5bb524dcc\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6c68e73aa649285bedc9716826bdf5e41580614fba6de910e2dc1fa5bb524dcc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-j8fzz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:43:01Z is after 2025-08-24T17:21:41Z" Nov 23 14:43:01 crc kubenswrapper[5050]: I1123 14:43:01.256685 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-9q4mg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b75c970-bec5-4155-89c1-3fcba8733f70\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4f4f3e4f2e058d94d8ff4dc299e19a23318e04263a4324efa766daf5f7268f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lsct2
\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2cc3ecb845a59d933cd05a64b4c258b18d167098d933d7b0741d7dbb43bfb328\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lsct2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:19Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-9q4mg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:43:01Z is after 2025-08-24T17:21:41Z" Nov 23 14:43:01 crc kubenswrapper[5050]: I1123 14:43:01.271885 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:43:01 crc kubenswrapper[5050]: I1123 14:43:01.271942 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:43:01 crc kubenswrapper[5050]: I1123 14:43:01.271957 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:43:01 crc kubenswrapper[5050]: I1123 14:43:01.271979 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:43:01 crc kubenswrapper[5050]: I1123 14:43:01.271993 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:01Z","lastTransitionTime":"2025-11-23T14:43:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:43:01 crc kubenswrapper[5050]: I1123 14:43:01.278098 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:43:01Z is after 2025-08-24T17:21:41Z" Nov 23 14:43:01 crc kubenswrapper[5050]: I1123 14:43:01.301167 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-qvjn2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"abdac21e-18fc-460d-bd3b-73bed66b8ab9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://db0c98dfdc0723630b1490357e96c25cfe466caf464ee1883509dc0bd2521337\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7d239bbc61595516c6e76763dd66ee795750adb765c56bb5d9c967c1e5897fcf\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-23T14:42:54Z\\\",\\\"message\\\":\\\"2025-11-23T14:42:08+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_b627f88d-4467-4bef-bc32-e51f4c0a4ffd\\\\n2025-11-23T14:42:08+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_b627f88d-4467-4bef-bc32-e51f4c0a4ffd to /host/opt/cni/bin/\\\\n2025-11-23T14:42:09Z [verbose] multus-daemon started\\\\n2025-11-23T14:42:09Z [verbose] Readiness Indicator file check\\\\n2025-11-23T14:42:54Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bggpq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-qvjn2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:43:01Z is after 2025-08-24T17:21:41Z" Nov 23 14:43:01 crc kubenswrapper[5050]: I1123 14:43:01.321957 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7cjg9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7f65245a-e7ea-4b29-9534-5dbe5a7ee271\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf73369e7515e9d61bf8fc0b0bd940f15e49a8e84b3e181dad104bd56c2d9b2d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-586ds\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:08Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7cjg9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:43:01Z is after 2025-08-24T17:21:41Z" Nov 23 14:43:01 crc kubenswrapper[5050]: I1123 14:43:01.341793 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"28a3f438-bef5-4ff1-a080-80490963a204\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7eb678421bc625e01448e7eeb5914cc18880228c6d483129f5e0565dc2d00e49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://65320ae48f1453657b5ff41429b099e308d536f2b7e176941a1f258e77922eeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://60d5ec902c3bb132cd6e58cfbb8e7fdbaf26f34ca50654f9bd028dbb05d7aaec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ced1c27a73cf758143ad23a5852d2211c66925e2c429b91954344f6d78ad5ffd\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ced1c27a73cf758143ad23a5852d2211c66925e2c429b91954344f6d78ad5ffd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:41:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:41:46Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:41:45Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:43:01Z is after 2025-08-24T17:21:41Z" Nov 23 14:43:01 crc kubenswrapper[5050]: I1123 14:43:01.362724 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:43:01Z is after 2025-08-24T17:21:41Z" Nov 23 14:43:01 crc kubenswrapper[5050]: I1123 14:43:01.376436 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:43:01 crc kubenswrapper[5050]: I1123 14:43:01.376557 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:43:01 crc kubenswrapper[5050]: I1123 14:43:01.376580 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:43:01 crc kubenswrapper[5050]: I1123 14:43:01.376609 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:43:01 crc kubenswrapper[5050]: I1123 14:43:01.376630 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:01Z","lastTransitionTime":"2025-11-23T14:43:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:43:01 crc kubenswrapper[5050]: I1123 14:43:01.386004 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://719b29f3f0e7ff3aa3d105c9f4d9fb8d180d13494fcef94b204dece23c0f763d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:43:01Z is after 2025-08-24T17:21:41Z" Nov 23 14:43:01 crc kubenswrapper[5050]: I1123 14:43:01.408790 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac91134c1368426cb845f20e6c777b0b613d93b1761d097051a7e63dae4203a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ef730e87b3ef157558a92f93526e4dec23e71b48f5cb08a30242f143182a5d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:43:01Z is after 2025-08-24T17:21:41Z" Nov 23 14:43:01 crc kubenswrapper[5050]: I1123 14:43:01.429315 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1d998909-9470-47ef-87e8-b34f0473682f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5bbc40aad4e1fff72cfeaf901bbe2edde01b909690cec06e0828aee8a4c93e8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jx4bd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b97978773311507f492064330c0ebbaf7862255a02347599c52ce6bbd13d8922\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jx4bd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-hlrlq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:43:01Z is after 2025-08-24T17:21:41Z" Nov 23 14:43:01 crc kubenswrapper[5050]: I1123 14:43:01.457201 5050 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-km979" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1d7b7de2-b361-44b9-ba10-5d4f889abc9b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a024193010036bae37aa1c744cd832dced7ca4893acc76efe5c2b9db65834a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://32b4eece06b72ca080adc2e1fab7f1db357c926b47433f2d374afaa45dad6b38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://32b4eece06b72ca080adc2e1fab7f1db357c926b47433f2d374afaa45dad6b38\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86867dd85be2aec28e680e55a6fb723964f26082eff07f656c00bd79cd2d3cd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2c
c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://86867dd85be2aec28e680e55a6fb723964f26082eff07f656c00bd79cd2d3cd2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c72b02501d9b33ae056e91b10f13e50327984b1cabcd69b5066780aa4fde2682\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c72b02501d9b33ae056e91b10f13e50327984b1cabcd69b5066780aa4fde2682\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba558b2174a35efd061ce97a84b1b785916ea34d481d58cf4582a5bf29d6b24a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba558b2174a35efd061ce97a84b1b785916ea34d481d58cf4582a5bf29d6b24a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-re
lease\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9cd2247c7d101274e0f7a09b531266720489fb8ed77f89e7ccd1ee74882508d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c9cd2247c7d101274e0f7a09b531266720489fb8ed77f89e7ccd1ee74882508d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0873c53f2aa7003b92b7460595f79f52b70796f9e38c133da799b5c49785d857\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0873c53f2aa7003b92b7460595f79f52b70796f9e38c133da799b5c49785d857\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-km979\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:43:01Z is after 2025-08-24T17:21:41Z" Nov 23 14:43:01 crc kubenswrapper[5050]: I1123 14:43:01.477254 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8861091e-df98-4ea6-a853-e76e7dec48c5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://374a569595342d2e7684dc020e82bd80b35965aff751215c6c926005ea040798\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://63b17cb181aa6b22312393ce35127c8e06ae80cd1feaaa001d7cb34cdc111456\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3e232fb0da11b5caaec81c7b6ae7185d121508d99fce67c77e1d5d52289dad8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7b8be59b2e91f123c3d88b93237121402f84e9f54aeeaa0fae57baaa6238140\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:41:45Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:43:01Z is after 2025-08-24T17:21:41Z" Nov 23 14:43:01 crc kubenswrapper[5050]: I1123 14:43:01.480175 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:43:01 crc kubenswrapper[5050]: I1123 14:43:01.480245 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:43:01 crc kubenswrapper[5050]: I1123 14:43:01.480279 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:43:01 crc kubenswrapper[5050]: I1123 14:43:01.480308 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:43:01 crc kubenswrapper[5050]: I1123 14:43:01.480329 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:01Z","lastTransitionTime":"2025-11-23T14:43:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:43:01 crc kubenswrapper[5050]: I1123 14:43:01.500496 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4c9762bc-6dbe-4d7f-a93a-591033e8c3b0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0e3f802f93600b6a56df2a1709f042c01893f2d619ae5d173a014975b1d33d8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3e2428cf4682b20bc4bfea576e90dcfd5de93218b167a7d27e9c6af5496a7fd8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e2428cf4682b20bc4bfea576e90dcfd5de93218b167a7d27e9c6af5496a7fd8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:41:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:41:45Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:43:01Z is after 2025-08-24T17:21:41Z" Nov 23 14:43:01 crc kubenswrapper[5050]: I1123 14:43:01.548510 5050 
util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 23 14:43:01 crc kubenswrapper[5050]: E1123 14:43:01.548752 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 23 14:43:01 crc kubenswrapper[5050]: I1123 14:43:01.584419 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:43:01 crc kubenswrapper[5050]: I1123 14:43:01.584557 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:43:01 crc kubenswrapper[5050]: I1123 14:43:01.584582 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:43:01 crc kubenswrapper[5050]: I1123 14:43:01.584619 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:43:01 crc kubenswrapper[5050]: I1123 14:43:01.584708 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:01Z","lastTransitionTime":"2025-11-23T14:43:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:43:01 crc kubenswrapper[5050]: I1123 14:43:01.688520 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:43:01 crc kubenswrapper[5050]: I1123 14:43:01.689166 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:43:01 crc kubenswrapper[5050]: I1123 14:43:01.689485 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:43:01 crc kubenswrapper[5050]: I1123 14:43:01.689684 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:43:01 crc kubenswrapper[5050]: I1123 14:43:01.689863 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:01Z","lastTransitionTime":"2025-11-23T14:43:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:43:01 crc kubenswrapper[5050]: I1123 14:43:01.793985 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:43:01 crc kubenswrapper[5050]: I1123 14:43:01.794436 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:43:01 crc kubenswrapper[5050]: I1123 14:43:01.794734 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:43:01 crc kubenswrapper[5050]: I1123 14:43:01.794995 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:43:01 crc kubenswrapper[5050]: I1123 14:43:01.795191 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:01Z","lastTransitionTime":"2025-11-23T14:43:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:43:01 crc kubenswrapper[5050]: I1123 14:43:01.899910 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:43:01 crc kubenswrapper[5050]: I1123 14:43:01.899961 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:43:01 crc kubenswrapper[5050]: I1123 14:43:01.899979 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:43:01 crc kubenswrapper[5050]: I1123 14:43:01.900006 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:43:01 crc kubenswrapper[5050]: I1123 14:43:01.900028 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:01Z","lastTransitionTime":"2025-11-23T14:43:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:43:02 crc kubenswrapper[5050]: I1123 14:43:02.002960 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:43:02 crc kubenswrapper[5050]: I1123 14:43:02.003023 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:43:02 crc kubenswrapper[5050]: I1123 14:43:02.003036 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:43:02 crc kubenswrapper[5050]: I1123 14:43:02.003059 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:43:02 crc kubenswrapper[5050]: I1123 14:43:02.003073 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:02Z","lastTransitionTime":"2025-11-23T14:43:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:43:02 crc kubenswrapper[5050]: I1123 14:43:02.106439 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:43:02 crc kubenswrapper[5050]: I1123 14:43:02.107377 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:43:02 crc kubenswrapper[5050]: I1123 14:43:02.107552 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:43:02 crc kubenswrapper[5050]: I1123 14:43:02.107690 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:43:02 crc kubenswrapper[5050]: I1123 14:43:02.107878 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:02Z","lastTransitionTime":"2025-11-23T14:43:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:43:02 crc kubenswrapper[5050]: I1123 14:43:02.212257 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:43:02 crc kubenswrapper[5050]: I1123 14:43:02.212340 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:43:02 crc kubenswrapper[5050]: I1123 14:43:02.212362 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:43:02 crc kubenswrapper[5050]: I1123 14:43:02.212392 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:43:02 crc kubenswrapper[5050]: I1123 14:43:02.212416 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:02Z","lastTransitionTime":"2025-11-23T14:43:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:43:02 crc kubenswrapper[5050]: I1123 14:43:02.316487 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:43:02 crc kubenswrapper[5050]: I1123 14:43:02.316562 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:43:02 crc kubenswrapper[5050]: I1123 14:43:02.316587 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:43:02 crc kubenswrapper[5050]: I1123 14:43:02.316666 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:43:02 crc kubenswrapper[5050]: I1123 14:43:02.316701 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:02Z","lastTransitionTime":"2025-11-23T14:43:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:43:02 crc kubenswrapper[5050]: I1123 14:43:02.420254 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:43:02 crc kubenswrapper[5050]: I1123 14:43:02.420327 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:43:02 crc kubenswrapper[5050]: I1123 14:43:02.420352 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:43:02 crc kubenswrapper[5050]: I1123 14:43:02.420392 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:43:02 crc kubenswrapper[5050]: I1123 14:43:02.420416 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:02Z","lastTransitionTime":"2025-11-23T14:43:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:43:02 crc kubenswrapper[5050]: I1123 14:43:02.524717 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:43:02 crc kubenswrapper[5050]: I1123 14:43:02.525159 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:43:02 crc kubenswrapper[5050]: I1123 14:43:02.525365 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:43:02 crc kubenswrapper[5050]: I1123 14:43:02.525724 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:43:02 crc kubenswrapper[5050]: I1123 14:43:02.525949 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:02Z","lastTransitionTime":"2025-11-23T14:43:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:43:02 crc kubenswrapper[5050]: I1123 14:43:02.548529 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 23 14:43:02 crc kubenswrapper[5050]: I1123 14:43:02.548535 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gtj96" Nov 23 14:43:02 crc kubenswrapper[5050]: I1123 14:43:02.548535 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 23 14:43:02 crc kubenswrapper[5050]: E1123 14:43:02.548736 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 23 14:43:02 crc kubenswrapper[5050]: E1123 14:43:02.549079 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gtj96" podUID="cc69bd19-1f49-486e-a510-d5b8461fb172" Nov 23 14:43:02 crc kubenswrapper[5050]: E1123 14:43:02.549330 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 23 14:43:02 crc kubenswrapper[5050]: I1123 14:43:02.629615 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:43:02 crc kubenswrapper[5050]: I1123 14:43:02.629695 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:43:02 crc kubenswrapper[5050]: I1123 14:43:02.629713 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:43:02 crc kubenswrapper[5050]: I1123 14:43:02.629749 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:43:02 crc kubenswrapper[5050]: I1123 14:43:02.629770 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:02Z","lastTransitionTime":"2025-11-23T14:43:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:43:02 crc kubenswrapper[5050]: I1123 14:43:02.733682 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:43:02 crc kubenswrapper[5050]: I1123 14:43:02.733749 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:43:02 crc kubenswrapper[5050]: I1123 14:43:02.733774 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:43:02 crc kubenswrapper[5050]: I1123 14:43:02.733800 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:43:02 crc kubenswrapper[5050]: I1123 14:43:02.733820 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:02Z","lastTransitionTime":"2025-11-23T14:43:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:43:02 crc kubenswrapper[5050]: I1123 14:43:02.837252 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:43:02 crc kubenswrapper[5050]: I1123 14:43:02.837766 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:43:02 crc kubenswrapper[5050]: I1123 14:43:02.837998 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:43:02 crc kubenswrapper[5050]: I1123 14:43:02.838209 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:43:02 crc kubenswrapper[5050]: I1123 14:43:02.838391 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:02Z","lastTransitionTime":"2025-11-23T14:43:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:43:02 crc kubenswrapper[5050]: I1123 14:43:02.941663 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:43:02 crc kubenswrapper[5050]: I1123 14:43:02.941778 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:43:02 crc kubenswrapper[5050]: I1123 14:43:02.941797 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:43:02 crc kubenswrapper[5050]: I1123 14:43:02.941823 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:43:02 crc kubenswrapper[5050]: I1123 14:43:02.941842 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:02Z","lastTransitionTime":"2025-11-23T14:43:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:43:03 crc kubenswrapper[5050]: I1123 14:43:03.044517 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:43:03 crc kubenswrapper[5050]: I1123 14:43:03.044588 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:43:03 crc kubenswrapper[5050]: I1123 14:43:03.044621 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:43:03 crc kubenswrapper[5050]: I1123 14:43:03.044651 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:43:03 crc kubenswrapper[5050]: I1123 14:43:03.044673 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:03Z","lastTransitionTime":"2025-11-23T14:43:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:43:03 crc kubenswrapper[5050]: I1123 14:43:03.085854 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:43:03 crc kubenswrapper[5050]: I1123 14:43:03.085926 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:43:03 crc kubenswrapper[5050]: I1123 14:43:03.085952 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:43:03 crc kubenswrapper[5050]: I1123 14:43:03.085985 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:43:03 crc kubenswrapper[5050]: I1123 14:43:03.086012 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:03Z","lastTransitionTime":"2025-11-23T14:43:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:43:03 crc kubenswrapper[5050]: E1123 14:43:03.111218 5050 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:43:03Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:43:03Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:43:03Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:43:03Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:43:03Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:43:03Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:43:03Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:43:03Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bf8d1c9f-d59a-4fb5-8c40-1d0a144375f2\\\",\\\"systemUUID\\\":\\\"6e38a339-19e4-4f40-bd49-1fbe05dbb3f1\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:43:03Z is after 2025-08-24T17:21:41Z" Nov 23 14:43:03 crc kubenswrapper[5050]: I1123 14:43:03.117168 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:43:03 crc kubenswrapper[5050]: I1123 14:43:03.117230 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 23 14:43:03 crc kubenswrapper[5050]: I1123 14:43:03.117250 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:43:03 crc kubenswrapper[5050]: I1123 14:43:03.117279 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:43:03 crc kubenswrapper[5050]: I1123 14:43:03.117301 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:03Z","lastTransitionTime":"2025-11-23T14:43:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:43:03 crc kubenswrapper[5050]: E1123 14:43:03.138627 5050 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:43:03Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:43:03Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:43:03Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:43:03Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:43:03Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:43:03Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:43:03Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:43:03Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bf8d1c9f-d59a-4fb5-8c40-1d0a144375f2\\\",\\\"systemUUID\\\":\\\"6e38a339-19e4-4f40-bd49-1fbe05dbb3f1\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:43:03Z is after 2025-08-24T17:21:41Z" Nov 23 14:43:03 crc kubenswrapper[5050]: I1123 14:43:03.144898 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:43:03 crc kubenswrapper[5050]: I1123 14:43:03.144975 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 23 14:43:03 crc kubenswrapper[5050]: I1123 14:43:03.144997 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:43:03 crc kubenswrapper[5050]: I1123 14:43:03.145032 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:43:03 crc kubenswrapper[5050]: I1123 14:43:03.145052 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:03Z","lastTransitionTime":"2025-11-23T14:43:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:43:03 crc kubenswrapper[5050]: E1123 14:43:03.167994 5050 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:43:03Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:43:03Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:43:03Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:43:03Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:43:03Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:43:03Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:43:03Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:43:03Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bf8d1c9f-d59a-4fb5-8c40-1d0a144375f2\\\",\\\"systemUUID\\\":\\\"6e38a339-19e4-4f40-bd49-1fbe05dbb3f1\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:43:03Z is after 2025-08-24T17:21:41Z" Nov 23 14:43:03 crc kubenswrapper[5050]: I1123 14:43:03.174384 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:43:03 crc kubenswrapper[5050]: I1123 14:43:03.174481 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 23 14:43:03 crc kubenswrapper[5050]: I1123 14:43:03.174504 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:43:03 crc kubenswrapper[5050]: I1123 14:43:03.174537 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:43:03 crc kubenswrapper[5050]: I1123 14:43:03.174554 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:03Z","lastTransitionTime":"2025-11-23T14:43:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:43:03 crc kubenswrapper[5050]: E1123 14:43:03.193251 5050 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:43:03Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:43:03Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:43:03Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:43:03Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:43:03Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:43:03Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:43:03Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:43:03Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bf8d1c9f-d59a-4fb5-8c40-1d0a144375f2\\\",\\\"systemUUID\\\":\\\"6e38a339-19e4-4f40-bd49-1fbe05dbb3f1\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:43:03Z is after 2025-08-24T17:21:41Z" Nov 23 14:43:03 crc kubenswrapper[5050]: I1123 14:43:03.198527 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:43:03 crc kubenswrapper[5050]: I1123 14:43:03.198589 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 23 14:43:03 crc kubenswrapper[5050]: I1123 14:43:03.198604 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:43:03 crc kubenswrapper[5050]: I1123 14:43:03.198628 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:43:03 crc kubenswrapper[5050]: I1123 14:43:03.198645 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:03Z","lastTransitionTime":"2025-11-23T14:43:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:43:03 crc kubenswrapper[5050]: E1123 14:43:03.219610 5050 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:43:03Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:43:03Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:43:03Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:43:03Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:43:03Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:43:03Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-23T14:43:03Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-23T14:43:03Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bf8d1c9f-d59a-4fb5-8c40-1d0a144375f2\\\",\\\"systemUUID\\\":\\\"6e38a339-19e4-4f40-bd49-1fbe05dbb3f1\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:43:03Z is after 2025-08-24T17:21:41Z" Nov 23 14:43:03 crc kubenswrapper[5050]: E1123 14:43:03.219866 5050 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 23 14:43:03 crc kubenswrapper[5050]: I1123 14:43:03.221631 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 23 14:43:03 crc kubenswrapper[5050]: I1123 14:43:03.221693 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:43:03 crc kubenswrapper[5050]: I1123 14:43:03.221719 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:43:03 crc kubenswrapper[5050]: I1123 14:43:03.221752 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:43:03 crc kubenswrapper[5050]: I1123 14:43:03.221771 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:03Z","lastTransitionTime":"2025-11-23T14:43:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:43:03 crc kubenswrapper[5050]: I1123 14:43:03.325319 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:43:03 crc kubenswrapper[5050]: I1123 14:43:03.325398 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:43:03 crc kubenswrapper[5050]: I1123 14:43:03.325417 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:43:03 crc kubenswrapper[5050]: I1123 14:43:03.325483 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:43:03 crc kubenswrapper[5050]: I1123 14:43:03.325503 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:03Z","lastTransitionTime":"2025-11-23T14:43:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:43:03 crc kubenswrapper[5050]: I1123 14:43:03.434769 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:43:03 crc kubenswrapper[5050]: I1123 14:43:03.434844 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:43:03 crc kubenswrapper[5050]: I1123 14:43:03.434860 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:43:03 crc kubenswrapper[5050]: I1123 14:43:03.434885 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:43:03 crc kubenswrapper[5050]: I1123 14:43:03.434902 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:03Z","lastTransitionTime":"2025-11-23T14:43:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:43:03 crc kubenswrapper[5050]: I1123 14:43:03.538410 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:43:03 crc kubenswrapper[5050]: I1123 14:43:03.538498 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:43:03 crc kubenswrapper[5050]: I1123 14:43:03.538516 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:43:03 crc kubenswrapper[5050]: I1123 14:43:03.538542 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:43:03 crc kubenswrapper[5050]: I1123 14:43:03.538561 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:03Z","lastTransitionTime":"2025-11-23T14:43:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:43:03 crc kubenswrapper[5050]: I1123 14:43:03.547886 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 23 14:43:03 crc kubenswrapper[5050]: E1123 14:43:03.548084 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 23 14:43:03 crc kubenswrapper[5050]: I1123 14:43:03.642228 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:43:03 crc kubenswrapper[5050]: I1123 14:43:03.642299 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:43:03 crc kubenswrapper[5050]: I1123 14:43:03.642323 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:43:03 crc kubenswrapper[5050]: I1123 14:43:03.642357 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:43:03 crc kubenswrapper[5050]: I1123 14:43:03.642382 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:03Z","lastTransitionTime":"2025-11-23T14:43:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:43:03 crc kubenswrapper[5050]: I1123 14:43:03.745642 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:43:03 crc kubenswrapper[5050]: I1123 14:43:03.745711 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:43:03 crc kubenswrapper[5050]: I1123 14:43:03.745733 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:43:03 crc kubenswrapper[5050]: I1123 14:43:03.745764 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:43:03 crc kubenswrapper[5050]: I1123 14:43:03.745802 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:03Z","lastTransitionTime":"2025-11-23T14:43:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:43:03 crc kubenswrapper[5050]: I1123 14:43:03.849225 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:43:03 crc kubenswrapper[5050]: I1123 14:43:03.849291 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:43:03 crc kubenswrapper[5050]: I1123 14:43:03.849309 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:43:03 crc kubenswrapper[5050]: I1123 14:43:03.849333 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:43:03 crc kubenswrapper[5050]: I1123 14:43:03.849350 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:03Z","lastTransitionTime":"2025-11-23T14:43:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:43:03 crc kubenswrapper[5050]: I1123 14:43:03.953162 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:43:03 crc kubenswrapper[5050]: I1123 14:43:03.953227 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:43:03 crc kubenswrapper[5050]: I1123 14:43:03.953244 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:43:03 crc kubenswrapper[5050]: I1123 14:43:03.953273 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:43:03 crc kubenswrapper[5050]: I1123 14:43:03.953292 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:03Z","lastTransitionTime":"2025-11-23T14:43:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:43:04 crc kubenswrapper[5050]: I1123 14:43:04.073749 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:43:04 crc kubenswrapper[5050]: I1123 14:43:04.073843 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:43:04 crc kubenswrapper[5050]: I1123 14:43:04.073860 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:43:04 crc kubenswrapper[5050]: I1123 14:43:04.073879 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:43:04 crc kubenswrapper[5050]: I1123 14:43:04.073916 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:04Z","lastTransitionTime":"2025-11-23T14:43:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:43:04 crc kubenswrapper[5050]: I1123 14:43:04.176693 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:43:04 crc kubenswrapper[5050]: I1123 14:43:04.176750 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:43:04 crc kubenswrapper[5050]: I1123 14:43:04.176768 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:43:04 crc kubenswrapper[5050]: I1123 14:43:04.176791 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:43:04 crc kubenswrapper[5050]: I1123 14:43:04.176809 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:04Z","lastTransitionTime":"2025-11-23T14:43:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:43:04 crc kubenswrapper[5050]: I1123 14:43:04.279897 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:43:04 crc kubenswrapper[5050]: I1123 14:43:04.280199 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:43:04 crc kubenswrapper[5050]: I1123 14:43:04.280331 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:43:04 crc kubenswrapper[5050]: I1123 14:43:04.280418 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:43:04 crc kubenswrapper[5050]: I1123 14:43:04.280509 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:04Z","lastTransitionTime":"2025-11-23T14:43:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:43:04 crc kubenswrapper[5050]: I1123 14:43:04.384137 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:43:04 crc kubenswrapper[5050]: I1123 14:43:04.384215 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:43:04 crc kubenswrapper[5050]: I1123 14:43:04.384235 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:43:04 crc kubenswrapper[5050]: I1123 14:43:04.384264 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:43:04 crc kubenswrapper[5050]: I1123 14:43:04.384284 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:04Z","lastTransitionTime":"2025-11-23T14:43:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:43:04 crc kubenswrapper[5050]: I1123 14:43:04.487031 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:43:04 crc kubenswrapper[5050]: I1123 14:43:04.487099 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:43:04 crc kubenswrapper[5050]: I1123 14:43:04.487125 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:43:04 crc kubenswrapper[5050]: I1123 14:43:04.487161 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:43:04 crc kubenswrapper[5050]: I1123 14:43:04.487187 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:04Z","lastTransitionTime":"2025-11-23T14:43:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:43:04 crc kubenswrapper[5050]: I1123 14:43:04.547950 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 23 14:43:04 crc kubenswrapper[5050]: I1123 14:43:04.548027 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 23 14:43:04 crc kubenswrapper[5050]: I1123 14:43:04.547950 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gtj96" Nov 23 14:43:04 crc kubenswrapper[5050]: E1123 14:43:04.548195 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 23 14:43:04 crc kubenswrapper[5050]: E1123 14:43:04.548355 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gtj96" podUID="cc69bd19-1f49-486e-a510-d5b8461fb172" Nov 23 14:43:04 crc kubenswrapper[5050]: E1123 14:43:04.548535 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 23 14:43:04 crc kubenswrapper[5050]: I1123 14:43:04.591034 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:43:04 crc kubenswrapper[5050]: I1123 14:43:04.591155 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:43:04 crc kubenswrapper[5050]: I1123 14:43:04.591183 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:43:04 crc kubenswrapper[5050]: I1123 14:43:04.591215 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:43:04 crc kubenswrapper[5050]: I1123 14:43:04.591239 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:04Z","lastTransitionTime":"2025-11-23T14:43:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:43:04 crc kubenswrapper[5050]: I1123 14:43:04.694177 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:43:04 crc kubenswrapper[5050]: I1123 14:43:04.694229 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:43:04 crc kubenswrapper[5050]: I1123 14:43:04.694242 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:43:04 crc kubenswrapper[5050]: I1123 14:43:04.694260 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:43:04 crc kubenswrapper[5050]: I1123 14:43:04.694273 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:04Z","lastTransitionTime":"2025-11-23T14:43:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:43:04 crc kubenswrapper[5050]: I1123 14:43:04.797728 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:43:04 crc kubenswrapper[5050]: I1123 14:43:04.797795 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:43:04 crc kubenswrapper[5050]: I1123 14:43:04.797807 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:43:04 crc kubenswrapper[5050]: I1123 14:43:04.797828 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:43:04 crc kubenswrapper[5050]: I1123 14:43:04.797842 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:04Z","lastTransitionTime":"2025-11-23T14:43:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:43:04 crc kubenswrapper[5050]: I1123 14:43:04.900749 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:43:04 crc kubenswrapper[5050]: I1123 14:43:04.900824 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:43:04 crc kubenswrapper[5050]: I1123 14:43:04.900847 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:43:04 crc kubenswrapper[5050]: I1123 14:43:04.900876 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:43:04 crc kubenswrapper[5050]: I1123 14:43:04.900897 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:04Z","lastTransitionTime":"2025-11-23T14:43:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:43:05 crc kubenswrapper[5050]: I1123 14:43:05.004642 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:43:05 crc kubenswrapper[5050]: I1123 14:43:05.004726 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:43:05 crc kubenswrapper[5050]: I1123 14:43:05.004744 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:43:05 crc kubenswrapper[5050]: I1123 14:43:05.004772 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:43:05 crc kubenswrapper[5050]: I1123 14:43:05.004794 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:05Z","lastTransitionTime":"2025-11-23T14:43:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:43:05 crc kubenswrapper[5050]: I1123 14:43:05.107601 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:43:05 crc kubenswrapper[5050]: I1123 14:43:05.107665 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:43:05 crc kubenswrapper[5050]: I1123 14:43:05.107682 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:43:05 crc kubenswrapper[5050]: I1123 14:43:05.107707 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:43:05 crc kubenswrapper[5050]: I1123 14:43:05.107724 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:05Z","lastTransitionTime":"2025-11-23T14:43:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:43:05 crc kubenswrapper[5050]: I1123 14:43:05.211788 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:43:05 crc kubenswrapper[5050]: I1123 14:43:05.211873 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:43:05 crc kubenswrapper[5050]: I1123 14:43:05.211910 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:43:05 crc kubenswrapper[5050]: I1123 14:43:05.211945 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:43:05 crc kubenswrapper[5050]: I1123 14:43:05.211970 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:05Z","lastTransitionTime":"2025-11-23T14:43:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:43:05 crc kubenswrapper[5050]: I1123 14:43:05.315984 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:43:05 crc kubenswrapper[5050]: I1123 14:43:05.316067 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:43:05 crc kubenswrapper[5050]: I1123 14:43:05.316092 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:43:05 crc kubenswrapper[5050]: I1123 14:43:05.316121 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:43:05 crc kubenswrapper[5050]: I1123 14:43:05.316140 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:05Z","lastTransitionTime":"2025-11-23T14:43:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:43:05 crc kubenswrapper[5050]: I1123 14:43:05.419728 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:43:05 crc kubenswrapper[5050]: I1123 14:43:05.419799 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:43:05 crc kubenswrapper[5050]: I1123 14:43:05.419820 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:43:05 crc kubenswrapper[5050]: I1123 14:43:05.419846 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:43:05 crc kubenswrapper[5050]: I1123 14:43:05.419863 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:05Z","lastTransitionTime":"2025-11-23T14:43:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:43:05 crc kubenswrapper[5050]: I1123 14:43:05.523938 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:43:05 crc kubenswrapper[5050]: I1123 14:43:05.524013 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:43:05 crc kubenswrapper[5050]: I1123 14:43:05.524033 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:43:05 crc kubenswrapper[5050]: I1123 14:43:05.524062 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:43:05 crc kubenswrapper[5050]: I1123 14:43:05.524092 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:05Z","lastTransitionTime":"2025-11-23T14:43:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:43:05 crc kubenswrapper[5050]: I1123 14:43:05.547507 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 23 14:43:05 crc kubenswrapper[5050]: E1123 14:43:05.547666 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 23 14:43:05 crc kubenswrapper[5050]: I1123 14:43:05.567343 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4c9762bc-6dbe-4d7f-a93a-591033e8c3b0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0e3f802f93600b6a56df2a1709f042c01893f2d619ae5d173a014975b1d33d8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3e2428cf4682b20bc4bfea576e90dcfd5de93218b167a7d27e9c6af5496a7fd8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e2428cf4682b20bc4bfea576e90dcfd5de93218b167a7d27e9c6af5496a7fd8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:41:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:41:45Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:43:05Z is after 
2025-08-24T17:21:41Z" Nov 23 14:43:05 crc kubenswrapper[5050]: I1123 14:43:05.584241 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://719b29f3f0e7ff3aa3d105c9f4d9fb8d180d13494fcef94b204dece23c0f763d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:43:05Z is after 2025-08-24T17:21:41Z" Nov 23 14:43:05 crc kubenswrapper[5050]: I1123 14:43:05.606597 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac91134c1368426cb845f20e6c777b0b613d93b1761d097051a7e63dae4203a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ef730e87b3ef157558a92f93526e4dec23e71b48f5cb08a30242f143182a5d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:43:05Z is after 2025-08-24T17:21:41Z" Nov 23 14:43:05 crc kubenswrapper[5050]: I1123 14:43:05.623366 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1d998909-9470-47ef-87e8-b34f0473682f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5bbc40aad4e1fff72cfeaf901bbe2edde01b909690cec06e0828aee8a4c93e8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jx4bd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b97978773311507f492064330c0ebbaf7862255a02347599c52ce6bbd13d8922\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jx4bd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-hlrlq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:43:05Z is after 2025-08-24T17:21:41Z" Nov 23 14:43:05 crc kubenswrapper[5050]: I1123 14:43:05.627198 5050 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:43:05 crc kubenswrapper[5050]: I1123 14:43:05.627235 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:43:05 crc kubenswrapper[5050]: I1123 14:43:05.627248 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:43:05 crc kubenswrapper[5050]: I1123 14:43:05.627271 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:43:05 crc kubenswrapper[5050]: I1123 14:43:05.627329 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:05Z","lastTransitionTime":"2025-11-23T14:43:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:43:05 crc kubenswrapper[5050]: I1123 14:43:05.641170 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-km979" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1d7b7de2-b361-44b9-ba10-5d4f889abc9b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a024193010036bae37aa1c744cd832dced7ca4893acc76efe5c2b9db65834a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://32b4eece06b72ca080adc2e1fab7f1db357c926b47433f2d374afaa45dad6b38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2
c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://32b4eece06b72ca080adc2e1fab7f1db357c926b47433f2d374afaa45dad6b38\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86867dd85be2aec28e680e55a6fb723964f26082eff07f656c00bd79cd2d3cd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://86867dd85be2aec28e680e55a6fb723964f26082eff07f656c00bd79cd2d3cd2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c72b02501d9b33ae056e91b10f13e50327984b1cabcd69b5066780aa4fde2682\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c72b02501d9b33ae056e91b10f13e50327984b1cabcd69b5066780aa4fde2682\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/
secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba558b2174a35efd061ce97a84b1b785916ea34d481d58cf4582a5bf29d6b24a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba558b2174a35efd061ce97a84b1b785916ea34d481d58cf4582a5bf29d6b24a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9cd2247c7d101274e0f7a09b531266720489fb8ed77f89e7ccd1ee74882508d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c9cd2247c7d101274e0f7a09b531266720489fb8ed77f89e7ccd1ee74882508d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0873c53f2aa7003b92b7460595f79f52b70796f9e38c133da799b5c49785d857\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0873c53f2aa7003b92b7460595f79f52b70796f9e38c133da799b5c49785d857\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:13Z\\\"}},\\\"volu
meMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b4rwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-km979\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:43:05Z is after 2025-08-24T17:21:41Z" Nov 23 14:43:05 crc kubenswrapper[5050]: I1123 14:43:05.657801 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8861091e-df98-4ea6-a853-e76e7dec48c5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://374a569595342d2e7684dc020e82bd80b35965aff751215c6c926005ea040798\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://63b17cb181aa6b22312393ce35127c8e06ae80cd1feaaa001d7cb34cdc111456\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"
mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3e232fb0da11b5caaec81c7b6ae7185d121508d99fce67c77e1d5d52289dad8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7b8be59b2e91f123c3d88b93237121402f84e9f54aeeaa0fae57baaa6238140\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:41:45Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:43:05Z is after 2025-08-24T17:21:41Z" Nov 23 14:43:05 crc kubenswrapper[5050]: I1123 14:43:05.679354 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:43:05Z is after 2025-08-24T17:21:41Z" Nov 23 14:43:05 crc kubenswrapper[5050]: I1123 14:43:05.694119 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-gtj96" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc69bd19-1f49-486e-a510-d5b8461fb172\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ltjqr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ltjqr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:20Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-gtj96\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:43:05Z is after 2025-08-24T17:21:41Z" Nov 23 14:43:05 crc kubenswrapper[5050]: I1123 14:43:05.710354 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0a2166ba-b7f4-4eb1-95bc-4f44d90e72ea\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://358800bef7b7e16e58003aa5e2411212334a5b1a33c80d223eb7d40ad13867f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://456093b33927a232192a5fa7ee1a4228e9f37da58e5e09cb33018ae6f5b14b2c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ccfc14c36ecee0d8502415582be6053b096ea77504bf5de67f77bf6f74c0936b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0c0214fb48430e8263a76a8412ebe7aaeaa85c1b476c22f8bbe9a5279f35aa5d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://10cf72fab955e4ec81917127043d9e4a2f9d427a03edc22120ab2ae711fdc5a4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"ed_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1763908925\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1763908925\\\\\\\\\\\\\\\" (2025-11-23 13:42:05 +0000 UTC to 2026-11-23 13:42:05 +0000 UTC (now=2025-11-23 14:42:05.669138051 +0000 UTC))\\\\\\\"\\\\nI1123 14:42:05.669209 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1123 14:42:05.669265 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1123 14:42:05.668647 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1123 14:42:05.669361 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1123 14:42:05.669413 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1123 14:42:05.668681 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3465722692/tls.crt::/tmp/serving-cert-3465722692/tls.key\\\\\\\"\\\\nI1123 14:42:05.669629 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1123 14:42:05.669660 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1123 14:42:05.668685 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1123 14:42:05.669884 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1123 14:42:05.668719 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1123 14:42:05.669899 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1123 14:42:05.668776 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-23T14:41:59Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://beaf638703b4b64fabd10eb14bad058dc0dae74261a462e7f2fddd8aa3811cf7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a63464a6e2f79d8e5a7e0eed9f72b684e0071cad60d3a801754762be4a2735c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a63464a6e2f79d8e5a7e0eed9f72b684e0071cad60d3a801754762be4a2735c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:41:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:41:45Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:43:05Z is after 2025-08-24T17:21:41Z" Nov 23 14:43:05 crc kubenswrapper[5050]: I1123 14:43:05.726489 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:43:05Z is after 2025-08-24T17:21:41Z" Nov 23 14:43:05 crc kubenswrapper[5050]: I1123 14:43:05.729892 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:43:05 crc kubenswrapper[5050]: I1123 14:43:05.729920 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:43:05 crc kubenswrapper[5050]: I1123 14:43:05.729928 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:43:05 crc kubenswrapper[5050]: I1123 14:43:05.729945 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:43:05 crc kubenswrapper[5050]: I1123 14:43:05.729956 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:05Z","lastTransitionTime":"2025-11-23T14:43:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:43:05 crc kubenswrapper[5050]: I1123 14:43:05.745816 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ced068547f7e7c0807e996ab8500b30cda68a4c31363b415f66516b883221972\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:43:05Z is after 2025-08-24T17:21:41Z" Nov 23 14:43:05 crc kubenswrapper[5050]: I1123 14:43:05.757200 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-7btqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"963992d0-f69e-4327-9789-4571451c1838\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c4b1570d42f2b86c042b9abcb595bf36b35789f24d12573f94503c7383849647\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8vphk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:05Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-7btqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:43:05Z is after 2025-08-24T17:21:41Z" Nov 23 14:43:05 crc kubenswrapper[5050]: I1123 14:43:05.787036 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6573c043-542c-47ae-a2ba-f70b8baf60c2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d06dfc9e93891495a387aa8a8e24b4029173b25c953dff4d2e80dbb30f4b03e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd601391a5bb150716ee97b765dd442d4bf9929eb01dfd17fdce39f327b4698f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9de298aa47ed7038c4f4ed742abb7a13f112d7f417637eb11d31a1f1accf0b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfa445fb3677a09126d124eea1348466a338713c6728a7ef138d9ffa43bec9eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://af3da6e6807f5d42cb8607efbabab0c5d472ddb5c02dd1c33f9f0f3ea8eba9c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://34b2a56f4af5df28c458b7f7d4588f81564c734a117bbf4d08e1fccfa43fd387\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://82a0b0931dae074fdea5a30d5f082d4040553acde383b476d50f275a40eaec82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://82a0b0931dae074fdea5a30d5f082d4040553acde383b476d50f275a40eaec82\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-23T14:42:59Z\\\",\\\"message\\\":\\\"t-controller-manager-operator/metrics_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.58:443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {4607c9b7-15f9-4ba0-86e5-0021ba7e4488}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1123 14:42:59.492284 7071 ovnkube.go:599] Stopped ovnkube\\\\nI1123 14:42:59.492499 7071 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1123 14:42:59.492299 7071 obj_retry.go:434] periodicallyRetryResources: Retry channel got triggered: retrying failed objects of type *v1.Pod\\\\nI1123 14:42:59.492584 7071 loadbalancer.go:304] Deleted 0 stale LBs for map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-etcd/etcd\\\\\\\"}\\\\nI1123 14:42:59.492604 7071 services_controller.go:360] Finished syncing service etcd on namespace openshift-etcd for network=default : 1.557802ms\\\\nF1123 14:42:59.492605 7071 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, han\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:58Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-j8fzz_openshift-ovn-kubernetes(6573c043-542c-47ae-a2ba-f70b8baf60c2)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7009f69906eef1a76bebe08606ec9a9b7e61ce6217caaf0b784d0943db4bd51f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c68e73aa649285bedc9716826bdf5e41580614fba6de910e2dc1fa5bb524dcc\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6c68e73aa649285bedc9716826bdf5e41580614fba6de910e2dc1fa5bb524dcc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mtwf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-j8fzz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:43:05Z is after 2025-08-24T17:21:41Z" Nov 23 14:43:05 crc kubenswrapper[5050]: I1123 14:43:05.810424 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-9q4mg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b75c970-bec5-4155-89c1-3fcba8733f70\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4f4f3e4f2e058d94d8ff4dc299e19a23318e04263a4324efa766daf5f7268f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lsct2
\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2cc3ecb845a59d933cd05a64b4c258b18d167098d933d7b0741d7dbb43bfb328\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lsct2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:19Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-9q4mg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:43:05Z is after 2025-08-24T17:21:41Z" Nov 23 14:43:05 crc kubenswrapper[5050]: I1123 14:43:05.828528 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:05Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:43:05Z is after 2025-08-24T17:21:41Z" Nov 23 14:43:05 crc kubenswrapper[5050]: I1123 14:43:05.834559 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:43:05 crc kubenswrapper[5050]: I1123 14:43:05.834631 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:43:05 crc kubenswrapper[5050]: I1123 14:43:05.834640 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:43:05 crc kubenswrapper[5050]: I1123 14:43:05.834660 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:43:05 crc kubenswrapper[5050]: I1123 14:43:05.834675 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:05Z","lastTransitionTime":"2025-11-23T14:43:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:43:05 crc kubenswrapper[5050]: I1123 14:43:05.850954 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-qvjn2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"abdac21e-18fc-460d-bd3b-73bed66b8ab9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://db0c98dfdc0723630b1490357e96c25cfe466caf464ee1883509dc0bd2521337\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7d239bbc61595516c6e76763dd66ee795750adb765c56bb5d9c967c1e5897fcf\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-23T14:42:54Z\\\",\\\"message\\\":\\\"2025-11-23T14:42:08+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_b627f88d-4467-4bef-bc32-e51f4c0a4ffd\\\\n2025-11-23T14:42:08+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_b627f88d-4467-4bef-bc32-e51f4c0a4ffd to /host/opt/cni/bin/\\\\n2025-11-23T14:42:09Z [verbose] multus-daemon started\\\\n2025-11-23T14:42:09Z [verbose] Readiness Indicator file check\\\\n2025-11-23T14:42:54Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-23T14:42:07Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bggpq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-qvjn2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:43:05Z is after 2025-08-24T17:21:41Z" Nov 23 14:43:05 crc kubenswrapper[5050]: I1123 14:43:05.867066 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7cjg9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7f65245a-e7ea-4b29-9534-5dbe5a7ee271\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf73369e7515e9d61bf8fc0b0bd940f15e49a8e84b3e181dad104bd56c2d9b2d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:42:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-586ds\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:42:08Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7cjg9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:43:05Z is after 2025-08-24T17:21:41Z" Nov 23 14:43:05 crc kubenswrapper[5050]: I1123 14:43:05.884387 5050 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"28a3f438-bef5-4ff1-a080-80490963a204\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:42:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T14:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7eb678421bc625e01448e7eeb5914cc18880228c6d483129f5e0565dc2d00e49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://65320ae48f1453657b5ff41429b099e308d536f2b7e176941a1f258e77922eeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://60d5ec902c3bb132cd6e58cfbb8e7fdbaf26f34ca50654f9bd028dbb05d7aaec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-23T14:41:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ced1c27a73cf758143ad23a5852d2211c66925e2c429b91954344f6d78ad5ffd\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ced1c27a73cf758143ad23a5852d2211c66925e2c429b91954344f6d78ad5ffd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-23T14:41:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-23T14:41:46Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T14:41:45Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-23T14:43:05Z is after 2025-08-24T17:21:41Z" Nov 23 14:43:05 crc kubenswrapper[5050]: I1123 14:43:05.938009 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:43:05 crc kubenswrapper[5050]: I1123 14:43:05.938120 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:43:05 crc kubenswrapper[5050]: I1123 14:43:05.938148 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:43:05 crc kubenswrapper[5050]: I1123 14:43:05.938184 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:43:05 crc kubenswrapper[5050]: I1123 14:43:05.938212 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:05Z","lastTransitionTime":"2025-11-23T14:43:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:43:06 crc kubenswrapper[5050]: I1123 14:43:06.042119 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:43:06 crc kubenswrapper[5050]: I1123 14:43:06.042172 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:43:06 crc kubenswrapper[5050]: I1123 14:43:06.042187 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:43:06 crc kubenswrapper[5050]: I1123 14:43:06.042208 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:43:06 crc kubenswrapper[5050]: I1123 14:43:06.042222 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:06Z","lastTransitionTime":"2025-11-23T14:43:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:43:06 crc kubenswrapper[5050]: I1123 14:43:06.145961 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:43:06 crc kubenswrapper[5050]: I1123 14:43:06.146034 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:43:06 crc kubenswrapper[5050]: I1123 14:43:06.146053 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:43:06 crc kubenswrapper[5050]: I1123 14:43:06.146082 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:43:06 crc kubenswrapper[5050]: I1123 14:43:06.146104 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:06Z","lastTransitionTime":"2025-11-23T14:43:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:43:06 crc kubenswrapper[5050]: I1123 14:43:06.250005 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:43:06 crc kubenswrapper[5050]: I1123 14:43:06.250063 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:43:06 crc kubenswrapper[5050]: I1123 14:43:06.250078 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:43:06 crc kubenswrapper[5050]: I1123 14:43:06.250103 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:43:06 crc kubenswrapper[5050]: I1123 14:43:06.250119 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:06Z","lastTransitionTime":"2025-11-23T14:43:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:43:06 crc kubenswrapper[5050]: I1123 14:43:06.355198 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:43:06 crc kubenswrapper[5050]: I1123 14:43:06.355290 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:43:06 crc kubenswrapper[5050]: I1123 14:43:06.355316 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:43:06 crc kubenswrapper[5050]: I1123 14:43:06.355349 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:43:06 crc kubenswrapper[5050]: I1123 14:43:06.355374 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:06Z","lastTransitionTime":"2025-11-23T14:43:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:43:06 crc kubenswrapper[5050]: I1123 14:43:06.459795 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:43:06 crc kubenswrapper[5050]: I1123 14:43:06.459874 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:43:06 crc kubenswrapper[5050]: I1123 14:43:06.459894 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:43:06 crc kubenswrapper[5050]: I1123 14:43:06.459923 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:43:06 crc kubenswrapper[5050]: I1123 14:43:06.459950 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:06Z","lastTransitionTime":"2025-11-23T14:43:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:43:06 crc kubenswrapper[5050]: I1123 14:43:06.548215 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gtj96" Nov 23 14:43:06 crc kubenswrapper[5050]: I1123 14:43:06.548217 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 23 14:43:06 crc kubenswrapper[5050]: I1123 14:43:06.548412 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 23 14:43:06 crc kubenswrapper[5050]: E1123 14:43:06.548669 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-gtj96" podUID="cc69bd19-1f49-486e-a510-d5b8461fb172" Nov 23 14:43:06 crc kubenswrapper[5050]: E1123 14:43:06.548818 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 23 14:43:06 crc kubenswrapper[5050]: E1123 14:43:06.548956 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 23 14:43:06 crc kubenswrapper[5050]: I1123 14:43:06.564048 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:43:06 crc kubenswrapper[5050]: I1123 14:43:06.564094 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:43:06 crc kubenswrapper[5050]: I1123 14:43:06.564110 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:43:06 crc kubenswrapper[5050]: I1123 14:43:06.564133 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:43:06 crc kubenswrapper[5050]: I1123 14:43:06.564151 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:06Z","lastTransitionTime":"2025-11-23T14:43:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:43:06 crc kubenswrapper[5050]: I1123 14:43:06.668858 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:43:06 crc kubenswrapper[5050]: I1123 14:43:06.668948 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:43:06 crc kubenswrapper[5050]: I1123 14:43:06.668975 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:43:06 crc kubenswrapper[5050]: I1123 14:43:06.669011 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:43:06 crc kubenswrapper[5050]: I1123 14:43:06.669041 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:06Z","lastTransitionTime":"2025-11-23T14:43:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:43:06 crc kubenswrapper[5050]: I1123 14:43:06.773155 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:43:06 crc kubenswrapper[5050]: I1123 14:43:06.773243 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:43:06 crc kubenswrapper[5050]: I1123 14:43:06.773265 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:43:06 crc kubenswrapper[5050]: I1123 14:43:06.773293 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:43:06 crc kubenswrapper[5050]: I1123 14:43:06.773314 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:06Z","lastTransitionTime":"2025-11-23T14:43:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:43:06 crc kubenswrapper[5050]: I1123 14:43:06.881192 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:43:06 crc kubenswrapper[5050]: I1123 14:43:06.881287 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:43:06 crc kubenswrapper[5050]: I1123 14:43:06.881314 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:43:06 crc kubenswrapper[5050]: I1123 14:43:06.881362 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:43:06 crc kubenswrapper[5050]: I1123 14:43:06.881385 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:06Z","lastTransitionTime":"2025-11-23T14:43:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:43:06 crc kubenswrapper[5050]: I1123 14:43:06.986643 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:43:06 crc kubenswrapper[5050]: I1123 14:43:06.986731 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:43:06 crc kubenswrapper[5050]: I1123 14:43:06.986751 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:43:06 crc kubenswrapper[5050]: I1123 14:43:06.986781 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:43:06 crc kubenswrapper[5050]: I1123 14:43:06.986802 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:06Z","lastTransitionTime":"2025-11-23T14:43:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:43:07 crc kubenswrapper[5050]: I1123 14:43:07.090947 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:43:07 crc kubenswrapper[5050]: I1123 14:43:07.091015 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:43:07 crc kubenswrapper[5050]: I1123 14:43:07.091027 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:43:07 crc kubenswrapper[5050]: I1123 14:43:07.091051 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:43:07 crc kubenswrapper[5050]: I1123 14:43:07.091065 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:07Z","lastTransitionTime":"2025-11-23T14:43:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:43:07 crc kubenswrapper[5050]: I1123 14:43:07.193850 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:43:07 crc kubenswrapper[5050]: I1123 14:43:07.193906 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:43:07 crc kubenswrapper[5050]: I1123 14:43:07.193923 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:43:07 crc kubenswrapper[5050]: I1123 14:43:07.193948 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:43:07 crc kubenswrapper[5050]: I1123 14:43:07.193967 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:07Z","lastTransitionTime":"2025-11-23T14:43:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:43:07 crc kubenswrapper[5050]: I1123 14:43:07.298308 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:43:07 crc kubenswrapper[5050]: I1123 14:43:07.298356 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:43:07 crc kubenswrapper[5050]: I1123 14:43:07.298374 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:43:07 crc kubenswrapper[5050]: I1123 14:43:07.298400 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:43:07 crc kubenswrapper[5050]: I1123 14:43:07.298423 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:07Z","lastTransitionTime":"2025-11-23T14:43:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:43:07 crc kubenswrapper[5050]: I1123 14:43:07.402800 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:43:07 crc kubenswrapper[5050]: I1123 14:43:07.402895 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:43:07 crc kubenswrapper[5050]: I1123 14:43:07.402923 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:43:07 crc kubenswrapper[5050]: I1123 14:43:07.402962 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:43:07 crc kubenswrapper[5050]: I1123 14:43:07.402989 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:07Z","lastTransitionTime":"2025-11-23T14:43:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:43:07 crc kubenswrapper[5050]: I1123 14:43:07.506730 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:43:07 crc kubenswrapper[5050]: I1123 14:43:07.506799 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:43:07 crc kubenswrapper[5050]: I1123 14:43:07.506820 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:43:07 crc kubenswrapper[5050]: I1123 14:43:07.506857 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:43:07 crc kubenswrapper[5050]: I1123 14:43:07.506881 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:07Z","lastTransitionTime":"2025-11-23T14:43:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:43:07 crc kubenswrapper[5050]: I1123 14:43:07.548542 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 23 14:43:07 crc kubenswrapper[5050]: E1123 14:43:07.548752 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 23 14:43:07 crc kubenswrapper[5050]: I1123 14:43:07.610293 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:43:07 crc kubenswrapper[5050]: I1123 14:43:07.610373 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:43:07 crc kubenswrapper[5050]: I1123 14:43:07.610401 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:43:07 crc kubenswrapper[5050]: I1123 14:43:07.610432 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:43:07 crc kubenswrapper[5050]: I1123 14:43:07.610502 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:07Z","lastTransitionTime":"2025-11-23T14:43:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:43:07 crc kubenswrapper[5050]: I1123 14:43:07.714723 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:43:07 crc kubenswrapper[5050]: I1123 14:43:07.714787 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:43:07 crc kubenswrapper[5050]: I1123 14:43:07.714806 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:43:07 crc kubenswrapper[5050]: I1123 14:43:07.714835 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:43:07 crc kubenswrapper[5050]: I1123 14:43:07.714853 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:07Z","lastTransitionTime":"2025-11-23T14:43:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:43:07 crc kubenswrapper[5050]: I1123 14:43:07.818648 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:43:07 crc kubenswrapper[5050]: I1123 14:43:07.818711 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:43:07 crc kubenswrapper[5050]: I1123 14:43:07.818727 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:43:07 crc kubenswrapper[5050]: I1123 14:43:07.818751 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:43:07 crc kubenswrapper[5050]: I1123 14:43:07.818770 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:07Z","lastTransitionTime":"2025-11-23T14:43:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:43:07 crc kubenswrapper[5050]: I1123 14:43:07.922280 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:43:07 crc kubenswrapper[5050]: I1123 14:43:07.922346 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:43:07 crc kubenswrapper[5050]: I1123 14:43:07.922363 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:43:07 crc kubenswrapper[5050]: I1123 14:43:07.922391 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:43:07 crc kubenswrapper[5050]: I1123 14:43:07.922409 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:07Z","lastTransitionTime":"2025-11-23T14:43:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:43:08 crc kubenswrapper[5050]: I1123 14:43:08.026488 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:43:08 crc kubenswrapper[5050]: I1123 14:43:08.026604 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:43:08 crc kubenswrapper[5050]: I1123 14:43:08.026625 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:43:08 crc kubenswrapper[5050]: I1123 14:43:08.026658 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:43:08 crc kubenswrapper[5050]: I1123 14:43:08.026683 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:08Z","lastTransitionTime":"2025-11-23T14:43:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:43:08 crc kubenswrapper[5050]: I1123 14:43:08.130257 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:43:08 crc kubenswrapper[5050]: I1123 14:43:08.130343 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:43:08 crc kubenswrapper[5050]: I1123 14:43:08.130361 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:43:08 crc kubenswrapper[5050]: I1123 14:43:08.130387 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:43:08 crc kubenswrapper[5050]: I1123 14:43:08.130407 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:08Z","lastTransitionTime":"2025-11-23T14:43:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:43:08 crc kubenswrapper[5050]: I1123 14:43:08.234979 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:43:08 crc kubenswrapper[5050]: I1123 14:43:08.235398 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:43:08 crc kubenswrapper[5050]: I1123 14:43:08.235426 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:43:08 crc kubenswrapper[5050]: I1123 14:43:08.235489 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:43:08 crc kubenswrapper[5050]: I1123 14:43:08.235517 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:08Z","lastTransitionTime":"2025-11-23T14:43:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:43:08 crc kubenswrapper[5050]: I1123 14:43:08.338020 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:43:08 crc kubenswrapper[5050]: I1123 14:43:08.338082 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:43:08 crc kubenswrapper[5050]: I1123 14:43:08.338104 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:43:08 crc kubenswrapper[5050]: I1123 14:43:08.338132 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:43:08 crc kubenswrapper[5050]: I1123 14:43:08.338153 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:08Z","lastTransitionTime":"2025-11-23T14:43:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 23 14:43:08 crc kubenswrapper[5050]: I1123 14:43:08.441991 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 23 14:43:08 crc kubenswrapper[5050]: I1123 14:43:08.442054 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 23 14:43:08 crc kubenswrapper[5050]: I1123 14:43:08.442077 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 23 14:43:08 crc kubenswrapper[5050]: I1123 14:43:08.442108 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 23 14:43:08 crc kubenswrapper[5050]: I1123 14:43:08.442137 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:08Z","lastTransitionTime":"2025-11-23T14:43:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 23 14:43:08 crc kubenswrapper[5050]: I1123 14:43:08.545541 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 23 14:43:08 crc kubenswrapper[5050]: I1123 14:43:08.545602 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 23 14:43:08 crc kubenswrapper[5050]: I1123 14:43:08.545617 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 23 14:43:08 crc kubenswrapper[5050]: I1123 14:43:08.545642 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 23 14:43:08 crc kubenswrapper[5050]: I1123 14:43:08.545657 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:08Z","lastTransitionTime":"2025-11-23T14:43:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 23 14:43:08 crc kubenswrapper[5050]: I1123 14:43:08.548085 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 23 14:43:08 crc kubenswrapper[5050]: I1123 14:43:08.548127 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 23 14:43:08 crc kubenswrapper[5050]: I1123 14:43:08.548089 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gtj96"
Nov 23 14:43:08 crc kubenswrapper[5050]: E1123 14:43:08.548215 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 23 14:43:08 crc kubenswrapper[5050]: E1123 14:43:08.548337 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 23 14:43:08 crc kubenswrapper[5050]: E1123 14:43:08.548507 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gtj96" podUID="cc69bd19-1f49-486e-a510-d5b8461fb172"
Nov 23 14:43:08 crc kubenswrapper[5050]: I1123 14:43:08.648077 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 23 14:43:08 crc kubenswrapper[5050]: I1123 14:43:08.648125 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 23 14:43:08 crc kubenswrapper[5050]: I1123 14:43:08.648141 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 23 14:43:08 crc kubenswrapper[5050]: I1123 14:43:08.648166 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 23 14:43:08 crc kubenswrapper[5050]: I1123 14:43:08.648190 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:08Z","lastTransitionTime":"2025-11-23T14:43:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 23 14:43:08 crc kubenswrapper[5050]: I1123 14:43:08.751184 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 23 14:43:08 crc kubenswrapper[5050]: I1123 14:43:08.751243 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 23 14:43:08 crc kubenswrapper[5050]: I1123 14:43:08.751266 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 23 14:43:08 crc kubenswrapper[5050]: I1123 14:43:08.751297 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 23 14:43:08 crc kubenswrapper[5050]: I1123 14:43:08.751316 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:08Z","lastTransitionTime":"2025-11-23T14:43:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"} Nov 23 14:43:08 crc kubenswrapper[5050]: I1123 14:43:08.854538 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:43:08 crc kubenswrapper[5050]: I1123 14:43:08.854635 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:43:08 crc kubenswrapper[5050]: I1123 14:43:08.854698 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:43:08 crc kubenswrapper[5050]: I1123 14:43:08.854727 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:43:08 crc kubenswrapper[5050]: I1123 14:43:08.854793 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:08Z","lastTransitionTime":"2025-11-23T14:43:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:43:08 crc kubenswrapper[5050]: I1123 14:43:08.957768 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:43:08 crc kubenswrapper[5050]: I1123 14:43:08.957842 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:43:08 crc kubenswrapper[5050]: I1123 14:43:08.957863 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:43:08 crc kubenswrapper[5050]: I1123 14:43:08.957897 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:43:08 crc kubenswrapper[5050]: I1123 14:43:08.957922 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:08Z","lastTransitionTime":"2025-11-23T14:43:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:43:09 crc kubenswrapper[5050]: I1123 14:43:09.061215 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:43:09 crc kubenswrapper[5050]: I1123 14:43:09.061282 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:43:09 crc kubenswrapper[5050]: I1123 14:43:09.061300 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:43:09 crc kubenswrapper[5050]: I1123 14:43:09.061327 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:43:09 crc kubenswrapper[5050]: I1123 14:43:09.061349 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:09Z","lastTransitionTime":"2025-11-23T14:43:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:43:09 crc kubenswrapper[5050]: I1123 14:43:09.165582 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:43:09 crc kubenswrapper[5050]: I1123 14:43:09.165664 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:43:09 crc kubenswrapper[5050]: I1123 14:43:09.165682 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:43:09 crc kubenswrapper[5050]: I1123 14:43:09.165711 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:43:09 crc kubenswrapper[5050]: I1123 14:43:09.165731 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:09Z","lastTransitionTime":"2025-11-23T14:43:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:43:09 crc kubenswrapper[5050]: I1123 14:43:09.269822 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:43:09 crc kubenswrapper[5050]: I1123 14:43:09.269929 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:43:09 crc kubenswrapper[5050]: I1123 14:43:09.269950 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:43:09 crc kubenswrapper[5050]: I1123 14:43:09.269978 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:43:09 crc kubenswrapper[5050]: I1123 14:43:09.269999 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:09Z","lastTransitionTime":"2025-11-23T14:43:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:43:09 crc kubenswrapper[5050]: I1123 14:43:09.373495 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:43:09 crc kubenswrapper[5050]: I1123 14:43:09.373547 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:43:09 crc kubenswrapper[5050]: I1123 14:43:09.373567 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:43:09 crc kubenswrapper[5050]: I1123 14:43:09.373594 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:43:09 crc kubenswrapper[5050]: I1123 14:43:09.373612 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:09Z","lastTransitionTime":"2025-11-23T14:43:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 23 14:43:09 crc kubenswrapper[5050]: I1123 14:43:09.476843 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 23 14:43:09 crc kubenswrapper[5050]: I1123 14:43:09.476914 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 23 14:43:09 crc kubenswrapper[5050]: I1123 14:43:09.476933 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 23 14:43:09 crc kubenswrapper[5050]: I1123 14:43:09.476959 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 23 14:43:09 crc kubenswrapper[5050]: I1123 14:43:09.476980 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:09Z","lastTransitionTime":"2025-11-23T14:43:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 23 14:43:09 crc kubenswrapper[5050]: I1123 14:43:09.535189 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 23 14:43:09 crc kubenswrapper[5050]: E1123 14:43:09.535583 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-23 14:44:13.535527821 +0000 UTC m=+148.702524346 (durationBeforeRetry 1m4s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 23 14:43:09 crc kubenswrapper[5050]: I1123 14:43:09.535657 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 23 14:43:09 crc kubenswrapper[5050]: I1123 14:43:09.535746 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 23 14:43:09 crc kubenswrapper[5050]: E1123 14:43:09.535944 5050 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered
Nov 23 14:43:09 crc kubenswrapper[5050]: E1123 14:43:09.536071 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-23 14:44:13.536036415 +0000 UTC m=+148.703032940 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered
Nov 23 14:43:09 crc kubenswrapper[5050]: E1123 14:43:09.535958 5050 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered
Nov 23 14:43:09 crc kubenswrapper[5050]: E1123 14:43:09.536211 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-23 14:44:13.536180429 +0000 UTC m=+148.703176964 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered
Nov 23 14:43:09 crc kubenswrapper[5050]: I1123 14:43:09.548495 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 23 14:43:09 crc kubenswrapper[5050]: E1123 14:43:09.548737 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 23 14:43:09 crc kubenswrapper[5050]: I1123 14:43:09.579631 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 23 14:43:09 crc kubenswrapper[5050]: I1123 14:43:09.579737 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 23 14:43:09 crc kubenswrapper[5050]: I1123 14:43:09.579757 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 23 14:43:09 crc kubenswrapper[5050]: I1123 14:43:09.579782 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 23 14:43:09 crc kubenswrapper[5050]: I1123 14:43:09.579809 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:09Z","lastTransitionTime":"2025-11-23T14:43:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 23 14:43:09 crc kubenswrapper[5050]: I1123 14:43:09.637190 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 23 14:43:09 crc kubenswrapper[5050]: I1123 14:43:09.637266 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 23 14:43:09 crc kubenswrapper[5050]: E1123 14:43:09.637607 5050 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Nov 23 14:43:09 crc kubenswrapper[5050]: E1123 14:43:09.637664 5050 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Nov 23 14:43:09 crc kubenswrapper[5050]: E1123 14:43:09.637694 5050 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 23 14:43:09 crc kubenswrapper[5050]: E1123 14:43:09.637610 5050 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Nov 23 14:43:09 crc kubenswrapper[5050]: E1123 14:43:09.637782 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-23 14:44:13.63774887 +0000 UTC m=+148.804745395 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 23 14:43:09 crc kubenswrapper[5050]: E1123 14:43:09.637785 5050 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Nov 23 14:43:09 crc kubenswrapper[5050]: E1123 14:43:09.637818 5050 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 23 14:43:09 crc kubenswrapper[5050]: E1123 14:43:09.637892 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-23 14:44:13.637866463 +0000 UTC m=+148.804862978 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 23 14:43:09 crc kubenswrapper[5050]: I1123 14:43:09.682836 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 23 14:43:09 crc kubenswrapper[5050]: I1123 14:43:09.682911 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 23 14:43:09 crc kubenswrapper[5050]: I1123 14:43:09.682931 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 23 14:43:09 crc kubenswrapper[5050]: I1123 14:43:09.682957 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 23 14:43:09 crc kubenswrapper[5050]: I1123 14:43:09.682976 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:09Z","lastTransitionTime":"2025-11-23T14:43:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
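[Annotation] The nestedpendingoperations entries above all defer their retry by 1m4s (64s) and schedule it against the kubelet's monotonic clock (m=+148.7). That delay is consistent with the volume manager's exponential backoff, which roughly doubles per failure from a small base up to a cap; a sketch of that schedule follows (the base and cap are assumed defaults, not values read from this log):

package main

import (
	"fmt"
	"time"
)

func main() {
	// Assumed kubelet-style exponential backoff: double per failure from a
	// small base up to a cap. 64s (the logged 1m4s) is the eighth step.
	base := 500 * time.Millisecond
	maxDelay := 2*time.Minute + 2*time.Second // assumption, not read from the log
	for i, d := 0, base; d <= maxDelay; i, d = i+1, d*2 {
		fmt.Printf("failure %d -> durationBeforeRetry %v\n", i, d)
	}
}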
Has your network provider started?"} Nov 23 14:43:09 crc kubenswrapper[5050]: I1123 14:43:09.786038 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:43:09 crc kubenswrapper[5050]: I1123 14:43:09.786112 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:43:09 crc kubenswrapper[5050]: I1123 14:43:09.786129 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:43:09 crc kubenswrapper[5050]: I1123 14:43:09.786153 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:43:09 crc kubenswrapper[5050]: I1123 14:43:09.786171 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:09Z","lastTransitionTime":"2025-11-23T14:43:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:43:09 crc kubenswrapper[5050]: I1123 14:43:09.890025 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:43:09 crc kubenswrapper[5050]: I1123 14:43:09.890116 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:43:09 crc kubenswrapper[5050]: I1123 14:43:09.890139 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:43:09 crc kubenswrapper[5050]: I1123 14:43:09.890177 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:43:09 crc kubenswrapper[5050]: I1123 14:43:09.890202 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:09Z","lastTransitionTime":"2025-11-23T14:43:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:43:09 crc kubenswrapper[5050]: I1123 14:43:09.993038 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:43:09 crc kubenswrapper[5050]: I1123 14:43:09.993106 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:43:09 crc kubenswrapper[5050]: I1123 14:43:09.993131 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:43:09 crc kubenswrapper[5050]: I1123 14:43:09.993159 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:43:09 crc kubenswrapper[5050]: I1123 14:43:09.993177 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:09Z","lastTransitionTime":"2025-11-23T14:43:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:43:10 crc kubenswrapper[5050]: I1123 14:43:10.097071 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:43:10 crc kubenswrapper[5050]: I1123 14:43:10.097142 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:43:10 crc kubenswrapper[5050]: I1123 14:43:10.097197 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:43:10 crc kubenswrapper[5050]: I1123 14:43:10.097226 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:43:10 crc kubenswrapper[5050]: I1123 14:43:10.097246 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:10Z","lastTransitionTime":"2025-11-23T14:43:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:43:10 crc kubenswrapper[5050]: I1123 14:43:10.200622 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:43:10 crc kubenswrapper[5050]: I1123 14:43:10.200691 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:43:10 crc kubenswrapper[5050]: I1123 14:43:10.200708 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:43:10 crc kubenswrapper[5050]: I1123 14:43:10.200736 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:43:10 crc kubenswrapper[5050]: I1123 14:43:10.200757 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:10Z","lastTransitionTime":"2025-11-23T14:43:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:43:10 crc kubenswrapper[5050]: I1123 14:43:10.303847 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:43:10 crc kubenswrapper[5050]: I1123 14:43:10.303917 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:43:10 crc kubenswrapper[5050]: I1123 14:43:10.303934 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:43:10 crc kubenswrapper[5050]: I1123 14:43:10.303966 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:43:10 crc kubenswrapper[5050]: I1123 14:43:10.303986 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:10Z","lastTransitionTime":"2025-11-23T14:43:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:43:10 crc kubenswrapper[5050]: I1123 14:43:10.407764 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:43:10 crc kubenswrapper[5050]: I1123 14:43:10.407860 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:43:10 crc kubenswrapper[5050]: I1123 14:43:10.407877 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:43:10 crc kubenswrapper[5050]: I1123 14:43:10.407902 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:43:10 crc kubenswrapper[5050]: I1123 14:43:10.407925 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:10Z","lastTransitionTime":"2025-11-23T14:43:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:43:10 crc kubenswrapper[5050]: I1123 14:43:10.511708 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:43:10 crc kubenswrapper[5050]: I1123 14:43:10.511779 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:43:10 crc kubenswrapper[5050]: I1123 14:43:10.511799 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:43:10 crc kubenswrapper[5050]: I1123 14:43:10.511825 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:43:10 crc kubenswrapper[5050]: I1123 14:43:10.511844 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:10Z","lastTransitionTime":"2025-11-23T14:43:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:43:10 crc kubenswrapper[5050]: I1123 14:43:10.547690 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 23 14:43:10 crc kubenswrapper[5050]: I1123 14:43:10.547725 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gtj96" Nov 23 14:43:10 crc kubenswrapper[5050]: I1123 14:43:10.547778 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 23 14:43:10 crc kubenswrapper[5050]: E1123 14:43:10.547883 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 23 14:43:10 crc kubenswrapper[5050]: E1123 14:43:10.548032 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 23 14:43:10 crc kubenswrapper[5050]: E1123 14:43:10.548339 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gtj96" podUID="cc69bd19-1f49-486e-a510-d5b8461fb172" Nov 23 14:43:10 crc kubenswrapper[5050]: I1123 14:43:10.616228 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:43:10 crc kubenswrapper[5050]: I1123 14:43:10.616286 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:43:10 crc kubenswrapper[5050]: I1123 14:43:10.616306 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:43:10 crc kubenswrapper[5050]: I1123 14:43:10.616335 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:43:10 crc kubenswrapper[5050]: I1123 14:43:10.616356 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:10Z","lastTransitionTime":"2025-11-23T14:43:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:43:10 crc kubenswrapper[5050]: I1123 14:43:10.719682 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:43:10 crc kubenswrapper[5050]: I1123 14:43:10.719754 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:43:10 crc kubenswrapper[5050]: I1123 14:43:10.719778 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:43:10 crc kubenswrapper[5050]: I1123 14:43:10.719811 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:43:10 crc kubenswrapper[5050]: I1123 14:43:10.719836 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:10Z","lastTransitionTime":"2025-11-23T14:43:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:43:10 crc kubenswrapper[5050]: I1123 14:43:10.823723 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:43:10 crc kubenswrapper[5050]: I1123 14:43:10.823771 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:43:10 crc kubenswrapper[5050]: I1123 14:43:10.823787 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:43:10 crc kubenswrapper[5050]: I1123 14:43:10.823812 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:43:10 crc kubenswrapper[5050]: I1123 14:43:10.823826 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:10Z","lastTransitionTime":"2025-11-23T14:43:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:43:10 crc kubenswrapper[5050]: I1123 14:43:10.927395 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:43:10 crc kubenswrapper[5050]: I1123 14:43:10.927499 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:43:10 crc kubenswrapper[5050]: I1123 14:43:10.927516 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:43:10 crc kubenswrapper[5050]: I1123 14:43:10.927545 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:43:10 crc kubenswrapper[5050]: I1123 14:43:10.927567 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:10Z","lastTransitionTime":"2025-11-23T14:43:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:43:11 crc kubenswrapper[5050]: I1123 14:43:11.032110 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:43:11 crc kubenswrapper[5050]: I1123 14:43:11.032193 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:43:11 crc kubenswrapper[5050]: I1123 14:43:11.032212 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:43:11 crc kubenswrapper[5050]: I1123 14:43:11.032242 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:43:11 crc kubenswrapper[5050]: I1123 14:43:11.032264 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:11Z","lastTransitionTime":"2025-11-23T14:43:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:43:11 crc kubenswrapper[5050]: I1123 14:43:11.135069 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:43:11 crc kubenswrapper[5050]: I1123 14:43:11.135117 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:43:11 crc kubenswrapper[5050]: I1123 14:43:11.135136 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:43:11 crc kubenswrapper[5050]: I1123 14:43:11.135158 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:43:11 crc kubenswrapper[5050]: I1123 14:43:11.135178 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:11Z","lastTransitionTime":"2025-11-23T14:43:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:43:11 crc kubenswrapper[5050]: I1123 14:43:11.237865 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:43:11 crc kubenswrapper[5050]: I1123 14:43:11.237926 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:43:11 crc kubenswrapper[5050]: I1123 14:43:11.237946 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:43:11 crc kubenswrapper[5050]: I1123 14:43:11.237969 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:43:11 crc kubenswrapper[5050]: I1123 14:43:11.237990 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:11Z","lastTransitionTime":"2025-11-23T14:43:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:43:11 crc kubenswrapper[5050]: I1123 14:43:11.340995 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:43:11 crc kubenswrapper[5050]: I1123 14:43:11.341076 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:43:11 crc kubenswrapper[5050]: I1123 14:43:11.341096 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:43:11 crc kubenswrapper[5050]: I1123 14:43:11.341128 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:43:11 crc kubenswrapper[5050]: I1123 14:43:11.341152 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:11Z","lastTransitionTime":"2025-11-23T14:43:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:43:11 crc kubenswrapper[5050]: I1123 14:43:11.445030 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:43:11 crc kubenswrapper[5050]: I1123 14:43:11.445100 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:43:11 crc kubenswrapper[5050]: I1123 14:43:11.445120 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:43:11 crc kubenswrapper[5050]: I1123 14:43:11.445151 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:43:11 crc kubenswrapper[5050]: I1123 14:43:11.445170 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:11Z","lastTransitionTime":"2025-11-23T14:43:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:43:11 crc kubenswrapper[5050]: I1123 14:43:11.548012 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:43:11 crc kubenswrapper[5050]: I1123 14:43:11.548047 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:43:11 crc kubenswrapper[5050]: I1123 14:43:11.548063 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:43:11 crc kubenswrapper[5050]: I1123 14:43:11.548080 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:43:11 crc kubenswrapper[5050]: I1123 14:43:11.548095 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:11Z","lastTransitionTime":"2025-11-23T14:43:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:43:11 crc kubenswrapper[5050]: I1123 14:43:11.548103 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 23 14:43:11 crc kubenswrapper[5050]: E1123 14:43:11.548951 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 23 14:43:11 crc kubenswrapper[5050]: I1123 14:43:11.652821 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:43:11 crc kubenswrapper[5050]: I1123 14:43:11.652894 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:43:11 crc kubenswrapper[5050]: I1123 14:43:11.652913 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:43:11 crc kubenswrapper[5050]: I1123 14:43:11.652942 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:43:11 crc kubenswrapper[5050]: I1123 14:43:11.652964 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:11Z","lastTransitionTime":"2025-11-23T14:43:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:43:11 crc kubenswrapper[5050]: I1123 14:43:11.758054 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:43:11 crc kubenswrapper[5050]: I1123 14:43:11.758133 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:43:11 crc kubenswrapper[5050]: I1123 14:43:11.758153 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:43:11 crc kubenswrapper[5050]: I1123 14:43:11.758187 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:43:11 crc kubenswrapper[5050]: I1123 14:43:11.758209 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:11Z","lastTransitionTime":"2025-11-23T14:43:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 23 14:43:11 crc kubenswrapper[5050]: I1123 14:43:11.861986 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:43:11 crc kubenswrapper[5050]: I1123 14:43:11.862118 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:43:11 crc kubenswrapper[5050]: I1123 14:43:11.862140 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:43:11 crc kubenswrapper[5050]: I1123 14:43:11.862167 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:43:11 crc kubenswrapper[5050]: I1123 14:43:11.862222 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:11Z","lastTransitionTime":"2025-11-23T14:43:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:43:11 crc kubenswrapper[5050]: I1123 14:43:11.966367 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:43:11 crc kubenswrapper[5050]: I1123 14:43:11.966481 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:43:11 crc kubenswrapper[5050]: I1123 14:43:11.966510 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:43:11 crc kubenswrapper[5050]: I1123 14:43:11.966548 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:43:11 crc kubenswrapper[5050]: I1123 14:43:11.966583 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:11Z","lastTransitionTime":"2025-11-23T14:43:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 23 14:43:12 crc kubenswrapper[5050]: I1123 14:43:12.070121 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 23 14:43:12 crc kubenswrapper[5050]: I1123 14:43:12.070159 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 23 14:43:12 crc kubenswrapper[5050]: I1123 14:43:12.070168 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 23 14:43:12 crc kubenswrapper[5050]: I1123 14:43:12.070186 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 23 14:43:12 crc kubenswrapper[5050]: I1123 14:43:12.070198 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:12Z","lastTransitionTime":"2025-11-23T14:43:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 23 14:43:12 crc kubenswrapper[5050]: I1123 14:43:12.173064 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 23 14:43:12 crc kubenswrapper[5050]: I1123 14:43:12.173126 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 23 14:43:12 crc kubenswrapper[5050]: I1123 14:43:12.173149 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 23 14:43:12 crc kubenswrapper[5050]: I1123 14:43:12.173186 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 23 14:43:12 crc kubenswrapper[5050]: I1123 14:43:12.173209 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:12Z","lastTransitionTime":"2025-11-23T14:43:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 23 14:43:12 crc kubenswrapper[5050]: I1123 14:43:12.276953 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 23 14:43:12 crc kubenswrapper[5050]: I1123 14:43:12.277026 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 23 14:43:12 crc kubenswrapper[5050]: I1123 14:43:12.277058 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 23 14:43:12 crc kubenswrapper[5050]: I1123 14:43:12.277086 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 23 14:43:12 crc kubenswrapper[5050]: I1123 14:43:12.277105 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:12Z","lastTransitionTime":"2025-11-23T14:43:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 23 14:43:12 crc kubenswrapper[5050]: I1123 14:43:12.380752 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 23 14:43:12 crc kubenswrapper[5050]: I1123 14:43:12.380789 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 23 14:43:12 crc kubenswrapper[5050]: I1123 14:43:12.380799 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 23 14:43:12 crc kubenswrapper[5050]: I1123 14:43:12.380816 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 23 14:43:12 crc kubenswrapper[5050]: I1123 14:43:12.380831 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:12Z","lastTransitionTime":"2025-11-23T14:43:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 23 14:43:12 crc kubenswrapper[5050]: I1123 14:43:12.484890 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 23 14:43:12 crc kubenswrapper[5050]: I1123 14:43:12.485694 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 23 14:43:12 crc kubenswrapper[5050]: I1123 14:43:12.485749 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 23 14:43:12 crc kubenswrapper[5050]: I1123 14:43:12.485798 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 23 14:43:12 crc kubenswrapper[5050]: I1123 14:43:12.485827 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:12Z","lastTransitionTime":"2025-11-23T14:43:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 23 14:43:12 crc kubenswrapper[5050]: I1123 14:43:12.547901 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 23 14:43:12 crc kubenswrapper[5050]: I1123 14:43:12.547953 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gtj96"
Nov 23 14:43:12 crc kubenswrapper[5050]: I1123 14:43:12.548194 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 23 14:43:12 crc kubenswrapper[5050]: E1123 14:43:12.548202 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 23 14:43:12 crc kubenswrapper[5050]: E1123 14:43:12.548395 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gtj96" podUID="cc69bd19-1f49-486e-a510-d5b8461fb172"
Nov 23 14:43:12 crc kubenswrapper[5050]: E1123 14:43:12.548561 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 23 14:43:12 crc kubenswrapper[5050]: I1123 14:43:12.589414 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 23 14:43:12 crc kubenswrapper[5050]: I1123 14:43:12.589606 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 23 14:43:12 crc kubenswrapper[5050]: I1123 14:43:12.589642 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 23 14:43:12 crc kubenswrapper[5050]: I1123 14:43:12.589717 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 23 14:43:12 crc kubenswrapper[5050]: I1123 14:43:12.589779 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:12Z","lastTransitionTime":"2025-11-23T14:43:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 23 14:43:12 crc kubenswrapper[5050]: I1123 14:43:12.693069 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 23 14:43:12 crc kubenswrapper[5050]: I1123 14:43:12.693120 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 23 14:43:12 crc kubenswrapper[5050]: I1123 14:43:12.693132 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 23 14:43:12 crc kubenswrapper[5050]: I1123 14:43:12.693152 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 23 14:43:12 crc kubenswrapper[5050]: I1123 14:43:12.693165 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:12Z","lastTransitionTime":"2025-11-23T14:43:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 23 14:43:12 crc kubenswrapper[5050]: I1123 14:43:12.795677 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 23 14:43:12 crc kubenswrapper[5050]: I1123 14:43:12.795760 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 23 14:43:12 crc kubenswrapper[5050]: I1123 14:43:12.795783 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 23 14:43:12 crc kubenswrapper[5050]: I1123 14:43:12.795813 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 23 14:43:12 crc kubenswrapper[5050]: I1123 14:43:12.795840 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:12Z","lastTransitionTime":"2025-11-23T14:43:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 23 14:43:12 crc kubenswrapper[5050]: I1123 14:43:12.898739 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 23 14:43:12 crc kubenswrapper[5050]: I1123 14:43:12.898867 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 23 14:43:12 crc kubenswrapper[5050]: I1123 14:43:12.898904 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 23 14:43:12 crc kubenswrapper[5050]: I1123 14:43:12.898949 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 23 14:43:12 crc kubenswrapper[5050]: I1123 14:43:12.899011 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:12Z","lastTransitionTime":"2025-11-23T14:43:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 23 14:43:13 crc kubenswrapper[5050]: I1123 14:43:13.002432 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 23 14:43:13 crc kubenswrapper[5050]: I1123 14:43:13.002529 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 23 14:43:13 crc kubenswrapper[5050]: I1123 14:43:13.002540 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 23 14:43:13 crc kubenswrapper[5050]: I1123 14:43:13.002565 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 23 14:43:13 crc kubenswrapper[5050]: I1123 14:43:13.002578 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:13Z","lastTransitionTime":"2025-11-23T14:43:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 23 14:43:13 crc kubenswrapper[5050]: I1123 14:43:13.106954 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 23 14:43:13 crc kubenswrapper[5050]: I1123 14:43:13.107025 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 23 14:43:13 crc kubenswrapper[5050]: I1123 14:43:13.107042 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 23 14:43:13 crc kubenswrapper[5050]: I1123 14:43:13.107068 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 23 14:43:13 crc kubenswrapper[5050]: I1123 14:43:13.107087 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:13Z","lastTransitionTime":"2025-11-23T14:43:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 23 14:43:13 crc kubenswrapper[5050]: I1123 14:43:13.210926 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 23 14:43:13 crc kubenswrapper[5050]: I1123 14:43:13.211028 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 23 14:43:13 crc kubenswrapper[5050]: I1123 14:43:13.211049 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 23 14:43:13 crc kubenswrapper[5050]: I1123 14:43:13.211075 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 23 14:43:13 crc kubenswrapper[5050]: I1123 14:43:13.211095 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:13Z","lastTransitionTime":"2025-11-23T14:43:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 23 14:43:13 crc kubenswrapper[5050]: I1123 14:43:13.314583 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 23 14:43:13 crc kubenswrapper[5050]: I1123 14:43:13.314684 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 23 14:43:13 crc kubenswrapper[5050]: I1123 14:43:13.314703 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 23 14:43:13 crc kubenswrapper[5050]: I1123 14:43:13.314728 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 23 14:43:13 crc kubenswrapper[5050]: I1123 14:43:13.314748 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:13Z","lastTransitionTime":"2025-11-23T14:43:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 23 14:43:13 crc kubenswrapper[5050]: I1123 14:43:13.418258 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 23 14:43:13 crc kubenswrapper[5050]: I1123 14:43:13.418332 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 23 14:43:13 crc kubenswrapper[5050]: I1123 14:43:13.418348 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 23 14:43:13 crc kubenswrapper[5050]: I1123 14:43:13.418381 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 23 14:43:13 crc kubenswrapper[5050]: I1123 14:43:13.418405 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:13Z","lastTransitionTime":"2025-11-23T14:43:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 23 14:43:13 crc kubenswrapper[5050]: I1123 14:43:13.434286 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 23 14:43:13 crc kubenswrapper[5050]: I1123 14:43:13.434352 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 23 14:43:13 crc kubenswrapper[5050]: I1123 14:43:13.434370 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 23 14:43:13 crc kubenswrapper[5050]: I1123 14:43:13.434399 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 23 14:43:13 crc kubenswrapper[5050]: I1123 14:43:13.434418 5050 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-23T14:43:13Z","lastTransitionTime":"2025-11-23T14:43:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 23 14:43:13 crc kubenswrapper[5050]: I1123 14:43:13.517350 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-version/cluster-version-operator-5c965bbfc6-gc9m9"]
Nov 23 14:43:13 crc kubenswrapper[5050]: I1123 14:43:13.518015 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-gc9m9"
Nov 23 14:43:13 crc kubenswrapper[5050]: I1123 14:43:13.521862 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert"
Nov 23 14:43:13 crc kubenswrapper[5050]: I1123 14:43:13.522184 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4"
Nov 23 14:43:13 crc kubenswrapper[5050]: I1123 14:43:13.522731 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt"
Nov 23 14:43:13 crc kubenswrapper[5050]: I1123 14:43:13.522894 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt"
Nov 23 14:43:13 crc kubenswrapper[5050]: I1123 14:43:13.547955 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 23 14:43:13 crc kubenswrapper[5050]: E1123 14:43:13.548220 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 23 14:43:13 crc kubenswrapper[5050]: I1123 14:43:13.565366 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podStartSLOduration=67.565338624 podStartE2EDuration="1m7.565338624s" podCreationTimestamp="2025-11-23 14:42:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 14:43:13.545287695 +0000 UTC m=+88.712284260" watchObservedRunningTime="2025-11-23 14:43:13.565338624 +0000 UTC m=+88.732335119"
Nov 23 14:43:13 crc kubenswrapper[5050]: I1123 14:43:13.586369 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/8fdaab3e-3521-4b93-b800-020a52519f06-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-gc9m9\" (UID: \"8fdaab3e-3521-4b93-b800-020a52519f06\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-gc9m9"
Nov 23 14:43:13 crc kubenswrapper[5050]: I1123 14:43:13.586501 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/8fdaab3e-3521-4b93-b800-020a52519f06-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-gc9m9\" (UID: \"8fdaab3e-3521-4b93-b800-020a52519f06\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-gc9m9"
Nov 23 14:43:13 crc kubenswrapper[5050]: I1123 14:43:13.586545 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/8fdaab3e-3521-4b93-b800-020a52519f06-service-ca\") pod \"cluster-version-operator-5c965bbfc6-gc9m9\" (UID: \"8fdaab3e-3521-4b93-b800-020a52519f06\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-gc9m9"
Nov 23 14:43:13 crc kubenswrapper[5050]: I1123 14:43:13.586670 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8fdaab3e-3521-4b93-b800-020a52519f06-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-gc9m9\" (UID: \"8fdaab3e-3521-4b93-b800-020a52519f06\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-gc9m9"
Nov 23 14:43:13 crc kubenswrapper[5050]: I1123 14:43:13.586723 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/8fdaab3e-3521-4b93-b800-020a52519f06-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-gc9m9\" (UID: \"8fdaab3e-3521-4b93-b800-020a52519f06\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-gc9m9"
Nov 23 14:43:13 crc kubenswrapper[5050]: I1123 14:43:13.592120 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" podStartSLOduration=22.592081323 podStartE2EDuration="22.592081323s" podCreationTimestamp="2025-11-23 14:42:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 14:43:13.565288583 +0000 UTC m=+88.732285108" watchObservedRunningTime="2025-11-23 14:43:13.592081323 +0000 UTC m=+88.759077848"
Nov 23 14:43:13 crc kubenswrapper[5050]: I1123 14:43:13.632290 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podStartSLOduration=67.632251303 podStartE2EDuration="1m7.632251303s" podCreationTimestamp="2025-11-23 14:42:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 14:43:13.631231766 +0000 UTC m=+88.798228261" watchObservedRunningTime="2025-11-23 14:43:13.632251303 +0000 UTC m=+88.799247818"
Nov 23 14:43:13 crc kubenswrapper[5050]: I1123 14:43:13.688917 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8fdaab3e-3521-4b93-b800-020a52519f06-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-gc9m9\" (UID: \"8fdaab3e-3521-4b93-b800-020a52519f06\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-gc9m9"
Nov 23 14:43:13 crc kubenswrapper[5050]: I1123 14:43:13.689013 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/8fdaab3e-3521-4b93-b800-020a52519f06-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-gc9m9\" (UID: \"8fdaab3e-3521-4b93-b800-020a52519f06\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-gc9m9"
Nov 23 14:43:13 crc kubenswrapper[5050]: I1123 14:43:13.689054 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/8fdaab3e-3521-4b93-b800-020a52519f06-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-gc9m9\" (UID: \"8fdaab3e-3521-4b93-b800-020a52519f06\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-gc9m9"
Nov 23 14:43:13 crc kubenswrapper[5050]: I1123 14:43:13.689090 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/8fdaab3e-3521-4b93-b800-020a52519f06-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-gc9m9\" (UID: \"8fdaab3e-3521-4b93-b800-020a52519f06\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-gc9m9"
Nov 23 14:43:13 crc kubenswrapper[5050]: I1123 14:43:13.689120 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/8fdaab3e-3521-4b93-b800-020a52519f06-service-ca\") pod \"cluster-version-operator-5c965bbfc6-gc9m9\" (UID: \"8fdaab3e-3521-4b93-b800-020a52519f06\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-gc9m9"
Nov 23 14:43:13 crc kubenswrapper[5050]: I1123 14:43:13.689954 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/8fdaab3e-3521-4b93-b800-020a52519f06-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-gc9m9\" (UID: \"8fdaab3e-3521-4b93-b800-020a52519f06\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-gc9m9"
Nov 23 14:43:13 crc kubenswrapper[5050]: I1123 14:43:13.690194 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/8fdaab3e-3521-4b93-b800-020a52519f06-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-gc9m9\" (UID: \"8fdaab3e-3521-4b93-b800-020a52519f06\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-gc9m9"
Nov 23 14:43:13 crc kubenswrapper[5050]: I1123 14:43:13.691213 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/8fdaab3e-3521-4b93-b800-020a52519f06-service-ca\") pod \"cluster-version-operator-5c965bbfc6-gc9m9\" (UID: \"8fdaab3e-3521-4b93-b800-020a52519f06\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-gc9m9"
Nov 23 14:43:13 crc kubenswrapper[5050]: I1123 14:43:13.693249 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-additional-cni-plugins-km979" podStartSLOduration=67.693230853 podStartE2EDuration="1m7.693230853s" podCreationTimestamp="2025-11-23 14:42:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 14:43:13.676716769 +0000 UTC m=+88.843713274" watchObservedRunningTime="2025-11-23 14:43:13.693230853 +0000 UTC m=+88.860227348"
Nov 23 14:43:13 crc kubenswrapper[5050]: I1123 14:43:13.705488 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8fdaab3e-3521-4b93-b800-020a52519f06-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-gc9m9\" (UID: \"8fdaab3e-3521-4b93-b800-020a52519f06\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-gc9m9"
Nov 23 14:43:13 crc kubenswrapper[5050]: I1123 14:43:13.718801 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/8fdaab3e-3521-4b93-b800-020a52519f06-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-gc9m9\" (UID: \"8fdaab3e-3521-4b93-b800-020a52519f06\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-gc9m9"
Nov 23 14:43:13 crc kubenswrapper[5050]: I1123 14:43:13.744649 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=68.744625155 podStartE2EDuration="1m8.744625155s" podCreationTimestamp="2025-11-23 14:42:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 14:43:13.724261947 +0000 UTC m=+88.891258442" watchObservedRunningTime="2025-11-23 14:43:13.744625155 +0000 UTC m=+88.911621650"
Nov 23 14:43:13 crc kubenswrapper[5050]: I1123 14:43:13.808543 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/node-resolver-7btqb" podStartSLOduration=68.808515583 podStartE2EDuration="1m8.808515583s" podCreationTimestamp="2025-11-23 14:42:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 14:43:13.807910637 +0000 UTC m=+88.974907122" watchObservedRunningTime="2025-11-23 14:43:13.808515583 +0000 UTC m=+88.975512078"
Nov 23 14:43:13 crc kubenswrapper[5050]: I1123 14:43:13.839847 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-gc9m9"
Nov 23 14:43:13 crc kubenswrapper[5050]: I1123 14:43:13.863199 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-9q4mg" podStartSLOduration=67.863167832 podStartE2EDuration="1m7.863167832s" podCreationTimestamp="2025-11-23 14:42:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 14:43:13.859414571 +0000 UTC m=+89.026411066" watchObservedRunningTime="2025-11-23 14:43:13.863167832 +0000 UTC m=+89.030164337"
Nov 23 14:43:13 crc kubenswrapper[5050]: I1123 14:43:13.908160 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podStartSLOduration=36.908134671 podStartE2EDuration="36.908134671s" podCreationTimestamp="2025-11-23 14:42:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 14:43:13.892359777 +0000 UTC m=+89.059356272" watchObservedRunningTime="2025-11-23 14:43:13.908134671 +0000 UTC m=+89.075131166"
Nov 23 14:43:13 crc kubenswrapper[5050]: I1123 14:43:13.944592 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/node-ca-7cjg9" podStartSLOduration=67.944566481 podStartE2EDuration="1m7.944566481s" podCreationTimestamp="2025-11-23 14:42:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 14:43:13.944279373 +0000 UTC m=+89.111275878" watchObservedRunningTime="2025-11-23 14:43:13.944566481 +0000 UTC m=+89.111562976"
Nov 23 14:43:13 crc kubenswrapper[5050]: I1123 14:43:13.945159 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-qvjn2" podStartSLOduration=67.945152617 podStartE2EDuration="1m7.945152617s" podCreationTimestamp="2025-11-23 14:42:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 14:43:13.930085742 +0000 UTC m=+89.097082237" watchObservedRunningTime="2025-11-23 14:43:13.945152617 +0000 UTC m=+89.112149112"
Nov 23 14:43:14 crc kubenswrapper[5050]: I1123 14:43:14.149130 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-gc9m9" event={"ID":"8fdaab3e-3521-4b93-b800-020a52519f06","Type":"ContainerStarted","Data":"220d3e01d404fe09799feada5886fbd81fe1e7e3e0cf0a8f30d700a53bdc0045"}
Nov 23 14:43:14 crc kubenswrapper[5050]: I1123 14:43:14.149228 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-gc9m9" event={"ID":"8fdaab3e-3521-4b93-b800-020a52519f06","Type":"ContainerStarted","Data":"777bd8937abfea665c90e373c0c9e09dda4980c25fcdf93b71427368963d21bb"}
Nov 23 14:43:14 crc kubenswrapper[5050]: I1123 14:43:14.548082 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 23 14:43:14 crc kubenswrapper[5050]: I1123 14:43:14.548174 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gtj96"
Nov 23 14:43:14 crc kubenswrapper[5050]: I1123 14:43:14.548278 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 23 14:43:14 crc kubenswrapper[5050]: E1123 14:43:14.548534 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 23 14:43:14 crc kubenswrapper[5050]: E1123 14:43:14.548727 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gtj96" podUID="cc69bd19-1f49-486e-a510-d5b8461fb172"
Nov 23 14:43:14 crc kubenswrapper[5050]: E1123 14:43:14.549367 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 23 14:43:14 crc kubenswrapper[5050]: I1123 14:43:14.549937 5050 scope.go:117] "RemoveContainer" containerID="82a0b0931dae074fdea5a30d5f082d4040553acde383b476d50f275a40eaec82"
Nov 23 14:43:14 crc kubenswrapper[5050]: E1123 14:43:14.550239 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-j8fzz_openshift-ovn-kubernetes(6573c043-542c-47ae-a2ba-f70b8baf60c2)\"" pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" podUID="6573c043-542c-47ae-a2ba-f70b8baf60c2"
Nov 23 14:43:15 crc kubenswrapper[5050]: I1123 14:43:15.548261 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 23 14:43:15 crc kubenswrapper[5050]: E1123 14:43:15.549392 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 23 14:43:16 crc kubenswrapper[5050]: I1123 14:43:16.548264 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 23 14:43:16 crc kubenswrapper[5050]: I1123 14:43:16.548330 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gtj96"
Nov 23 14:43:16 crc kubenswrapper[5050]: I1123 14:43:16.548537 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 23 14:43:16 crc kubenswrapper[5050]: E1123 14:43:16.548871 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 23 14:43:16 crc kubenswrapper[5050]: E1123 14:43:16.549042 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 23 14:43:16 crc kubenswrapper[5050]: E1123 14:43:16.549223 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gtj96" podUID="cc69bd19-1f49-486e-a510-d5b8461fb172"
Nov 23 14:43:17 crc kubenswrapper[5050]: I1123 14:43:17.548219 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 23 14:43:17 crc kubenswrapper[5050]: E1123 14:43:17.548548 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 23 14:43:18 crc kubenswrapper[5050]: I1123 14:43:18.548158 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gtj96"
Nov 23 14:43:18 crc kubenswrapper[5050]: I1123 14:43:18.548228 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 23 14:43:18 crc kubenswrapper[5050]: I1123 14:43:18.548190 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 23 14:43:18 crc kubenswrapper[5050]: E1123 14:43:18.548434 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gtj96" podUID="cc69bd19-1f49-486e-a510-d5b8461fb172"
Nov 23 14:43:18 crc kubenswrapper[5050]: E1123 14:43:18.548582 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 23 14:43:18 crc kubenswrapper[5050]: E1123 14:43:18.548685 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 23 14:43:19 crc kubenswrapper[5050]: I1123 14:43:19.547995 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 23 14:43:19 crc kubenswrapper[5050]: E1123 14:43:19.548237 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 23 14:43:20 crc kubenswrapper[5050]: I1123 14:43:20.548426 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 23 14:43:20 crc kubenswrapper[5050]: E1123 14:43:20.548615 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 23 14:43:20 crc kubenswrapper[5050]: I1123 14:43:20.548788 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gtj96"
Nov 23 14:43:20 crc kubenswrapper[5050]: E1123 14:43:20.549017 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gtj96" podUID="cc69bd19-1f49-486e-a510-d5b8461fb172"
Nov 23 14:43:20 crc kubenswrapper[5050]: I1123 14:43:20.549688 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 23 14:43:20 crc kubenswrapper[5050]: E1123 14:43:20.549996 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 23 14:43:21 crc kubenswrapper[5050]: I1123 14:43:21.547721 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 23 14:43:21 crc kubenswrapper[5050]: E1123 14:43:21.547946 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 23 14:43:21 crc kubenswrapper[5050]: I1123 14:43:21.573592 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-gc9m9" podStartSLOduration=75.573562485 podStartE2EDuration="1m15.573562485s" podCreationTimestamp="2025-11-23 14:42:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 14:43:14.171993566 +0000 UTC m=+89.338990081" watchObservedRunningTime="2025-11-23 14:43:21.573562485 +0000 UTC m=+96.740558990"
Nov 23 14:43:21 crc kubenswrapper[5050]: I1123 14:43:21.574199 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd/etcd-crc"]
Nov 23 14:43:22 crc kubenswrapper[5050]: I1123 14:43:22.549090 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gtj96"
Nov 23 14:43:22 crc kubenswrapper[5050]: I1123 14:43:22.549090 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 23 14:43:22 crc kubenswrapper[5050]: E1123 14:43:22.549391 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gtj96" podUID="cc69bd19-1f49-486e-a510-d5b8461fb172"
Nov 23 14:43:22 crc kubenswrapper[5050]: I1123 14:43:22.549526 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 23 14:43:22 crc kubenswrapper[5050]: E1123 14:43:22.549742 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 23 14:43:22 crc kubenswrapper[5050]: E1123 14:43:22.549838 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 23 14:43:23 crc kubenswrapper[5050]: I1123 14:43:23.548196 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 23 14:43:23 crc kubenswrapper[5050]: E1123 14:43:23.548498 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 23 14:43:24 crc kubenswrapper[5050]: I1123 14:43:24.320012 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/cc69bd19-1f49-486e-a510-d5b8461fb172-metrics-certs\") pod \"network-metrics-daemon-gtj96\" (UID: \"cc69bd19-1f49-486e-a510-d5b8461fb172\") " pod="openshift-multus/network-metrics-daemon-gtj96"
Nov 23 14:43:24 crc kubenswrapper[5050]: E1123 14:43:24.320351 5050 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Nov 23 14:43:24 crc kubenswrapper[5050]: E1123 14:43:24.320860 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/cc69bd19-1f49-486e-a510-d5b8461fb172-metrics-certs podName:cc69bd19-1f49-486e-a510-d5b8461fb172 nodeName:}" failed. No retries permitted until 2025-11-23 14:44:28.320527276 +0000 UTC m=+163.487523791 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/cc69bd19-1f49-486e-a510-d5b8461fb172-metrics-certs") pod "network-metrics-daemon-gtj96" (UID: "cc69bd19-1f49-486e-a510-d5b8461fb172") : object "openshift-multus"/"metrics-daemon-secret" not registered
Nov 23 14:43:24 crc kubenswrapper[5050]: I1123 14:43:24.548354 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 23 14:43:24 crc kubenswrapper[5050]: I1123 14:43:24.548354 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gtj96"
Nov 23 14:43:24 crc kubenswrapper[5050]: E1123 14:43:24.548742 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 23 14:43:24 crc kubenswrapper[5050]: I1123 14:43:24.548804 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 23 14:43:24 crc kubenswrapper[5050]: E1123 14:43:24.549346 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 23 14:43:24 crc kubenswrapper[5050]: E1123 14:43:24.549517 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gtj96" podUID="cc69bd19-1f49-486e-a510-d5b8461fb172"
Nov 23 14:43:25 crc kubenswrapper[5050]: I1123 14:43:25.548172 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 23 14:43:25 crc kubenswrapper[5050]: E1123 14:43:25.550406 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 23 14:43:26 crc kubenswrapper[5050]: I1123 14:43:26.548035 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 23 14:43:26 crc kubenswrapper[5050]: E1123 14:43:26.548945 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 23 14:43:26 crc kubenswrapper[5050]: I1123 14:43:26.548114 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 23 14:43:26 crc kubenswrapper[5050]: E1123 14:43:26.549386 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 23 14:43:26 crc kubenswrapper[5050]: I1123 14:43:26.549622 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gtj96"
Nov 23 14:43:26 crc kubenswrapper[5050]: E1123 14:43:26.549870 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gtj96" podUID="cc69bd19-1f49-486e-a510-d5b8461fb172"
Nov 23 14:43:27 crc kubenswrapper[5050]: I1123 14:43:27.547902 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 23 14:43:27 crc kubenswrapper[5050]: E1123 14:43:27.548508 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 23 14:43:28 crc kubenswrapper[5050]: I1123 14:43:28.547556 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 23 14:43:28 crc kubenswrapper[5050]: I1123 14:43:28.547690 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gtj96"
Nov 23 14:43:28 crc kubenswrapper[5050]: E1123 14:43:28.547844 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 23 14:43:28 crc kubenswrapper[5050]: I1123 14:43:28.548112 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 23 14:43:28 crc kubenswrapper[5050]: E1123 14:43:28.548391 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 23 14:43:28 crc kubenswrapper[5050]: E1123 14:43:28.548623 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gtj96" podUID="cc69bd19-1f49-486e-a510-d5b8461fb172"
Nov 23 14:43:28 crc kubenswrapper[5050]: I1123 14:43:28.548818 5050 scope.go:117] "RemoveContainer" containerID="82a0b0931dae074fdea5a30d5f082d4040553acde383b476d50f275a40eaec82"
Nov 23 14:43:28 crc kubenswrapper[5050]: E1123 14:43:28.548968 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-j8fzz_openshift-ovn-kubernetes(6573c043-542c-47ae-a2ba-f70b8baf60c2)\"" pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" podUID="6573c043-542c-47ae-a2ba-f70b8baf60c2"
Nov 23 14:43:29 crc kubenswrapper[5050]: I1123 14:43:29.548409 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 23 14:43:29 crc kubenswrapper[5050]: E1123 14:43:29.548699 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 23 14:43:30 crc kubenswrapper[5050]: I1123 14:43:30.548607 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 23 14:43:30 crc kubenswrapper[5050]: I1123 14:43:30.548646 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gtj96"
Nov 23 14:43:30 crc kubenswrapper[5050]: I1123 14:43:30.549541 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 23 14:43:30 crc kubenswrapper[5050]: E1123 14:43:30.550420 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 23 14:43:30 crc kubenswrapper[5050]: E1123 14:43:30.549719 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 23 14:43:30 crc kubenswrapper[5050]: E1123 14:43:30.550266 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gtj96" podUID="cc69bd19-1f49-486e-a510-d5b8461fb172"
Nov 23 14:43:31 crc kubenswrapper[5050]: I1123 14:43:31.548345 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 23 14:43:31 crc kubenswrapper[5050]: E1123 14:43:31.548641 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 23 14:43:32 crc kubenswrapper[5050]: I1123 14:43:32.547872 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gtj96"
Nov 23 14:43:32 crc kubenswrapper[5050]: E1123 14:43:32.548072 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gtj96" podUID="cc69bd19-1f49-486e-a510-d5b8461fb172"
Nov 23 14:43:32 crc kubenswrapper[5050]: I1123 14:43:32.547857 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 23 14:43:32 crc kubenswrapper[5050]: E1123 14:43:32.548438 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 23 14:43:32 crc kubenswrapper[5050]: I1123 14:43:32.549323 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 23 14:43:32 crc kubenswrapper[5050]: E1123 14:43:32.549727 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 23 14:43:33 crc kubenswrapper[5050]: I1123 14:43:33.548225 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 23 14:43:33 crc kubenswrapper[5050]: E1123 14:43:33.548512 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 23 14:43:34 crc kubenswrapper[5050]: I1123 14:43:34.548488 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gtj96"
Nov 23 14:43:34 crc kubenswrapper[5050]: I1123 14:43:34.548684 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 23 14:43:34 crc kubenswrapper[5050]: I1123 14:43:34.548508 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 23 14:43:34 crc kubenswrapper[5050]: E1123 14:43:34.548829 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gtj96" podUID="cc69bd19-1f49-486e-a510-d5b8461fb172"
Nov 23 14:43:34 crc kubenswrapper[5050]: E1123 14:43:34.549000 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 23 14:43:34 crc kubenswrapper[5050]: E1123 14:43:34.549163 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 23 14:43:35 crc kubenswrapper[5050]: I1123 14:43:35.548319 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 23 14:43:35 crc kubenswrapper[5050]: E1123 14:43:35.549246 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 23 14:43:36 crc kubenswrapper[5050]: I1123 14:43:36.548050 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gtj96"
Nov 23 14:43:36 crc kubenswrapper[5050]: I1123 14:43:36.548123 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 23 14:43:36 crc kubenswrapper[5050]: E1123 14:43:36.548264 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gtj96" podUID="cc69bd19-1f49-486e-a510-d5b8461fb172"
Nov 23 14:43:36 crc kubenswrapper[5050]: I1123 14:43:36.548483 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 23 14:43:36 crc kubenswrapper[5050]: E1123 14:43:36.549112 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 23 14:43:36 crc kubenswrapper[5050]: E1123 14:43:36.549192 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 23 14:43:37 crc kubenswrapper[5050]: I1123 14:43:37.547876 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 23 14:43:37 crc kubenswrapper[5050]: E1123 14:43:37.548100 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 23 14:43:38 crc kubenswrapper[5050]: I1123 14:43:38.547651 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 23 14:43:38 crc kubenswrapper[5050]: I1123 14:43:38.547821 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 23 14:43:38 crc kubenswrapper[5050]: E1123 14:43:38.547923 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 23 14:43:38 crc kubenswrapper[5050]: I1123 14:43:38.547995 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gtj96" Nov 23 14:43:38 crc kubenswrapper[5050]: E1123 14:43:38.548090 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 23 14:43:38 crc kubenswrapper[5050]: E1123 14:43:38.548228 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gtj96" podUID="cc69bd19-1f49-486e-a510-d5b8461fb172" Nov 23 14:43:39 crc kubenswrapper[5050]: I1123 14:43:39.548066 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 23 14:43:39 crc kubenswrapper[5050]: E1123 14:43:39.548427 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 23 14:43:40 crc kubenswrapper[5050]: I1123 14:43:40.548024 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gtj96" Nov 23 14:43:40 crc kubenswrapper[5050]: I1123 14:43:40.548115 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 23 14:43:40 crc kubenswrapper[5050]: I1123 14:43:40.548194 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 23 14:43:40 crc kubenswrapper[5050]: E1123 14:43:40.548271 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gtj96" podUID="cc69bd19-1f49-486e-a510-d5b8461fb172" Nov 23 14:43:40 crc kubenswrapper[5050]: E1123 14:43:40.548407 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 23 14:43:40 crc kubenswrapper[5050]: E1123 14:43:40.548593 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 23 14:43:41 crc kubenswrapper[5050]: I1123 14:43:41.260796 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-qvjn2_abdac21e-18fc-460d-bd3b-73bed66b8ab9/kube-multus/1.log" Nov 23 14:43:41 crc kubenswrapper[5050]: I1123 14:43:41.261658 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-qvjn2_abdac21e-18fc-460d-bd3b-73bed66b8ab9/kube-multus/0.log" Nov 23 14:43:41 crc kubenswrapper[5050]: I1123 14:43:41.261726 5050 generic.go:334] "Generic (PLEG): container finished" podID="abdac21e-18fc-460d-bd3b-73bed66b8ab9" containerID="db0c98dfdc0723630b1490357e96c25cfe466caf464ee1883509dc0bd2521337" exitCode=1 Nov 23 14:43:41 crc kubenswrapper[5050]: I1123 14:43:41.261780 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-qvjn2" event={"ID":"abdac21e-18fc-460d-bd3b-73bed66b8ab9","Type":"ContainerDied","Data":"db0c98dfdc0723630b1490357e96c25cfe466caf464ee1883509dc0bd2521337"} Nov 23 14:43:41 crc kubenswrapper[5050]: I1123 14:43:41.261846 5050 scope.go:117] "RemoveContainer" containerID="7d239bbc61595516c6e76763dd66ee795750adb765c56bb5d9c967c1e5897fcf" Nov 23 14:43:41 crc kubenswrapper[5050]: I1123 14:43:41.262640 5050 scope.go:117] "RemoveContainer" containerID="db0c98dfdc0723630b1490357e96c25cfe466caf464ee1883509dc0bd2521337" Nov 23 14:43:41 crc kubenswrapper[5050]: E1123 14:43:41.263049 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-multus pod=multus-qvjn2_openshift-multus(abdac21e-18fc-460d-bd3b-73bed66b8ab9)\"" pod="openshift-multus/multus-qvjn2" podUID="abdac21e-18fc-460d-bd3b-73bed66b8ab9" Nov 23 14:43:41 crc kubenswrapper[5050]: I1123 14:43:41.299637 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd/etcd-crc" podStartSLOduration=20.29960716 podStartE2EDuration="20.29960716s" podCreationTimestamp="2025-11-23 14:43:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 14:43:25.596374151 +0000 UTC m=+100.763370666" watchObservedRunningTime="2025-11-23 14:43:41.29960716 +0000 UTC m=+116.466603685" Nov 23 14:43:41 crc kubenswrapper[5050]: I1123 14:43:41.548579 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 23 14:43:41 crc kubenswrapper[5050]: E1123 14:43:41.548835 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 23 14:43:42 crc kubenswrapper[5050]: I1123 14:43:42.268754 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-qvjn2_abdac21e-18fc-460d-bd3b-73bed66b8ab9/kube-multus/1.log" Nov 23 14:43:42 crc kubenswrapper[5050]: I1123 14:43:42.548030 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 23 14:43:42 crc kubenswrapper[5050]: E1123 14:43:42.548213 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 23 14:43:42 crc kubenswrapper[5050]: I1123 14:43:42.548636 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 23 14:43:42 crc kubenswrapper[5050]: I1123 14:43:42.548825 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gtj96" Nov 23 14:43:42 crc kubenswrapper[5050]: E1123 14:43:42.548940 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 23 14:43:42 crc kubenswrapper[5050]: E1123 14:43:42.549675 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-gtj96" podUID="cc69bd19-1f49-486e-a510-d5b8461fb172" Nov 23 14:43:42 crc kubenswrapper[5050]: I1123 14:43:42.550212 5050 scope.go:117] "RemoveContainer" containerID="82a0b0931dae074fdea5a30d5f082d4040553acde383b476d50f275a40eaec82" Nov 23 14:43:43 crc kubenswrapper[5050]: I1123 14:43:43.277752 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-j8fzz_6573c043-542c-47ae-a2ba-f70b8baf60c2/ovnkube-controller/3.log" Nov 23 14:43:43 crc kubenswrapper[5050]: I1123 14:43:43.283058 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" event={"ID":"6573c043-542c-47ae-a2ba-f70b8baf60c2","Type":"ContainerStarted","Data":"64b99478c6a7e63e8f900194b4f35e1d77c9836c192f4a52a281dd036504cd5f"} Nov 23 14:43:43 crc kubenswrapper[5050]: I1123 14:43:43.283687 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" Nov 23 14:43:43 crc kubenswrapper[5050]: I1123 14:43:43.321609 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" podStartSLOduration=97.321588198 podStartE2EDuration="1m37.321588198s" podCreationTimestamp="2025-11-23 14:42:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 14:43:43.321096105 +0000 UTC m=+118.488092610" watchObservedRunningTime="2025-11-23 14:43:43.321588198 +0000 UTC m=+118.488584683" Nov 23 14:43:43 crc kubenswrapper[5050]: I1123 14:43:43.548268 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 23 14:43:43 crc kubenswrapper[5050]: E1123 14:43:43.548407 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 23 14:43:43 crc kubenswrapper[5050]: I1123 14:43:43.570058 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-gtj96"] Nov 23 14:43:43 crc kubenswrapper[5050]: I1123 14:43:43.570300 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gtj96" Nov 23 14:43:43 crc kubenswrapper[5050]: E1123 14:43:43.570514 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gtj96" podUID="cc69bd19-1f49-486e-a510-d5b8461fb172" Nov 23 14:43:44 crc kubenswrapper[5050]: I1123 14:43:44.547958 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 23 14:43:44 crc kubenswrapper[5050]: I1123 14:43:44.548034 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 23 14:43:44 crc kubenswrapper[5050]: E1123 14:43:44.548202 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 23 14:43:44 crc kubenswrapper[5050]: E1123 14:43:44.548371 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 23 14:43:45 crc kubenswrapper[5050]: E1123 14:43:45.521843 5050 kubelet_node_status.go:497] "Node not becoming ready in time after startup" Nov 23 14:43:45 crc kubenswrapper[5050]: I1123 14:43:45.547823 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gtj96" Nov 23 14:43:45 crc kubenswrapper[5050]: I1123 14:43:45.550046 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 23 14:43:45 crc kubenswrapper[5050]: E1123 14:43:45.550289 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gtj96" podUID="cc69bd19-1f49-486e-a510-d5b8461fb172" Nov 23 14:43:45 crc kubenswrapper[5050]: E1123 14:43:45.550948 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 23 14:43:45 crc kubenswrapper[5050]: E1123 14:43:45.673986 5050 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 23 14:43:46 crc kubenswrapper[5050]: I1123 14:43:46.548098 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 23 14:43:46 crc kubenswrapper[5050]: I1123 14:43:46.548138 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 23 14:43:46 crc kubenswrapper[5050]: E1123 14:43:46.548287 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 23 14:43:46 crc kubenswrapper[5050]: E1123 14:43:46.548431 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 23 14:43:47 crc kubenswrapper[5050]: I1123 14:43:47.548787 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gtj96" Nov 23 14:43:47 crc kubenswrapper[5050]: E1123 14:43:47.549072 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gtj96" podUID="cc69bd19-1f49-486e-a510-d5b8461fb172" Nov 23 14:43:47 crc kubenswrapper[5050]: I1123 14:43:47.549194 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 23 14:43:47 crc kubenswrapper[5050]: E1123 14:43:47.549480 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 23 14:43:48 crc kubenswrapper[5050]: I1123 14:43:48.547896 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 23 14:43:48 crc kubenswrapper[5050]: I1123 14:43:48.547925 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 23 14:43:48 crc kubenswrapper[5050]: E1123 14:43:48.548130 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 23 14:43:48 crc kubenswrapper[5050]: E1123 14:43:48.548326 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 23 14:43:49 crc kubenswrapper[5050]: I1123 14:43:49.548540 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 23 14:43:49 crc kubenswrapper[5050]: I1123 14:43:49.548550 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gtj96" Nov 23 14:43:49 crc kubenswrapper[5050]: E1123 14:43:49.548815 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 23 14:43:49 crc kubenswrapper[5050]: E1123 14:43:49.549036 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gtj96" podUID="cc69bd19-1f49-486e-a510-d5b8461fb172" Nov 23 14:43:50 crc kubenswrapper[5050]: I1123 14:43:50.548130 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 23 14:43:50 crc kubenswrapper[5050]: I1123 14:43:50.548268 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 23 14:43:50 crc kubenswrapper[5050]: E1123 14:43:50.548369 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 23 14:43:50 crc kubenswrapper[5050]: E1123 14:43:50.548619 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 23 14:43:50 crc kubenswrapper[5050]: E1123 14:43:50.676539 5050 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 23 14:43:51 crc kubenswrapper[5050]: I1123 14:43:51.547986 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gtj96" Nov 23 14:43:51 crc kubenswrapper[5050]: E1123 14:43:51.548721 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gtj96" podUID="cc69bd19-1f49-486e-a510-d5b8461fb172" Nov 23 14:43:51 crc kubenswrapper[5050]: I1123 14:43:51.548133 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 23 14:43:51 crc kubenswrapper[5050]: E1123 14:43:51.548907 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 23 14:43:52 crc kubenswrapper[5050]: I1123 14:43:52.548908 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 23 14:43:52 crc kubenswrapper[5050]: I1123 14:43:52.549022 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 23 14:43:52 crc kubenswrapper[5050]: E1123 14:43:52.549294 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 23 14:43:52 crc kubenswrapper[5050]: E1123 14:43:52.550022 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 23 14:43:53 crc kubenswrapper[5050]: I1123 14:43:53.548345 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gtj96" Nov 23 14:43:53 crc kubenswrapper[5050]: I1123 14:43:53.548411 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 23 14:43:53 crc kubenswrapper[5050]: E1123 14:43:53.548689 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gtj96" podUID="cc69bd19-1f49-486e-a510-d5b8461fb172" Nov 23 14:43:53 crc kubenswrapper[5050]: E1123 14:43:53.548859 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 23 14:43:54 crc kubenswrapper[5050]: I1123 14:43:54.548257 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 23 14:43:54 crc kubenswrapper[5050]: I1123 14:43:54.548286 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 23 14:43:54 crc kubenswrapper[5050]: E1123 14:43:54.548561 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 23 14:43:54 crc kubenswrapper[5050]: E1123 14:43:54.549060 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 23 14:43:55 crc kubenswrapper[5050]: I1123 14:43:55.547932 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gtj96" Nov 23 14:43:55 crc kubenswrapper[5050]: I1123 14:43:55.547982 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 23 14:43:55 crc kubenswrapper[5050]: E1123 14:43:55.550130 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-gtj96" podUID="cc69bd19-1f49-486e-a510-d5b8461fb172" Nov 23 14:43:55 crc kubenswrapper[5050]: E1123 14:43:55.550304 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 23 14:43:55 crc kubenswrapper[5050]: I1123 14:43:55.550332 5050 scope.go:117] "RemoveContainer" containerID="db0c98dfdc0723630b1490357e96c25cfe466caf464ee1883509dc0bd2521337" Nov 23 14:43:55 crc kubenswrapper[5050]: E1123 14:43:55.677323 5050 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 23 14:43:56 crc kubenswrapper[5050]: I1123 14:43:56.346119 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-qvjn2_abdac21e-18fc-460d-bd3b-73bed66b8ab9/kube-multus/1.log" Nov 23 14:43:56 crc kubenswrapper[5050]: I1123 14:43:56.346807 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-qvjn2" event={"ID":"abdac21e-18fc-460d-bd3b-73bed66b8ab9","Type":"ContainerStarted","Data":"c623d3e60ee42251333c8132c3a910eddaf9de12b5abc799178b24d7174e6e5d"} Nov 23 14:43:56 crc kubenswrapper[5050]: I1123 14:43:56.547794 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 23 14:43:56 crc kubenswrapper[5050]: I1123 14:43:56.547836 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 23 14:43:56 crc kubenswrapper[5050]: E1123 14:43:56.548091 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 23 14:43:56 crc kubenswrapper[5050]: E1123 14:43:56.548241 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 23 14:43:57 crc kubenswrapper[5050]: I1123 14:43:57.548634 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gtj96" Nov 23 14:43:57 crc kubenswrapper[5050]: I1123 14:43:57.548742 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 23 14:43:57 crc kubenswrapper[5050]: E1123 14:43:57.548909 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gtj96" podUID="cc69bd19-1f49-486e-a510-d5b8461fb172" Nov 23 14:43:57 crc kubenswrapper[5050]: E1123 14:43:57.549004 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 23 14:43:58 crc kubenswrapper[5050]: I1123 14:43:58.547793 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 23 14:43:58 crc kubenswrapper[5050]: I1123 14:43:58.547937 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 23 14:43:58 crc kubenswrapper[5050]: E1123 14:43:58.548079 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 23 14:43:58 crc kubenswrapper[5050]: E1123 14:43:58.548393 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 23 14:43:58 crc kubenswrapper[5050]: I1123 14:43:58.649020 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" Nov 23 14:43:59 crc kubenswrapper[5050]: I1123 14:43:59.548667 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gtj96" Nov 23 14:43:59 crc kubenswrapper[5050]: I1123 14:43:59.548712 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 23 14:43:59 crc kubenswrapper[5050]: E1123 14:43:59.548946 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-gtj96" podUID="cc69bd19-1f49-486e-a510-d5b8461fb172" Nov 23 14:43:59 crc kubenswrapper[5050]: E1123 14:43:59.549085 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 23 14:44:00 crc kubenswrapper[5050]: I1123 14:44:00.547784 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 23 14:44:00 crc kubenswrapper[5050]: I1123 14:44:00.547913 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 23 14:44:00 crc kubenswrapper[5050]: E1123 14:44:00.548058 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 23 14:44:00 crc kubenswrapper[5050]: E1123 14:44:00.548306 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 23 14:44:01 crc kubenswrapper[5050]: I1123 14:44:01.547788 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 23 14:44:01 crc kubenswrapper[5050]: I1123 14:44:01.547866 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gtj96" Nov 23 14:44:01 crc kubenswrapper[5050]: I1123 14:44:01.553101 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Nov 23 14:44:01 crc kubenswrapper[5050]: I1123 14:44:01.553523 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Nov 23 14:44:01 crc kubenswrapper[5050]: I1123 14:44:01.554324 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Nov 23 14:44:01 crc kubenswrapper[5050]: I1123 14:44:01.556063 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Nov 23 14:44:02 crc kubenswrapper[5050]: I1123 14:44:02.547606 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 23 14:44:02 crc kubenswrapper[5050]: I1123 14:44:02.547853 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 23 14:44:02 crc kubenswrapper[5050]: I1123 14:44:02.551795 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Nov 23 14:44:02 crc kubenswrapper[5050]: I1123 14:44:02.551904 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.600245 5050 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeReady" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.663766 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-machine-approver/machine-approver-56656f9798-hlskc"] Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.664995 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-hlskc" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.665205 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-46rrm"] Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.665938 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-46rrm" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.674861 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/downloads-7954f5f757-gvb4q"] Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.675829 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-gvb4q" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.683218 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.683969 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.684762 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.684830 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.685092 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.685136 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.685696 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.685788 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.685803 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Nov 23 14:44:04 crc 
kubenswrapper[5050]: I1123 14:44:04.685833 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv"
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.685937 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4"
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.686120 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx"
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.686413 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config"
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.690243 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-cg8k9"]
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.691006 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-cg8k9"
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.692891 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-zfnk9"]
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.694069 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-zfnk9"
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.695804 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt"
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.700558 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config"
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.700650 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle"
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.705624 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt"
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.705907 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt"
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.706129 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client"
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.706400 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt"
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.706819 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images"
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.706920 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7"
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.706987 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn"
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.707218 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy"
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.707386 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle"
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.707562 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls"
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.707717 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt"
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.707862 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert"
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.717242 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-2bsvx"]
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.717752 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-cvknj"]
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.718109 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-cvknj"
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.718353 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-2bsvx"
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.718803 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-bglcd"]
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.719690 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-bglcd"
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.755805 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-tt6kr"]
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.756130 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vvkg6\" (UniqueName: \"kubernetes.io/projected/faf517b0-a6d3-44ed-bdf9-b7430d956916-kube-api-access-vvkg6\") pod \"openshift-apiserver-operator-796bbdcf4f-46rrm\" (UID: \"faf517b0-a6d3-44ed-bdf9-b7430d956916\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-46rrm"
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.756205 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/faf517b0-a6d3-44ed-bdf9-b7430d956916-config\") pod \"openshift-apiserver-operator-796bbdcf4f-46rrm\" (UID: \"faf517b0-a6d3-44ed-bdf9-b7430d956916\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-46rrm"
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.756238 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1cd7d6f0-caf1-4640-87b1-95d6faf69aa4-config\") pod \"machine-approver-56656f9798-hlskc\" (UID: \"1cd7d6f0-caf1-4640-87b1-95d6faf69aa4\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-hlskc"
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.756348 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/faf517b0-a6d3-44ed-bdf9-b7430d956916-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-46rrm\" (UID: \"faf517b0-a6d3-44ed-bdf9-b7430d956916\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-46rrm"
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.756377 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/1cd7d6f0-caf1-4640-87b1-95d6faf69aa4-auth-proxy-config\") pod \"machine-approver-56656f9798-hlskc\" (UID: \"1cd7d6f0-caf1-4640-87b1-95d6faf69aa4\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-hlskc"
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.756399 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/1cd7d6f0-caf1-4640-87b1-95d6faf69aa4-machine-approver-tls\") pod \"machine-approver-56656f9798-hlskc\" (UID: \"1cd7d6f0-caf1-4640-87b1-95d6faf69aa4\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-hlskc"
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.756420 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j8gnx\" (UniqueName: \"kubernetes.io/projected/1cd7d6f0-caf1-4640-87b1-95d6faf69aa4-kube-api-access-j8gnx\") pod \"machine-approver-56656f9798-hlskc\" (UID: \"1cd7d6f0-caf1-4640-87b1-95d6faf69aa4\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-hlskc"
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.756611 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-ph6h8"]
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.757229 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-ph6h8"
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.757362 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-tt6kr"
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.758054 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt"
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.761465 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle"
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.761896 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle"
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.762082 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt"
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.762299 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs"
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.762342 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert"
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.762397 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc"
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.762479 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt"
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.762558 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1"
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.764267 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca"
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.764304 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit"
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.764738 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls"
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.764795 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt"
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.764845 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt"
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.764887 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection"
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.764909 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config"
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.764985 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig"
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.765037 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt"
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.765088 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session"
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.765110 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert"
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.764748 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error"
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.764799 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca"
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.765191 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca"
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.765041 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt"
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.764751 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data"
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.764817 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert"
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.768584 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1"
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.768801 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client"
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.769274 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt"
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.769356 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq"
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.769488 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt"
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.769596 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca"
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.769765 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c"
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.772849 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-j5zvd"]
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.773603 5050 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-j5zvd" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.773846 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.775697 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-b9jzj"] Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.776237 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.776267 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-b9jzj" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.777137 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.777939 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.779688 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console-operator/console-operator-58897d9998-h2nqh"] Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.780195 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-96dfp"] Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.782662 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-96dfp" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.783026 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-h2nqh" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.803926 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.804081 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.804311 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.804319 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.808013 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.808134 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.808213 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.808331 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.808598 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.808693 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.808787 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.809274 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.809658 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.809818 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.809986 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.810168 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.810337 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.810555 5050 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.812503 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-f9d7485db-b646l"] Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.813164 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-b646l" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.814091 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.814237 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.814436 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.814583 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.814679 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.816575 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.816750 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.817884 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-dzz98"] Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.829736 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.830403 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.830824 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.834720 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-dzz98" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.837201 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-ngl6b"] Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.852286 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.854756 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.857006 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.857039 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-ngl6b" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.857923 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.859744 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.861733 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-svd5r"] Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.861918 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.862831 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.863376 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-svd5r" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.863483 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/74ee62a2-d057-4ddf-8e5f-619cd90645d0-serving-cert\") pod \"apiserver-7bbb656c7d-tt6kr\" (UID: \"74ee62a2-d057-4ddf-8e5f-619cd90645d0\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-tt6kr" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.863545 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/2a07c9a0-b842-4f33-83a4-d6a07eeed391-etcd-ca\") pod \"etcd-operator-b45778765-cg8k9\" (UID: \"2a07c9a0-b842-4f33-83a4-d6a07eeed391\") " pod="openshift-etcd-operator/etcd-operator-b45778765-cg8k9" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.863578 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/2a07c9a0-b842-4f33-83a4-d6a07eeed391-etcd-service-ca\") pod \"etcd-operator-b45778765-cg8k9\" (UID: \"2a07c9a0-b842-4f33-83a4-d6a07eeed391\") " pod="openshift-etcd-operator/etcd-operator-b45778765-cg8k9" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.863632 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/74ee62a2-d057-4ddf-8e5f-619cd90645d0-etcd-client\") pod \"apiserver-7bbb656c7d-tt6kr\" (UID: \"74ee62a2-d057-4ddf-8e5f-619cd90645d0\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-tt6kr" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.863669 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/5c3f6b64-15bf-4147-be4e-414a2569cb58-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-cvknj\" (UID: \"5c3f6b64-15bf-4147-be4e-414a2569cb58\") " pod="openshift-authentication/oauth-openshift-558db77b4-cvknj" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.863709 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5c3f6b64-15bf-4147-be4e-414a2569cb58-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-cvknj\" (UID: \"5c3f6b64-15bf-4147-be4e-414a2569cb58\") " pod="openshift-authentication/oauth-openshift-558db77b4-cvknj" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.864780 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.865080 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7vr67\" (UniqueName: \"kubernetes.io/projected/16e600fc-0484-4c1c-8800-bb3958374bad-kube-api-access-7vr67\") pod \"downloads-7954f5f757-gvb4q\" (UID: \"16e600fc-0484-4c1c-8800-bb3958374bad\") " pod="openshift-console/downloads-7954f5f757-gvb4q" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.865143 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/e118c084-6256-42a4-ad24-f06222a1b3b4-serving-cert\") pod \"authentication-operator-69f744f599-j5zvd\" (UID: \"e118c084-6256-42a4-ad24-f06222a1b3b4\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-j5zvd" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.865198 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/72f33e56-9fe4-420b-aa31-8d49ed5c7584-serving-cert\") pod \"controller-manager-879f6c89f-2bsvx\" (UID: \"72f33e56-9fe4-420b-aa31-8d49ed5c7584\") " pod="openshift-controller-manager/controller-manager-879f6c89f-2bsvx" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.865251 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/faf517b0-a6d3-44ed-bdf9-b7430d956916-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-46rrm\" (UID: \"faf517b0-a6d3-44ed-bdf9-b7430d956916\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-46rrm" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.865296 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/1cd7d6f0-caf1-4640-87b1-95d6faf69aa4-auth-proxy-config\") pod \"machine-approver-56656f9798-hlskc\" (UID: \"1cd7d6f0-caf1-4640-87b1-95d6faf69aa4\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-hlskc" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.865434 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.865475 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/1cd7d6f0-caf1-4640-87b1-95d6faf69aa4-machine-approver-tls\") pod \"machine-approver-56656f9798-hlskc\" (UID: \"1cd7d6f0-caf1-4640-87b1-95d6faf69aa4\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-hlskc" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.865570 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j8gnx\" (UniqueName: \"kubernetes.io/projected/1cd7d6f0-caf1-4640-87b1-95d6faf69aa4-kube-api-access-j8gnx\") pod \"machine-approver-56656f9798-hlskc\" (UID: \"1cd7d6f0-caf1-4640-87b1-95d6faf69aa4\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-hlskc" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.865998 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.866691 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/1cd7d6f0-caf1-4640-87b1-95d6faf69aa4-auth-proxy-config\") pod \"machine-approver-56656f9798-hlskc\" (UID: \"1cd7d6f0-caf1-4640-87b1-95d6faf69aa4\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-hlskc" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.865623 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2a07c9a0-b842-4f33-83a4-d6a07eeed391-serving-cert\") pod \"etcd-operator-b45778765-cg8k9\" (UID: 
\"2a07c9a0-b842-4f33-83a4-d6a07eeed391\") " pod="openshift-etcd-operator/etcd-operator-b45778765-cg8k9" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.866785 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/5c3f6b64-15bf-4147-be4e-414a2569cb58-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-cvknj\" (UID: \"5c3f6b64-15bf-4147-be4e-414a2569cb58\") " pod="openshift-authentication/oauth-openshift-558db77b4-cvknj" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.866840 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0a4f6fb7-314a-4896-bcba-c11f622a5e3f-serving-cert\") pod \"openshift-config-operator-7777fb866f-ph6h8\" (UID: \"0a4f6fb7-314a-4896-bcba-c11f622a5e3f\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-ph6h8" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.866886 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/74ee62a2-d057-4ddf-8e5f-619cd90645d0-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-tt6kr\" (UID: \"74ee62a2-d057-4ddf-8e5f-619cd90645d0\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-tt6kr" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.866909 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a934d47f-f19d-4d29-aed8-20141e5bcf2b-config\") pod \"machine-api-operator-5694c8668f-zfnk9\" (UID: \"a934d47f-f19d-4d29-aed8-20141e5bcf2b\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-zfnk9" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.867248 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.867556 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wdw7p\" (UniqueName: \"kubernetes.io/projected/a934d47f-f19d-4d29-aed8-20141e5bcf2b-kube-api-access-wdw7p\") pod \"machine-api-operator-5694c8668f-zfnk9\" (UID: \"a934d47f-f19d-4d29-aed8-20141e5bcf2b\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-zfnk9" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.867603 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/5c3f6b64-15bf-4147-be4e-414a2569cb58-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-cvknj\" (UID: \"5c3f6b64-15bf-4147-be4e-414a2569cb58\") " pod="openshift-authentication/oauth-openshift-558db77b4-cvknj" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.867640 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1cd7d6f0-caf1-4640-87b1-95d6faf69aa4-config\") pod \"machine-approver-56656f9798-hlskc\" (UID: \"1cd7d6f0-caf1-4640-87b1-95d6faf69aa4\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-hlskc" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.867670 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"kube-api-access-fpvmd\" (UniqueName: \"kubernetes.io/projected/2a07c9a0-b842-4f33-83a4-d6a07eeed391-kube-api-access-fpvmd\") pod \"etcd-operator-b45778765-cg8k9\" (UID: \"2a07c9a0-b842-4f33-83a4-d6a07eeed391\") " pod="openshift-etcd-operator/etcd-operator-b45778765-cg8k9" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.867783 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/0d7bc9ed-da92-4305-bda8-78966e7451db-metrics-tls\") pod \"dns-operator-744455d44c-bglcd\" (UID: \"0d7bc9ed-da92-4305-bda8-78966e7451db\") " pod="openshift-dns-operator/dns-operator-744455d44c-bglcd" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.868168 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1cd7d6f0-caf1-4640-87b1-95d6faf69aa4-config\") pod \"machine-approver-56656f9798-hlskc\" (UID: \"1cd7d6f0-caf1-4640-87b1-95d6faf69aa4\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-hlskc" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.868239 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/72f33e56-9fe4-420b-aa31-8d49ed5c7584-config\") pod \"controller-manager-879f6c89f-2bsvx\" (UID: \"72f33e56-9fe4-420b-aa31-8d49ed5c7584\") " pod="openshift-controller-manager/controller-manager-879f6c89f-2bsvx" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.868268 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/5c3f6b64-15bf-4147-be4e-414a2569cb58-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-cvknj\" (UID: \"5c3f6b64-15bf-4147-be4e-414a2569cb58\") " pod="openshift-authentication/oauth-openshift-558db77b4-cvknj" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.868293 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/5c3f6b64-15bf-4147-be4e-414a2569cb58-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-cvknj\" (UID: \"5c3f6b64-15bf-4147-be4e-414a2569cb58\") " pod="openshift-authentication/oauth-openshift-558db77b4-cvknj" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.868321 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/74ee62a2-d057-4ddf-8e5f-619cd90645d0-audit-policies\") pod \"apiserver-7bbb656c7d-tt6kr\" (UID: \"74ee62a2-d057-4ddf-8e5f-619cd90645d0\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-tt6kr" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.868347 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2hw79\" (UniqueName: \"kubernetes.io/projected/74ee62a2-d057-4ddf-8e5f-619cd90645d0-kube-api-access-2hw79\") pod \"apiserver-7bbb656c7d-tt6kr\" (UID: \"74ee62a2-d057-4ddf-8e5f-619cd90645d0\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-tt6kr" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.868389 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: 
\"kubernetes.io/configmap/72f33e56-9fe4-420b-aa31-8d49ed5c7584-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-2bsvx\" (UID: \"72f33e56-9fe4-420b-aa31-8d49ed5c7584\") " pod="openshift-controller-manager/controller-manager-879f6c89f-2bsvx" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.868412 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/5c3f6b64-15bf-4147-be4e-414a2569cb58-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-cvknj\" (UID: \"5c3f6b64-15bf-4147-be4e-414a2569cb58\") " pod="openshift-authentication/oauth-openshift-558db77b4-cvknj" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.868470 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2wpc6\" (UniqueName: \"kubernetes.io/projected/0a4f6fb7-314a-4896-bcba-c11f622a5e3f-kube-api-access-2wpc6\") pod \"openshift-config-operator-7777fb866f-ph6h8\" (UID: \"0a4f6fb7-314a-4896-bcba-c11f622a5e3f\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-ph6h8" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.868504 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/e118c084-6256-42a4-ad24-f06222a1b3b4-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-j5zvd\" (UID: \"e118c084-6256-42a4-ad24-f06222a1b3b4\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-j5zvd" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.868525 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/5c3f6b64-15bf-4147-be4e-414a2569cb58-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-cvknj\" (UID: \"5c3f6b64-15bf-4147-be4e-414a2569cb58\") " pod="openshift-authentication/oauth-openshift-558db77b4-cvknj" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.868545 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-crxcs\" (UniqueName: \"kubernetes.io/projected/5c3f6b64-15bf-4147-be4e-414a2569cb58-kube-api-access-crxcs\") pod \"oauth-openshift-558db77b4-cvknj\" (UID: \"5c3f6b64-15bf-4147-be4e-414a2569cb58\") " pod="openshift-authentication/oauth-openshift-558db77b4-cvknj" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.868565 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/74ee62a2-d057-4ddf-8e5f-619cd90645d0-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-tt6kr\" (UID: \"74ee62a2-d057-4ddf-8e5f-619cd90645d0\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-tt6kr" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.868600 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2a07c9a0-b842-4f33-83a4-d6a07eeed391-config\") pod \"etcd-operator-b45778765-cg8k9\" (UID: \"2a07c9a0-b842-4f33-83a4-d6a07eeed391\") " pod="openshift-etcd-operator/etcd-operator-b45778765-cg8k9" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.868627 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"kube-api-access-c59xw\" (UniqueName: \"kubernetes.io/projected/e118c084-6256-42a4-ad24-f06222a1b3b4-kube-api-access-c59xw\") pod \"authentication-operator-69f744f599-j5zvd\" (UID: \"e118c084-6256-42a4-ad24-f06222a1b3b4\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-j5zvd" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.868654 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e118c084-6256-42a4-ad24-f06222a1b3b4-config\") pod \"authentication-operator-69f744f599-j5zvd\" (UID: \"e118c084-6256-42a4-ad24-f06222a1b3b4\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-j5zvd" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.868673 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/5c3f6b64-15bf-4147-be4e-414a2569cb58-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-cvknj\" (UID: \"5c3f6b64-15bf-4147-be4e-414a2569cb58\") " pod="openshift-authentication/oauth-openshift-558db77b4-cvknj" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.868697 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/0a4f6fb7-314a-4896-bcba-c11f622a5e3f-available-featuregates\") pod \"openshift-config-operator-7777fb866f-ph6h8\" (UID: \"0a4f6fb7-314a-4896-bcba-c11f622a5e3f\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-ph6h8" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.868721 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/e118c084-6256-42a4-ad24-f06222a1b3b4-service-ca-bundle\") pod \"authentication-operator-69f744f599-j5zvd\" (UID: \"e118c084-6256-42a4-ad24-f06222a1b3b4\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-j5zvd" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.868773 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-72r27\" (UniqueName: \"kubernetes.io/projected/0d7bc9ed-da92-4305-bda8-78966e7451db-kube-api-access-72r27\") pod \"dns-operator-744455d44c-bglcd\" (UID: \"0d7bc9ed-da92-4305-bda8-78966e7451db\") " pod="openshift-dns-operator/dns-operator-744455d44c-bglcd" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.868801 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n2hll\" (UniqueName: \"kubernetes.io/projected/72f33e56-9fe4-420b-aa31-8d49ed5c7584-kube-api-access-n2hll\") pod \"controller-manager-879f6c89f-2bsvx\" (UID: \"72f33e56-9fe4-420b-aa31-8d49ed5c7584\") " pod="openshift-controller-manager/controller-manager-879f6c89f-2bsvx" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.868819 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/5c3f6b64-15bf-4147-be4e-414a2569cb58-audit-dir\") pod \"oauth-openshift-558db77b4-cvknj\" (UID: \"5c3f6b64-15bf-4147-be4e-414a2569cb58\") " pod="openshift-authentication/oauth-openshift-558db77b4-cvknj" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.868839 5050 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/5c3f6b64-15bf-4147-be4e-414a2569cb58-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-cvknj\" (UID: \"5c3f6b64-15bf-4147-be4e-414a2569cb58\") " pod="openshift-authentication/oauth-openshift-558db77b4-cvknj" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.868865 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/a934d47f-f19d-4d29-aed8-20141e5bcf2b-images\") pod \"machine-api-operator-5694c8668f-zfnk9\" (UID: \"a934d47f-f19d-4d29-aed8-20141e5bcf2b\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-zfnk9" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.868889 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vvkg6\" (UniqueName: \"kubernetes.io/projected/faf517b0-a6d3-44ed-bdf9-b7430d956916-kube-api-access-vvkg6\") pod \"openshift-apiserver-operator-796bbdcf4f-46rrm\" (UID: \"faf517b0-a6d3-44ed-bdf9-b7430d956916\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-46rrm" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.868912 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/faf517b0-a6d3-44ed-bdf9-b7430d956916-config\") pod \"openshift-apiserver-operator-796bbdcf4f-46rrm\" (UID: \"faf517b0-a6d3-44ed-bdf9-b7430d956916\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-46rrm" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.868936 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/5c3f6b64-15bf-4147-be4e-414a2569cb58-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-cvknj\" (UID: \"5c3f6b64-15bf-4147-be4e-414a2569cb58\") " pod="openshift-authentication/oauth-openshift-558db77b4-cvknj" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.868959 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/74ee62a2-d057-4ddf-8e5f-619cd90645d0-audit-dir\") pod \"apiserver-7bbb656c7d-tt6kr\" (UID: \"74ee62a2-d057-4ddf-8e5f-619cd90645d0\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-tt6kr" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.868980 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/a934d47f-f19d-4d29-aed8-20141e5bcf2b-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-zfnk9\" (UID: \"a934d47f-f19d-4d29-aed8-20141e5bcf2b\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-zfnk9" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.869006 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/72f33e56-9fe4-420b-aa31-8d49ed5c7584-client-ca\") pod \"controller-manager-879f6c89f-2bsvx\" (UID: \"72f33e56-9fe4-420b-aa31-8d49ed5c7584\") " pod="openshift-controller-manager/controller-manager-879f6c89f-2bsvx" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.869035 5050 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/2a07c9a0-b842-4f33-83a4-d6a07eeed391-etcd-client\") pod \"etcd-operator-b45778765-cg8k9\" (UID: \"2a07c9a0-b842-4f33-83a4-d6a07eeed391\") " pod="openshift-etcd-operator/etcd-operator-b45778765-cg8k9" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.869057 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/5c3f6b64-15bf-4147-be4e-414a2569cb58-audit-policies\") pod \"oauth-openshift-558db77b4-cvknj\" (UID: \"5c3f6b64-15bf-4147-be4e-414a2569cb58\") " pod="openshift-authentication/oauth-openshift-558db77b4-cvknj" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.869094 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/74ee62a2-d057-4ddf-8e5f-619cd90645d0-encryption-config\") pod \"apiserver-7bbb656c7d-tt6kr\" (UID: \"74ee62a2-d057-4ddf-8e5f-619cd90645d0\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-tt6kr" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.869962 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/faf517b0-a6d3-44ed-bdf9-b7430d956916-config\") pod \"openshift-apiserver-operator-796bbdcf4f-46rrm\" (UID: \"faf517b0-a6d3-44ed-bdf9-b7430d956916\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-46rrm" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.871484 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-djc5v"] Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.872099 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/1cd7d6f0-caf1-4640-87b1-95d6faf69aa4-machine-approver-tls\") pod \"machine-approver-56656f9798-hlskc\" (UID: \"1cd7d6f0-caf1-4640-87b1-95d6faf69aa4\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-hlskc" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.872251 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-djc5v" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.872515 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-xwq4h"] Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.873026 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/faf517b0-a6d3-44ed-bdf9-b7430d956916-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-46rrm\" (UID: \"faf517b0-a6d3-44ed-bdf9-b7430d956916\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-46rrm" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.873338 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-xwq4h" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.874795 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress/router-default-5444994796-2g8pj"] Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.875007 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.875398 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-n9kmn"] Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.876192 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-n9kmn" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.876429 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-2g8pj" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.877122 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-rzj2p"] Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.877668 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-rzj2p" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.877712 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-rw2cv"] Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.878673 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-rw2cv" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.878974 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-wtbfr"] Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.879747 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-wtbfr" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.879901 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-n5v2l"] Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.880680 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-n5v2l" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.881695 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-ngz59"] Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.882065 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-ngz59" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.882691 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-rgjfm"] Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.883152 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-rgjfm" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.883871 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-jzg9r"] Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.884378 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-jzg9r" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.884802 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-p4p66"] Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.885271 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-p4p66" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.889978 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-5wl59"] Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.891006 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-fcc5l"] Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.892750 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.893188 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-5wl59" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.894167 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-2bsvx"] Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.894189 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-cg8k9"] Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.894233 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-fcc5l" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.894240 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-tcs6j"] Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.906701 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-gvb4q"] Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.907497 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-tcs6j" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.913925 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.924157 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-qjw5q"] Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.932784 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.937030 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-2kj76"] Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.937205 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-qjw5q" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.937508 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-wr6q4"] Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.937789 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-2kj76" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.937985 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-wr6q4" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.939089 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29398470-mm7rl"] Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.939640 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29398470-mm7rl" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.941145 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-9rdbb"] Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.941848 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-9rdbb" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.943322 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-46rrm"] Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.944456 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-ph6h8"] Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.945689 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/dns-default-wdqwt"] Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.946679 5050 util.go:30] "No sandbox for pod can be found. 
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.947179 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-zfnk9"]
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.951706 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle"
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.952638 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-j5zvd"]
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.955217 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-tt6kr"]
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.957119 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-h2nqh"]
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.958293 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-n5v2l"]
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.959723 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-wtbfr"]
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.960670 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-b9jzj"]
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.961988 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-djc5v"]
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.964513 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-bglcd"]
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.965544 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-cvknj"]
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.967369 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt"
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.968745 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-96dfp"]
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.969185 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-n9kmn"]
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.969789 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e118c084-6256-42a4-ad24-f06222a1b3b4-serving-cert\") pod \"authentication-operator-69f744f599-j5zvd\" (UID: \"e118c084-6256-42a4-ad24-f06222a1b3b4\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-j5zvd"
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.969822 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b1e1a221-b849-45e1-ad2c-d0b3b3eeccb5-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-ngl6b\" (UID: \"b1e1a221-b849-45e1-ad2c-d0b3b3eeccb5\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-ngl6b"
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.969849 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/5c3f6b64-15bf-4147-be4e-414a2569cb58-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-cvknj\" (UID: \"5c3f6b64-15bf-4147-be4e-414a2569cb58\") " pod="openshift-authentication/oauth-openshift-558db77b4-cvknj"
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.969869 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5c3f6b64-15bf-4147-be4e-414a2569cb58-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-cvknj\" (UID: \"5c3f6b64-15bf-4147-be4e-414a2569cb58\") " pod="openshift-authentication/oauth-openshift-558db77b4-cvknj"
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.969889 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7vr67\" (UniqueName: \"kubernetes.io/projected/16e600fc-0484-4c1c-8800-bb3958374bad-kube-api-access-7vr67\") pod \"downloads-7954f5f757-gvb4q\" (UID: \"16e600fc-0484-4c1c-8800-bb3958374bad\") " pod="openshift-console/downloads-7954f5f757-gvb4q"
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.969911 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/05b57f72-3ec2-41f4-ba15-4bb73c15a1af-default-certificate\") pod \"router-default-5444994796-2g8pj\" (UID: \"05b57f72-3ec2-41f4-ba15-4bb73c15a1af\") " pod="openshift-ingress/router-default-5444994796-2g8pj"
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.969932 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-55pb2\" (UniqueName: \"kubernetes.io/projected/6e9d92c3-4f51-481c-adcb-e48f3e671025-kube-api-access-55pb2\") pod \"ingress-operator-5b745b69d9-xwq4h\" (UID: \"6e9d92c3-4f51-481c-adcb-e48f3e671025\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-xwq4h"
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.969951 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/005c682b-725d-471a-98ca-08f2760e6603-node-pullsecrets\") pod \"apiserver-76f77b778f-svd5r\" (UID: \"005c682b-725d-471a-98ca-08f2760e6603\") " pod="openshift-apiserver/apiserver-76f77b778f-svd5r"
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.969968 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/005c682b-725d-471a-98ca-08f2760e6603-etcd-serving-ca\") pod \"apiserver-76f77b778f-svd5r\" (UID: \"005c682b-725d-471a-98ca-08f2760e6603\") " pod="openshift-apiserver/apiserver-76f77b778f-svd5r"
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.969992 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/72f33e56-9fe4-420b-aa31-8d49ed5c7584-serving-cert\") pod \"controller-manager-879f6c89f-2bsvx\" (UID: \"72f33e56-9fe4-420b-aa31-8d49ed5c7584\") " pod="openshift-controller-manager/controller-manager-879f6c89f-2bsvx"
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.970011 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/10e4194f-eb7d-4228-8cf8-4925e4a0ec4f-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-96dfp\" (UID: \"10e4194f-eb7d-4228-8cf8-4925e4a0ec4f\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-96dfp"
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.970039 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2a07c9a0-b842-4f33-83a4-d6a07eeed391-serving-cert\") pod \"etcd-operator-b45778765-cg8k9\" (UID: \"2a07c9a0-b842-4f33-83a4-d6a07eeed391\") " pod="openshift-etcd-operator/etcd-operator-b45778765-cg8k9"
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.970057 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/5c3f6b64-15bf-4147-be4e-414a2569cb58-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-cvknj\" (UID: \"5c3f6b64-15bf-4147-be4e-414a2569cb58\") " pod="openshift-authentication/oauth-openshift-558db77b4-cvknj"
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.970074 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/6b5f71c7-bb75-435e-ac44-3328aa8fe73d-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-dzz98\" (UID: \"6b5f71c7-bb75-435e-ac44-3328aa8fe73d\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-dzz98"
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.970093 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/005c682b-725d-471a-98ca-08f2760e6603-config\") pod \"apiserver-76f77b778f-svd5r\" (UID: \"005c682b-725d-471a-98ca-08f2760e6603\") " pod="openshift-apiserver/apiserver-76f77b778f-svd5r"
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.970127 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/005c682b-725d-471a-98ca-08f2760e6603-serving-cert\") pod \"apiserver-76f77b778f-svd5r\" (UID: \"005c682b-725d-471a-98ca-08f2760e6603\") " pod="openshift-apiserver/apiserver-76f77b778f-svd5r"
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.970145 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0a4f6fb7-314a-4896-bcba-c11f622a5e3f-serving-cert\") pod \"openshift-config-operator-7777fb866f-ph6h8\" (UID: \"0a4f6fb7-314a-4896-bcba-c11f622a5e3f\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-ph6h8"
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.970164 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/74ee62a2-d057-4ddf-8e5f-619cd90645d0-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-tt6kr\" (UID: \"74ee62a2-d057-4ddf-8e5f-619cd90645d0\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-tt6kr"
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.970197 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/650d8b46-fa24-4b73-8f95-a0fbfe331fa6-config\") pod \"route-controller-manager-6576b87f9c-b9jzj\" (UID: \"650d8b46-fa24-4b73-8f95-a0fbfe331fa6\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-b9jzj"
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.970214 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/629f02dd-753b-46d2-b808-812468f6c9c5-trusted-ca-bundle\") pod \"console-f9d7485db-b646l\" (UID: \"629f02dd-753b-46d2-b808-812468f6c9c5\") " pod="openshift-console/console-f9d7485db-b646l"
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.970234 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a934d47f-f19d-4d29-aed8-20141e5bcf2b-config\") pod \"machine-api-operator-5694c8668f-zfnk9\" (UID: \"a934d47f-f19d-4d29-aed8-20141e5bcf2b\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-zfnk9"
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.970252 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bpl2x\" (UniqueName: \"kubernetes.io/projected/fe0bb0ba-8376-4437-9cec-fe95bbd77a9a-kube-api-access-bpl2x\") pod \"console-operator-58897d9998-h2nqh\" (UID: \"fe0bb0ba-8376-4437-9cec-fe95bbd77a9a\") " pod="openshift-console-operator/console-operator-58897d9998-h2nqh"
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.970273 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/005c682b-725d-471a-98ca-08f2760e6603-encryption-config\") pod \"apiserver-76f77b778f-svd5r\" (UID: \"005c682b-725d-471a-98ca-08f2760e6603\") " pod="openshift-apiserver/apiserver-76f77b778f-svd5r"
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.970295 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/5c3f6b64-15bf-4147-be4e-414a2569cb58-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-cvknj\" (UID: \"5c3f6b64-15bf-4147-be4e-414a2569cb58\") " pod="openshift-authentication/oauth-openshift-558db77b4-cvknj"
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.970314 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wdw7p\" (UniqueName: \"kubernetes.io/projected/a934d47f-f19d-4d29-aed8-20141e5bcf2b-kube-api-access-wdw7p\") pod \"machine-api-operator-5694c8668f-zfnk9\" (UID: \"a934d47f-f19d-4d29-aed8-20141e5bcf2b\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-zfnk9"
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.970331 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/650d8b46-fa24-4b73-8f95-a0fbfe331fa6-serving-cert\") pod \"route-controller-manager-6576b87f9c-b9jzj\" (UID: \"650d8b46-fa24-4b73-8f95-a0fbfe331fa6\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-b9jzj"
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.970348 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/05b57f72-3ec2-41f4-ba15-4bb73c15a1af-metrics-certs\") pod \"router-default-5444994796-2g8pj\" (UID: \"05b57f72-3ec2-41f4-ba15-4bb73c15a1af\") " pod="openshift-ingress/router-default-5444994796-2g8pj"
for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/05b57f72-3ec2-41f4-ba15-4bb73c15a1af-metrics-certs\") pod \"router-default-5444994796-2g8pj\" (UID: \"05b57f72-3ec2-41f4-ba15-4bb73c15a1af\") " pod="openshift-ingress/router-default-5444994796-2g8pj" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.970367 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fpvmd\" (UniqueName: \"kubernetes.io/projected/2a07c9a0-b842-4f33-83a4-d6a07eeed391-kube-api-access-fpvmd\") pod \"etcd-operator-b45778765-cg8k9\" (UID: \"2a07c9a0-b842-4f33-83a4-d6a07eeed391\") " pod="openshift-etcd-operator/etcd-operator-b45778765-cg8k9" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.970384 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/0d7bc9ed-da92-4305-bda8-78966e7451db-metrics-tls\") pod \"dns-operator-744455d44c-bglcd\" (UID: \"0d7bc9ed-da92-4305-bda8-78966e7451db\") " pod="openshift-dns-operator/dns-operator-744455d44c-bglcd" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.970401 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/005c682b-725d-471a-98ca-08f2760e6603-trusted-ca-bundle\") pod \"apiserver-76f77b778f-svd5r\" (UID: \"005c682b-725d-471a-98ca-08f2760e6603\") " pod="openshift-apiserver/apiserver-76f77b778f-svd5r" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.970429 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/72f33e56-9fe4-420b-aa31-8d49ed5c7584-config\") pod \"controller-manager-879f6c89f-2bsvx\" (UID: \"72f33e56-9fe4-420b-aa31-8d49ed5c7584\") " pod="openshift-controller-manager/controller-manager-879f6c89f-2bsvx" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.970466 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/5c3f6b64-15bf-4147-be4e-414a2569cb58-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-cvknj\" (UID: \"5c3f6b64-15bf-4147-be4e-414a2569cb58\") " pod="openshift-authentication/oauth-openshift-558db77b4-cvknj" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.970486 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/5c3f6b64-15bf-4147-be4e-414a2569cb58-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-cvknj\" (UID: \"5c3f6b64-15bf-4147-be4e-414a2569cb58\") " pod="openshift-authentication/oauth-openshift-558db77b4-cvknj" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.970504 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/74ee62a2-d057-4ddf-8e5f-619cd90645d0-audit-policies\") pod \"apiserver-7bbb656c7d-tt6kr\" (UID: \"74ee62a2-d057-4ddf-8e5f-619cd90645d0\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-tt6kr" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.970522 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2hw79\" (UniqueName: \"kubernetes.io/projected/74ee62a2-d057-4ddf-8e5f-619cd90645d0-kube-api-access-2hw79\") pod \"apiserver-7bbb656c7d-tt6kr\" (UID: \"74ee62a2-d057-4ddf-8e5f-619cd90645d0\") " 
pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-tt6kr" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.970541 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ae2bdaf0-ab3a-4822-b376-945d76181e8f-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-djc5v\" (UID: \"ae2bdaf0-ab3a-4822-b376-945d76181e8f\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-djc5v" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.970561 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/5c3f6b64-15bf-4147-be4e-414a2569cb58-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-cvknj\" (UID: \"5c3f6b64-15bf-4147-be4e-414a2569cb58\") " pod="openshift-authentication/oauth-openshift-558db77b4-cvknj" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.970582 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/6b5f71c7-bb75-435e-ac44-3328aa8fe73d-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-dzz98\" (UID: \"6b5f71c7-bb75-435e-ac44-3328aa8fe73d\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-dzz98" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.970603 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/72f33e56-9fe4-420b-aa31-8d49ed5c7584-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-2bsvx\" (UID: \"72f33e56-9fe4-420b-aa31-8d49ed5c7584\") " pod="openshift-controller-manager/controller-manager-879f6c89f-2bsvx" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.970622 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fe0bb0ba-8376-4437-9cec-fe95bbd77a9a-config\") pod \"console-operator-58897d9998-h2nqh\" (UID: \"fe0bb0ba-8376-4437-9cec-fe95bbd77a9a\") " pod="openshift-console-operator/console-operator-58897d9998-h2nqh" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.970644 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2wpc6\" (UniqueName: \"kubernetes.io/projected/0a4f6fb7-314a-4896-bcba-c11f622a5e3f-kube-api-access-2wpc6\") pod \"openshift-config-operator-7777fb866f-ph6h8\" (UID: \"0a4f6fb7-314a-4896-bcba-c11f622a5e3f\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-ph6h8" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.970664 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/650d8b46-fa24-4b73-8f95-a0fbfe331fa6-client-ca\") pod \"route-controller-manager-6576b87f9c-b9jzj\" (UID: \"650d8b46-fa24-4b73-8f95-a0fbfe331fa6\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-b9jzj" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.970682 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/629f02dd-753b-46d2-b808-812468f6c9c5-console-config\") pod \"console-f9d7485db-b646l\" (UID: 
\"629f02dd-753b-46d2-b808-812468f6c9c5\") " pod="openshift-console/console-f9d7485db-b646l" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.970692 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-jzg9r"] Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.970700 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/629f02dd-753b-46d2-b808-812468f6c9c5-oauth-serving-cert\") pod \"console-f9d7485db-b646l\" (UID: \"629f02dd-753b-46d2-b808-812468f6c9c5\") " pod="openshift-console/console-f9d7485db-b646l" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.970718 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pdknq\" (UniqueName: \"kubernetes.io/projected/6b5f71c7-bb75-435e-ac44-3328aa8fe73d-kube-api-access-pdknq\") pod \"cluster-image-registry-operator-dc59b4c8b-dzz98\" (UID: \"6b5f71c7-bb75-435e-ac44-3328aa8fe73d\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-dzz98" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.970738 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/e118c084-6256-42a4-ad24-f06222a1b3b4-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-j5zvd\" (UID: \"e118c084-6256-42a4-ad24-f06222a1b3b4\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-j5zvd" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.970757 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/05b57f72-3ec2-41f4-ba15-4bb73c15a1af-stats-auth\") pod \"router-default-5444994796-2g8pj\" (UID: \"05b57f72-3ec2-41f4-ba15-4bb73c15a1af\") " pod="openshift-ingress/router-default-5444994796-2g8pj" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.970775 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/fe0bb0ba-8376-4437-9cec-fe95bbd77a9a-trusted-ca\") pod \"console-operator-58897d9998-h2nqh\" (UID: \"fe0bb0ba-8376-4437-9cec-fe95bbd77a9a\") " pod="openshift-console-operator/console-operator-58897d9998-h2nqh" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.970793 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c59xw\" (UniqueName: \"kubernetes.io/projected/e118c084-6256-42a4-ad24-f06222a1b3b4-kube-api-access-c59xw\") pod \"authentication-operator-69f744f599-j5zvd\" (UID: \"e118c084-6256-42a4-ad24-f06222a1b3b4\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-j5zvd" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.970822 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fbsv9\" (UniqueName: \"kubernetes.io/projected/f568ca0b-0692-482b-a80b-7c6c4f8f80ba-kube-api-access-fbsv9\") pod \"migrator-59844c95c7-n9kmn\" (UID: \"f568ca0b-0692-482b-a80b-7c6c4f8f80ba\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-n9kmn" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.970842 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: 
\"kubernetes.io/secret/5c3f6b64-15bf-4147-be4e-414a2569cb58-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-cvknj\" (UID: \"5c3f6b64-15bf-4147-be4e-414a2569cb58\") " pod="openshift-authentication/oauth-openshift-558db77b4-cvknj" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.970860 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-crxcs\" (UniqueName: \"kubernetes.io/projected/5c3f6b64-15bf-4147-be4e-414a2569cb58-kube-api-access-crxcs\") pod \"oauth-openshift-558db77b4-cvknj\" (UID: \"5c3f6b64-15bf-4147-be4e-414a2569cb58\") " pod="openshift-authentication/oauth-openshift-558db77b4-cvknj" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.970879 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/74ee62a2-d057-4ddf-8e5f-619cd90645d0-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-tt6kr\" (UID: \"74ee62a2-d057-4ddf-8e5f-619cd90645d0\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-tt6kr" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.970900 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2a07c9a0-b842-4f33-83a4-d6a07eeed391-config\") pod \"etcd-operator-b45778765-cg8k9\" (UID: \"2a07c9a0-b842-4f33-83a4-d6a07eeed391\") " pod="openshift-etcd-operator/etcd-operator-b45778765-cg8k9" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.970920 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/5c3f6b64-15bf-4147-be4e-414a2569cb58-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-cvknj\" (UID: \"5c3f6b64-15bf-4147-be4e-414a2569cb58\") " pod="openshift-authentication/oauth-openshift-558db77b4-cvknj" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.970939 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e118c084-6256-42a4-ad24-f06222a1b3b4-config\") pod \"authentication-operator-69f744f599-j5zvd\" (UID: \"e118c084-6256-42a4-ad24-f06222a1b3b4\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-j5zvd" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.970958 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/05b57f72-3ec2-41f4-ba15-4bb73c15a1af-service-ca-bundle\") pod \"router-default-5444994796-2g8pj\" (UID: \"05b57f72-3ec2-41f4-ba15-4bb73c15a1af\") " pod="openshift-ingress/router-default-5444994796-2g8pj" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.970977 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/6e9d92c3-4f51-481c-adcb-e48f3e671025-trusted-ca\") pod \"ingress-operator-5b745b69d9-xwq4h\" (UID: \"6e9d92c3-4f51-481c-adcb-e48f3e671025\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-xwq4h" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.970995 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/005c682b-725d-471a-98ca-08f2760e6603-audit-dir\") pod \"apiserver-76f77b778f-svd5r\" (UID: \"005c682b-725d-471a-98ca-08f2760e6603\") " 
pod="openshift-apiserver/apiserver-76f77b778f-svd5r" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.971014 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/6e9d92c3-4f51-481c-adcb-e48f3e671025-metrics-tls\") pod \"ingress-operator-5b745b69d9-xwq4h\" (UID: \"6e9d92c3-4f51-481c-adcb-e48f3e671025\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-xwq4h" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.971033 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/0a4f6fb7-314a-4896-bcba-c11f622a5e3f-available-featuregates\") pod \"openshift-config-operator-7777fb866f-ph6h8\" (UID: \"0a4f6fb7-314a-4896-bcba-c11f622a5e3f\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-ph6h8" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.971055 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6hslq\" (UniqueName: \"kubernetes.io/projected/10e4194f-eb7d-4228-8cf8-4925e4a0ec4f-kube-api-access-6hslq\") pod \"openshift-controller-manager-operator-756b6f6bc6-96dfp\" (UID: \"10e4194f-eb7d-4228-8cf8-4925e4a0ec4f\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-96dfp" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.971074 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/629f02dd-753b-46d2-b808-812468f6c9c5-console-oauth-config\") pod \"console-f9d7485db-b646l\" (UID: \"629f02dd-753b-46d2-b808-812468f6c9c5\") " pod="openshift-console/console-f9d7485db-b646l" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.971095 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/e118c084-6256-42a4-ad24-f06222a1b3b4-service-ca-bundle\") pod \"authentication-operator-69f744f599-j5zvd\" (UID: \"e118c084-6256-42a4-ad24-f06222a1b3b4\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-j5zvd" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.971122 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z9m2p\" (UniqueName: \"kubernetes.io/projected/650d8b46-fa24-4b73-8f95-a0fbfe331fa6-kube-api-access-z9m2p\") pod \"route-controller-manager-6576b87f9c-b9jzj\" (UID: \"650d8b46-fa24-4b73-8f95-a0fbfe331fa6\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-b9jzj" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.971141 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-224cq\" (UniqueName: \"kubernetes.io/projected/005c682b-725d-471a-98ca-08f2760e6603-kube-api-access-224cq\") pod \"apiserver-76f77b778f-svd5r\" (UID: \"005c682b-725d-471a-98ca-08f2760e6603\") " pod="openshift-apiserver/apiserver-76f77b778f-svd5r" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.971160 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-72r27\" (UniqueName: \"kubernetes.io/projected/0d7bc9ed-da92-4305-bda8-78966e7451db-kube-api-access-72r27\") pod \"dns-operator-744455d44c-bglcd\" (UID: \"0d7bc9ed-da92-4305-bda8-78966e7451db\") " 
pod="openshift-dns-operator/dns-operator-744455d44c-bglcd" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.971179 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/6b5f71c7-bb75-435e-ac44-3328aa8fe73d-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-dzz98\" (UID: \"6b5f71c7-bb75-435e-ac44-3328aa8fe73d\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-dzz98" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.971208 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/b1e1a221-b849-45e1-ad2c-d0b3b3eeccb5-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-ngl6b\" (UID: \"b1e1a221-b849-45e1-ad2c-d0b3b3eeccb5\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-ngl6b" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.971228 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n2hll\" (UniqueName: \"kubernetes.io/projected/72f33e56-9fe4-420b-aa31-8d49ed5c7584-kube-api-access-n2hll\") pod \"controller-manager-879f6c89f-2bsvx\" (UID: \"72f33e56-9fe4-420b-aa31-8d49ed5c7584\") " pod="openshift-controller-manager/controller-manager-879f6c89f-2bsvx" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.971248 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/5c3f6b64-15bf-4147-be4e-414a2569cb58-audit-dir\") pod \"oauth-openshift-558db77b4-cvknj\" (UID: \"5c3f6b64-15bf-4147-be4e-414a2569cb58\") " pod="openshift-authentication/oauth-openshift-558db77b4-cvknj" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.971267 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/5c3f6b64-15bf-4147-be4e-414a2569cb58-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-cvknj\" (UID: \"5c3f6b64-15bf-4147-be4e-414a2569cb58\") " pod="openshift-authentication/oauth-openshift-558db77b4-cvknj" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.971287 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f7pm4\" (UniqueName: \"kubernetes.io/projected/629f02dd-753b-46d2-b808-812468f6c9c5-kube-api-access-f7pm4\") pod \"console-f9d7485db-b646l\" (UID: \"629f02dd-753b-46d2-b808-812468f6c9c5\") " pod="openshift-console/console-f9d7485db-b646l" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.971315 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/a934d47f-f19d-4d29-aed8-20141e5bcf2b-images\") pod \"machine-api-operator-5694c8668f-zfnk9\" (UID: \"a934d47f-f19d-4d29-aed8-20141e5bcf2b\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-zfnk9" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.971334 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/10e4194f-eb7d-4228-8cf8-4925e4a0ec4f-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-96dfp\" (UID: \"10e4194f-eb7d-4228-8cf8-4925e4a0ec4f\") " 
pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-96dfp" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.971356 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ae2bdaf0-ab3a-4822-b376-945d76181e8f-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-djc5v\" (UID: \"ae2bdaf0-ab3a-4822-b376-945d76181e8f\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-djc5v" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.971374 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/005c682b-725d-471a-98ca-08f2760e6603-image-import-ca\") pod \"apiserver-76f77b778f-svd5r\" (UID: \"005c682b-725d-471a-98ca-08f2760e6603\") " pod="openshift-apiserver/apiserver-76f77b778f-svd5r" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.971393 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-57vw6\" (UniqueName: \"kubernetes.io/projected/ae2bdaf0-ab3a-4822-b376-945d76181e8f-kube-api-access-57vw6\") pod \"kube-storage-version-migrator-operator-b67b599dd-djc5v\" (UID: \"ae2bdaf0-ab3a-4822-b376-945d76181e8f\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-djc5v" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.971414 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/629f02dd-753b-46d2-b808-812468f6c9c5-service-ca\") pod \"console-f9d7485db-b646l\" (UID: \"629f02dd-753b-46d2-b808-812468f6c9c5\") " pod="openshift-console/console-f9d7485db-b646l" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.971437 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/5c3f6b64-15bf-4147-be4e-414a2569cb58-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-cvknj\" (UID: \"5c3f6b64-15bf-4147-be4e-414a2569cb58\") " pod="openshift-authentication/oauth-openshift-558db77b4-cvknj" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.971473 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/74ee62a2-d057-4ddf-8e5f-619cd90645d0-audit-dir\") pod \"apiserver-7bbb656c7d-tt6kr\" (UID: \"74ee62a2-d057-4ddf-8e5f-619cd90645d0\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-tt6kr" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.971494 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/a934d47f-f19d-4d29-aed8-20141e5bcf2b-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-zfnk9\" (UID: \"a934d47f-f19d-4d29-aed8-20141e5bcf2b\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-zfnk9" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.971513 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/005c682b-725d-471a-98ca-08f2760e6603-etcd-client\") pod \"apiserver-76f77b778f-svd5r\" (UID: \"005c682b-725d-471a-98ca-08f2760e6603\") " 
pod="openshift-apiserver/apiserver-76f77b778f-svd5r" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.971533 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/72f33e56-9fe4-420b-aa31-8d49ed5c7584-client-ca\") pod \"controller-manager-879f6c89f-2bsvx\" (UID: \"72f33e56-9fe4-420b-aa31-8d49ed5c7584\") " pod="openshift-controller-manager/controller-manager-879f6c89f-2bsvx" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.971555 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/2a07c9a0-b842-4f33-83a4-d6a07eeed391-etcd-client\") pod \"etcd-operator-b45778765-cg8k9\" (UID: \"2a07c9a0-b842-4f33-83a4-d6a07eeed391\") " pod="openshift-etcd-operator/etcd-operator-b45778765-cg8k9" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.971573 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/6e9d92c3-4f51-481c-adcb-e48f3e671025-bound-sa-token\") pod \"ingress-operator-5b745b69d9-xwq4h\" (UID: \"6e9d92c3-4f51-481c-adcb-e48f3e671025\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-xwq4h" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.971598 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/5c3f6b64-15bf-4147-be4e-414a2569cb58-audit-policies\") pod \"oauth-openshift-558db77b4-cvknj\" (UID: \"5c3f6b64-15bf-4147-be4e-414a2569cb58\") " pod="openshift-authentication/oauth-openshift-558db77b4-cvknj" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.971618 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/629f02dd-753b-46d2-b808-812468f6c9c5-console-serving-cert\") pod \"console-f9d7485db-b646l\" (UID: \"629f02dd-753b-46d2-b808-812468f6c9c5\") " pod="openshift-console/console-f9d7485db-b646l" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.971641 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/74ee62a2-d057-4ddf-8e5f-619cd90645d0-encryption-config\") pod \"apiserver-7bbb656c7d-tt6kr\" (UID: \"74ee62a2-d057-4ddf-8e5f-619cd90645d0\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-tt6kr" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.971659 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jk6km\" (UniqueName: \"kubernetes.io/projected/05b57f72-3ec2-41f4-ba15-4bb73c15a1af-kube-api-access-jk6km\") pod \"router-default-5444994796-2g8pj\" (UID: \"05b57f72-3ec2-41f4-ba15-4bb73c15a1af\") " pod="openshift-ingress/router-default-5444994796-2g8pj" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.971677 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/fe0bb0ba-8376-4437-9cec-fe95bbd77a9a-serving-cert\") pod \"console-operator-58897d9998-h2nqh\" (UID: \"fe0bb0ba-8376-4437-9cec-fe95bbd77a9a\") " pod="openshift-console-operator/console-operator-58897d9998-h2nqh" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.971696 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" 
(UniqueName: \"kubernetes.io/secret/74ee62a2-d057-4ddf-8e5f-619cd90645d0-serving-cert\") pod \"apiserver-7bbb656c7d-tt6kr\" (UID: \"74ee62a2-d057-4ddf-8e5f-619cd90645d0\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-tt6kr" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.971714 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/2a07c9a0-b842-4f33-83a4-d6a07eeed391-etcd-ca\") pod \"etcd-operator-b45778765-cg8k9\" (UID: \"2a07c9a0-b842-4f33-83a4-d6a07eeed391\") " pod="openshift-etcd-operator/etcd-operator-b45778765-cg8k9" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.971730 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/2a07c9a0-b842-4f33-83a4-d6a07eeed391-etcd-service-ca\") pod \"etcd-operator-b45778765-cg8k9\" (UID: \"2a07c9a0-b842-4f33-83a4-d6a07eeed391\") " pod="openshift-etcd-operator/etcd-operator-b45778765-cg8k9" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.971746 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/005c682b-725d-471a-98ca-08f2760e6603-audit\") pod \"apiserver-76f77b778f-svd5r\" (UID: \"005c682b-725d-471a-98ca-08f2760e6603\") " pod="openshift-apiserver/apiserver-76f77b778f-svd5r" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.971764 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/74ee62a2-d057-4ddf-8e5f-619cd90645d0-etcd-client\") pod \"apiserver-7bbb656c7d-tt6kr\" (UID: \"74ee62a2-d057-4ddf-8e5f-619cd90645d0\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-tt6kr" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.971781 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b1e1a221-b849-45e1-ad2c-d0b3b3eeccb5-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-ngl6b\" (UID: \"b1e1a221-b849-45e1-ad2c-d0b3b3eeccb5\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-ngl6b" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.972492 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-ngl6b"] Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.973158 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/74ee62a2-d057-4ddf-8e5f-619cd90645d0-audit-dir\") pod \"apiserver-7bbb656c7d-tt6kr\" (UID: \"74ee62a2-d057-4ddf-8e5f-619cd90645d0\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-tt6kr" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.973685 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/5c3f6b64-15bf-4147-be4e-414a2569cb58-audit-dir\") pod \"oauth-openshift-558db77b4-cvknj\" (UID: \"5c3f6b64-15bf-4147-be4e-414a2569cb58\") " pod="openshift-authentication/oauth-openshift-558db77b4-cvknj" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.974014 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2a07c9a0-b842-4f33-83a4-d6a07eeed391-config\") pod \"etcd-operator-b45778765-cg8k9\" (UID: 
\"2a07c9a0-b842-4f33-83a4-d6a07eeed391\") " pod="openshift-etcd-operator/etcd-operator-b45778765-cg8k9" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.974287 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/72f33e56-9fe4-420b-aa31-8d49ed5c7584-client-ca\") pod \"controller-manager-879f6c89f-2bsvx\" (UID: \"72f33e56-9fe4-420b-aa31-8d49ed5c7584\") " pod="openshift-controller-manager/controller-manager-879f6c89f-2bsvx" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.974389 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a934d47f-f19d-4d29-aed8-20141e5bcf2b-config\") pod \"machine-api-operator-5694c8668f-zfnk9\" (UID: \"a934d47f-f19d-4d29-aed8-20141e5bcf2b\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-zfnk9" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.974634 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5c3f6b64-15bf-4147-be4e-414a2569cb58-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-cvknj\" (UID: \"5c3f6b64-15bf-4147-be4e-414a2569cb58\") " pod="openshift-authentication/oauth-openshift-558db77b4-cvknj" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.975207 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/5c3f6b64-15bf-4147-be4e-414a2569cb58-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-cvknj\" (UID: \"5c3f6b64-15bf-4147-be4e-414a2569cb58\") " pod="openshift-authentication/oauth-openshift-558db77b4-cvknj" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.975384 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/e118c084-6256-42a4-ad24-f06222a1b3b4-service-ca-bundle\") pod \"authentication-operator-69f744f599-j5zvd\" (UID: \"e118c084-6256-42a4-ad24-f06222a1b3b4\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-j5zvd" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.975759 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/5c3f6b64-15bf-4147-be4e-414a2569cb58-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-cvknj\" (UID: \"5c3f6b64-15bf-4147-be4e-414a2569cb58\") " pod="openshift-authentication/oauth-openshift-558db77b4-cvknj" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.975812 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-rgjfm"] Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.975831 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-b646l"] Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.975873 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/72f33e56-9fe4-420b-aa31-8d49ed5c7584-serving-cert\") pod \"controller-manager-879f6c89f-2bsvx\" (UID: \"72f33e56-9fe4-420b-aa31-8d49ed5c7584\") " pod="openshift-controller-manager/controller-manager-879f6c89f-2bsvx" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.976308 5050 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/2a07c9a0-b842-4f33-83a4-d6a07eeed391-etcd-service-ca\") pod \"etcd-operator-b45778765-cg8k9\" (UID: \"2a07c9a0-b842-4f33-83a4-d6a07eeed391\") " pod="openshift-etcd-operator/etcd-operator-b45778765-cg8k9" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.977348 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2a07c9a0-b842-4f33-83a4-d6a07eeed391-serving-cert\") pod \"etcd-operator-b45778765-cg8k9\" (UID: \"2a07c9a0-b842-4f33-83a4-d6a07eeed391\") " pod="openshift-etcd-operator/etcd-operator-b45778765-cg8k9" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.977754 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/0a4f6fb7-314a-4896-bcba-c11f622a5e3f-available-featuregates\") pod \"openshift-config-operator-7777fb866f-ph6h8\" (UID: \"0a4f6fb7-314a-4896-bcba-c11f622a5e3f\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-ph6h8" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.978261 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/72f33e56-9fe4-420b-aa31-8d49ed5c7584-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-2bsvx\" (UID: \"72f33e56-9fe4-420b-aa31-8d49ed5c7584\") " pod="openshift-controller-manager/controller-manager-879f6c89f-2bsvx" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.978260 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/a934d47f-f19d-4d29-aed8-20141e5bcf2b-images\") pod \"machine-api-operator-5694c8668f-zfnk9\" (UID: \"a934d47f-f19d-4d29-aed8-20141e5bcf2b\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-zfnk9" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.978389 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/74ee62a2-d057-4ddf-8e5f-619cd90645d0-serving-cert\") pod \"apiserver-7bbb656c7d-tt6kr\" (UID: \"74ee62a2-d057-4ddf-8e5f-619cd90645d0\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-tt6kr" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.978664 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/2a07c9a0-b842-4f33-83a4-d6a07eeed391-etcd-ca\") pod \"etcd-operator-b45778765-cg8k9\" (UID: \"2a07c9a0-b842-4f33-83a4-d6a07eeed391\") " pod="openshift-etcd-operator/etcd-operator-b45778765-cg8k9" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.978799 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-canary/ingress-canary-fkkng"] Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.978895 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/74ee62a2-d057-4ddf-8e5f-619cd90645d0-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-tt6kr\" (UID: \"74ee62a2-d057-4ddf-8e5f-619cd90645d0\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-tt6kr" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.979022 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/74ee62a2-d057-4ddf-8e5f-619cd90645d0-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-tt6kr\" (UID: 
\"74ee62a2-d057-4ddf-8e5f-619cd90645d0\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-tt6kr" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.979560 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/74ee62a2-d057-4ddf-8e5f-619cd90645d0-audit-policies\") pod \"apiserver-7bbb656c7d-tt6kr\" (UID: \"74ee62a2-d057-4ddf-8e5f-619cd90645d0\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-tt6kr" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.979624 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-fkkng" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.980902 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/2a07c9a0-b842-4f33-83a4-d6a07eeed391-etcd-client\") pod \"etcd-operator-b45778765-cg8k9\" (UID: \"2a07c9a0-b842-4f33-83a4-d6a07eeed391\") " pod="openshift-etcd-operator/etcd-operator-b45778765-cg8k9" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.981434 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/5c3f6b64-15bf-4147-be4e-414a2569cb58-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-cvknj\" (UID: \"5c3f6b64-15bf-4147-be4e-414a2569cb58\") " pod="openshift-authentication/oauth-openshift-558db77b4-cvknj" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.982582 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/5c3f6b64-15bf-4147-be4e-414a2569cb58-audit-policies\") pod \"oauth-openshift-558db77b4-cvknj\" (UID: \"5c3f6b64-15bf-4147-be4e-414a2569cb58\") " pod="openshift-authentication/oauth-openshift-558db77b4-cvknj" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.982641 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-svd5r"] Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.982904 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/5c3f6b64-15bf-4147-be4e-414a2569cb58-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-cvknj\" (UID: \"5c3f6b64-15bf-4147-be4e-414a2569cb58\") " pod="openshift-authentication/oauth-openshift-558db77b4-cvknj" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.983554 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/5c3f6b64-15bf-4147-be4e-414a2569cb58-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-cvknj\" (UID: \"5c3f6b64-15bf-4147-be4e-414a2569cb58\") " pod="openshift-authentication/oauth-openshift-558db77b4-cvknj" Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.984146 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-rzj2p"] Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.984520 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/74ee62a2-d057-4ddf-8e5f-619cd90645d0-encryption-config\") pod \"apiserver-7bbb656c7d-tt6kr\" (UID: \"74ee62a2-d057-4ddf-8e5f-619cd90645d0\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-tt6kr" Nov 23 14:44:04 crc 
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.984958 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/72f33e56-9fe4-420b-aa31-8d49ed5c7584-config\") pod \"controller-manager-879f6c89f-2bsvx\" (UID: \"72f33e56-9fe4-420b-aa31-8d49ed5c7584\") " pod="openshift-controller-manager/controller-manager-879f6c89f-2bsvx"
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.985136 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e118c084-6256-42a4-ad24-f06222a1b3b4-config\") pod \"authentication-operator-69f744f599-j5zvd\" (UID: \"e118c084-6256-42a4-ad24-f06222a1b3b4\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-j5zvd"
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.985155 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-wdqwt"]
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.985342 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/5c3f6b64-15bf-4147-be4e-414a2569cb58-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-cvknj\" (UID: \"5c3f6b64-15bf-4147-be4e-414a2569cb58\") " pod="openshift-authentication/oauth-openshift-558db77b4-cvknj"
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.985556 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e118c084-6256-42a4-ad24-f06222a1b3b4-serving-cert\") pod \"authentication-operator-69f744f599-j5zvd\" (UID: \"e118c084-6256-42a4-ad24-f06222a1b3b4\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-j5zvd"
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.986046 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/e118c084-6256-42a4-ad24-f06222a1b3b4-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-j5zvd\" (UID: \"e118c084-6256-42a4-ad24-f06222a1b3b4\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-j5zvd"
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.986260 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-dzz98"]
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.986500 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/0d7bc9ed-da92-4305-bda8-78966e7451db-metrics-tls\") pod \"dns-operator-744455d44c-bglcd\" (UID: \"0d7bc9ed-da92-4305-bda8-78966e7451db\") " pod="openshift-dns-operator/dns-operator-744455d44c-bglcd"
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.986741 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/5c3f6b64-15bf-4147-be4e-414a2569cb58-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-cvknj\" (UID: \"5c3f6b64-15bf-4147-be4e-414a2569cb58\") " pod="openshift-authentication/oauth-openshift-558db77b4-cvknj"
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.987181 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/5c3f6b64-15bf-4147-be4e-414a2569cb58-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-cvknj\" (UID: \"5c3f6b64-15bf-4147-be4e-414a2569cb58\") " pod="openshift-authentication/oauth-openshift-558db77b4-cvknj"
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.989020 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-5wl59"]
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.989538 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff"
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.991692 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/5c3f6b64-15bf-4147-be4e-414a2569cb58-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-cvknj\" (UID: \"5c3f6b64-15bf-4147-be4e-414a2569cb58\") " pod="openshift-authentication/oauth-openshift-558db77b4-cvknj"
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.995752 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-fkkng"]
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.995812 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-9rdbb"]
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.995825 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-xwq4h"]
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.996770 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0a4f6fb7-314a-4896-bcba-c11f622a5e3f-serving-cert\") pod \"openshift-config-operator-7777fb866f-ph6h8\" (UID: \"0a4f6fb7-314a-4896-bcba-c11f622a5e3f\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-ph6h8"
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.997175 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/74ee62a2-d057-4ddf-8e5f-619cd90645d0-etcd-client\") pod \"apiserver-7bbb656c7d-tt6kr\" (UID: \"74ee62a2-d057-4ddf-8e5f-619cd90645d0\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-tt6kr"
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.997712 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-ngz59"]
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.998771 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-rw2cv"]
Nov 23 14:44:04 crc kubenswrapper[5050]: I1123 14:44:04.999873 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-p4p66"]
Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.000998 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/5c3f6b64-15bf-4147-be4e-414a2569cb58-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-cvknj\" (UID: \"5c3f6b64-15bf-4147-be4e-414a2569cb58\") " pod="openshift-authentication/oauth-openshift-558db77b4-cvknj"
Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.001048 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-tcs6j"]
Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.002415 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-fcc5l"]
Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.005367 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-qjw5q"]
Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.007048 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-wr6q4"]
Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.008256 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config"
Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.011531 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29398470-mm7rl"]
Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.013746 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-2kj76"]
Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.022269 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-server-tq999"]
Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.023347 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-tq999"
Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.023664 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-jz58r"]
Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.024723 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-jz58r"]
Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.024789 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-jz58r"
Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.027935 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client"
Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.048958 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1"
Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.067776 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert"
Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.073482 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/4c1df8cb-7a6f-46da-9d4e-a1b88f85997e-proxy-tls\") pod \"machine-config-operator-74547568cd-jzg9r\" (UID: \"4c1df8cb-7a6f-46da-9d4e-a1b88f85997e\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-jzg9r"
Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.073540 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/005c682b-725d-471a-98ca-08f2760e6603-image-import-ca\") pod \"apiserver-76f77b778f-svd5r\" (UID: \"005c682b-725d-471a-98ca-08f2760e6603\") " pod="openshift-apiserver/apiserver-76f77b778f-svd5r"
Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.073579 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a9150bbc-0207-48f1-a027-d33d285b085b-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-ngz59\" (UID: \"a9150bbc-0207-48f1-a027-d33d285b085b\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-ngz59"
Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.073607 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/629f02dd-753b-46d2-b808-812468f6c9c5-service-ca\") pod \"console-f9d7485db-b646l\" (UID: \"629f02dd-753b-46d2-b808-812468f6c9c5\") " pod="openshift-console/console-f9d7485db-b646l"
Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.073636 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/4c1df8cb-7a6f-46da-9d4e-a1b88f85997e-images\") pod \"machine-config-operator-74547568cd-jzg9r\" (UID: \"4c1df8cb-7a6f-46da-9d4e-a1b88f85997e\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-jzg9r"
Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.073661 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/fe0bb0ba-8376-4437-9cec-fe95bbd77a9a-serving-cert\") pod \"console-operator-58897d9998-h2nqh\" (UID: \"fe0bb0ba-8376-4437-9cec-fe95bbd77a9a\") " pod="openshift-console-operator/console-operator-58897d9998-h2nqh"
Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.073686 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/005c682b-725d-471a-98ca-08f2760e6603-audit\") pod \"apiserver-76f77b778f-svd5r\" (UID: \"005c682b-725d-471a-98ca-08f2760e6603\") " pod="openshift-apiserver/apiserver-76f77b778f-svd5r"
Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.073723 5050
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b1e1a221-b849-45e1-ad2c-d0b3b3eeccb5-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-ngl6b\" (UID: \"b1e1a221-b849-45e1-ad2c-d0b3b3eeccb5\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-ngl6b" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.073750 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-55pb2\" (UniqueName: \"kubernetes.io/projected/6e9d92c3-4f51-481c-adcb-e48f3e671025-kube-api-access-55pb2\") pod \"ingress-operator-5b745b69d9-xwq4h\" (UID: \"6e9d92c3-4f51-481c-adcb-e48f3e671025\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-xwq4h" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.073774 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/005c682b-725d-471a-98ca-08f2760e6603-node-pullsecrets\") pod \"apiserver-76f77b778f-svd5r\" (UID: \"005c682b-725d-471a-98ca-08f2760e6603\") " pod="openshift-apiserver/apiserver-76f77b778f-svd5r" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.073800 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ab27cc8e-1d04-43dd-bf34-0cfdb48daba7-config-volume\") pod \"collect-profiles-29398470-mm7rl\" (UID: \"ab27cc8e-1d04-43dd-bf34-0cfdb48daba7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29398470-mm7rl" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.073826 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/10e4194f-eb7d-4228-8cf8-4925e4a0ec4f-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-96dfp\" (UID: \"10e4194f-eb7d-4228-8cf8-4925e4a0ec4f\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-96dfp" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.073862 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nhb5x\" (UniqueName: \"kubernetes.io/projected/4c1df8cb-7a6f-46da-9d4e-a1b88f85997e-kube-api-access-nhb5x\") pod \"machine-config-operator-74547568cd-jzg9r\" (UID: \"4c1df8cb-7a6f-46da-9d4e-a1b88f85997e\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-jzg9r" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.073887 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/005c682b-725d-471a-98ca-08f2760e6603-config\") pod \"apiserver-76f77b778f-svd5r\" (UID: \"005c682b-725d-471a-98ca-08f2760e6603\") " pod="openshift-apiserver/apiserver-76f77b778f-svd5r" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.073932 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/650d8b46-fa24-4b73-8f95-a0fbfe331fa6-config\") pod \"route-controller-manager-6576b87f9c-b9jzj\" (UID: \"650d8b46-fa24-4b73-8f95-a0fbfe331fa6\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-b9jzj" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.073955 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/005c682b-725d-471a-98ca-08f2760e6603-serving-cert\") pod \"apiserver-76f77b778f-svd5r\" (UID: \"005c682b-725d-471a-98ca-08f2760e6603\") " pod="openshift-apiserver/apiserver-76f77b778f-svd5r" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.074002 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bpl2x\" (UniqueName: \"kubernetes.io/projected/fe0bb0ba-8376-4437-9cec-fe95bbd77a9a-kube-api-access-bpl2x\") pod \"console-operator-58897d9998-h2nqh\" (UID: \"fe0bb0ba-8376-4437-9cec-fe95bbd77a9a\") " pod="openshift-console-operator/console-operator-58897d9998-h2nqh" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.074029 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/05b57f72-3ec2-41f4-ba15-4bb73c15a1af-metrics-certs\") pod \"router-default-5444994796-2g8pj\" (UID: \"05b57f72-3ec2-41f4-ba15-4bb73c15a1af\") " pod="openshift-ingress/router-default-5444994796-2g8pj" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.074052 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ab27cc8e-1d04-43dd-bf34-0cfdb48daba7-secret-volume\") pod \"collect-profiles-29398470-mm7rl\" (UID: \"ab27cc8e-1d04-43dd-bf34-0cfdb48daba7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29398470-mm7rl" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.074087 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/629f02dd-753b-46d2-b808-812468f6c9c5-console-config\") pod \"console-f9d7485db-b646l\" (UID: \"629f02dd-753b-46d2-b808-812468f6c9c5\") " pod="openshift-console/console-f9d7485db-b646l" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.074115 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fbsv9\" (UniqueName: \"kubernetes.io/projected/f568ca0b-0692-482b-a80b-7c6c4f8f80ba-kube-api-access-fbsv9\") pod \"migrator-59844c95c7-n9kmn\" (UID: \"f568ca0b-0692-482b-a80b-7c6c4f8f80ba\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-n9kmn" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.074142 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dtfpc\" (UniqueName: \"kubernetes.io/projected/1af9e06d-2732-4be4-bff7-eee682c1d69d-kube-api-access-dtfpc\") pod \"dns-default-wdqwt\" (UID: \"1af9e06d-2732-4be4-bff7-eee682c1d69d\") " pod="openshift-dns/dns-default-wdqwt" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.074170 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/05b57f72-3ec2-41f4-ba15-4bb73c15a1af-service-ca-bundle\") pod \"router-default-5444994796-2g8pj\" (UID: \"05b57f72-3ec2-41f4-ba15-4bb73c15a1af\") " pod="openshift-ingress/router-default-5444994796-2g8pj" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.074203 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/6e9d92c3-4f51-481c-adcb-e48f3e671025-trusted-ca\") pod \"ingress-operator-5b745b69d9-xwq4h\" (UID: \"6e9d92c3-4f51-481c-adcb-e48f3e671025\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-xwq4h" Nov 23 14:44:05 crc kubenswrapper[5050]: 
I1123 14:44:05.074235 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/005c682b-725d-471a-98ca-08f2760e6603-audit-dir\") pod \"apiserver-76f77b778f-svd5r\" (UID: \"005c682b-725d-471a-98ca-08f2760e6603\") " pod="openshift-apiserver/apiserver-76f77b778f-svd5r" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.074263 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1af9e06d-2732-4be4-bff7-eee682c1d69d-config-volume\") pod \"dns-default-wdqwt\" (UID: \"1af9e06d-2732-4be4-bff7-eee682c1d69d\") " pod="openshift-dns/dns-default-wdqwt" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.074292 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z9m2p\" (UniqueName: \"kubernetes.io/projected/650d8b46-fa24-4b73-8f95-a0fbfe331fa6-kube-api-access-z9m2p\") pod \"route-controller-manager-6576b87f9c-b9jzj\" (UID: \"650d8b46-fa24-4b73-8f95-a0fbfe331fa6\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-b9jzj" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.074317 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-224cq\" (UniqueName: \"kubernetes.io/projected/005c682b-725d-471a-98ca-08f2760e6603-kube-api-access-224cq\") pod \"apiserver-76f77b778f-svd5r\" (UID: \"005c682b-725d-471a-98ca-08f2760e6603\") " pod="openshift-apiserver/apiserver-76f77b778f-svd5r" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.074342 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/a9150bbc-0207-48f1-a027-d33d285b085b-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-ngz59\" (UID: \"a9150bbc-0207-48f1-a027-d33d285b085b\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-ngz59" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.074377 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/6b5f71c7-bb75-435e-ac44-3328aa8fe73d-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-dzz98\" (UID: \"6b5f71c7-bb75-435e-ac44-3328aa8fe73d\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-dzz98" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.074404 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/b1e1a221-b849-45e1-ad2c-d0b3b3eeccb5-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-ngl6b\" (UID: \"b1e1a221-b849-45e1-ad2c-d0b3b3eeccb5\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-ngl6b" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.074431 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f7pm4\" (UniqueName: \"kubernetes.io/projected/629f02dd-753b-46d2-b808-812468f6c9c5-kube-api-access-f7pm4\") pod \"console-f9d7485db-b646l\" (UID: \"629f02dd-753b-46d2-b808-812468f6c9c5\") " pod="openshift-console/console-f9d7485db-b646l" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.074491 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/10e4194f-eb7d-4228-8cf8-4925e4a0ec4f-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-96dfp\" (UID: \"10e4194f-eb7d-4228-8cf8-4925e4a0ec4f\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-96dfp" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.074520 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ae2bdaf0-ab3a-4822-b376-945d76181e8f-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-djc5v\" (UID: \"ae2bdaf0-ab3a-4822-b376-945d76181e8f\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-djc5v" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.074551 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-57vw6\" (UniqueName: \"kubernetes.io/projected/ae2bdaf0-ab3a-4822-b376-945d76181e8f-kube-api-access-57vw6\") pod \"kube-storage-version-migrator-operator-b67b599dd-djc5v\" (UID: \"ae2bdaf0-ab3a-4822-b376-945d76181e8f\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-djc5v" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.074584 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/005c682b-725d-471a-98ca-08f2760e6603-etcd-client\") pod \"apiserver-76f77b778f-svd5r\" (UID: \"005c682b-725d-471a-98ca-08f2760e6603\") " pod="openshift-apiserver/apiserver-76f77b778f-svd5r" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.074615 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/6e9d92c3-4f51-481c-adcb-e48f3e671025-bound-sa-token\") pod \"ingress-operator-5b745b69d9-xwq4h\" (UID: \"6e9d92c3-4f51-481c-adcb-e48f3e671025\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-xwq4h" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.074641 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a9150bbc-0207-48f1-a027-d33d285b085b-config\") pod \"kube-apiserver-operator-766d6c64bb-ngz59\" (UID: \"a9150bbc-0207-48f1-a027-d33d285b085b\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-ngz59" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.074672 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/629f02dd-753b-46d2-b808-812468f6c9c5-console-serving-cert\") pod \"console-f9d7485db-b646l\" (UID: \"629f02dd-753b-46d2-b808-812468f6c9c5\") " pod="openshift-console/console-f9d7485db-b646l" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.074699 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jk6km\" (UniqueName: \"kubernetes.io/projected/05b57f72-3ec2-41f4-ba15-4bb73c15a1af-kube-api-access-jk6km\") pod \"router-default-5444994796-2g8pj\" (UID: \"05b57f72-3ec2-41f4-ba15-4bb73c15a1af\") " pod="openshift-ingress/router-default-5444994796-2g8pj" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.074726 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b1e1a221-b849-45e1-ad2c-d0b3b3eeccb5-config\") pod 
\"openshift-kube-scheduler-operator-5fdd9b5758-ngl6b\" (UID: \"b1e1a221-b849-45e1-ad2c-d0b3b3eeccb5\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-ngl6b" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.074754 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/4c1df8cb-7a6f-46da-9d4e-a1b88f85997e-auth-proxy-config\") pod \"machine-config-operator-74547568cd-jzg9r\" (UID: \"4c1df8cb-7a6f-46da-9d4e-a1b88f85997e\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-jzg9r" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.074760 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/005c682b-725d-471a-98ca-08f2760e6603-node-pullsecrets\") pod \"apiserver-76f77b778f-svd5r\" (UID: \"005c682b-725d-471a-98ca-08f2760e6603\") " pod="openshift-apiserver/apiserver-76f77b778f-svd5r" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.074783 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/05b57f72-3ec2-41f4-ba15-4bb73c15a1af-default-certificate\") pod \"router-default-5444994796-2g8pj\" (UID: \"05b57f72-3ec2-41f4-ba15-4bb73c15a1af\") " pod="openshift-ingress/router-default-5444994796-2g8pj" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.074893 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/005c682b-725d-471a-98ca-08f2760e6603-etcd-serving-ca\") pod \"apiserver-76f77b778f-svd5r\" (UID: \"005c682b-725d-471a-98ca-08f2760e6603\") " pod="openshift-apiserver/apiserver-76f77b778f-svd5r" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.074968 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/6b5f71c7-bb75-435e-ac44-3328aa8fe73d-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-dzz98\" (UID: \"6b5f71c7-bb75-435e-ac44-3328aa8fe73d\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-dzz98" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.075015 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/629f02dd-753b-46d2-b808-812468f6c9c5-trusted-ca-bundle\") pod \"console-f9d7485db-b646l\" (UID: \"629f02dd-753b-46d2-b808-812468f6c9c5\") " pod="openshift-console/console-f9d7485db-b646l" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.075052 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/005c682b-725d-471a-98ca-08f2760e6603-encryption-config\") pod \"apiserver-76f77b778f-svd5r\" (UID: \"005c682b-725d-471a-98ca-08f2760e6603\") " pod="openshift-apiserver/apiserver-76f77b778f-svd5r" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.075121 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/1af9e06d-2732-4be4-bff7-eee682c1d69d-metrics-tls\") pod \"dns-default-wdqwt\" (UID: \"1af9e06d-2732-4be4-bff7-eee682c1d69d\") " pod="openshift-dns/dns-default-wdqwt" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.075213 5050 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/650d8b46-fa24-4b73-8f95-a0fbfe331fa6-serving-cert\") pod \"route-controller-manager-6576b87f9c-b9jzj\" (UID: \"650d8b46-fa24-4b73-8f95-a0fbfe331fa6\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-b9jzj" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.075249 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/005c682b-725d-471a-98ca-08f2760e6603-trusted-ca-bundle\") pod \"apiserver-76f77b778f-svd5r\" (UID: \"005c682b-725d-471a-98ca-08f2760e6603\") " pod="openshift-apiserver/apiserver-76f77b778f-svd5r" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.075332 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ae2bdaf0-ab3a-4822-b376-945d76181e8f-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-djc5v\" (UID: \"ae2bdaf0-ab3a-4822-b376-945d76181e8f\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-djc5v" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.075372 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/eb9622cf-2c2f-4ed1-8fed-da0fc67be49a-serving-cert\") pod \"service-ca-operator-777779d784-9rdbb\" (UID: \"eb9622cf-2c2f-4ed1-8fed-da0fc67be49a\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-9rdbb" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.075422 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/6b5f71c7-bb75-435e-ac44-3328aa8fe73d-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-dzz98\" (UID: \"6b5f71c7-bb75-435e-ac44-3328aa8fe73d\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-dzz98" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.075484 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fe0bb0ba-8376-4437-9cec-fe95bbd77a9a-config\") pod \"console-operator-58897d9998-h2nqh\" (UID: \"fe0bb0ba-8376-4437-9cec-fe95bbd77a9a\") " pod="openshift-console-operator/console-operator-58897d9998-h2nqh" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.075519 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/eb9622cf-2c2f-4ed1-8fed-da0fc67be49a-config\") pod \"service-ca-operator-777779d784-9rdbb\" (UID: \"eb9622cf-2c2f-4ed1-8fed-da0fc67be49a\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-9rdbb" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.075561 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/650d8b46-fa24-4b73-8f95-a0fbfe331fa6-client-ca\") pod \"route-controller-manager-6576b87f9c-b9jzj\" (UID: \"650d8b46-fa24-4b73-8f95-a0fbfe331fa6\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-b9jzj" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.075567 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/005c682b-725d-471a-98ca-08f2760e6603-config\") pod \"apiserver-76f77b778f-svd5r\" (UID: \"005c682b-725d-471a-98ca-08f2760e6603\") " pod="openshift-apiserver/apiserver-76f77b778f-svd5r" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.075593 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/629f02dd-753b-46d2-b808-812468f6c9c5-oauth-serving-cert\") pod \"console-f9d7485db-b646l\" (UID: \"629f02dd-753b-46d2-b808-812468f6c9c5\") " pod="openshift-console/console-f9d7485db-b646l" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.075654 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pdknq\" (UniqueName: \"kubernetes.io/projected/6b5f71c7-bb75-435e-ac44-3328aa8fe73d-kube-api-access-pdknq\") pod \"cluster-image-registry-operator-dc59b4c8b-dzz98\" (UID: \"6b5f71c7-bb75-435e-ac44-3328aa8fe73d\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-dzz98" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.075693 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/05b57f72-3ec2-41f4-ba15-4bb73c15a1af-stats-auth\") pod \"router-default-5444994796-2g8pj\" (UID: \"05b57f72-3ec2-41f4-ba15-4bb73c15a1af\") " pod="openshift-ingress/router-default-5444994796-2g8pj" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.075724 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/fe0bb0ba-8376-4437-9cec-fe95bbd77a9a-trusted-ca\") pod \"console-operator-58897d9998-h2nqh\" (UID: \"fe0bb0ba-8376-4437-9cec-fe95bbd77a9a\") " pod="openshift-console-operator/console-operator-58897d9998-h2nqh" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.075790 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6hslq\" (UniqueName: \"kubernetes.io/projected/10e4194f-eb7d-4228-8cf8-4925e4a0ec4f-kube-api-access-6hslq\") pod \"openshift-controller-manager-operator-756b6f6bc6-96dfp\" (UID: \"10e4194f-eb7d-4228-8cf8-4925e4a0ec4f\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-96dfp" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.075825 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/6e9d92c3-4f51-481c-adcb-e48f3e671025-metrics-tls\") pod \"ingress-operator-5b745b69d9-xwq4h\" (UID: \"6e9d92c3-4f51-481c-adcb-e48f3e671025\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-xwq4h" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.075860 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l9f57\" (UniqueName: \"kubernetes.io/projected/ab27cc8e-1d04-43dd-bf34-0cfdb48daba7-kube-api-access-l9f57\") pod \"collect-profiles-29398470-mm7rl\" (UID: \"ab27cc8e-1d04-43dd-bf34-0cfdb48daba7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29398470-mm7rl" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.075893 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nqtth\" (UniqueName: \"kubernetes.io/projected/eb9622cf-2c2f-4ed1-8fed-da0fc67be49a-kube-api-access-nqtth\") pod \"service-ca-operator-777779d784-9rdbb\" (UID: 
\"eb9622cf-2c2f-4ed1-8fed-da0fc67be49a\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-9rdbb" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.075947 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/629f02dd-753b-46d2-b808-812468f6c9c5-console-oauth-config\") pod \"console-f9d7485db-b646l\" (UID: \"629f02dd-753b-46d2-b808-812468f6c9c5\") " pod="openshift-console/console-f9d7485db-b646l" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.076542 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/629f02dd-753b-46d2-b808-812468f6c9c5-service-ca\") pod \"console-f9d7485db-b646l\" (UID: \"629f02dd-753b-46d2-b808-812468f6c9c5\") " pod="openshift-console/console-f9d7485db-b646l" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.076583 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/650d8b46-fa24-4b73-8f95-a0fbfe331fa6-config\") pod \"route-controller-manager-6576b87f9c-b9jzj\" (UID: \"650d8b46-fa24-4b73-8f95-a0fbfe331fa6\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-b9jzj" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.077571 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/629f02dd-753b-46d2-b808-812468f6c9c5-console-config\") pod \"console-f9d7485db-b646l\" (UID: \"629f02dd-753b-46d2-b808-812468f6c9c5\") " pod="openshift-console/console-f9d7485db-b646l" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.080009 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/629f02dd-753b-46d2-b808-812468f6c9c5-oauth-serving-cert\") pod \"console-f9d7485db-b646l\" (UID: \"629f02dd-753b-46d2-b808-812468f6c9c5\") " pod="openshift-console/console-f9d7485db-b646l" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.080009 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b1e1a221-b849-45e1-ad2c-d0b3b3eeccb5-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-ngl6b\" (UID: \"b1e1a221-b849-45e1-ad2c-d0b3b3eeccb5\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-ngl6b" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.080153 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/005c682b-725d-471a-98ca-08f2760e6603-audit\") pod \"apiserver-76f77b778f-svd5r\" (UID: \"005c682b-725d-471a-98ca-08f2760e6603\") " pod="openshift-apiserver/apiserver-76f77b778f-svd5r" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.080193 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/fe0bb0ba-8376-4437-9cec-fe95bbd77a9a-serving-cert\") pod \"console-operator-58897d9998-h2nqh\" (UID: \"fe0bb0ba-8376-4437-9cec-fe95bbd77a9a\") " pod="openshift-console-operator/console-operator-58897d9998-h2nqh" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.081035 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/6b5f71c7-bb75-435e-ac44-3328aa8fe73d-image-registry-operator-tls\") pod 
\"cluster-image-registry-operator-dc59b4c8b-dzz98\" (UID: \"6b5f71c7-bb75-435e-ac44-3328aa8fe73d\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-dzz98" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.080215 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/005c682b-725d-471a-98ca-08f2760e6603-audit-dir\") pod \"apiserver-76f77b778f-svd5r\" (UID: \"005c682b-725d-471a-98ca-08f2760e6603\") " pod="openshift-apiserver/apiserver-76f77b778f-svd5r" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.081319 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fe0bb0ba-8376-4437-9cec-fe95bbd77a9a-config\") pod \"console-operator-58897d9998-h2nqh\" (UID: \"fe0bb0ba-8376-4437-9cec-fe95bbd77a9a\") " pod="openshift-console-operator/console-operator-58897d9998-h2nqh" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.081693 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/005c682b-725d-471a-98ca-08f2760e6603-trusted-ca-bundle\") pod \"apiserver-76f77b778f-svd5r\" (UID: \"005c682b-725d-471a-98ca-08f2760e6603\") " pod="openshift-apiserver/apiserver-76f77b778f-svd5r" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.081786 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/005c682b-725d-471a-98ca-08f2760e6603-etcd-serving-ca\") pod \"apiserver-76f77b778f-svd5r\" (UID: \"005c682b-725d-471a-98ca-08f2760e6603\") " pod="openshift-apiserver/apiserver-76f77b778f-svd5r" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.081843 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/10e4194f-eb7d-4228-8cf8-4925e4a0ec4f-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-96dfp\" (UID: \"10e4194f-eb7d-4228-8cf8-4925e4a0ec4f\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-96dfp" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.082377 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/650d8b46-fa24-4b73-8f95-a0fbfe331fa6-client-ca\") pod \"route-controller-manager-6576b87f9c-b9jzj\" (UID: \"650d8b46-fa24-4b73-8f95-a0fbfe331fa6\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-b9jzj" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.082772 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/629f02dd-753b-46d2-b808-812468f6c9c5-trusted-ca-bundle\") pod \"console-f9d7485db-b646l\" (UID: \"629f02dd-753b-46d2-b808-812468f6c9c5\") " pod="openshift-console/console-f9d7485db-b646l" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.082968 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/fe0bb0ba-8376-4437-9cec-fe95bbd77a9a-trusted-ca\") pod \"console-operator-58897d9998-h2nqh\" (UID: \"fe0bb0ba-8376-4437-9cec-fe95bbd77a9a\") " pod="openshift-console-operator/console-operator-58897d9998-h2nqh" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.082962 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/005c682b-725d-471a-98ca-08f2760e6603-serving-cert\") pod \"apiserver-76f77b778f-svd5r\" (UID: \"005c682b-725d-471a-98ca-08f2760e6603\") " pod="openshift-apiserver/apiserver-76f77b778f-svd5r" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.083142 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/10e4194f-eb7d-4228-8cf8-4925e4a0ec4f-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-96dfp\" (UID: \"10e4194f-eb7d-4228-8cf8-4925e4a0ec4f\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-96dfp" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.083508 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/629f02dd-753b-46d2-b808-812468f6c9c5-console-oauth-config\") pod \"console-f9d7485db-b646l\" (UID: \"629f02dd-753b-46d2-b808-812468f6c9c5\") " pod="openshift-console/console-f9d7485db-b646l" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.084286 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/6b5f71c7-bb75-435e-ac44-3328aa8fe73d-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-dzz98\" (UID: \"6b5f71c7-bb75-435e-ac44-3328aa8fe73d\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-dzz98" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.085146 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/650d8b46-fa24-4b73-8f95-a0fbfe331fa6-serving-cert\") pod \"route-controller-manager-6576b87f9c-b9jzj\" (UID: \"650d8b46-fa24-4b73-8f95-a0fbfe331fa6\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-b9jzj" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.085585 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b1e1a221-b849-45e1-ad2c-d0b3b3eeccb5-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-ngl6b\" (UID: \"b1e1a221-b849-45e1-ad2c-d0b3b3eeccb5\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-ngl6b" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.086597 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/629f02dd-753b-46d2-b808-812468f6c9c5-console-serving-cert\") pod \"console-f9d7485db-b646l\" (UID: \"629f02dd-753b-46d2-b808-812468f6c9c5\") " pod="openshift-console/console-f9d7485db-b646l" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.087108 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/005c682b-725d-471a-98ca-08f2760e6603-encryption-config\") pod \"apiserver-76f77b778f-svd5r\" (UID: \"005c682b-725d-471a-98ca-08f2760e6603\") " pod="openshift-apiserver/apiserver-76f77b778f-svd5r" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.087802 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.088732 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/005c682b-725d-471a-98ca-08f2760e6603-etcd-client\") pod 
\"apiserver-76f77b778f-svd5r\" (UID: \"005c682b-725d-471a-98ca-08f2760e6603\") " pod="openshift-apiserver/apiserver-76f77b778f-svd5r" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.097739 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/005c682b-725d-471a-98ca-08f2760e6603-image-import-ca\") pod \"apiserver-76f77b778f-svd5r\" (UID: \"005c682b-725d-471a-98ca-08f2760e6603\") " pod="openshift-apiserver/apiserver-76f77b778f-svd5r" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.142037 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j8gnx\" (UniqueName: \"kubernetes.io/projected/1cd7d6f0-caf1-4640-87b1-95d6faf69aa4-kube-api-access-j8gnx\") pod \"machine-approver-56656f9798-hlskc\" (UID: \"1cd7d6f0-caf1-4640-87b1-95d6faf69aa4\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-hlskc" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.163822 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vvkg6\" (UniqueName: \"kubernetes.io/projected/faf517b0-a6d3-44ed-bdf9-b7430d956916-kube-api-access-vvkg6\") pod \"openshift-apiserver-operator-796bbdcf4f-46rrm\" (UID: \"faf517b0-a6d3-44ed-bdf9-b7430d956916\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-46rrm" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.167740 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.176709 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ab27cc8e-1d04-43dd-bf34-0cfdb48daba7-secret-volume\") pod \"collect-profiles-29398470-mm7rl\" (UID: \"ab27cc8e-1d04-43dd-bf34-0cfdb48daba7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29398470-mm7rl" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.176772 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dtfpc\" (UniqueName: \"kubernetes.io/projected/1af9e06d-2732-4be4-bff7-eee682c1d69d-kube-api-access-dtfpc\") pod \"dns-default-wdqwt\" (UID: \"1af9e06d-2732-4be4-bff7-eee682c1d69d\") " pod="openshift-dns/dns-default-wdqwt" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.176811 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1af9e06d-2732-4be4-bff7-eee682c1d69d-config-volume\") pod \"dns-default-wdqwt\" (UID: \"1af9e06d-2732-4be4-bff7-eee682c1d69d\") " pod="openshift-dns/dns-default-wdqwt" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.176842 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/a9150bbc-0207-48f1-a027-d33d285b085b-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-ngz59\" (UID: \"a9150bbc-0207-48f1-a027-d33d285b085b\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-ngz59" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.176914 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a9150bbc-0207-48f1-a027-d33d285b085b-config\") pod \"kube-apiserver-operator-766d6c64bb-ngz59\" (UID: 
\"a9150bbc-0207-48f1-a027-d33d285b085b\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-ngz59" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.176942 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/4c1df8cb-7a6f-46da-9d4e-a1b88f85997e-auth-proxy-config\") pod \"machine-config-operator-74547568cd-jzg9r\" (UID: \"4c1df8cb-7a6f-46da-9d4e-a1b88f85997e\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-jzg9r" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.176981 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/1af9e06d-2732-4be4-bff7-eee682c1d69d-metrics-tls\") pod \"dns-default-wdqwt\" (UID: \"1af9e06d-2732-4be4-bff7-eee682c1d69d\") " pod="openshift-dns/dns-default-wdqwt" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.177028 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/eb9622cf-2c2f-4ed1-8fed-da0fc67be49a-serving-cert\") pod \"service-ca-operator-777779d784-9rdbb\" (UID: \"eb9622cf-2c2f-4ed1-8fed-da0fc67be49a\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-9rdbb" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.177055 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/eb9622cf-2c2f-4ed1-8fed-da0fc67be49a-config\") pod \"service-ca-operator-777779d784-9rdbb\" (UID: \"eb9622cf-2c2f-4ed1-8fed-da0fc67be49a\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-9rdbb" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.177119 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l9f57\" (UniqueName: \"kubernetes.io/projected/ab27cc8e-1d04-43dd-bf34-0cfdb48daba7-kube-api-access-l9f57\") pod \"collect-profiles-29398470-mm7rl\" (UID: \"ab27cc8e-1d04-43dd-bf34-0cfdb48daba7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29398470-mm7rl" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.177142 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nqtth\" (UniqueName: \"kubernetes.io/projected/eb9622cf-2c2f-4ed1-8fed-da0fc67be49a-kube-api-access-nqtth\") pod \"service-ca-operator-777779d784-9rdbb\" (UID: \"eb9622cf-2c2f-4ed1-8fed-da0fc67be49a\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-9rdbb" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.177183 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/4c1df8cb-7a6f-46da-9d4e-a1b88f85997e-proxy-tls\") pod \"machine-config-operator-74547568cd-jzg9r\" (UID: \"4c1df8cb-7a6f-46da-9d4e-a1b88f85997e\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-jzg9r" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.177201 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a9150bbc-0207-48f1-a027-d33d285b085b-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-ngz59\" (UID: \"a9150bbc-0207-48f1-a027-d33d285b085b\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-ngz59" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.177227 5050 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/4c1df8cb-7a6f-46da-9d4e-a1b88f85997e-images\") pod \"machine-config-operator-74547568cd-jzg9r\" (UID: \"4c1df8cb-7a6f-46da-9d4e-a1b88f85997e\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-jzg9r" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.177257 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ab27cc8e-1d04-43dd-bf34-0cfdb48daba7-config-volume\") pod \"collect-profiles-29398470-mm7rl\" (UID: \"ab27cc8e-1d04-43dd-bf34-0cfdb48daba7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29398470-mm7rl" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.177283 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nhb5x\" (UniqueName: \"kubernetes.io/projected/4c1df8cb-7a6f-46da-9d4e-a1b88f85997e-kube-api-access-nhb5x\") pod \"machine-config-operator-74547568cd-jzg9r\" (UID: \"4c1df8cb-7a6f-46da-9d4e-a1b88f85997e\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-jzg9r" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.179346 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/4c1df8cb-7a6f-46da-9d4e-a1b88f85997e-auth-proxy-config\") pod \"machine-config-operator-74547568cd-jzg9r\" (UID: \"4c1df8cb-7a6f-46da-9d4e-a1b88f85997e\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-jzg9r" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.188054 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.208291 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.226910 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.231354 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ae2bdaf0-ab3a-4822-b376-945d76181e8f-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-djc5v\" (UID: \"ae2bdaf0-ab3a-4822-b376-945d76181e8f\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-djc5v" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.247353 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.252376 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ae2bdaf0-ab3a-4822-b376-945d76181e8f-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-djc5v\" (UID: \"ae2bdaf0-ab3a-4822-b376-945d76181e8f\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-djc5v" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.266993 5050 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-ingress-operator"/"openshift-service-ca.crt" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.286920 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.292860 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/6e9d92c3-4f51-481c-adcb-e48f3e671025-metrics-tls\") pod \"ingress-operator-5b745b69d9-xwq4h\" (UID: \"6e9d92c3-4f51-481c-adcb-e48f3e671025\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-xwq4h" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.301839 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-hlskc" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.308043 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.318957 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-46rrm" Nov 23 14:44:05 crc kubenswrapper[5050]: W1123 14:44:05.330706 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1cd7d6f0_caf1_4640_87b1_95d6faf69aa4.slice/crio-979093e54b79c273c8d283124a4432db7ab803337a6acacf0952a6573d1ed929 WatchSource:0}: Error finding container 979093e54b79c273c8d283124a4432db7ab803337a6acacf0952a6573d1ed929: Status 404 returned error can't find the container with id 979093e54b79c273c8d283124a4432db7ab803337a6acacf0952a6573d1ed929 Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.343971 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.347657 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.349623 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/6e9d92c3-4f51-481c-adcb-e48f3e671025-trusted-ca\") pod \"ingress-operator-5b745b69d9-xwq4h\" (UID: \"6e9d92c3-4f51-481c-adcb-e48f3e671025\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-xwq4h" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.367244 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.386968 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.392317 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-hlskc" event={"ID":"1cd7d6f0-caf1-4640-87b1-95d6faf69aa4","Type":"ContainerStarted","Data":"979093e54b79c273c8d283124a4432db7ab803337a6acacf0952a6573d1ed929"} Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.407674 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 
14:44:05.427806 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.450533 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.468955 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.485572 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/05b57f72-3ec2-41f4-ba15-4bb73c15a1af-default-certificate\") pod \"router-default-5444994796-2g8pj\" (UID: \"05b57f72-3ec2-41f4-ba15-4bb73c15a1af\") " pod="openshift-ingress/router-default-5444994796-2g8pj" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.488210 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.504108 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/05b57f72-3ec2-41f4-ba15-4bb73c15a1af-stats-auth\") pod \"router-default-5444994796-2g8pj\" (UID: \"05b57f72-3ec2-41f4-ba15-4bb73c15a1af\") " pod="openshift-ingress/router-default-5444994796-2g8pj" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.508708 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.555994 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.560120 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.571051 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.572470 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/05b57f72-3ec2-41f4-ba15-4bb73c15a1af-metrics-certs\") pod \"router-default-5444994796-2g8pj\" (UID: \"05b57f72-3ec2-41f4-ba15-4bb73c15a1af\") " pod="openshift-ingress/router-default-5444994796-2g8pj" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.572601 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/05b57f72-3ec2-41f4-ba15-4bb73c15a1af-service-ca-bundle\") pod \"router-default-5444994796-2g8pj\" (UID: \"05b57f72-3ec2-41f4-ba15-4bb73c15a1af\") " pod="openshift-ingress/router-default-5444994796-2g8pj" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.587125 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.609127 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.628000 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 
14:44:05.633232 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-46rrm"] Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.647671 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Nov 23 14:44:05 crc kubenswrapper[5050]: W1123 14:44:05.653022 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfaf517b0_a6d3_44ed_bdf9_b7430d956916.slice/crio-2741ff62ad71505f473b13e63b950824802968cdb2a36b9b99ac8f0ff3ada254 WatchSource:0}: Error finding container 2741ff62ad71505f473b13e63b950824802968cdb2a36b9b99ac8f0ff3ada254: Status 404 returned error can't find the container with id 2741ff62ad71505f473b13e63b950824802968cdb2a36b9b99ac8f0ff3ada254 Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.668267 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.689326 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.707920 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.728072 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.747522 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.767819 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.787848 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.807563 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.812771 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a9150bbc-0207-48f1-a027-d33d285b085b-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-ngz59\" (UID: \"a9150bbc-0207-48f1-a027-d33d285b085b\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-ngz59" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.828360 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.848714 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.859337 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/a9150bbc-0207-48f1-a027-d33d285b085b-config\") pod \"kube-apiserver-operator-766d6c64bb-ngz59\" (UID: \"a9150bbc-0207-48f1-a027-d33d285b085b\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-ngz59" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.868323 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.886245 5050 request.go:700] Waited for 1.002919998s due to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-operator-lifecycle-manager/secrets?fieldSelector=metadata.name%3Dpprof-cert&limit=500&resourceVersion=0 Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.889013 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.901919 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ab27cc8e-1d04-43dd-bf34-0cfdb48daba7-secret-volume\") pod \"collect-profiles-29398470-mm7rl\" (UID: \"ab27cc8e-1d04-43dd-bf34-0cfdb48daba7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29398470-mm7rl" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.908160 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.927959 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.947760 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.968199 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.978879 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/4c1df8cb-7a6f-46da-9d4e-a1b88f85997e-images\") pod \"machine-config-operator-74547568cd-jzg9r\" (UID: \"4c1df8cb-7a6f-46da-9d4e-a1b88f85997e\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-jzg9r" Nov 23 14:44:05 crc kubenswrapper[5050]: I1123 14:44:05.988797 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Nov 23 14:44:06 crc kubenswrapper[5050]: I1123 14:44:06.008332 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Nov 23 14:44:06 crc kubenswrapper[5050]: I1123 14:44:06.023069 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/4c1df8cb-7a6f-46da-9d4e-a1b88f85997e-proxy-tls\") pod \"machine-config-operator-74547568cd-jzg9r\" (UID: \"4c1df8cb-7a6f-46da-9d4e-a1b88f85997e\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-jzg9r" Nov 23 14:44:06 crc kubenswrapper[5050]: I1123 14:44:06.027483 5050 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Nov 23 14:44:06 crc kubenswrapper[5050]: I1123 14:44:06.048684 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Nov 23 14:44:06 crc kubenswrapper[5050]: I1123 14:44:06.069106 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Nov 23 14:44:06 crc kubenswrapper[5050]: I1123 14:44:06.088979 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Nov 23 14:44:06 crc kubenswrapper[5050]: I1123 14:44:06.108001 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Nov 23 14:44:06 crc kubenswrapper[5050]: I1123 14:44:06.129045 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Nov 23 14:44:06 crc kubenswrapper[5050]: I1123 14:44:06.148096 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Nov 23 14:44:06 crc kubenswrapper[5050]: I1123 14:44:06.169223 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Nov 23 14:44:06 crc kubenswrapper[5050]: E1123 14:44:06.177273 5050 secret.go:188] Couldn't get secret openshift-service-ca-operator/serving-cert: failed to sync secret cache: timed out waiting for the condition Nov 23 14:44:06 crc kubenswrapper[5050]: E1123 14:44:06.177307 5050 configmap.go:193] Couldn't get configMap openshift-service-ca-operator/service-ca-operator-config: failed to sync configmap cache: timed out waiting for the condition Nov 23 14:44:06 crc kubenswrapper[5050]: E1123 14:44:06.177312 5050 secret.go:188] Couldn't get secret openshift-dns/dns-default-metrics-tls: failed to sync secret cache: timed out waiting for the condition Nov 23 14:44:06 crc kubenswrapper[5050]: E1123 14:44:06.177412 5050 configmap.go:193] Couldn't get configMap openshift-operator-lifecycle-manager/collect-profiles-config: failed to sync configmap cache: timed out waiting for the condition Nov 23 14:44:06 crc kubenswrapper[5050]: E1123 14:44:06.177417 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/eb9622cf-2c2f-4ed1-8fed-da0fc67be49a-serving-cert podName:eb9622cf-2c2f-4ed1-8fed-da0fc67be49a nodeName:}" failed. No retries permitted until 2025-11-23 14:44:06.67737774 +0000 UTC m=+141.844374265 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/eb9622cf-2c2f-4ed1-8fed-da0fc67be49a-serving-cert") pod "service-ca-operator-777779d784-9rdbb" (UID: "eb9622cf-2c2f-4ed1-8fed-da0fc67be49a") : failed to sync secret cache: timed out waiting for the condition Nov 23 14:44:06 crc kubenswrapper[5050]: E1123 14:44:06.177607 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/eb9622cf-2c2f-4ed1-8fed-da0fc67be49a-config podName:eb9622cf-2c2f-4ed1-8fed-da0fc67be49a nodeName:}" failed. No retries permitted until 2025-11-23 14:44:06.677573255 +0000 UTC m=+141.844569770 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/eb9622cf-2c2f-4ed1-8fed-da0fc67be49a-config") pod "service-ca-operator-777779d784-9rdbb" (UID: "eb9622cf-2c2f-4ed1-8fed-da0fc67be49a") : failed to sync configmap cache: timed out waiting for the condition Nov 23 14:44:06 crc kubenswrapper[5050]: E1123 14:44:06.177643 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/1af9e06d-2732-4be4-bff7-eee682c1d69d-metrics-tls podName:1af9e06d-2732-4be4-bff7-eee682c1d69d nodeName:}" failed. No retries permitted until 2025-11-23 14:44:06.677628177 +0000 UTC m=+141.844624692 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-tls" (UniqueName: "kubernetes.io/secret/1af9e06d-2732-4be4-bff7-eee682c1d69d-metrics-tls") pod "dns-default-wdqwt" (UID: "1af9e06d-2732-4be4-bff7-eee682c1d69d") : failed to sync secret cache: timed out waiting for the condition Nov 23 14:44:06 crc kubenswrapper[5050]: E1123 14:44:06.177683 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/ab27cc8e-1d04-43dd-bf34-0cfdb48daba7-config-volume podName:ab27cc8e-1d04-43dd-bf34-0cfdb48daba7 nodeName:}" failed. No retries permitted until 2025-11-23 14:44:06.677668418 +0000 UTC m=+141.844664933 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config-volume" (UniqueName: "kubernetes.io/configmap/ab27cc8e-1d04-43dd-bf34-0cfdb48daba7-config-volume") pod "collect-profiles-29398470-mm7rl" (UID: "ab27cc8e-1d04-43dd-bf34-0cfdb48daba7") : failed to sync configmap cache: timed out waiting for the condition Nov 23 14:44:06 crc kubenswrapper[5050]: E1123 14:44:06.177713 5050 configmap.go:193] Couldn't get configMap openshift-dns/dns-default: failed to sync configmap cache: timed out waiting for the condition Nov 23 14:44:06 crc kubenswrapper[5050]: E1123 14:44:06.177790 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/1af9e06d-2732-4be4-bff7-eee682c1d69d-config-volume podName:1af9e06d-2732-4be4-bff7-eee682c1d69d nodeName:}" failed. No retries permitted until 2025-11-23 14:44:06.677775181 +0000 UTC m=+141.844771706 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "config-volume" (UniqueName: "kubernetes.io/configmap/1af9e06d-2732-4be4-bff7-eee682c1d69d-config-volume") pod "dns-default-wdqwt" (UID: "1af9e06d-2732-4be4-bff7-eee682c1d69d") : failed to sync configmap cache: timed out waiting for the condition Nov 23 14:44:06 crc kubenswrapper[5050]: I1123 14:44:06.187824 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Nov 23 14:44:06 crc kubenswrapper[5050]: I1123 14:44:06.207843 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Nov 23 14:44:06 crc kubenswrapper[5050]: I1123 14:44:06.229484 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Nov 23 14:44:06 crc kubenswrapper[5050]: I1123 14:44:06.249617 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Nov 23 14:44:06 crc kubenswrapper[5050]: I1123 14:44:06.268748 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Nov 23 14:44:06 crc kubenswrapper[5050]: I1123 14:44:06.288310 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Nov 23 14:44:06 crc kubenswrapper[5050]: I1123 14:44:06.307983 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Nov 23 14:44:06 crc kubenswrapper[5050]: I1123 14:44:06.329554 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Nov 23 14:44:06 crc kubenswrapper[5050]: I1123 14:44:06.348866 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Nov 23 14:44:06 crc kubenswrapper[5050]: I1123 14:44:06.368681 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Nov 23 14:44:06 crc kubenswrapper[5050]: I1123 14:44:06.400164 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-hlskc" event={"ID":"1cd7d6f0-caf1-4640-87b1-95d6faf69aa4","Type":"ContainerStarted","Data":"e807320070345fbfa512595f78c483f60348c37c26a932e54e488cb535eb9bdf"} Nov 23 14:44:06 crc kubenswrapper[5050]: I1123 14:44:06.400238 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-hlskc" event={"ID":"1cd7d6f0-caf1-4640-87b1-95d6faf69aa4","Type":"ContainerStarted","Data":"41797a23ab95c40b004701aaec087d2fec4b9a91707721e7b50cf206b5cab787"} Nov 23 14:44:06 crc kubenswrapper[5050]: I1123 14:44:06.402658 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-46rrm" event={"ID":"faf517b0-a6d3-44ed-bdf9-b7430d956916","Type":"ContainerStarted","Data":"b6e92653628e24d85d7fd1e64388f46c7a4e75b164465f7cc0377ae55c10d0cc"} Nov 23 14:44:06 crc kubenswrapper[5050]: I1123 14:44:06.402739 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-46rrm" event={"ID":"faf517b0-a6d3-44ed-bdf9-b7430d956916","Type":"ContainerStarted","Data":"2741ff62ad71505f473b13e63b950824802968cdb2a36b9b99ac8f0ff3ada254"} Nov 23 14:44:06 crc kubenswrapper[5050]: 
I1123 14:44:06.403999 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Nov 23 14:44:06 crc kubenswrapper[5050]: I1123 14:44:06.407942 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 23 14:44:06 crc kubenswrapper[5050]: I1123 14:44:06.427667 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 23 14:44:06 crc kubenswrapper[5050]: I1123 14:44:06.447934 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Nov 23 14:44:06 crc kubenswrapper[5050]: I1123 14:44:06.468924 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Nov 23 14:44:06 crc kubenswrapper[5050]: I1123 14:44:06.488063 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Nov 23 14:44:06 crc kubenswrapper[5050]: I1123 14:44:06.508742 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Nov 23 14:44:06 crc kubenswrapper[5050]: I1123 14:44:06.528038 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Nov 23 14:44:06 crc kubenswrapper[5050]: I1123 14:44:06.549220 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Nov 23 14:44:06 crc kubenswrapper[5050]: I1123 14:44:06.568606 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Nov 23 14:44:06 crc kubenswrapper[5050]: I1123 14:44:06.588961 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Nov 23 14:44:06 crc kubenswrapper[5050]: I1123 14:44:06.627427 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n2hll\" (UniqueName: \"kubernetes.io/projected/72f33e56-9fe4-420b-aa31-8d49ed5c7584-kube-api-access-n2hll\") pod \"controller-manager-879f6c89f-2bsvx\" (UID: \"72f33e56-9fe4-420b-aa31-8d49ed5c7584\") " pod="openshift-controller-manager/controller-manager-879f6c89f-2bsvx" Nov 23 14:44:06 crc kubenswrapper[5050]: I1123 14:44:06.657967 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7vr67\" (UniqueName: \"kubernetes.io/projected/16e600fc-0484-4c1c-8800-bb3958374bad-kube-api-access-7vr67\") pod \"downloads-7954f5f757-gvb4q\" (UID: \"16e600fc-0484-4c1c-8800-bb3958374bad\") " pod="openshift-console/downloads-7954f5f757-gvb4q" Nov 23 14:44:06 crc kubenswrapper[5050]: I1123 14:44:06.667476 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2hw79\" (UniqueName: \"kubernetes.io/projected/74ee62a2-d057-4ddf-8e5f-619cd90645d0-kube-api-access-2hw79\") pod \"apiserver-7bbb656c7d-tt6kr\" (UID: \"74ee62a2-d057-4ddf-8e5f-619cd90645d0\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-tt6kr" Nov 23 14:44:06 crc kubenswrapper[5050]: I1123 14:44:06.693296 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-72r27\" (UniqueName: \"kubernetes.io/projected/0d7bc9ed-da92-4305-bda8-78966e7451db-kube-api-access-72r27\") pod \"dns-operator-744455d44c-bglcd\" (UID: 
\"0d7bc9ed-da92-4305-bda8-78966e7451db\") " pod="openshift-dns-operator/dns-operator-744455d44c-bglcd" Nov 23 14:44:06 crc kubenswrapper[5050]: I1123 14:44:06.693479 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-bglcd" Nov 23 14:44:06 crc kubenswrapper[5050]: I1123 14:44:06.703086 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ab27cc8e-1d04-43dd-bf34-0cfdb48daba7-config-volume\") pod \"collect-profiles-29398470-mm7rl\" (UID: \"ab27cc8e-1d04-43dd-bf34-0cfdb48daba7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29398470-mm7rl" Nov 23 14:44:06 crc kubenswrapper[5050]: I1123 14:44:06.703269 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1af9e06d-2732-4be4-bff7-eee682c1d69d-config-volume\") pod \"dns-default-wdqwt\" (UID: \"1af9e06d-2732-4be4-bff7-eee682c1d69d\") " pod="openshift-dns/dns-default-wdqwt" Nov 23 14:44:06 crc kubenswrapper[5050]: I1123 14:44:06.703486 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/1af9e06d-2732-4be4-bff7-eee682c1d69d-metrics-tls\") pod \"dns-default-wdqwt\" (UID: \"1af9e06d-2732-4be4-bff7-eee682c1d69d\") " pod="openshift-dns/dns-default-wdqwt" Nov 23 14:44:06 crc kubenswrapper[5050]: I1123 14:44:06.703552 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/eb9622cf-2c2f-4ed1-8fed-da0fc67be49a-serving-cert\") pod \"service-ca-operator-777779d784-9rdbb\" (UID: \"eb9622cf-2c2f-4ed1-8fed-da0fc67be49a\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-9rdbb" Nov 23 14:44:06 crc kubenswrapper[5050]: I1123 14:44:06.703602 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/eb9622cf-2c2f-4ed1-8fed-da0fc67be49a-config\") pod \"service-ca-operator-777779d784-9rdbb\" (UID: \"eb9622cf-2c2f-4ed1-8fed-da0fc67be49a\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-9rdbb" Nov 23 14:44:06 crc kubenswrapper[5050]: I1123 14:44:06.704096 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ab27cc8e-1d04-43dd-bf34-0cfdb48daba7-config-volume\") pod \"collect-profiles-29398470-mm7rl\" (UID: \"ab27cc8e-1d04-43dd-bf34-0cfdb48daba7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29398470-mm7rl" Nov 23 14:44:06 crc kubenswrapper[5050]: I1123 14:44:06.704265 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1af9e06d-2732-4be4-bff7-eee682c1d69d-config-volume\") pod \"dns-default-wdqwt\" (UID: \"1af9e06d-2732-4be4-bff7-eee682c1d69d\") " pod="openshift-dns/dns-default-wdqwt" Nov 23 14:44:06 crc kubenswrapper[5050]: I1123 14:44:06.704927 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/eb9622cf-2c2f-4ed1-8fed-da0fc67be49a-config\") pod \"service-ca-operator-777779d784-9rdbb\" (UID: \"eb9622cf-2c2f-4ed1-8fed-da0fc67be49a\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-9rdbb" Nov 23 14:44:06 crc kubenswrapper[5050]: I1123 14:44:06.707968 5050 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/1af9e06d-2732-4be4-bff7-eee682c1d69d-metrics-tls\") pod \"dns-default-wdqwt\" (UID: \"1af9e06d-2732-4be4-bff7-eee682c1d69d\") " pod="openshift-dns/dns-default-wdqwt" Nov 23 14:44:06 crc kubenswrapper[5050]: I1123 14:44:06.709005 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/eb9622cf-2c2f-4ed1-8fed-da0fc67be49a-serving-cert\") pod \"service-ca-operator-777779d784-9rdbb\" (UID: \"eb9622cf-2c2f-4ed1-8fed-da0fc67be49a\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-9rdbb" Nov 23 14:44:06 crc kubenswrapper[5050]: I1123 14:44:06.713551 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c59xw\" (UniqueName: \"kubernetes.io/projected/e118c084-6256-42a4-ad24-f06222a1b3b4-kube-api-access-c59xw\") pod \"authentication-operator-69f744f599-j5zvd\" (UID: \"e118c084-6256-42a4-ad24-f06222a1b3b4\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-j5zvd" Nov 23 14:44:06 crc kubenswrapper[5050]: I1123 14:44:06.743102 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-tt6kr" Nov 23 14:44:06 crc kubenswrapper[5050]: I1123 14:44:06.749398 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fpvmd\" (UniqueName: \"kubernetes.io/projected/2a07c9a0-b842-4f33-83a4-d6a07eeed391-kube-api-access-fpvmd\") pod \"etcd-operator-b45778765-cg8k9\" (UID: \"2a07c9a0-b842-4f33-83a4-d6a07eeed391\") " pod="openshift-etcd-operator/etcd-operator-b45778765-cg8k9" Nov 23 14:44:06 crc kubenswrapper[5050]: I1123 14:44:06.749399 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-crxcs\" (UniqueName: \"kubernetes.io/projected/5c3f6b64-15bf-4147-be4e-414a2569cb58-kube-api-access-crxcs\") pod \"oauth-openshift-558db77b4-cvknj\" (UID: \"5c3f6b64-15bf-4147-be4e-414a2569cb58\") " pod="openshift-authentication/oauth-openshift-558db77b4-cvknj" Nov 23 14:44:06 crc kubenswrapper[5050]: I1123 14:44:06.751764 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-j5zvd" Nov 23 14:44:06 crc kubenswrapper[5050]: I1123 14:44:06.765457 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2wpc6\" (UniqueName: \"kubernetes.io/projected/0a4f6fb7-314a-4896-bcba-c11f622a5e3f-kube-api-access-2wpc6\") pod \"openshift-config-operator-7777fb866f-ph6h8\" (UID: \"0a4f6fb7-314a-4896-bcba-c11f622a5e3f\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-ph6h8" Nov 23 14:44:06 crc kubenswrapper[5050]: I1123 14:44:06.809034 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Nov 23 14:44:06 crc kubenswrapper[5050]: I1123 14:44:06.840385 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Nov 23 14:44:06 crc kubenswrapper[5050]: I1123 14:44:06.849979 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Nov 23 14:44:06 crc kubenswrapper[5050]: I1123 14:44:06.856569 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/downloads-7954f5f757-gvb4q" Nov 23 14:44:06 crc kubenswrapper[5050]: I1123 14:44:06.871950 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Nov 23 14:44:06 crc kubenswrapper[5050]: I1123 14:44:06.883426 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wdw7p\" (UniqueName: \"kubernetes.io/projected/a934d47f-f19d-4d29-aed8-20141e5bcf2b-kube-api-access-wdw7p\") pod \"machine-api-operator-5694c8668f-zfnk9\" (UID: \"a934d47f-f19d-4d29-aed8-20141e5bcf2b\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-zfnk9" Nov 23 14:44:06 crc kubenswrapper[5050]: I1123 14:44:06.887936 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Nov 23 14:44:06 crc kubenswrapper[5050]: I1123 14:44:06.911706 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-2bsvx" Nov 23 14:44:06 crc kubenswrapper[5050]: I1123 14:44:06.912373 5050 request.go:700] Waited for 1.888707787s due to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-machine-config-operator/secrets?fieldSelector=metadata.name%3Dmachine-config-server-dockercfg-qx5rd&limit=500&resourceVersion=0 Nov 23 14:44:06 crc kubenswrapper[5050]: I1123 14:44:06.915604 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Nov 23 14:44:06 crc kubenswrapper[5050]: I1123 14:44:06.928964 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Nov 23 14:44:06 crc kubenswrapper[5050]: I1123 14:44:06.936686 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-cg8k9" Nov 23 14:44:06 crc kubenswrapper[5050]: I1123 14:44:06.950859 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Nov 23 14:44:06 crc kubenswrapper[5050]: I1123 14:44:06.970208 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-bglcd"] Nov 23 14:44:06 crc kubenswrapper[5050]: I1123 14:44:06.970601 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Nov 23 14:44:06 crc kubenswrapper[5050]: I1123 14:44:06.971139 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-zfnk9" Nov 23 14:44:06 crc kubenswrapper[5050]: I1123 14:44:06.973272 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-cvknj" Nov 23 14:44:06 crc kubenswrapper[5050]: I1123 14:44:06.987561 5050 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.035475 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-ph6h8" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.031852 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-55pb2\" (UniqueName: \"kubernetes.io/projected/6e9d92c3-4f51-481c-adcb-e48f3e671025-kube-api-access-55pb2\") pod \"ingress-operator-5b745b69d9-xwq4h\" (UID: \"6e9d92c3-4f51-481c-adcb-e48f3e671025\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-xwq4h" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.045853 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-tt6kr"] Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.048602 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bpl2x\" (UniqueName: \"kubernetes.io/projected/fe0bb0ba-8376-4437-9cec-fe95bbd77a9a-kube-api-access-bpl2x\") pod \"console-operator-58897d9998-h2nqh\" (UID: \"fe0bb0ba-8376-4437-9cec-fe95bbd77a9a\") " pod="openshift-console-operator/console-operator-58897d9998-h2nqh" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.068250 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/b1e1a221-b849-45e1-ad2c-d0b3b3eeccb5-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-ngl6b\" (UID: \"b1e1a221-b849-45e1-ad2c-d0b3b3eeccb5\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-ngl6b" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.077857 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-h2nqh" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.091122 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f7pm4\" (UniqueName: \"kubernetes.io/projected/629f02dd-753b-46d2-b808-812468f6c9c5-kube-api-access-f7pm4\") pod \"console-f9d7485db-b646l\" (UID: \"629f02dd-753b-46d2-b808-812468f6c9c5\") " pod="openshift-console/console-f9d7485db-b646l" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.102388 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-j5zvd"] Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.113063 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fbsv9\" (UniqueName: \"kubernetes.io/projected/f568ca0b-0692-482b-a80b-7c6c4f8f80ba-kube-api-access-fbsv9\") pod \"migrator-59844c95c7-n9kmn\" (UID: \"f568ca0b-0692-482b-a80b-7c6c4f8f80ba\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-n9kmn" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.124294 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-b646l" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.129321 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/6e9d92c3-4f51-481c-adcb-e48f3e671025-bound-sa-token\") pod \"ingress-operator-5b745b69d9-xwq4h\" (UID: \"6e9d92c3-4f51-481c-adcb-e48f3e671025\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-xwq4h" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.141858 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-ngl6b" Nov 23 14:44:07 crc kubenswrapper[5050]: W1123 14:44:07.143491 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod74ee62a2_d057_4ddf_8e5f_619cd90645d0.slice/crio-fcd89fd446bf1f730c6be8dba47a0cf08629f8152fb7a38d5e2375fd486af765 WatchSource:0}: Error finding container fcd89fd446bf1f730c6be8dba47a0cf08629f8152fb7a38d5e2375fd486af765: Status 404 returned error can't find the container with id fcd89fd446bf1f730c6be8dba47a0cf08629f8152fb7a38d5e2375fd486af765 Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.145995 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-57vw6\" (UniqueName: \"kubernetes.io/projected/ae2bdaf0-ab3a-4822-b376-945d76181e8f-kube-api-access-57vw6\") pod \"kube-storage-version-migrator-operator-b67b599dd-djc5v\" (UID: \"ae2bdaf0-ab3a-4822-b376-945d76181e8f\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-djc5v" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.154272 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-djc5v" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.163919 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-xwq4h" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.164943 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z9m2p\" (UniqueName: \"kubernetes.io/projected/650d8b46-fa24-4b73-8f95-a0fbfe331fa6-kube-api-access-z9m2p\") pod \"route-controller-manager-6576b87f9c-b9jzj\" (UID: \"650d8b46-fa24-4b73-8f95-a0fbfe331fa6\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-b9jzj" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.171010 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-n9kmn" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.174394 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-gvb4q"] Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.186220 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pdknq\" (UniqueName: \"kubernetes.io/projected/6b5f71c7-bb75-435e-ac44-3328aa8fe73d-kube-api-access-pdknq\") pod \"cluster-image-registry-operator-dc59b4c8b-dzz98\" (UID: \"6b5f71c7-bb75-435e-ac44-3328aa8fe73d\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-dzz98" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.202200 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/6b5f71c7-bb75-435e-ac44-3328aa8fe73d-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-dzz98\" (UID: \"6b5f71c7-bb75-435e-ac44-3328aa8fe73d\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-dzz98" Nov 23 14:44:07 crc kubenswrapper[5050]: W1123 14:44:07.222234 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode118c084_6256_42a4_ad24_f06222a1b3b4.slice/crio-68b214b46dd0f3065443ade4466c0c81c2ac6cfcd9d5920adcc158b34cafaf5e WatchSource:0}: Error finding container 68b214b46dd0f3065443ade4466c0c81c2ac6cfcd9d5920adcc158b34cafaf5e: Status 404 returned error can't find the container with id 68b214b46dd0f3065443ade4466c0c81c2ac6cfcd9d5920adcc158b34cafaf5e Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.237577 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6hslq\" (UniqueName: \"kubernetes.io/projected/10e4194f-eb7d-4228-8cf8-4925e4a0ec4f-kube-api-access-6hslq\") pod \"openshift-controller-manager-operator-756b6f6bc6-96dfp\" (UID: \"10e4194f-eb7d-4228-8cf8-4925e4a0ec4f\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-96dfp" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.252280 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jk6km\" (UniqueName: \"kubernetes.io/projected/05b57f72-3ec2-41f4-ba15-4bb73c15a1af-kube-api-access-jk6km\") pod \"router-default-5444994796-2g8pj\" (UID: \"05b57f72-3ec2-41f4-ba15-4bb73c15a1af\") " pod="openshift-ingress/router-default-5444994796-2g8pj" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.263138 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-cg8k9"] Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.271322 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-224cq\" (UniqueName: \"kubernetes.io/projected/005c682b-725d-471a-98ca-08f2760e6603-kube-api-access-224cq\") pod \"apiserver-76f77b778f-svd5r\" (UID: \"005c682b-725d-471a-98ca-08f2760e6603\") " pod="openshift-apiserver/apiserver-76f77b778f-svd5r" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.306518 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dtfpc\" (UniqueName: \"kubernetes.io/projected/1af9e06d-2732-4be4-bff7-eee682c1d69d-kube-api-access-dtfpc\") pod \"dns-default-wdqwt\" (UID: \"1af9e06d-2732-4be4-bff7-eee682c1d69d\") " pod="openshift-dns/dns-default-wdqwt" Nov 23 14:44:07 crc 
kubenswrapper[5050]: I1123 14:44:07.311942 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-cvknj"] Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.312020 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-zfnk9"] Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.319830 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-wdqwt" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.325188 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/a9150bbc-0207-48f1-a027-d33d285b085b-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-ngz59\" (UID: \"a9150bbc-0207-48f1-a027-d33d285b085b\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-ngz59" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.346104 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nqtth\" (UniqueName: \"kubernetes.io/projected/eb9622cf-2c2f-4ed1-8fed-da0fc67be49a-kube-api-access-nqtth\") pod \"service-ca-operator-777779d784-9rdbb\" (UID: \"eb9622cf-2c2f-4ed1-8fed-da0fc67be49a\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-9rdbb" Nov 23 14:44:07 crc kubenswrapper[5050]: W1123 14:44:07.352245 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2a07c9a0_b842_4f33_83a4_d6a07eeed391.slice/crio-b2c642aec5439edae796b64cbb09294c950be47eea97636a40002f0371f360ba WatchSource:0}: Error finding container b2c642aec5439edae796b64cbb09294c950be47eea97636a40002f0371f360ba: Status 404 returned error can't find the container with id b2c642aec5439edae796b64cbb09294c950be47eea97636a40002f0371f360ba Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.366409 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-96dfp" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.366806 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-b9jzj" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.368935 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nhb5x\" (UniqueName: \"kubernetes.io/projected/4c1df8cb-7a6f-46da-9d4e-a1b88f85997e-kube-api-access-nhb5x\") pod \"machine-config-operator-74547568cd-jzg9r\" (UID: \"4c1df8cb-7a6f-46da-9d4e-a1b88f85997e\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-jzg9r" Nov 23 14:44:07 crc kubenswrapper[5050]: W1123 14:44:07.395747 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5c3f6b64_15bf_4147_be4e_414a2569cb58.slice/crio-6370bc4ed9c6cadc9e5d9fea45cb47d44dd25fc05bd084c59ee0b3695e1bc514 WatchSource:0}: Error finding container 6370bc4ed9c6cadc9e5d9fea45cb47d44dd25fc05bd084c59ee0b3695e1bc514: Status 404 returned error can't find the container with id 6370bc4ed9c6cadc9e5d9fea45cb47d44dd25fc05bd084c59ee0b3695e1bc514 Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.397789 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-h2nqh"] Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.399789 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l9f57\" (UniqueName: \"kubernetes.io/projected/ab27cc8e-1d04-43dd-bf34-0cfdb48daba7-kube-api-access-l9f57\") pod \"collect-profiles-29398470-mm7rl\" (UID: \"ab27cc8e-1d04-43dd-bf34-0cfdb48daba7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29398470-mm7rl" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.421798 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/766b2181-d531-40cf-8d30-5ab0190d9da3-signing-cabundle\") pod \"service-ca-9c57cc56f-2kj76\" (UID: \"766b2181-d531-40cf-8d30-5ab0190d9da3\") " pod="openshift-service-ca/service-ca-9c57cc56f-2kj76" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.421880 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/e794c1d4-b3b8-48cc-97ad-75861e8021e6-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-n5v2l\" (UID: \"e794c1d4-b3b8-48cc-97ad-75861e8021e6\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-n5v2l" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.421914 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/8e3a5a3b-f908-4660-adf1-fe3bf70175f9-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-qjw5q\" (UID: \"8e3a5a3b-f908-4660-adf1-fe3bf70175f9\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-qjw5q" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.421953 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/31ae7f30-0f40-4c17-ad54-33aeefcbeeb7-tmpfs\") pod \"packageserver-d55dfcdfc-5wl59\" (UID: \"31ae7f30-0f40-4c17-ad54-33aeefcbeeb7\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-5wl59" Nov 23 14:44:07 crc kubenswrapper[5050]: 
I1123 14:44:07.422004 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/9c29734c-66bd-4ac3-b26a-b5349d786018-registry-tls\") pod \"image-registry-697d97f7c8-rw2cv\" (UID: \"9c29734c-66bd-4ac3-b26a-b5349d786018\") " pod="openshift-image-registry/image-registry-697d97f7c8-rw2cv" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.422061 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d97c8\" (UniqueName: \"kubernetes.io/projected/31ae7f30-0f40-4c17-ad54-33aeefcbeeb7-kube-api-access-d97c8\") pod \"packageserver-d55dfcdfc-5wl59\" (UID: \"31ae7f30-0f40-4c17-ad54-33aeefcbeeb7\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-5wl59" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.422092 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/e794c1d4-b3b8-48cc-97ad-75861e8021e6-proxy-tls\") pod \"machine-config-controller-84d6567774-n5v2l\" (UID: \"e794c1d4-b3b8-48cc-97ad-75861e8021e6\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-n5v2l" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.422129 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cmdkr\" (UniqueName: \"kubernetes.io/projected/10bf2f37-5f87-479d-a9b1-9e1bdf5b150a-kube-api-access-cmdkr\") pod \"catalog-operator-68c6474976-rgjfm\" (UID: \"10bf2f37-5f87-479d-a9b1-9e1bdf5b150a\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-rgjfm" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.422150 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/9c29734c-66bd-4ac3-b26a-b5349d786018-installation-pull-secrets\") pod \"image-registry-697d97f7c8-rw2cv\" (UID: \"9c29734c-66bd-4ac3-b26a-b5349d786018\") " pod="openshift-image-registry/image-registry-697d97f7c8-rw2cv" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.422167 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/766b2181-d531-40cf-8d30-5ab0190d9da3-signing-key\") pod \"service-ca-9c57cc56f-2kj76\" (UID: \"766b2181-d531-40cf-8d30-5ab0190d9da3\") " pod="openshift-service-ca/service-ca-9c57cc56f-2kj76" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.422186 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/9c29734c-66bd-4ac3-b26a-b5349d786018-bound-sa-token\") pod \"image-registry-697d97f7c8-rw2cv\" (UID: \"9c29734c-66bd-4ac3-b26a-b5349d786018\") " pod="openshift-image-registry/image-registry-697d97f7c8-rw2cv" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.422207 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6ck44\" (UniqueName: \"kubernetes.io/projected/400aa1f3-7e05-407e-90dc-ad2e99f54e61-kube-api-access-6ck44\") pod \"control-plane-machine-set-operator-78cbb6b69f-tcs6j\" (UID: \"400aa1f3-7e05-407e-90dc-ad2e99f54e61\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-tcs6j" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 
14:44:07.422285 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/668e6166-ca3b-46c8-bdb4-b3b9e46f2636-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-wtbfr\" (UID: \"668e6166-ca3b-46c8-bdb4-b3b9e46f2636\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-wtbfr" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.422336 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rw2cv\" (UID: \"9c29734c-66bd-4ac3-b26a-b5349d786018\") " pod="openshift-image-registry/image-registry-697d97f7c8-rw2cv" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.422376 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/6c4283fe-0fc3-4c3a-bc02-b552e0efa62f-profile-collector-cert\") pod \"olm-operator-6b444d44fb-p4p66\" (UID: \"6c4283fe-0fc3-4c3a-bc02-b552e0efa62f\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-p4p66" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.422396 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dzqtd\" (UniqueName: \"kubernetes.io/projected/9c29734c-66bd-4ac3-b26a-b5349d786018-kube-api-access-dzqtd\") pod \"image-registry-697d97f7c8-rw2cv\" (UID: \"9c29734c-66bd-4ac3-b26a-b5349d786018\") " pod="openshift-image-registry/image-registry-697d97f7c8-rw2cv" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.422417 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/6f5dcf1c-e463-4bd7-ad34-f8e2dbcd5aac-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-rzj2p\" (UID: \"6f5dcf1c-e463-4bd7-ad34-f8e2dbcd5aac\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-rzj2p" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.422470 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/9c29734c-66bd-4ac3-b26a-b5349d786018-registry-certificates\") pod \"image-registry-697d97f7c8-rw2cv\" (UID: \"9c29734c-66bd-4ac3-b26a-b5349d786018\") " pod="openshift-image-registry/image-registry-697d97f7c8-rw2cv" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.422496 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/10bf2f37-5f87-479d-a9b1-9e1bdf5b150a-srv-cert\") pod \"catalog-operator-68c6474976-rgjfm\" (UID: \"10bf2f37-5f87-479d-a9b1-9e1bdf5b150a\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-rgjfm" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.422556 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/400aa1f3-7e05-407e-90dc-ad2e99f54e61-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-tcs6j\" (UID: \"400aa1f3-7e05-407e-90dc-ad2e99f54e61\") " 
pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-tcs6j" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.422591 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/9c29734c-66bd-4ac3-b26a-b5349d786018-ca-trust-extracted\") pod \"image-registry-697d97f7c8-rw2cv\" (UID: \"9c29734c-66bd-4ac3-b26a-b5349d786018\") " pod="openshift-image-registry/image-registry-697d97f7c8-rw2cv" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.422611 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/668e6166-ca3b-46c8-bdb4-b3b9e46f2636-config\") pod \"kube-controller-manager-operator-78b949d7b-wtbfr\" (UID: \"668e6166-ca3b-46c8-bdb4-b3b9e46f2636\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-wtbfr" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.422648 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9c29734c-66bd-4ac3-b26a-b5349d786018-trusted-ca\") pod \"image-registry-697d97f7c8-rw2cv\" (UID: \"9c29734c-66bd-4ac3-b26a-b5349d786018\") " pod="openshift-image-registry/image-registry-697d97f7c8-rw2cv" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.422664 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/31ae7f30-0f40-4c17-ad54-33aeefcbeeb7-webhook-cert\") pod \"packageserver-d55dfcdfc-5wl59\" (UID: \"31ae7f30-0f40-4c17-ad54-33aeefcbeeb7\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-5wl59" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.422700 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/6c4283fe-0fc3-4c3a-bc02-b552e0efa62f-srv-cert\") pod \"olm-operator-6b444d44fb-p4p66\" (UID: \"6c4283fe-0fc3-4c3a-bc02-b552e0efa62f\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-p4p66" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.422737 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ppd8g\" (UniqueName: \"kubernetes.io/projected/e794c1d4-b3b8-48cc-97ad-75861e8021e6-kube-api-access-ppd8g\") pod \"machine-config-controller-84d6567774-n5v2l\" (UID: \"e794c1d4-b3b8-48cc-97ad-75861e8021e6\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-n5v2l" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.422765 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xw44v\" (UniqueName: \"kubernetes.io/projected/fef4a22f-705c-4dda-8c74-ace62ff4ce68-kube-api-access-xw44v\") pod \"marketplace-operator-79b997595-wr6q4\" (UID: \"fef4a22f-705c-4dda-8c74-ace62ff4ce68\") " pod="openshift-marketplace/marketplace-operator-79b997595-wr6q4" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.422822 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/d4ac164e-eff3-47b2-8cfe-05acd0b7c1e8-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-fcc5l\" (UID: 
\"d4ac164e-eff3-47b2-8cfe-05acd0b7c1e8\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-fcc5l" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.422838 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/fef4a22f-705c-4dda-8c74-ace62ff4ce68-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-wr6q4\" (UID: \"fef4a22f-705c-4dda-8c74-ace62ff4ce68\") " pod="openshift-marketplace/marketplace-operator-79b997595-wr6q4" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.422854 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fqx9x\" (UniqueName: \"kubernetes.io/projected/6f5dcf1c-e463-4bd7-ad34-f8e2dbcd5aac-kube-api-access-fqx9x\") pod \"multus-admission-controller-857f4d67dd-rzj2p\" (UID: \"6f5dcf1c-e463-4bd7-ad34-f8e2dbcd5aac\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-rzj2p" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.422894 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/fef4a22f-705c-4dda-8c74-ace62ff4ce68-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-wr6q4\" (UID: \"fef4a22f-705c-4dda-8c74-ace62ff4ce68\") " pod="openshift-marketplace/marketplace-operator-79b997595-wr6q4" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.422912 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-69p7m\" (UniqueName: \"kubernetes.io/projected/8e3a5a3b-f908-4660-adf1-fe3bf70175f9-kube-api-access-69p7m\") pod \"package-server-manager-789f6589d5-qjw5q\" (UID: \"8e3a5a3b-f908-4660-adf1-fe3bf70175f9\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-qjw5q" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.422939 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-65d8w\" (UniqueName: \"kubernetes.io/projected/6c4283fe-0fc3-4c3a-bc02-b552e0efa62f-kube-api-access-65d8w\") pod \"olm-operator-6b444d44fb-p4p66\" (UID: \"6c4283fe-0fc3-4c3a-bc02-b552e0efa62f\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-p4p66" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.422956 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-plmlz\" (UniqueName: \"kubernetes.io/projected/766b2181-d531-40cf-8d30-5ab0190d9da3-kube-api-access-plmlz\") pod \"service-ca-9c57cc56f-2kj76\" (UID: \"766b2181-d531-40cf-8d30-5ab0190d9da3\") " pod="openshift-service-ca/service-ca-9c57cc56f-2kj76" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.422987 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/31ae7f30-0f40-4c17-ad54-33aeefcbeeb7-apiservice-cert\") pod \"packageserver-d55dfcdfc-5wl59\" (UID: \"31ae7f30-0f40-4c17-ad54-33aeefcbeeb7\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-5wl59" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.423005 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9qvz7\" (UniqueName: 
\"kubernetes.io/projected/d4ac164e-eff3-47b2-8cfe-05acd0b7c1e8-kube-api-access-9qvz7\") pod \"cluster-samples-operator-665b6dd947-fcc5l\" (UID: \"d4ac164e-eff3-47b2-8cfe-05acd0b7c1e8\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-fcc5l" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.423022 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/10bf2f37-5f87-479d-a9b1-9e1bdf5b150a-profile-collector-cert\") pod \"catalog-operator-68c6474976-rgjfm\" (UID: \"10bf2f37-5f87-479d-a9b1-9e1bdf5b150a\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-rgjfm" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.423060 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/668e6166-ca3b-46c8-bdb4-b3b9e46f2636-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-wtbfr\" (UID: \"668e6166-ca3b-46c8-bdb4-b3b9e46f2636\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-wtbfr" Nov 23 14:44:07 crc kubenswrapper[5050]: E1123 14:44:07.425107 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-23 14:44:07.925077267 +0000 UTC m=+143.092073752 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rw2cv" (UID: "9c29734c-66bd-4ac3-b26a-b5349d786018") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.429746 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-dzz98" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.446796 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-svd5r" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.460337 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-cg8k9" event={"ID":"2a07c9a0-b842-4f33-83a4-d6a07eeed391","Type":"ContainerStarted","Data":"b2c642aec5439edae796b64cbb09294c950be47eea97636a40002f0371f360ba"} Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.461187 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-cvknj" event={"ID":"5c3f6b64-15bf-4147-be4e-414a2569cb58","Type":"ContainerStarted","Data":"6370bc4ed9c6cadc9e5d9fea45cb47d44dd25fc05bd084c59ee0b3695e1bc514"} Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.462110 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-gvb4q" event={"ID":"16e600fc-0484-4c1c-8800-bb3958374bad","Type":"ContainerStarted","Data":"adf0e5c8901bec1a2957f44806e32c7b2110faf3fedc0dd15337ab09b4491f74"} Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.473480 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-2bsvx"] Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.473763 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-tt6kr" event={"ID":"74ee62a2-d057-4ddf-8e5f-619cd90645d0","Type":"ContainerStarted","Data":"fcd89fd446bf1f730c6be8dba47a0cf08629f8152fb7a38d5e2375fd486af765"} Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.476568 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-ph6h8"] Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.476679 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-zfnk9" event={"ID":"a934d47f-f19d-4d29-aed8-20141e5bcf2b","Type":"ContainerStarted","Data":"88d45b7c3472d14043c4cd2d5a24c4b9a89585d4d6ce8dd21e74ade1b06512f7"} Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.480023 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-j5zvd" event={"ID":"e118c084-6256-42a4-ad24-f06222a1b3b4","Type":"ContainerStarted","Data":"68b214b46dd0f3065443ade4466c0c81c2ac6cfcd9d5920adcc158b34cafaf5e"} Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.483247 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-bglcd" event={"ID":"0d7bc9ed-da92-4305-bda8-78966e7451db","Type":"ContainerStarted","Data":"394227e1394b7db83821ead16bf703dbb768130df0a61124e0cc51e3cbb04f97"} Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.486033 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress/router-default-5444994796-2g8pj" Nov 23 14:44:07 crc kubenswrapper[5050]: W1123 14:44:07.494506 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod72f33e56_9fe4_420b_aa31_8d49ed5c7584.slice/crio-431fa29464358052ab2819f58db34cc71c30bcc46442a378e0821673f50b5e4b WatchSource:0}: Error finding container 431fa29464358052ab2819f58db34cc71c30bcc46442a378e0821673f50b5e4b: Status 404 returned error can't find the container with id 431fa29464358052ab2819f58db34cc71c30bcc46442a378e0821673f50b5e4b Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.505798 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-b646l"] Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.523930 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 23 14:44:07 crc kubenswrapper[5050]: E1123 14:44:07.524295 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-23 14:44:08.024252233 +0000 UTC m=+143.191248718 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.524483 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/9c29734c-66bd-4ac3-b26a-b5349d786018-bound-sa-token\") pod \"image-registry-697d97f7c8-rw2cv\" (UID: \"9c29734c-66bd-4ac3-b26a-b5349d786018\") " pod="openshift-image-registry/image-registry-697d97f7c8-rw2cv" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.524543 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6ck44\" (UniqueName: \"kubernetes.io/projected/400aa1f3-7e05-407e-90dc-ad2e99f54e61-kube-api-access-6ck44\") pod \"control-plane-machine-set-operator-78cbb6b69f-tcs6j\" (UID: \"400aa1f3-7e05-407e-90dc-ad2e99f54e61\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-tcs6j" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.524579 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/2fc00c67-c3ab-4b63-b068-3a41c027f6f3-csi-data-dir\") pod \"csi-hostpathplugin-jz58r\" (UID: \"2fc00c67-c3ab-4b63-b068-3a41c027f6f3\") " pod="hostpath-provisioner/csi-hostpathplugin-jz58r" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.524623 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-npcrt\" (UniqueName: 
\"kubernetes.io/projected/13b0f3d4-03aa-421b-9cb6-7fae67f80626-kube-api-access-npcrt\") pod \"ingress-canary-fkkng\" (UID: \"13b0f3d4-03aa-421b-9cb6-7fae67f80626\") " pod="openshift-ingress-canary/ingress-canary-fkkng" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.524711 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rw2cv\" (UID: \"9c29734c-66bd-4ac3-b26a-b5349d786018\") " pod="openshift-image-registry/image-registry-697d97f7c8-rw2cv" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.524742 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/668e6166-ca3b-46c8-bdb4-b3b9e46f2636-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-wtbfr\" (UID: \"668e6166-ca3b-46c8-bdb4-b3b9e46f2636\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-wtbfr" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.524774 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/2fc00c67-c3ab-4b63-b068-3a41c027f6f3-mountpoint-dir\") pod \"csi-hostpathplugin-jz58r\" (UID: \"2fc00c67-c3ab-4b63-b068-3a41c027f6f3\") " pod="hostpath-provisioner/csi-hostpathplugin-jz58r" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.524815 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/6c4283fe-0fc3-4c3a-bc02-b552e0efa62f-profile-collector-cert\") pod \"olm-operator-6b444d44fb-p4p66\" (UID: \"6c4283fe-0fc3-4c3a-bc02-b552e0efa62f\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-p4p66" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.524858 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dzqtd\" (UniqueName: \"kubernetes.io/projected/9c29734c-66bd-4ac3-b26a-b5349d786018-kube-api-access-dzqtd\") pod \"image-registry-697d97f7c8-rw2cv\" (UID: \"9c29734c-66bd-4ac3-b26a-b5349d786018\") " pod="openshift-image-registry/image-registry-697d97f7c8-rw2cv" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.524879 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/2fc00c67-c3ab-4b63-b068-3a41c027f6f3-plugins-dir\") pod \"csi-hostpathplugin-jz58r\" (UID: \"2fc00c67-c3ab-4b63-b068-3a41c027f6f3\") " pod="hostpath-provisioner/csi-hostpathplugin-jz58r" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.524915 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/6f5dcf1c-e463-4bd7-ad34-f8e2dbcd5aac-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-rzj2p\" (UID: \"6f5dcf1c-e463-4bd7-ad34-f8e2dbcd5aac\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-rzj2p" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.525523 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/9c29734c-66bd-4ac3-b26a-b5349d786018-registry-certificates\") pod \"image-registry-697d97f7c8-rw2cv\" (UID: 
\"9c29734c-66bd-4ac3-b26a-b5349d786018\") " pod="openshift-image-registry/image-registry-697d97f7c8-rw2cv" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.525556 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/10bf2f37-5f87-479d-a9b1-9e1bdf5b150a-srv-cert\") pod \"catalog-operator-68c6474976-rgjfm\" (UID: \"10bf2f37-5f87-479d-a9b1-9e1bdf5b150a\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-rgjfm" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.525611 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/400aa1f3-7e05-407e-90dc-ad2e99f54e61-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-tcs6j\" (UID: \"400aa1f3-7e05-407e-90dc-ad2e99f54e61\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-tcs6j" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.525669 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d6p6q\" (UniqueName: \"kubernetes.io/projected/2fc00c67-c3ab-4b63-b068-3a41c027f6f3-kube-api-access-d6p6q\") pod \"csi-hostpathplugin-jz58r\" (UID: \"2fc00c67-c3ab-4b63-b068-3a41c027f6f3\") " pod="hostpath-provisioner/csi-hostpathplugin-jz58r" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.525707 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/9c29734c-66bd-4ac3-b26a-b5349d786018-ca-trust-extracted\") pod \"image-registry-697d97f7c8-rw2cv\" (UID: \"9c29734c-66bd-4ac3-b26a-b5349d786018\") " pod="openshift-image-registry/image-registry-697d97f7c8-rw2cv" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.525725 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/668e6166-ca3b-46c8-bdb4-b3b9e46f2636-config\") pod \"kube-controller-manager-operator-78b949d7b-wtbfr\" (UID: \"668e6166-ca3b-46c8-bdb4-b3b9e46f2636\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-wtbfr" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.525747 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8bzgv\" (UniqueName: \"kubernetes.io/projected/7defdad8-6ff2-452d-a144-e701d7b3baf6-kube-api-access-8bzgv\") pod \"machine-config-server-tq999\" (UID: \"7defdad8-6ff2-452d-a144-e701d7b3baf6\") " pod="openshift-machine-config-operator/machine-config-server-tq999" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.525806 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9c29734c-66bd-4ac3-b26a-b5349d786018-trusted-ca\") pod \"image-registry-697d97f7c8-rw2cv\" (UID: \"9c29734c-66bd-4ac3-b26a-b5349d786018\") " pod="openshift-image-registry/image-registry-697d97f7c8-rw2cv" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.525831 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/31ae7f30-0f40-4c17-ad54-33aeefcbeeb7-webhook-cert\") pod \"packageserver-d55dfcdfc-5wl59\" (UID: \"31ae7f30-0f40-4c17-ad54-33aeefcbeeb7\") " 
pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-5wl59" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.525855 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/7defdad8-6ff2-452d-a144-e701d7b3baf6-node-bootstrap-token\") pod \"machine-config-server-tq999\" (UID: \"7defdad8-6ff2-452d-a144-e701d7b3baf6\") " pod="openshift-machine-config-operator/machine-config-server-tq999" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.525878 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/6c4283fe-0fc3-4c3a-bc02-b552e0efa62f-srv-cert\") pod \"olm-operator-6b444d44fb-p4p66\" (UID: \"6c4283fe-0fc3-4c3a-bc02-b552e0efa62f\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-p4p66" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.525936 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ppd8g\" (UniqueName: \"kubernetes.io/projected/e794c1d4-b3b8-48cc-97ad-75861e8021e6-kube-api-access-ppd8g\") pod \"machine-config-controller-84d6567774-n5v2l\" (UID: \"e794c1d4-b3b8-48cc-97ad-75861e8021e6\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-n5v2l" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.525961 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xw44v\" (UniqueName: \"kubernetes.io/projected/fef4a22f-705c-4dda-8c74-ace62ff4ce68-kube-api-access-xw44v\") pod \"marketplace-operator-79b997595-wr6q4\" (UID: \"fef4a22f-705c-4dda-8c74-ace62ff4ce68\") " pod="openshift-marketplace/marketplace-operator-79b997595-wr6q4" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.526064 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/d4ac164e-eff3-47b2-8cfe-05acd0b7c1e8-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-fcc5l\" (UID: \"d4ac164e-eff3-47b2-8cfe-05acd0b7c1e8\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-fcc5l" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.526086 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/fef4a22f-705c-4dda-8c74-ace62ff4ce68-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-wr6q4\" (UID: \"fef4a22f-705c-4dda-8c74-ace62ff4ce68\") " pod="openshift-marketplace/marketplace-operator-79b997595-wr6q4" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.526106 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fqx9x\" (UniqueName: \"kubernetes.io/projected/6f5dcf1c-e463-4bd7-ad34-f8e2dbcd5aac-kube-api-access-fqx9x\") pod \"multus-admission-controller-857f4d67dd-rzj2p\" (UID: \"6f5dcf1c-e463-4bd7-ad34-f8e2dbcd5aac\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-rzj2p" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.526141 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/fef4a22f-705c-4dda-8c74-ace62ff4ce68-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-wr6q4\" (UID: \"fef4a22f-705c-4dda-8c74-ace62ff4ce68\") " 
pod="openshift-marketplace/marketplace-operator-79b997595-wr6q4" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.526191 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-69p7m\" (UniqueName: \"kubernetes.io/projected/8e3a5a3b-f908-4660-adf1-fe3bf70175f9-kube-api-access-69p7m\") pod \"package-server-manager-789f6589d5-qjw5q\" (UID: \"8e3a5a3b-f908-4660-adf1-fe3bf70175f9\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-qjw5q" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.526257 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-65d8w\" (UniqueName: \"kubernetes.io/projected/6c4283fe-0fc3-4c3a-bc02-b552e0efa62f-kube-api-access-65d8w\") pod \"olm-operator-6b444d44fb-p4p66\" (UID: \"6c4283fe-0fc3-4c3a-bc02-b552e0efa62f\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-p4p66" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.526292 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-plmlz\" (UniqueName: \"kubernetes.io/projected/766b2181-d531-40cf-8d30-5ab0190d9da3-kube-api-access-plmlz\") pod \"service-ca-9c57cc56f-2kj76\" (UID: \"766b2181-d531-40cf-8d30-5ab0190d9da3\") " pod="openshift-service-ca/service-ca-9c57cc56f-2kj76" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.526329 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/31ae7f30-0f40-4c17-ad54-33aeefcbeeb7-apiservice-cert\") pod \"packageserver-d55dfcdfc-5wl59\" (UID: \"31ae7f30-0f40-4c17-ad54-33aeefcbeeb7\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-5wl59" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.526369 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9qvz7\" (UniqueName: \"kubernetes.io/projected/d4ac164e-eff3-47b2-8cfe-05acd0b7c1e8-kube-api-access-9qvz7\") pod \"cluster-samples-operator-665b6dd947-fcc5l\" (UID: \"d4ac164e-eff3-47b2-8cfe-05acd0b7c1e8\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-fcc5l" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.526397 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/10bf2f37-5f87-479d-a9b1-9e1bdf5b150a-profile-collector-cert\") pod \"catalog-operator-68c6474976-rgjfm\" (UID: \"10bf2f37-5f87-479d-a9b1-9e1bdf5b150a\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-rgjfm" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.526548 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/668e6166-ca3b-46c8-bdb4-b3b9e46f2636-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-wtbfr\" (UID: \"668e6166-ca3b-46c8-bdb4-b3b9e46f2636\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-wtbfr" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.526678 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/766b2181-d531-40cf-8d30-5ab0190d9da3-signing-cabundle\") pod \"service-ca-9c57cc56f-2kj76\" (UID: \"766b2181-d531-40cf-8d30-5ab0190d9da3\") " pod="openshift-service-ca/service-ca-9c57cc56f-2kj76" Nov 23 14:44:07 crc 
kubenswrapper[5050]: I1123 14:44:07.526744 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/7defdad8-6ff2-452d-a144-e701d7b3baf6-certs\") pod \"machine-config-server-tq999\" (UID: \"7defdad8-6ff2-452d-a144-e701d7b3baf6\") " pod="openshift-machine-config-operator/machine-config-server-tq999" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.526859 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/2fc00c67-c3ab-4b63-b068-3a41c027f6f3-socket-dir\") pod \"csi-hostpathplugin-jz58r\" (UID: \"2fc00c67-c3ab-4b63-b068-3a41c027f6f3\") " pod="hostpath-provisioner/csi-hostpathplugin-jz58r" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.526934 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/2fc00c67-c3ab-4b63-b068-3a41c027f6f3-registration-dir\") pod \"csi-hostpathplugin-jz58r\" (UID: \"2fc00c67-c3ab-4b63-b068-3a41c027f6f3\") " pod="hostpath-provisioner/csi-hostpathplugin-jz58r" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.526982 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/e794c1d4-b3b8-48cc-97ad-75861e8021e6-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-n5v2l\" (UID: \"e794c1d4-b3b8-48cc-97ad-75861e8021e6\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-n5v2l" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.527014 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/8e3a5a3b-f908-4660-adf1-fe3bf70175f9-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-qjw5q\" (UID: \"8e3a5a3b-f908-4660-adf1-fe3bf70175f9\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-qjw5q" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.527103 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/9c29734c-66bd-4ac3-b26a-b5349d786018-registry-tls\") pod \"image-registry-697d97f7c8-rw2cv\" (UID: \"9c29734c-66bd-4ac3-b26a-b5349d786018\") " pod="openshift-image-registry/image-registry-697d97f7c8-rw2cv" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.527135 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/31ae7f30-0f40-4c17-ad54-33aeefcbeeb7-tmpfs\") pod \"packageserver-d55dfcdfc-5wl59\" (UID: \"31ae7f30-0f40-4c17-ad54-33aeefcbeeb7\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-5wl59" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.527188 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d97c8\" (UniqueName: \"kubernetes.io/projected/31ae7f30-0f40-4c17-ad54-33aeefcbeeb7-kube-api-access-d97c8\") pod \"packageserver-d55dfcdfc-5wl59\" (UID: \"31ae7f30-0f40-4c17-ad54-33aeefcbeeb7\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-5wl59" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.527236 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: 
\"kubernetes.io/secret/e794c1d4-b3b8-48cc-97ad-75861e8021e6-proxy-tls\") pod \"machine-config-controller-84d6567774-n5v2l\" (UID: \"e794c1d4-b3b8-48cc-97ad-75861e8021e6\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-n5v2l" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.527267 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/13b0f3d4-03aa-421b-9cb6-7fae67f80626-cert\") pod \"ingress-canary-fkkng\" (UID: \"13b0f3d4-03aa-421b-9cb6-7fae67f80626\") " pod="openshift-ingress-canary/ingress-canary-fkkng" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.527370 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cmdkr\" (UniqueName: \"kubernetes.io/projected/10bf2f37-5f87-479d-a9b1-9e1bdf5b150a-kube-api-access-cmdkr\") pod \"catalog-operator-68c6474976-rgjfm\" (UID: \"10bf2f37-5f87-479d-a9b1-9e1bdf5b150a\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-rgjfm" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.527421 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/9c29734c-66bd-4ac3-b26a-b5349d786018-installation-pull-secrets\") pod \"image-registry-697d97f7c8-rw2cv\" (UID: \"9c29734c-66bd-4ac3-b26a-b5349d786018\") " pod="openshift-image-registry/image-registry-697d97f7c8-rw2cv" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.527466 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/766b2181-d531-40cf-8d30-5ab0190d9da3-signing-key\") pod \"service-ca-9c57cc56f-2kj76\" (UID: \"766b2181-d531-40cf-8d30-5ab0190d9da3\") " pod="openshift-service-ca/service-ca-9c57cc56f-2kj76" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.531254 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/10bf2f37-5f87-479d-a9b1-9e1bdf5b150a-profile-collector-cert\") pod \"catalog-operator-68c6474976-rgjfm\" (UID: \"10bf2f37-5f87-479d-a9b1-9e1bdf5b150a\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-rgjfm" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.532345 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-ngz59" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.534693 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/6c4283fe-0fc3-4c3a-bc02-b552e0efa62f-profile-collector-cert\") pod \"olm-operator-6b444d44fb-p4p66\" (UID: \"6c4283fe-0fc3-4c3a-bc02-b552e0efa62f\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-p4p66" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.539322 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/6f5dcf1c-e463-4bd7-ad34-f8e2dbcd5aac-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-rzj2p\" (UID: \"6f5dcf1c-e463-4bd7-ad34-f8e2dbcd5aac\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-rzj2p" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.540902 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/fef4a22f-705c-4dda-8c74-ace62ff4ce68-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-wr6q4\" (UID: \"fef4a22f-705c-4dda-8c74-ace62ff4ce68\") " pod="openshift-marketplace/marketplace-operator-79b997595-wr6q4" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.562874 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/31ae7f30-0f40-4c17-ad54-33aeefcbeeb7-webhook-cert\") pod \"packageserver-d55dfcdfc-5wl59\" (UID: \"31ae7f30-0f40-4c17-ad54-33aeefcbeeb7\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-5wl59" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.569918 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/31ae7f30-0f40-4c17-ad54-33aeefcbeeb7-tmpfs\") pod \"packageserver-d55dfcdfc-5wl59\" (UID: \"31ae7f30-0f40-4c17-ad54-33aeefcbeeb7\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-5wl59" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.579610 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-ngl6b"] Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.581199 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-jzg9r" Nov 23 14:44:07 crc kubenswrapper[5050]: E1123 14:44:07.581482 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-23 14:44:08.081434483 +0000 UTC m=+143.248430968 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rw2cv" (UID: "9c29734c-66bd-4ac3-b26a-b5349d786018") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.582763 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/766b2181-d531-40cf-8d30-5ab0190d9da3-signing-cabundle\") pod \"service-ca-9c57cc56f-2kj76\" (UID: \"766b2181-d531-40cf-8d30-5ab0190d9da3\") " pod="openshift-service-ca/service-ca-9c57cc56f-2kj76" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.583421 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/668e6166-ca3b-46c8-bdb4-b3b9e46f2636-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-wtbfr\" (UID: \"668e6166-ca3b-46c8-bdb4-b3b9e46f2636\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-wtbfr" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.584790 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/400aa1f3-7e05-407e-90dc-ad2e99f54e61-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-tcs6j\" (UID: \"400aa1f3-7e05-407e-90dc-ad2e99f54e61\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-tcs6j" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.585347 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/9c29734c-66bd-4ac3-b26a-b5349d786018-registry-tls\") pod \"image-registry-697d97f7c8-rw2cv\" (UID: \"9c29734c-66bd-4ac3-b26a-b5349d786018\") " pod="openshift-image-registry/image-registry-697d97f7c8-rw2cv" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.585813 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/10bf2f37-5f87-479d-a9b1-9e1bdf5b150a-srv-cert\") pod \"catalog-operator-68c6474976-rgjfm\" (UID: \"10bf2f37-5f87-479d-a9b1-9e1bdf5b150a\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-rgjfm" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.586360 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/9c29734c-66bd-4ac3-b26a-b5349d786018-ca-trust-extracted\") pod \"image-registry-697d97f7c8-rw2cv\" (UID: \"9c29734c-66bd-4ac3-b26a-b5349d786018\") " pod="openshift-image-registry/image-registry-697d97f7c8-rw2cv" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.586919 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/668e6166-ca3b-46c8-bdb4-b3b9e46f2636-config\") pod \"kube-controller-manager-operator-78b949d7b-wtbfr\" (UID: \"668e6166-ca3b-46c8-bdb4-b3b9e46f2636\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-wtbfr" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.587170 5050 operation_generator.go:637] "MountVolume.SetUp succeeded 
for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/e794c1d4-b3b8-48cc-97ad-75861e8021e6-proxy-tls\") pod \"machine-config-controller-84d6567774-n5v2l\" (UID: \"e794c1d4-b3b8-48cc-97ad-75861e8021e6\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-n5v2l" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.591242 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/766b2181-d531-40cf-8d30-5ab0190d9da3-signing-key\") pod \"service-ca-9c57cc56f-2kj76\" (UID: \"766b2181-d531-40cf-8d30-5ab0190d9da3\") " pod="openshift-service-ca/service-ca-9c57cc56f-2kj76" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.591525 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/e794c1d4-b3b8-48cc-97ad-75861e8021e6-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-n5v2l\" (UID: \"e794c1d4-b3b8-48cc-97ad-75861e8021e6\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-n5v2l" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.592509 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/d4ac164e-eff3-47b2-8cfe-05acd0b7c1e8-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-fcc5l\" (UID: \"d4ac164e-eff3-47b2-8cfe-05acd0b7c1e8\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-fcc5l" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.594435 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9c29734c-66bd-4ac3-b26a-b5349d786018-trusted-ca\") pod \"image-registry-697d97f7c8-rw2cv\" (UID: \"9c29734c-66bd-4ac3-b26a-b5349d786018\") " pod="openshift-image-registry/image-registry-697d97f7c8-rw2cv" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.595408 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/fef4a22f-705c-4dda-8c74-ace62ff4ce68-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-wr6q4\" (UID: \"fef4a22f-705c-4dda-8c74-ace62ff4ce68\") " pod="openshift-marketplace/marketplace-operator-79b997595-wr6q4" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.598214 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/9c29734c-66bd-4ac3-b26a-b5349d786018-registry-certificates\") pod \"image-registry-697d97f7c8-rw2cv\" (UID: \"9c29734c-66bd-4ac3-b26a-b5349d786018\") " pod="openshift-image-registry/image-registry-697d97f7c8-rw2cv" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.600607 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/9c29734c-66bd-4ac3-b26a-b5349d786018-installation-pull-secrets\") pod \"image-registry-697d97f7c8-rw2cv\" (UID: \"9c29734c-66bd-4ac3-b26a-b5349d786018\") " pod="openshift-image-registry/image-registry-697d97f7c8-rw2cv" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.620598 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/31ae7f30-0f40-4c17-ad54-33aeefcbeeb7-apiservice-cert\") pod \"packageserver-d55dfcdfc-5wl59\" (UID: 
\"31ae7f30-0f40-4c17-ad54-33aeefcbeeb7\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-5wl59" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.621218 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/8e3a5a3b-f908-4660-adf1-fe3bf70175f9-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-qjw5q\" (UID: \"8e3a5a3b-f908-4660-adf1-fe3bf70175f9\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-qjw5q" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.622558 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29398470-mm7rl" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.632035 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-69p7m\" (UniqueName: \"kubernetes.io/projected/8e3a5a3b-f908-4660-adf1-fe3bf70175f9-kube-api-access-69p7m\") pod \"package-server-manager-789f6589d5-qjw5q\" (UID: \"8e3a5a3b-f908-4660-adf1-fe3bf70175f9\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-qjw5q" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.635386 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-9rdbb" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.635576 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/9c29734c-66bd-4ac3-b26a-b5349d786018-bound-sa-token\") pod \"image-registry-697d97f7c8-rw2cv\" (UID: \"9c29734c-66bd-4ac3-b26a-b5349d786018\") " pod="openshift-image-registry/image-registry-697d97f7c8-rw2cv" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.636295 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/6c4283fe-0fc3-4c3a-bc02-b552e0efa62f-srv-cert\") pod \"olm-operator-6b444d44fb-p4p66\" (UID: \"6c4283fe-0fc3-4c3a-bc02-b552e0efa62f\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-p4p66" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.637049 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6ck44\" (UniqueName: \"kubernetes.io/projected/400aa1f3-7e05-407e-90dc-ad2e99f54e61-kube-api-access-6ck44\") pod \"control-plane-machine-set-operator-78cbb6b69f-tcs6j\" (UID: \"400aa1f3-7e05-407e-90dc-ad2e99f54e61\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-tcs6j" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.638269 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dzqtd\" (UniqueName: \"kubernetes.io/projected/9c29734c-66bd-4ac3-b26a-b5349d786018-kube-api-access-dzqtd\") pod \"image-registry-697d97f7c8-rw2cv\" (UID: \"9c29734c-66bd-4ac3-b26a-b5349d786018\") " pod="openshift-image-registry/image-registry-697d97f7c8-rw2cv" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.640378 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 23 14:44:07 crc 
kubenswrapper[5050]: I1123 14:44:07.641916 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fqx9x\" (UniqueName: \"kubernetes.io/projected/6f5dcf1c-e463-4bd7-ad34-f8e2dbcd5aac-kube-api-access-fqx9x\") pod \"multus-admission-controller-857f4d67dd-rzj2p\" (UID: \"6f5dcf1c-e463-4bd7-ad34-f8e2dbcd5aac\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-rzj2p" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.642597 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/2fc00c67-c3ab-4b63-b068-3a41c027f6f3-csi-data-dir\") pod \"csi-hostpathplugin-jz58r\" (UID: \"2fc00c67-c3ab-4b63-b068-3a41c027f6f3\") " pod="hostpath-provisioner/csi-hostpathplugin-jz58r" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.642640 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-npcrt\" (UniqueName: \"kubernetes.io/projected/13b0f3d4-03aa-421b-9cb6-7fae67f80626-kube-api-access-npcrt\") pod \"ingress-canary-fkkng\" (UID: \"13b0f3d4-03aa-421b-9cb6-7fae67f80626\") " pod="openshift-ingress-canary/ingress-canary-fkkng" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.642697 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/2fc00c67-c3ab-4b63-b068-3a41c027f6f3-mountpoint-dir\") pod \"csi-hostpathplugin-jz58r\" (UID: \"2fc00c67-c3ab-4b63-b068-3a41c027f6f3\") " pod="hostpath-provisioner/csi-hostpathplugin-jz58r" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.642752 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/2fc00c67-c3ab-4b63-b068-3a41c027f6f3-plugins-dir\") pod \"csi-hostpathplugin-jz58r\" (UID: \"2fc00c67-c3ab-4b63-b068-3a41c027f6f3\") " pod="hostpath-provisioner/csi-hostpathplugin-jz58r" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.643142 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/2fc00c67-c3ab-4b63-b068-3a41c027f6f3-plugins-dir\") pod \"csi-hostpathplugin-jz58r\" (UID: \"2fc00c67-c3ab-4b63-b068-3a41c027f6f3\") " pod="hostpath-provisioner/csi-hostpathplugin-jz58r" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.643261 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/2fc00c67-c3ab-4b63-b068-3a41c027f6f3-csi-data-dir\") pod \"csi-hostpathplugin-jz58r\" (UID: \"2fc00c67-c3ab-4b63-b068-3a41c027f6f3\") " pod="hostpath-provisioner/csi-hostpathplugin-jz58r" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.643494 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/2fc00c67-c3ab-4b63-b068-3a41c027f6f3-mountpoint-dir\") pod \"csi-hostpathplugin-jz58r\" (UID: \"2fc00c67-c3ab-4b63-b068-3a41c027f6f3\") " pod="hostpath-provisioner/csi-hostpathplugin-jz58r" Nov 23 14:44:07 crc kubenswrapper[5050]: E1123 14:44:07.643969 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-23 14:44:08.143945203 +0000 UTC m=+143.310941868 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.694381 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-djc5v"] Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.706303 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cmdkr\" (UniqueName: \"kubernetes.io/projected/10bf2f37-5f87-479d-a9b1-9e1bdf5b150a-kube-api-access-cmdkr\") pod \"catalog-operator-68c6474976-rgjfm\" (UID: \"10bf2f37-5f87-479d-a9b1-9e1bdf5b150a\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-rgjfm" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.707791 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d97c8\" (UniqueName: \"kubernetes.io/projected/31ae7f30-0f40-4c17-ad54-33aeefcbeeb7-kube-api-access-d97c8\") pod \"packageserver-d55dfcdfc-5wl59\" (UID: \"31ae7f30-0f40-4c17-ad54-33aeefcbeeb7\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-5wl59" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.731166 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/668e6166-ca3b-46c8-bdb4-b3b9e46f2636-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-wtbfr\" (UID: \"668e6166-ca3b-46c8-bdb4-b3b9e46f2636\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-wtbfr" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.744248 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/2fc00c67-c3ab-4b63-b068-3a41c027f6f3-socket-dir\") pod \"csi-hostpathplugin-jz58r\" (UID: \"2fc00c67-c3ab-4b63-b068-3a41c027f6f3\") " pod="hostpath-provisioner/csi-hostpathplugin-jz58r" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.744297 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/2fc00c67-c3ab-4b63-b068-3a41c027f6f3-registration-dir\") pod \"csi-hostpathplugin-jz58r\" (UID: \"2fc00c67-c3ab-4b63-b068-3a41c027f6f3\") " pod="hostpath-provisioner/csi-hostpathplugin-jz58r" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.744333 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/13b0f3d4-03aa-421b-9cb6-7fae67f80626-cert\") pod \"ingress-canary-fkkng\" (UID: \"13b0f3d4-03aa-421b-9cb6-7fae67f80626\") " pod="openshift-ingress-canary/ingress-canary-fkkng" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.744409 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rw2cv\" (UID: \"9c29734c-66bd-4ac3-b26a-b5349d786018\") " 
pod="openshift-image-registry/image-registry-697d97f7c8-rw2cv" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.744506 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d6p6q\" (UniqueName: \"kubernetes.io/projected/2fc00c67-c3ab-4b63-b068-3a41c027f6f3-kube-api-access-d6p6q\") pod \"csi-hostpathplugin-jz58r\" (UID: \"2fc00c67-c3ab-4b63-b068-3a41c027f6f3\") " pod="hostpath-provisioner/csi-hostpathplugin-jz58r" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.744532 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8bzgv\" (UniqueName: \"kubernetes.io/projected/7defdad8-6ff2-452d-a144-e701d7b3baf6-kube-api-access-8bzgv\") pod \"machine-config-server-tq999\" (UID: \"7defdad8-6ff2-452d-a144-e701d7b3baf6\") " pod="openshift-machine-config-operator/machine-config-server-tq999" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.744563 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/7defdad8-6ff2-452d-a144-e701d7b3baf6-node-bootstrap-token\") pod \"machine-config-server-tq999\" (UID: \"7defdad8-6ff2-452d-a144-e701d7b3baf6\") " pod="openshift-machine-config-operator/machine-config-server-tq999" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.744667 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/7defdad8-6ff2-452d-a144-e701d7b3baf6-certs\") pod \"machine-config-server-tq999\" (UID: \"7defdad8-6ff2-452d-a144-e701d7b3baf6\") " pod="openshift-machine-config-operator/machine-config-server-tq999" Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.745376 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-65d8w\" (UniqueName: \"kubernetes.io/projected/6c4283fe-0fc3-4c3a-bc02-b552e0efa62f-kube-api-access-65d8w\") pod \"olm-operator-6b444d44fb-p4p66\" (UID: \"6c4283fe-0fc3-4c3a-bc02-b552e0efa62f\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-p4p66" Nov 23 14:44:07 crc kubenswrapper[5050]: E1123 14:44:07.745626 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-23 14:44:08.245607108 +0000 UTC m=+143.412603763 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rw2cv" (UID: "9c29734c-66bd-4ac3-b26a-b5349d786018") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.745797 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/2fc00c67-c3ab-4b63-b068-3a41c027f6f3-registration-dir\") pod \"csi-hostpathplugin-jz58r\" (UID: \"2fc00c67-c3ab-4b63-b068-3a41c027f6f3\") " pod="hostpath-provisioner/csi-hostpathplugin-jz58r"
Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.748897 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/2fc00c67-c3ab-4b63-b068-3a41c027f6f3-socket-dir\") pod \"csi-hostpathplugin-jz58r\" (UID: \"2fc00c67-c3ab-4b63-b068-3a41c027f6f3\") " pod="hostpath-provisioner/csi-hostpathplugin-jz58r"
Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.751396 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"certs\" (UniqueName: \"kubernetes.io/secret/7defdad8-6ff2-452d-a144-e701d7b3baf6-certs\") pod \"machine-config-server-tq999\" (UID: \"7defdad8-6ff2-452d-a144-e701d7b3baf6\") " pod="openshift-machine-config-operator/machine-config-server-tq999"
Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.753955 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/13b0f3d4-03aa-421b-9cb6-7fae67f80626-cert\") pod \"ingress-canary-fkkng\" (UID: \"13b0f3d4-03aa-421b-9cb6-7fae67f80626\") " pod="openshift-ingress-canary/ingress-canary-fkkng"
Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.755929 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/7defdad8-6ff2-452d-a144-e701d7b3baf6-node-bootstrap-token\") pod \"machine-config-server-tq999\" (UID: \"7defdad8-6ff2-452d-a144-e701d7b3baf6\") " pod="openshift-machine-config-operator/machine-config-server-tq999"
Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.769262 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-plmlz\" (UniqueName: \"kubernetes.io/projected/766b2181-d531-40cf-8d30-5ab0190d9da3-kube-api-access-plmlz\") pod \"service-ca-9c57cc56f-2kj76\" (UID: \"766b2181-d531-40cf-8d30-5ab0190d9da3\") " pod="openshift-service-ca/service-ca-9c57cc56f-2kj76"
Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.772565 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-xwq4h"]
Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.774930 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-n9kmn"]
Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.785192 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9qvz7\" (UniqueName: \"kubernetes.io/projected/d4ac164e-eff3-47b2-8cfe-05acd0b7c1e8-kube-api-access-9qvz7\") pod \"cluster-samples-operator-665b6dd947-fcc5l\" (UID: \"d4ac164e-eff3-47b2-8cfe-05acd0b7c1e8\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-fcc5l"
Nov 23 14:44:07 crc kubenswrapper[5050]: W1123 14:44:07.786664 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podae2bdaf0_ab3a_4822_b376_945d76181e8f.slice/crio-261266f72bf10a5e3b278739489c4ede328cf88c41677877475d3e585c30a5d3 WatchSource:0}: Error finding container 261266f72bf10a5e3b278739489c4ede328cf88c41677877475d3e585c30a5d3: Status 404 returned error can't find the container with id 261266f72bf10a5e3b278739489c4ede328cf88c41677877475d3e585c30a5d3
Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.792871 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-rzj2p"
Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.810579 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xw44v\" (UniqueName: \"kubernetes.io/projected/fef4a22f-705c-4dda-8c74-ace62ff4ce68-kube-api-access-xw44v\") pod \"marketplace-operator-79b997595-wr6q4\" (UID: \"fef4a22f-705c-4dda-8c74-ace62ff4ce68\") " pod="openshift-marketplace/marketplace-operator-79b997595-wr6q4"
Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.813855 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-wtbfr"
Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.826373 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ppd8g\" (UniqueName: \"kubernetes.io/projected/e794c1d4-b3b8-48cc-97ad-75861e8021e6-kube-api-access-ppd8g\") pod \"machine-config-controller-84d6567774-n5v2l\" (UID: \"e794c1d4-b3b8-48cc-97ad-75861e8021e6\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-n5v2l"
Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.844337 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-npcrt\" (UniqueName: \"kubernetes.io/projected/13b0f3d4-03aa-421b-9cb6-7fae67f80626-kube-api-access-npcrt\") pod \"ingress-canary-fkkng\" (UID: \"13b0f3d4-03aa-421b-9cb6-7fae67f80626\") " pod="openshift-ingress-canary/ingress-canary-fkkng"
Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.845901 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-rgjfm"
Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.846306 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 23 14:44:07 crc kubenswrapper[5050]: E1123 14:44:07.846845 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-23 14:44:08.346829151 +0000 UTC m=+143.513825636 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.853193 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-p4p66"
Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.862837 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-5wl59"
Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.863396 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-96dfp"]
Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.871287 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-fcc5l"
Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.876876 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-tcs6j"
Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.885020 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-qjw5q"
Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.890582 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8bzgv\" (UniqueName: \"kubernetes.io/projected/7defdad8-6ff2-452d-a144-e701d7b3baf6-kube-api-access-8bzgv\") pod \"machine-config-server-tq999\" (UID: \"7defdad8-6ff2-452d-a144-e701d7b3baf6\") " pod="openshift-machine-config-operator/machine-config-server-tq999"
Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.891403 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-2kj76"
Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.897003 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-wr6q4"
Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.917567 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d6p6q\" (UniqueName: \"kubernetes.io/projected/2fc00c67-c3ab-4b63-b068-3a41c027f6f3-kube-api-access-d6p6q\") pod \"csi-hostpathplugin-jz58r\" (UID: \"2fc00c67-c3ab-4b63-b068-3a41c027f6f3\") " pod="hostpath-provisioner/csi-hostpathplugin-jz58r"
Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.927599 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-b9jzj"]
Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.927604 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-fkkng"
Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.934647 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-tq999"
Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.949034 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rw2cv\" (UID: \"9c29734c-66bd-4ac3-b26a-b5349d786018\") " pod="openshift-image-registry/image-registry-697d97f7c8-rw2cv"
Nov 23 14:44:07 crc kubenswrapper[5050]: E1123 14:44:07.949489 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-23 14:44:08.449471463 +0000 UTC m=+143.616467948 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rw2cv" (UID: "9c29734c-66bd-4ac3-b26a-b5349d786018") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 23 14:44:07 crc kubenswrapper[5050]: I1123 14:44:07.954115 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-jz58r"
Nov 23 14:44:08 crc kubenswrapper[5050]: I1123 14:44:08.034700 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-wdqwt"]
Nov 23 14:44:08 crc kubenswrapper[5050]: I1123 14:44:08.041589 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-ngz59"]
Nov 23 14:44:08 crc kubenswrapper[5050]: I1123 14:44:08.052875 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 23 14:44:08 crc kubenswrapper[5050]: E1123 14:44:08.053265 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-23 14:44:08.553246237 +0000 UTC m=+143.720242722 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 23 14:44:08 crc kubenswrapper[5050]: I1123 14:44:08.127950 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-n5v2l"
Nov 23 14:44:08 crc kubenswrapper[5050]: I1123 14:44:08.145459 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-svd5r"]
Nov 23 14:44:08 crc kubenswrapper[5050]: I1123 14:44:08.153781 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-jzg9r"]
Nov 23 14:44:08 crc kubenswrapper[5050]: I1123 14:44:08.157741 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rw2cv\" (UID: \"9c29734c-66bd-4ac3-b26a-b5349d786018\") " pod="openshift-image-registry/image-registry-697d97f7c8-rw2cv"
Nov 23 14:44:08 crc kubenswrapper[5050]: E1123 14:44:08.158134 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-23 14:44:08.658119422 +0000 UTC m=+143.825115907 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rw2cv" (UID: "9c29734c-66bd-4ac3-b26a-b5349d786018") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 23 14:44:08 crc kubenswrapper[5050]: I1123 14:44:08.206853 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-dzz98"]
Nov 23 14:44:08 crc kubenswrapper[5050]: I1123 14:44:08.236829 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29398470-mm7rl"]
Nov 23 14:44:08 crc kubenswrapper[5050]: I1123 14:44:08.259185 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 23 14:44:08 crc kubenswrapper[5050]: E1123 14:44:08.259632 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-23 14:44:08.759607853 +0000 UTC m=+143.926604338 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 23 14:44:08 crc kubenswrapper[5050]: W1123 14:44:08.280169 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6b5f71c7_bb75_435e_ac44_3328aa8fe73d.slice/crio-68bae89babd133a326ee42961c189c407cae3398eeae4a96bb83d6be588e37b9 WatchSource:0}: Error finding container 68bae89babd133a326ee42961c189c407cae3398eeae4a96bb83d6be588e37b9: Status 404 returned error can't find the container with id 68bae89babd133a326ee42961c189c407cae3398eeae4a96bb83d6be588e37b9
Nov 23 14:44:08 crc kubenswrapper[5050]: W1123 14:44:08.292828 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podab27cc8e_1d04_43dd_bf34_0cfdb48daba7.slice/crio-b2975a3133e86cc7798cb39cf637183a7b9b38947b9bf9f9c9a9e9f6f39c5f37 WatchSource:0}: Error finding container b2975a3133e86cc7798cb39cf637183a7b9b38947b9bf9f9c9a9e9f6f39c5f37: Status 404 returned error can't find the container with id b2975a3133e86cc7798cb39cf637183a7b9b38947b9bf9f9c9a9e9f6f39c5f37
Nov 23 14:44:08 crc kubenswrapper[5050]: I1123 14:44:08.370854 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-9rdbb"]
Nov 23 14:44:08 crc kubenswrapper[5050]: I1123 14:44:08.371211 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rw2cv\" (UID: \"9c29734c-66bd-4ac3-b26a-b5349d786018\") " pod="openshift-image-registry/image-registry-697d97f7c8-rw2cv"
Nov 23 14:44:08 crc kubenswrapper[5050]: E1123 14:44:08.371877 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-23 14:44:08.871861244 +0000 UTC m=+144.038857729 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rw2cv" (UID: "9c29734c-66bd-4ac3-b26a-b5349d786018") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 23 14:44:08 crc kubenswrapper[5050]: I1123 14:44:08.474224 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 23 14:44:08 crc kubenswrapper[5050]: E1123 14:44:08.475756 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-23 14:44:08.975726601 +0000 UTC m=+144.142723086 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 23 14:44:08 crc kubenswrapper[5050]: I1123 14:44:08.550130 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-2g8pj" event={"ID":"05b57f72-3ec2-41f4-ba15-4bb73c15a1af","Type":"ContainerStarted","Data":"5f6ff113c49a50d8cda784c917c6457422247f79078bebdc81169a55a303dd61"}
Nov 23 14:44:08 crc kubenswrapper[5050]: I1123 14:44:08.558046 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-jzg9r" event={"ID":"4c1df8cb-7a6f-46da-9d4e-a1b88f85997e","Type":"ContainerStarted","Data":"fb94c7da4db2869166b905315da38b1754788611b123ce3e924c3184019928af"}
Nov 23 14:44:08 crc kubenswrapper[5050]: I1123 14:44:08.558116 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-djc5v" event={"ID":"ae2bdaf0-ab3a-4822-b376-945d76181e8f","Type":"ContainerStarted","Data":"261266f72bf10a5e3b278739489c4ede328cf88c41677877475d3e585c30a5d3"}
Nov 23 14:44:08 crc kubenswrapper[5050]: I1123 14:44:08.558134 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-gvb4q" event={"ID":"16e600fc-0484-4c1c-8800-bb3958374bad","Type":"ContainerStarted","Data":"9cc67052d0fabe6f2da4dca233f55551f72c43ea6f2803f8363a79d93bb0f1c8"}
Nov 23 14:44:08 crc kubenswrapper[5050]: I1123 14:44:08.558165 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-gvb4q"
Nov 23 14:44:08 crc kubenswrapper[5050]: I1123 14:44:08.558869 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-tq999" event={"ID":"7defdad8-6ff2-452d-a144-e701d7b3baf6","Type":"ContainerStarted","Data":"1622717320d06ad999668a1ebc0f5db6bb33cba420adcce0d91882790be2ea21"}
Nov 23 14:44:08 crc kubenswrapper[5050]: I1123 14:44:08.561172 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-ngz59" event={"ID":"a9150bbc-0207-48f1-a027-d33d285b085b","Type":"ContainerStarted","Data":"b167cd495f9bd01c5398b6159c65de7ce2a5f6aadf36edf653a6653d3a5e36d2"}
Nov 23 14:44:08 crc kubenswrapper[5050]: I1123 14:44:08.562546 5050 patch_prober.go:28] interesting pod/downloads-7954f5f757-gvb4q container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.6:8080/\": dial tcp 10.217.0.6:8080: connect: connection refused" start-of-body=
Nov 23 14:44:08 crc kubenswrapper[5050]: I1123 14:44:08.562582 5050 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-gvb4q" podUID="16e600fc-0484-4c1c-8800-bb3958374bad" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.6:8080/\": dial tcp 10.217.0.6:8080: connect: connection refused"
Nov 23 14:44:08 crc kubenswrapper[5050]: I1123 14:44:08.577656 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rw2cv\" (UID: \"9c29734c-66bd-4ac3-b26a-b5349d786018\") " pod="openshift-image-registry/image-registry-697d97f7c8-rw2cv"
Nov 23 14:44:08 crc kubenswrapper[5050]: E1123 14:44:08.578083 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-23 14:44:09.078068795 +0000 UTC m=+144.245065280 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rw2cv" (UID: "9c29734c-66bd-4ac3-b26a-b5349d786018") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 23 14:44:08 crc kubenswrapper[5050]: I1123 14:44:08.580897 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-b9jzj" event={"ID":"650d8b46-fa24-4b73-8f95-a0fbfe331fa6","Type":"ContainerStarted","Data":"71fffe767fce8cc5b95ed1cb1005b398b5efc8340a6674f2e1c7d8e046cf6dee"}
Nov 23 14:44:08 crc kubenswrapper[5050]: I1123 14:44:08.598685 5050 generic.go:334] "Generic (PLEG): container finished" podID="74ee62a2-d057-4ddf-8e5f-619cd90645d0" containerID="5f97d5ad0b62fdcb8dfd4d8dbd4bab3b5faaef8975b00bce51654100e30c986d" exitCode=0
Nov 23 14:44:08 crc kubenswrapper[5050]: I1123 14:44:08.598801 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-tt6kr" event={"ID":"74ee62a2-d057-4ddf-8e5f-619cd90645d0","Type":"ContainerDied","Data":"5f97d5ad0b62fdcb8dfd4d8dbd4bab3b5faaef8975b00bce51654100e30c986d"}
Nov 23 14:44:08 crc kubenswrapper[5050]: I1123 14:44:08.632934 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-b646l" event={"ID":"629f02dd-753b-46d2-b808-812468f6c9c5","Type":"ContainerStarted","Data":"d7721e29652a6ded78d37b60fc87d9b4c0e4475020c9f0aea6a917080b70c986"}
Nov 23 14:44:08 crc kubenswrapper[5050]: I1123 14:44:08.640374 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-svd5r" event={"ID":"005c682b-725d-471a-98ca-08f2760e6603","Type":"ContainerStarted","Data":"ac406f558595002aece9a28298358eead80b6afbec9e5c03b8658f221103fa2d"}
Nov 23 14:44:08 crc kubenswrapper[5050]: I1123 14:44:08.655293 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-bglcd" event={"ID":"0d7bc9ed-da92-4305-bda8-78966e7451db","Type":"ContainerStarted","Data":"6966994e7c0254fbed8ed904971b31a272f93ca7a5d2a459570db1f15a7583ce"}
Nov 23 14:44:08 crc kubenswrapper[5050]: I1123 14:44:08.659942 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-xwq4h" event={"ID":"6e9d92c3-4f51-481c-adcb-e48f3e671025","Type":"ContainerStarted","Data":"271be45603076f5014419717c3cc9eda5db98768ba25da023ff5597dcf8bdbf5"}
Nov 23 14:44:08 crc kubenswrapper[5050]: I1123 14:44:08.668239 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-h2nqh" event={"ID":"fe0bb0ba-8376-4437-9cec-fe95bbd77a9a","Type":"ContainerStarted","Data":"4c9cdda64027076585ada3adcb7189acc704d11b7994922f48d1bd4df6cf77a2"}
Nov 23 14:44:08 crc kubenswrapper[5050]: I1123 14:44:08.669417 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console-operator/console-operator-58897d9998-h2nqh"
Nov 23 14:44:08 crc kubenswrapper[5050]: I1123 14:44:08.671146 5050 patch_prober.go:28] interesting pod/console-operator-58897d9998-h2nqh container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.17:8443/readyz\": dial tcp 10.217.0.17:8443: connect: connection refused" start-of-body=
Nov 23 14:44:08 crc kubenswrapper[5050]: I1123 14:44:08.671194 5050 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-h2nqh" podUID="fe0bb0ba-8376-4437-9cec-fe95bbd77a9a" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.17:8443/readyz\": dial tcp 10.217.0.17:8443: connect: connection refused"
Nov 23 14:44:08 crc kubenswrapper[5050]: I1123 14:44:08.675425 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-46rrm" podStartSLOduration=122.675411769 podStartE2EDuration="2m2.675411769s" podCreationTimestamp="2025-11-23 14:42:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 14:44:08.67436227 +0000 UTC m=+143.841358755" watchObservedRunningTime="2025-11-23 14:44:08.675411769 +0000 UTC m=+143.842408244"
Nov 23 14:44:08 crc kubenswrapper[5050]: I1123 14:44:08.680320 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 23 14:44:08 crc kubenswrapper[5050]: E1123 14:44:08.681193 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-23 14:44:09.18116358 +0000 UTC m=+144.348160065 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 23 14:44:08 crc kubenswrapper[5050]: I1123 14:44:08.686483 5050 generic.go:334] "Generic (PLEG): container finished" podID="0a4f6fb7-314a-4896-bcba-c11f622a5e3f" containerID="1f52ce0161a815f286a3ff17ed25a613f3717e74392c487c7392d55cc58bfa56" exitCode=0
Nov 23 14:44:08 crc kubenswrapper[5050]: I1123 14:44:08.686750 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-ph6h8" event={"ID":"0a4f6fb7-314a-4896-bcba-c11f622a5e3f","Type":"ContainerDied","Data":"1f52ce0161a815f286a3ff17ed25a613f3717e74392c487c7392d55cc58bfa56"}
Nov 23 14:44:08 crc kubenswrapper[5050]: I1123 14:44:08.686792 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-ph6h8" event={"ID":"0a4f6fb7-314a-4896-bcba-c11f622a5e3f","Type":"ContainerStarted","Data":"3ae7146ee0236a6ea46dee90758a64d530ee1341f7004f14b940dbfb3b11f3ac"}
Nov 23 14:44:08 crc kubenswrapper[5050]: I1123 14:44:08.694717 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-j5zvd" event={"ID":"e118c084-6256-42a4-ad24-f06222a1b3b4","Type":"ContainerStarted","Data":"8406ee69408e5a4e0cf0817144ab664e7aff06e7b5ead361ff0d3e40fccb7625"}
Nov 23 14:44:08 crc kubenswrapper[5050]: I1123 14:44:08.703245 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-ngl6b" event={"ID":"b1e1a221-b849-45e1-ad2c-d0b3b3eeccb5","Type":"ContainerStarted","Data":"db55523a5a6b96fddb9cd591a53c4d2a362c69aac5621f8c7da7f3b09d8b1866"}
Nov 23 14:44:08 crc kubenswrapper[5050]: I1123 14:44:08.705438 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-cg8k9" event={"ID":"2a07c9a0-b842-4f33-83a4-d6a07eeed391","Type":"ContainerStarted","Data":"ec75dd5e859c542aaebaafb718b8966f89f35f487ef7d9f5fceeb7457d4dd190"}
Nov 23 14:44:08 crc kubenswrapper[5050]: I1123 14:44:08.706391 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-96dfp" event={"ID":"10e4194f-eb7d-4228-8cf8-4925e4a0ec4f","Type":"ContainerStarted","Data":"63fc1b5f730a3b62a83958b1b8278b0e62e283f8cbd335207a2b3755c9ee5fda"}
Nov 23 14:44:08 crc kubenswrapper[5050]: I1123 14:44:08.708220 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-n9kmn" event={"ID":"f568ca0b-0692-482b-a80b-7c6c4f8f80ba","Type":"ContainerStarted","Data":"85d9ed1d5012227434eebbff20d0ada2fc84b3e55064cf30011a8b282dcfecd2"}
Nov 23 14:44:08 crc kubenswrapper[5050]: I1123 14:44:08.713817 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-558db77b4-cvknj"
Nov 23 14:44:08 crc kubenswrapper[5050]: I1123 14:44:08.716357 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-dzz98" event={"ID":"6b5f71c7-bb75-435e-ac44-3328aa8fe73d","Type":"ContainerStarted","Data":"68bae89babd133a326ee42961c189c407cae3398eeae4a96bb83d6be588e37b9"}
Nov 23 14:44:08 crc kubenswrapper[5050]: I1123 14:44:08.717304 5050 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-cvknj container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.10:6443/healthz\": dial tcp 10.217.0.10:6443: connect: connection refused" start-of-body=
Nov 23 14:44:08 crc kubenswrapper[5050]: I1123 14:44:08.717341 5050 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-cvknj" podUID="5c3f6b64-15bf-4147-be4e-414a2569cb58" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.10:6443/healthz\": dial tcp 10.217.0.10:6443: connect: connection refused"
Nov 23 14:44:08 crc kubenswrapper[5050]: I1123 14:44:08.717714 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29398470-mm7rl" event={"ID":"ab27cc8e-1d04-43dd-bf34-0cfdb48daba7","Type":"ContainerStarted","Data":"b2975a3133e86cc7798cb39cf637183a7b9b38947b9bf9f9c9a9e9f6f39c5f37"}
Nov 23 14:44:08 crc kubenswrapper[5050]: I1123 14:44:08.718977 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-zfnk9" event={"ID":"a934d47f-f19d-4d29-aed8-20141e5bcf2b","Type":"ContainerStarted","Data":"dda28eb42280062700c84311c30cc0f53094d22ff3bd5829a531e406000cab94"}
Nov 23 14:44:08 crc kubenswrapper[5050]: I1123 14:44:08.721203 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-wdqwt" event={"ID":"1af9e06d-2732-4be4-bff7-eee682c1d69d","Type":"ContainerStarted","Data":"6422e6fb95d2ffb54ddef2444c1a9cea077132d91aac91f9636e2d22881f223d"}
Nov 23 14:44:08 crc kubenswrapper[5050]: I1123 14:44:08.725237 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-2bsvx" event={"ID":"72f33e56-9fe4-420b-aa31-8d49ed5c7584","Type":"ContainerStarted","Data":"b1ea10a32fe287bc765ea5e2ebeddac902aee6e6f6c57e856ee5909dd2806cef"}
Nov 23 14:44:08 crc kubenswrapper[5050]: I1123 14:44:08.725283 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-2bsvx" event={"ID":"72f33e56-9fe4-420b-aa31-8d49ed5c7584","Type":"ContainerStarted","Data":"431fa29464358052ab2819f58db34cc71c30bcc46442a378e0821673f50b5e4b"}
Nov 23 14:44:08 crc kubenswrapper[5050]: I1123 14:44:08.725508 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-879f6c89f-2bsvx"
Nov 23 14:44:08 crc kubenswrapper[5050]: I1123 14:44:08.727365 5050 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-2bsvx container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.9:8443/healthz\": dial tcp 10.217.0.9:8443: connect: connection refused" start-of-body=
Nov 23 14:44:08 crc kubenswrapper[5050]: I1123 14:44:08.727410 5050 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-2bsvx" podUID="72f33e56-9fe4-420b-aa31-8d49ed5c7584" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.9:8443/healthz\": dial tcp 10.217.0.9:8443: connect: connection refused"
Nov 23 14:44:08 crc kubenswrapper[5050]: I1123 14:44:08.794695 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rw2cv\" (UID: \"9c29734c-66bd-4ac3-b26a-b5349d786018\") " pod="openshift-image-registry/image-registry-697d97f7c8-rw2cv"
Nov 23 14:44:08 crc kubenswrapper[5050]: E1123 14:44:08.800536 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-23 14:44:09.300517361 +0000 UTC m=+144.467513846 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rw2cv" (UID: "9c29734c-66bd-4ac3-b26a-b5349d786018") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 23 14:44:08 crc kubenswrapper[5050]: I1123 14:44:08.817261 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-hlskc" podStartSLOduration=123.817234168 podStartE2EDuration="2m3.817234168s" podCreationTimestamp="2025-11-23 14:42:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 14:44:08.785291475 +0000 UTC m=+143.952287960" watchObservedRunningTime="2025-11-23 14:44:08.817234168 +0000 UTC m=+143.984230653"
Nov 23 14:44:08 crc kubenswrapper[5050]: I1123 14:44:08.819420 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-rzj2p"]
Nov 23 14:44:08 crc kubenswrapper[5050]: I1123 14:44:08.835549 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-wr6q4"]
Nov 23 14:44:08 crc kubenswrapper[5050]: I1123 14:44:08.896005 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 23 14:44:08 crc kubenswrapper[5050]: E1123 14:44:08.896912 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-23 14:44:09.396885958 +0000 UTC m=+144.563882433 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 23 14:44:08 crc kubenswrapper[5050]: I1123 14:44:08.952365 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-fcc5l"]
Nov 23 14:44:08 crc kubenswrapper[5050]: I1123 14:44:08.956138 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-qjw5q"]
Nov 23 14:44:08 crc kubenswrapper[5050]: I1123 14:44:08.998835 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rw2cv\" (UID: \"9c29734c-66bd-4ac3-b26a-b5349d786018\") " pod="openshift-image-registry/image-registry-697d97f7c8-rw2cv"
Nov 23 14:44:09 crc kubenswrapper[5050]: E1123 14:44:09.001855 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-23 14:44:09.501831915 +0000 UTC m=+144.668828400 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rw2cv" (UID: "9c29734c-66bd-4ac3-b26a-b5349d786018") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 23 14:44:09 crc kubenswrapper[5050]: I1123 14:44:09.100007 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 23 14:44:09 crc kubenswrapper[5050]: E1123 14:44:09.101049 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-23 14:44:09.601027321 +0000 UTC m=+144.768023806 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 23 14:44:09 crc kubenswrapper[5050]: I1123 14:44:09.205612 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rw2cv\" (UID: \"9c29734c-66bd-4ac3-b26a-b5349d786018\") " pod="openshift-image-registry/image-registry-697d97f7c8-rw2cv"
Nov 23 14:44:09 crc kubenswrapper[5050]: E1123 14:44:09.206139 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-23 14:44:09.706124052 +0000 UTC m=+144.873120537 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rw2cv" (UID: "9c29734c-66bd-4ac3-b26a-b5349d786018") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 23 14:44:09 crc kubenswrapper[5050]: I1123 14:44:09.263371 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-rgjfm"]
Nov 23 14:44:09 crc kubenswrapper[5050]: I1123 14:44:09.266639 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-p4p66"]
Nov 23 14:44:09 crc kubenswrapper[5050]: I1123 14:44:09.306825 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 23 14:44:09 crc kubenswrapper[5050]: E1123 14:44:09.307308 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-23 14:44:09.807287973 +0000 UTC m=+144.974284458 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 23 14:44:09 crc kubenswrapper[5050]: I1123 14:44:09.382583 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/downloads-7954f5f757-gvb4q" podStartSLOduration=123.38256089 podStartE2EDuration="2m3.38256089s" podCreationTimestamp="2025-11-23 14:42:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 14:44:09.380311257 +0000 UTC m=+144.547307742" watchObservedRunningTime="2025-11-23 14:44:09.38256089 +0000 UTC m=+144.549557375"
Nov 23 14:44:09 crc kubenswrapper[5050]: I1123 14:44:09.409371 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rw2cv\" (UID: \"9c29734c-66bd-4ac3-b26a-b5349d786018\") " pod="openshift-image-registry/image-registry-697d97f7c8-rw2cv"
Nov 23 14:44:09 crc kubenswrapper[5050]: E1123 14:44:09.409895 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-23 14:44:09.909870064 +0000 UTC m=+145.076866549 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rw2cv" (UID: "9c29734c-66bd-4ac3-b26a-b5349d786018") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 23 14:44:09 crc kubenswrapper[5050]: I1123 14:44:09.426305 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd-operator/etcd-operator-b45778765-cg8k9" podStartSLOduration=123.426280093 podStartE2EDuration="2m3.426280093s" podCreationTimestamp="2025-11-23 14:42:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 14:44:09.424506384 +0000 UTC m=+144.591502869" watchObservedRunningTime="2025-11-23 14:44:09.426280093 +0000 UTC m=+144.593276578"
Nov 23 14:44:09 crc kubenswrapper[5050]: I1123 14:44:09.426862 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-wtbfr"]
Nov 23 14:44:09 crc kubenswrapper[5050]: I1123 14:44:09.440092 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-tcs6j"]
Nov 23 14:44:09 crc kubenswrapper[5050]: I1123 14:44:09.465127 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication-operator/authentication-operator-69f744f599-j5zvd" podStartSLOduration=123.46510779 podStartE2EDuration="2m3.46510779s" podCreationTimestamp="2025-11-23 14:42:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 14:44:09.462165318 +0000 UTC m=+144.629161813" watchObservedRunningTime="2025-11-23 14:44:09.46510779 +0000 UTC m=+144.632104275"
Nov 23 14:44:09 crc kubenswrapper[5050]: I1123 14:44:09.505130 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-558db77b4-cvknj" podStartSLOduration=123.505104949 podStartE2EDuration="2m3.505104949s" podCreationTimestamp="2025-11-23 14:42:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 14:44:09.504169263 +0000 UTC m=+144.671165758" watchObservedRunningTime="2025-11-23 14:44:09.505104949 +0000 UTC m=+144.672101434"
Nov 23 14:44:09 crc kubenswrapper[5050]: I1123 14:44:09.514042 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 23 14:44:09 crc kubenswrapper[5050]: E1123 14:44:09.514161 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-23 14:44:10.014121662 +0000 UTC m=+145.181118147 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 23 14:44:09 crc kubenswrapper[5050]: I1123 14:44:09.536755 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rw2cv\" (UID: \"9c29734c-66bd-4ac3-b26a-b5349d786018\") " pod="openshift-image-registry/image-registry-697d97f7c8-rw2cv"
Nov 23 14:44:09 crc kubenswrapper[5050]: E1123 14:44:09.537954 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-23 14:44:10.037927468 +0000 UTC m=+145.204923953 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rw2cv" (UID: "9c29734c-66bd-4ac3-b26a-b5349d786018") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 23 14:44:09 crc kubenswrapper[5050]: I1123 14:44:09.603940 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-879f6c89f-2bsvx" podStartSLOduration=123.603917275 podStartE2EDuration="2m3.603917275s" podCreationTimestamp="2025-11-23 14:42:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 14:44:09.573830893 +0000 UTC m=+144.740827398" watchObservedRunningTime="2025-11-23 14:44:09.603917275 +0000 UTC m=+144.770913760"
Nov 23 14:44:09 crc kubenswrapper[5050]: I1123 14:44:09.634021 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console-operator/console-operator-58897d9998-h2nqh" podStartSLOduration=123.633996197 podStartE2EDuration="2m3.633996197s" podCreationTimestamp="2025-11-23 14:42:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 14:44:09.625935891 +0000 UTC m=+144.792932376" watchObservedRunningTime="2025-11-23 14:44:09.633996197 +0000 UTC m=+144.800992682"
Nov 23 14:44:09 crc kubenswrapper[5050]: I1123 14:44:09.638384 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 23 14:44:09 crc kubenswrapper[5050]: E1123 14:44:09.639156 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-23 14:44:10.1391276 +0000 UTC m=+145.306124115 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 23 14:44:09 crc kubenswrapper[5050]: I1123 14:44:09.658366 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-2kj76"]
Nov 23 14:44:09 crc kubenswrapper[5050]: I1123 14:44:09.658809 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-n5v2l"]
Nov 23 14:44:09 crc kubenswrapper[5050]: I1123 14:44:09.740573 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-9rdbb" event={"ID":"eb9622cf-2c2f-4ed1-8fed-da0fc67be49a","Type":"ContainerStarted","Data":"b8b6d301331b204179f1b62145211355befdc7220cfb6664b23d535535d70a45"}
Nov 23 14:44:09 crc kubenswrapper[5050]: I1123 14:44:09.740626 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-9rdbb" event={"ID":"eb9622cf-2c2f-4ed1-8fed-da0fc67be49a","Type":"ContainerStarted","Data":"a96be6753aeb65923b9b170ebe9e63a7cecdc6c7b8738de7f0e3b54ccdf600c3"}
Nov 23 14:44:09 crc kubenswrapper[5050]: I1123 14:44:09.741867 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rw2cv\" (UID: \"9c29734c-66bd-4ac3-b26a-b5349d786018\") " pod="openshift-image-registry/image-registry-697d97f7c8-rw2cv"
Nov 23 14:44:09 crc kubenswrapper[5050]: E1123 14:44:09.742339 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-23 14:44:10.242321648 +0000 UTC m=+145.409318133 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rw2cv" (UID: "9c29734c-66bd-4ac3-b26a-b5349d786018") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 23 14:44:09 crc kubenswrapper[5050]: I1123 14:44:09.745583 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-fcc5l" event={"ID":"d4ac164e-eff3-47b2-8cfe-05acd0b7c1e8","Type":"ContainerStarted","Data":"3e310756998c0b13d2d1dd368efda8a4fbc1e413d47acb9a13d683b94fc34e9a"}
Nov 23 14:44:09 crc kubenswrapper[5050]: I1123 14:44:09.766304 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-cvknj" event={"ID":"5c3f6b64-15bf-4147-be4e-414a2569cb58","Type":"ContainerStarted","Data":"7330fc53de879bb0854e2a368af2a31c33bd1bbda86c7c2442129bc4bbdd39ee"}
Nov 23 14:44:09 crc kubenswrapper[5050]: I1123 14:44:09.770041 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-p4p66" event={"ID":"6c4283fe-0fc3-4c3a-bc02-b552e0efa62f","Type":"ContainerStarted","Data":"159b86ffda5004bb7aeb61a6bca665a3a4361e20f36f484ba363cb4d48c37164"}
Nov 23 14:44:09 crc kubenswrapper[5050]: I1123 14:44:09.784161 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-h2nqh" event={"ID":"fe0bb0ba-8376-4437-9cec-fe95bbd77a9a","Type":"ContainerStarted","Data":"4526cdd0933f85832372a319f6d7eba2414ab8c2a6a1b660d66cb3f5415effff"}
Nov 23 14:44:09 crc kubenswrapper[5050]: I1123 14:44:09.785004 5050 patch_prober.go:28] interesting pod/console-operator-58897d9998-h2nqh container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.17:8443/readyz\": dial tcp 10.217.0.17:8443: connect: connection refused" start-of-body=
Nov 23 14:44:09 crc kubenswrapper[5050]: I1123 14:44:09.785080 5050 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-h2nqh" podUID="fe0bb0ba-8376-4437-9cec-fe95bbd77a9a" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.17:8443/readyz\": dial tcp 10.217.0.17:8443: connect: connection refused"
Nov 23 14:44:09 crc kubenswrapper[5050]: I1123 14:44:09.791196 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-rzj2p" event={"ID":"6f5dcf1c-e463-4bd7-ad34-f8e2dbcd5aac","Type":"ContainerStarted","Data":"7095a37aa84f88bdf5d3e7d7e972c842b444e1a93bf31805646da835d242867f"}
Nov 23 14:44:09 crc kubenswrapper[5050]: I1123 14:44:09.797171 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-fkkng"]
Nov 23 14:44:09 crc kubenswrapper[5050]: I1123 14:44:09.807405 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-96dfp" event={"ID":"10e4194f-eb7d-4228-8cf8-4925e4a0ec4f","Type":"ContainerStarted","Data":"39e00ebe82c2a7fd5548597cfb4b674d7f0fffa7c4dd8fbad15d7b2a1d956521"}
Nov 23 14:44:09 crc kubenswrapper[5050]: I1123 14:44:09.853586 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 23 14:44:09 crc kubenswrapper[5050]: E1123 14:44:09.857402 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-23 14:44:10.357363938 +0000 UTC m=+145.524360423 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 23 14:44:09 crc kubenswrapper[5050]: I1123 14:44:09.857652 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-tcs6j" event={"ID":"400aa1f3-7e05-407e-90dc-ad2e99f54e61","Type":"ContainerStarted","Data":"6bdc7dcf44e5d4394ec5aca7ed1b2f188b8a5673ac02d023996cc584d0a8a924"}
Nov 23 14:44:09 crc kubenswrapper[5050]: I1123 14:44:09.867342 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-b9jzj" event={"ID":"650d8b46-fa24-4b73-8f95-a0fbfe331fa6","Type":"ContainerStarted","Data":"b239ecf0ee6b03c29ce08ce1a2494318f08e38a22392ec0cc3486552d4249b39"}
Nov 23 14:44:09 crc kubenswrapper[5050]: I1123 14:44:09.867772 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-b9jzj"
Nov 23 14:44:09 crc kubenswrapper[5050]: W1123 14:44:09.869181 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod13b0f3d4_03aa_421b_9cb6_7fae67f80626.slice/crio-1fd9451460d3fedec80f25ab551d80268c5b8941044a7cf1bb3f7486ed3db3e0 WatchSource:0}: Error finding container 1fd9451460d3fedec80f25ab551d80268c5b8941044a7cf1bb3f7486ed3db3e0: Status 404 returned error can't find the container with id 1fd9451460d3fedec80f25ab551d80268c5b8941044a7cf1bb3f7486ed3db3e0
Nov 23 14:44:09 crc kubenswrapper[5050]: I1123 14:44:09.874187 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-wtbfr" event={"ID":"668e6166-ca3b-46c8-bdb4-b3b9e46f2636","Type":"ContainerStarted","Data":"9132725e9b0aa876b3b2ee2f4a18928ff19f32a32bfd78484b6da979e0a58133"}
Nov 23 14:44:09 crc kubenswrapper[5050]: I1123 14:44:09.883990 5050 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-b9jzj container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.14:8443/healthz\": dial tcp 10.217.0.14:8443: connect: connection refused" start-of-body=
Nov 23 14:44:09 crc kubenswrapper[5050]: I1123 14:44:09.884044 5050 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-b9jzj" podUID="650d8b46-fa24-4b73-8f95-a0fbfe331fa6" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.14:8443/healthz\": dial tcp 10.217.0.14:8443: connect: connection refused"
Nov 23 14:44:09 crc kubenswrapper[5050]: I1123 14:44:09.884766 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-5wl59"]
Nov 23 14:44:09 crc kubenswrapper[5050]: I1123 14:44:09.926348 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca-operator/service-ca-operator-777779d784-9rdbb" podStartSLOduration=123.926327138 podStartE2EDuration="2m3.926327138s" podCreationTimestamp="2025-11-23 14:42:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 14:44:09.922163471 +0000 UTC m=+145.089159956" watchObservedRunningTime="2025-11-23 14:44:09.926327138 +0000 UTC m=+145.093323623"
Nov 23 14:44:09 crc kubenswrapper[5050]: I1123 14:44:09.945318 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-djc5v" event={"ID":"ae2bdaf0-ab3a-4822-b376-945d76181e8f","Type":"ContainerStarted","Data":"13147524af88a24d2447d922687193bd1a1aca4ace7df7e3f152647d0da6ff75"}
Nov 23 14:44:09 crc kubenswrapper[5050]: I1123 14:44:09.957163 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rw2cv\" (UID: \"9c29734c-66bd-4ac3-b26a-b5349d786018\") " pod="openshift-image-registry/image-registry-697d97f7c8-rw2cv"
Nov 23 14:44:09 crc kubenswrapper[5050]: E1123 14:44:09.963113 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-23 14:44:10.463086697 +0000 UTC m=+145.630083182 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rw2cv" (UID: "9c29734c-66bd-4ac3-b26a-b5349d786018") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 23 14:44:09 crc kubenswrapper[5050]: I1123 14:44:09.977541 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-dzz98" event={"ID":"6b5f71c7-bb75-435e-ac44-3328aa8fe73d","Type":"ContainerStarted","Data":"ed2ed1e60cd19590ada90c1a1e9e79ef50a1f52b2214fa70b700e2ae2074e55b"}
Nov 23 14:44:09 crc kubenswrapper[5050]: I1123 14:44:09.983717 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-rgjfm" event={"ID":"10bf2f37-5f87-479d-a9b1-9e1bdf5b150a","Type":"ContainerStarted","Data":"725ee7b28af9b8008407985b435d34aa5742548b6c5351ceb6848231c88574c0"}
Nov 23 14:44:10 crc kubenswrapper[5050]: I1123 14:44:10.014836 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-96dfp" podStartSLOduration=124.014804064 podStartE2EDuration="2m4.014804064s" podCreationTimestamp="2025-11-23 14:42:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 14:44:10.008535759 +0000 UTC m=+145.175532244" watchObservedRunningTime="2025-11-23 14:44:10.014804064 +0000 UTC m=+145.181800549"
Nov 23 14:44:10 crc kubenswrapper[5050]: I1123 14:44:10.016219 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-b9jzj" podStartSLOduration=124.016211553 podStartE2EDuration="2m4.016211553s" podCreationTimestamp="2025-11-23 14:42:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 14:44:09.972249573 +0000 UTC m=+145.139246078" watchObservedRunningTime="2025-11-23 14:44:10.016211553 +0000 UTC m=+145.183208038"
Nov 23 14:44:10 crc kubenswrapper[5050]: I1123 14:44:10.022266 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-wr6q4" event={"ID":"fef4a22f-705c-4dda-8c74-ace62ff4ce68","Type":"ContainerStarted","Data":"8f1bfae0b30c257aa8fcbbb6436d37e468068c0ba499a4f361c59a19dbc1feaf"}
Nov 23 14:44:10 crc kubenswrapper[5050]: I1123 14:44:10.057604 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-b646l" event={"ID":"629f02dd-753b-46d2-b808-812468f6c9c5","Type":"ContainerStarted","Data":"d6e5465c9bce3be906df974e1359ec5addf250e00288f9197a7a3b2fcde806bd"}
Nov 23 14:44:10 crc kubenswrapper[5050]: I1123 14:44:10.059177 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 23 14:44:10 crc kubenswrapper[5050]: E1123 14:44:10.059596 5050 nestedpendingoperations.go:348] Operation for
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-23 14:44:10.559557476 +0000 UTC m=+145.726553971 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 23 14:44:10 crc kubenswrapper[5050]: I1123 14:44:10.061082 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rw2cv\" (UID: \"9c29734c-66bd-4ac3-b26a-b5349d786018\") " pod="openshift-image-registry/image-registry-697d97f7c8-rw2cv" Nov 23 14:44:10 crc kubenswrapper[5050]: E1123 14:44:10.061945 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-23 14:44:10.561917852 +0000 UTC m=+145.728914507 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rw2cv" (UID: "9c29734c-66bd-4ac3-b26a-b5349d786018") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 23 14:44:10 crc kubenswrapper[5050]: I1123 14:44:10.092893 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-djc5v" podStartSLOduration=124.092861198 podStartE2EDuration="2m4.092861198s" podCreationTimestamp="2025-11-23 14:42:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 14:44:10.064815324 +0000 UTC m=+145.231811809" watchObservedRunningTime="2025-11-23 14:44:10.092861198 +0000 UTC m=+145.259857683" Nov 23 14:44:10 crc kubenswrapper[5050]: I1123 14:44:10.110160 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-2g8pj" event={"ID":"05b57f72-3ec2-41f4-ba15-4bb73c15a1af","Type":"ContainerStarted","Data":"7cf295c9258ae8d2d21d33e3f9a1a8b7dbf0af126da6ad81d25aa2d4696c92f3"} Nov 23 14:44:10 crc kubenswrapper[5050]: I1123 14:44:10.123212 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-n9kmn" event={"ID":"f568ca0b-0692-482b-a80b-7c6c4f8f80ba","Type":"ContainerStarted","Data":"4aad0e84f2fac43990ae249ff41ad8bd1e41c6bf2d8de3ef3e492681382f2f0c"} Nov 23 14:44:10 crc kubenswrapper[5050]: I1123 14:44:10.125486 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-qjw5q" 
event={"ID":"8e3a5a3b-f908-4660-adf1-fe3bf70175f9","Type":"ContainerStarted","Data":"49fedfd2fd54e75539b46c1fa46d26cc745139f4591c4bf26cac77a87b86e301"} Nov 23 14:44:10 crc kubenswrapper[5050]: I1123 14:44:10.132718 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-dzz98" podStartSLOduration=124.132672463 podStartE2EDuration="2m4.132672463s" podCreationTimestamp="2025-11-23 14:42:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 14:44:10.103159347 +0000 UTC m=+145.270155822" watchObservedRunningTime="2025-11-23 14:44:10.132672463 +0000 UTC m=+145.299668948" Nov 23 14:44:10 crc kubenswrapper[5050]: I1123 14:44:10.139064 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-bglcd" event={"ID":"0d7bc9ed-da92-4305-bda8-78966e7451db","Type":"ContainerStarted","Data":"1b50193a3e83f464dcb076dd1f85848c7e7c2e96f52d6cfcbbb3eb26d4fce583"} Nov 23 14:44:10 crc kubenswrapper[5050]: I1123 14:44:10.145139 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-xwq4h" event={"ID":"6e9d92c3-4f51-481c-adcb-e48f3e671025","Type":"ContainerStarted","Data":"303a9b70c11b25932b41813b1b68169ed3d9666eca91b392d3abeadd327a2a33"} Nov 23 14:44:10 crc kubenswrapper[5050]: I1123 14:44:10.153911 5050 patch_prober.go:28] interesting pod/downloads-7954f5f757-gvb4q container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.6:8080/\": dial tcp 10.217.0.6:8080: connect: connection refused" start-of-body= Nov 23 14:44:10 crc kubenswrapper[5050]: I1123 14:44:10.153991 5050 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-gvb4q" podUID="16e600fc-0484-4c1c-8800-bb3958374bad" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.6:8080/\": dial tcp 10.217.0.6:8080: connect: connection refused" Nov 23 14:44:10 crc kubenswrapper[5050]: I1123 14:44:10.176887 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 23 14:44:10 crc kubenswrapper[5050]: E1123 14:44:10.177341 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-23 14:44:10.677303002 +0000 UTC m=+145.844299487 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 23 14:44:10 crc kubenswrapper[5050]: I1123 14:44:10.178018 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-jz58r"] Nov 23 14:44:10 crc kubenswrapper[5050]: I1123 14:44:10.179140 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rw2cv\" (UID: \"9c29734c-66bd-4ac3-b26a-b5349d786018\") " pod="openshift-image-registry/image-registry-697d97f7c8-rw2cv" Nov 23 14:44:10 crc kubenswrapper[5050]: I1123 14:44:10.180524 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-879f6c89f-2bsvx" Nov 23 14:44:10 crc kubenswrapper[5050]: E1123 14:44:10.181508 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-23 14:44:10.681495329 +0000 UTC m=+145.848491804 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rw2cv" (UID: "9c29734c-66bd-4ac3-b26a-b5349d786018") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 23 14:44:10 crc kubenswrapper[5050]: I1123 14:44:10.200030 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/machine-api-operator-5694c8668f-zfnk9" podStartSLOduration=124.199995257 podStartE2EDuration="2m4.199995257s" podCreationTimestamp="2025-11-23 14:42:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 14:44:10.156283113 +0000 UTC m=+145.323279598" watchObservedRunningTime="2025-11-23 14:44:10.199995257 +0000 UTC m=+145.366991752" Nov 23 14:44:10 crc kubenswrapper[5050]: I1123 14:44:10.225929 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-f9d7485db-b646l" podStartSLOduration=124.225884251 podStartE2EDuration="2m4.225884251s" podCreationTimestamp="2025-11-23 14:42:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 14:44:10.211235001 +0000 UTC m=+145.378231506" watchObservedRunningTime="2025-11-23 14:44:10.225884251 +0000 UTC m=+145.392880746" Nov 23 14:44:10 crc kubenswrapper[5050]: W1123 14:44:10.226787 5050 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2fc00c67_c3ab_4b63_b068_3a41c027f6f3.slice/crio-46f70a3e129409e7413eace62db6312ca60ace8432a7f5db32e7e9c86e16c5ff WatchSource:0}: Error finding container 46f70a3e129409e7413eace62db6312ca60ace8432a7f5db32e7e9c86e16c5ff: Status 404 returned error can't find the container with id 46f70a3e129409e7413eace62db6312ca60ace8432a7f5db32e7e9c86e16c5ff Nov 23 14:44:10 crc kubenswrapper[5050]: I1123 14:44:10.285283 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress/router-default-5444994796-2g8pj" podStartSLOduration=124.285264553 podStartE2EDuration="2m4.285264553s" podCreationTimestamp="2025-11-23 14:42:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 14:44:10.284645126 +0000 UTC m=+145.451641611" watchObservedRunningTime="2025-11-23 14:44:10.285264553 +0000 UTC m=+145.452261038" Nov 23 14:44:10 crc kubenswrapper[5050]: I1123 14:44:10.291761 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 23 14:44:10 crc kubenswrapper[5050]: E1123 14:44:10.291831 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-23 14:44:10.791812556 +0000 UTC m=+145.958809041 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 23 14:44:10 crc kubenswrapper[5050]: I1123 14:44:10.300834 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rw2cv\" (UID: \"9c29734c-66bd-4ac3-b26a-b5349d786018\") " pod="openshift-image-registry/image-registry-697d97f7c8-rw2cv" Nov 23 14:44:10 crc kubenswrapper[5050]: E1123 14:44:10.309663 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-23 14:44:10.809601214 +0000 UTC m=+145.976597699 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rw2cv" (UID: "9c29734c-66bd-4ac3-b26a-b5349d786018") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 23 14:44:10 crc kubenswrapper[5050]: I1123 14:44:10.318781 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns-operator/dns-operator-744455d44c-bglcd" podStartSLOduration=124.31875158 podStartE2EDuration="2m4.31875158s" podCreationTimestamp="2025-11-23 14:42:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 14:44:10.314758409 +0000 UTC m=+145.481754904" watchObservedRunningTime="2025-11-23 14:44:10.31875158 +0000 UTC m=+145.485748065" Nov 23 14:44:10 crc kubenswrapper[5050]: I1123 14:44:10.412412 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 23 14:44:10 crc kubenswrapper[5050]: E1123 14:44:10.412930 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-23 14:44:10.912893775 +0000 UTC m=+146.079890260 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 23 14:44:10 crc kubenswrapper[5050]: I1123 14:44:10.413123 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rw2cv\" (UID: \"9c29734c-66bd-4ac3-b26a-b5349d786018\") " pod="openshift-image-registry/image-registry-697d97f7c8-rw2cv" Nov 23 14:44:10 crc kubenswrapper[5050]: E1123 14:44:10.416513 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-23 14:44:10.916487026 +0000 UTC m=+146.083483521 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rw2cv" (UID: "9c29734c-66bd-4ac3-b26a-b5349d786018") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 23 14:44:10 crc kubenswrapper[5050]: I1123 14:44:10.487979 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-ingress/router-default-5444994796-2g8pj" Nov 23 14:44:10 crc kubenswrapper[5050]: I1123 14:44:10.498312 5050 patch_prober.go:28] interesting pod/router-default-5444994796-2g8pj container/router namespace/openshift-ingress: Startup probe status=failure output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" start-of-body= Nov 23 14:44:10 crc kubenswrapper[5050]: I1123 14:44:10.498435 5050 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-2g8pj" podUID="05b57f72-3ec2-41f4-ba15-4bb73c15a1af" containerName="router" probeResult="failure" output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" Nov 23 14:44:10 crc kubenswrapper[5050]: I1123 14:44:10.514117 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 23 14:44:10 crc kubenswrapper[5050]: E1123 14:44:10.514615 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-23 14:44:11.01459324 +0000 UTC m=+146.181589725 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 23 14:44:10 crc kubenswrapper[5050]: I1123 14:44:10.619547 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rw2cv\" (UID: \"9c29734c-66bd-4ac3-b26a-b5349d786018\") " pod="openshift-image-registry/image-registry-697d97f7c8-rw2cv" Nov 23 14:44:10 crc kubenswrapper[5050]: E1123 14:44:10.620725 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-23 14:44:11.120689749 +0000 UTC m=+146.287686264 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rw2cv" (UID: "9c29734c-66bd-4ac3-b26a-b5349d786018") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 23 14:44:10 crc kubenswrapper[5050]: I1123 14:44:10.720818 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 23 14:44:10 crc kubenswrapper[5050]: E1123 14:44:10.721312 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-23 14:44:11.221275224 +0000 UTC m=+146.388271709 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 23 14:44:10 crc kubenswrapper[5050]: I1123 14:44:10.771580 5050 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-cvknj container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.10:6443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 23 14:44:10 crc kubenswrapper[5050]: I1123 14:44:10.771642 5050 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-cvknj" podUID="5c3f6b64-15bf-4147-be4e-414a2569cb58" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.10:6443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Nov 23 14:44:10 crc kubenswrapper[5050]: I1123 14:44:10.823070 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rw2cv\" (UID: \"9c29734c-66bd-4ac3-b26a-b5349d786018\") " pod="openshift-image-registry/image-registry-697d97f7c8-rw2cv" Nov 23 14:44:10 crc kubenswrapper[5050]: E1123 14:44:10.823882 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-23 14:44:11.323855545 +0000 UTC m=+146.490852030 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rw2cv" (UID: "9c29734c-66bd-4ac3-b26a-b5349d786018") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 23 14:44:10 crc kubenswrapper[5050]: I1123 14:44:10.928949 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 23 14:44:10 crc kubenswrapper[5050]: E1123 14:44:10.929167 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-23 14:44:11.429133192 +0000 UTC m=+146.596129667 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 23 14:44:10 crc kubenswrapper[5050]: I1123 14:44:10.929688 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rw2cv\" (UID: \"9c29734c-66bd-4ac3-b26a-b5349d786018\") " pod="openshift-image-registry/image-registry-697d97f7c8-rw2cv" Nov 23 14:44:10 crc kubenswrapper[5050]: E1123 14:44:10.930085 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-23 14:44:11.430077368 +0000 UTC m=+146.597073853 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rw2cv" (UID: "9c29734c-66bd-4ac3-b26a-b5349d786018") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 23 14:44:11 crc kubenswrapper[5050]: I1123 14:44:11.031362 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 23 14:44:11 crc kubenswrapper[5050]: E1123 14:44:11.031799 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-23 14:44:11.531781254 +0000 UTC m=+146.698777739 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 23 14:44:11 crc kubenswrapper[5050]: I1123 14:44:11.133378 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rw2cv\" (UID: \"9c29734c-66bd-4ac3-b26a-b5349d786018\") " pod="openshift-image-registry/image-registry-697d97f7c8-rw2cv" Nov 23 14:44:11 crc kubenswrapper[5050]: E1123 14:44:11.134064 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-23 14:44:11.634038226 +0000 UTC m=+146.801034711 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rw2cv" (UID: "9c29734c-66bd-4ac3-b26a-b5349d786018") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 23 14:44:11 crc kubenswrapper[5050]: I1123 14:44:11.154423 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-jz58r" event={"ID":"2fc00c67-c3ab-4b63-b068-3a41c027f6f3","Type":"ContainerStarted","Data":"46f70a3e129409e7413eace62db6312ca60ace8432a7f5db32e7e9c86e16c5ff"} Nov 23 14:44:11 crc kubenswrapper[5050]: I1123 14:44:11.160233 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-qjw5q" event={"ID":"8e3a5a3b-f908-4660-adf1-fe3bf70175f9","Type":"ContainerStarted","Data":"baef7c0a60dd127a544e77dd114dccc5421cfed4a1215f9277d61730836e1e9a"} Nov 23 14:44:11 crc kubenswrapper[5050]: I1123 14:44:11.160294 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-qjw5q" event={"ID":"8e3a5a3b-f908-4660-adf1-fe3bf70175f9","Type":"ContainerStarted","Data":"3f05bd3080abea6834b868553b48ee746596e28d8edc990717db3eee24568913"} Nov 23 14:44:11 crc kubenswrapper[5050]: I1123 14:44:11.160421 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-qjw5q" Nov 23 14:44:11 crc kubenswrapper[5050]: I1123 14:44:11.166588 5050 generic.go:334] "Generic (PLEG): container finished" podID="005c682b-725d-471a-98ca-08f2760e6603" containerID="35037458e3f7857b93e622e1646a20de298d66dbaa3587762c4934ae1fa5a8c3" exitCode=0 Nov 23 14:44:11 crc kubenswrapper[5050]: I1123 14:44:11.166946 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-svd5r" event={"ID":"005c682b-725d-471a-98ca-08f2760e6603","Type":"ContainerDied","Data":"35037458e3f7857b93e622e1646a20de298d66dbaa3587762c4934ae1fa5a8c3"} Nov 23 14:44:11 crc kubenswrapper[5050]: I1123 14:44:11.174886 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-2kj76" event={"ID":"766b2181-d531-40cf-8d30-5ab0190d9da3","Type":"ContainerStarted","Data":"c1b0f67b235960c50ae52c6375c5d34f6ec5a6ae8c3e4284cc251b59e3a56bfc"} Nov 23 14:44:11 crc kubenswrapper[5050]: I1123 14:44:11.174948 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-2kj76" event={"ID":"766b2181-d531-40cf-8d30-5ab0190d9da3","Type":"ContainerStarted","Data":"48a44238ea6a74fa17d648c766bb02e2c256b8b95bdaad6dbacaba78768055bb"} Nov 23 14:44:11 crc kubenswrapper[5050]: I1123 14:44:11.178759 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-fkkng" event={"ID":"13b0f3d4-03aa-421b-9cb6-7fae67f80626","Type":"ContainerStarted","Data":"f17dc04bac468350bda96cae69927795bf4bfabda9e6378b21d2603e888819fb"} Nov 23 14:44:11 crc kubenswrapper[5050]: I1123 14:44:11.178835 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-fkkng" 
event={"ID":"13b0f3d4-03aa-421b-9cb6-7fae67f80626","Type":"ContainerStarted","Data":"1fd9451460d3fedec80f25ab551d80268c5b8941044a7cf1bb3f7486ed3db3e0"} Nov 23 14:44:11 crc kubenswrapper[5050]: I1123 14:44:11.193984 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-wtbfr" event={"ID":"668e6166-ca3b-46c8-bdb4-b3b9e46f2636","Type":"ContainerStarted","Data":"2563bf57a9b8feda1c691d95478fe8958a91456bb3998cef83523447285e75a5"} Nov 23 14:44:11 crc kubenswrapper[5050]: I1123 14:44:11.203709 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-rgjfm" event={"ID":"10bf2f37-5f87-479d-a9b1-9e1bdf5b150a","Type":"ContainerStarted","Data":"825d065c3e158ff41a5a79c6bde7390d69a1e257431d1b8857b1984ecbec48d9"} Nov 23 14:44:11 crc kubenswrapper[5050]: I1123 14:44:11.204234 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-rgjfm" Nov 23 14:44:11 crc kubenswrapper[5050]: I1123 14:44:11.212977 5050 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-rgjfm container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.29:8443/healthz\": dial tcp 10.217.0.29:8443: connect: connection refused" start-of-body= Nov 23 14:44:11 crc kubenswrapper[5050]: I1123 14:44:11.213054 5050 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-rgjfm" podUID="10bf2f37-5f87-479d-a9b1-9e1bdf5b150a" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.29:8443/healthz\": dial tcp 10.217.0.29:8443: connect: connection refused" Nov 23 14:44:11 crc kubenswrapper[5050]: I1123 14:44:11.214256 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-qjw5q" podStartSLOduration=125.21422972 podStartE2EDuration="2m5.21422972s" podCreationTimestamp="2025-11-23 14:42:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 14:44:11.213830409 +0000 UTC m=+146.380826894" watchObservedRunningTime="2025-11-23 14:44:11.21422972 +0000 UTC m=+146.381226205" Nov 23 14:44:11 crc kubenswrapper[5050]: I1123 14:44:11.225313 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-p4p66" event={"ID":"6c4283fe-0fc3-4c3a-bc02-b552e0efa62f","Type":"ContainerStarted","Data":"3f48e36f66455aa405b8b879bc16f32bd1a3379a974e3aa8d8de939072285c16"} Nov 23 14:44:11 crc kubenswrapper[5050]: I1123 14:44:11.226393 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-p4p66" Nov 23 14:44:11 crc kubenswrapper[5050]: I1123 14:44:11.230054 5050 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-p4p66 container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.27:8443/healthz\": dial tcp 10.217.0.27:8443: connect: connection refused" start-of-body= Nov 23 14:44:11 crc kubenswrapper[5050]: I1123 14:44:11.230124 5050 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-p4p66" 
podUID="6c4283fe-0fc3-4c3a-bc02-b552e0efa62f" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.27:8443/healthz\": dial tcp 10.217.0.27:8443: connect: connection refused" Nov 23 14:44:11 crc kubenswrapper[5050]: I1123 14:44:11.237554 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 23 14:44:11 crc kubenswrapper[5050]: E1123 14:44:11.238041 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-23 14:44:11.738023256 +0000 UTC m=+146.905019741 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 23 14:44:11 crc kubenswrapper[5050]: I1123 14:44:11.241272 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-ph6h8" event={"ID":"0a4f6fb7-314a-4896-bcba-c11f622a5e3f","Type":"ContainerStarted","Data":"4022007dece4687d41740cae9904c103b1dd8a43a1111ee646eab191ad92cd5c"} Nov 23 14:44:11 crc kubenswrapper[5050]: I1123 14:44:11.241364 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-config-operator/openshift-config-operator-7777fb866f-ph6h8" Nov 23 14:44:11 crc kubenswrapper[5050]: I1123 14:44:11.282816 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-fcc5l" event={"ID":"d4ac164e-eff3-47b2-8cfe-05acd0b7c1e8","Type":"ContainerStarted","Data":"fee2518361ced265923abf9302ab893a41dffd63a83eed77c6ad40d9b3b12ccb"} Nov 23 14:44:11 crc kubenswrapper[5050]: I1123 14:44:11.283339 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-fcc5l" event={"ID":"d4ac164e-eff3-47b2-8cfe-05acd0b7c1e8","Type":"ContainerStarted","Data":"74659db27b8c45e86ffadf98b5f08b2332b0c33aa660350b76c591f2326b90cc"} Nov 23 14:44:11 crc kubenswrapper[5050]: I1123 14:44:11.314553 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-ngz59" event={"ID":"a9150bbc-0207-48f1-a027-d33d285b085b","Type":"ContainerStarted","Data":"4002486f53f112f1bb337ce31bc3ada6c69c2bf5a980fe055540f715f6f5e6cd"} Nov 23 14:44:11 crc kubenswrapper[5050]: I1123 14:44:11.332778 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-ngl6b" event={"ID":"b1e1a221-b849-45e1-ad2c-d0b3b3eeccb5","Type":"ContainerStarted","Data":"699be24d0f66b5a770f9ff039b27b371948e1b993c4352055553d1a61e5f25e3"} Nov 23 14:44:11 crc kubenswrapper[5050]: I1123 14:44:11.344039 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rw2cv\" (UID: \"9c29734c-66bd-4ac3-b26a-b5349d786018\") " pod="openshift-image-registry/image-registry-697d97f7c8-rw2cv" Nov 23 14:44:11 crc kubenswrapper[5050]: I1123 14:44:11.346712 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-tq999" event={"ID":"7defdad8-6ff2-452d-a144-e701d7b3baf6","Type":"ContainerStarted","Data":"7da0c114d2504d6215dec2af73371a9a334766eed8498f05b5d29eaa783cf39c"} Nov 23 14:44:11 crc kubenswrapper[5050]: E1123 14:44:11.361493 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-23 14:44:11.849426364 +0000 UTC m=+147.016422849 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rw2cv" (UID: "9c29734c-66bd-4ac3-b26a-b5349d786018") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 23 14:44:11 crc kubenswrapper[5050]: I1123 14:44:11.367484 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-rgjfm" podStartSLOduration=125.367437398 podStartE2EDuration="2m5.367437398s" podCreationTimestamp="2025-11-23 14:42:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 14:44:11.271924625 +0000 UTC m=+146.438921100" watchObservedRunningTime="2025-11-23 14:44:11.367437398 +0000 UTC m=+146.534433883" Nov 23 14:44:11 crc kubenswrapper[5050]: I1123 14:44:11.379999 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-zfnk9" event={"ID":"a934d47f-f19d-4d29-aed8-20141e5bcf2b","Type":"ContainerStarted","Data":"2b449952748f9496ab56e8c0db58fabd77fd0a7b120efea48ca65a98df80eeb8"} Nov 23 14:44:11 crc kubenswrapper[5050]: I1123 14:44:11.399529 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-tt6kr" event={"ID":"74ee62a2-d057-4ddf-8e5f-619cd90645d0","Type":"ContainerStarted","Data":"8ae2c43f0e889a1f1c822e901707bdcc67488ad23be26213f797b410bec8cd0c"} Nov 23 14:44:11 crc kubenswrapper[5050]: I1123 14:44:11.417077 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca/service-ca-9c57cc56f-2kj76" podStartSLOduration=125.417054077 podStartE2EDuration="2m5.417054077s" podCreationTimestamp="2025-11-23 14:42:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 14:44:11.416152541 +0000 UTC m=+146.583149016" watchObservedRunningTime="2025-11-23 14:44:11.417054077 +0000 UTC m=+146.584050562" Nov 23 14:44:11 crc kubenswrapper[5050]: I1123 14:44:11.430574 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-n5v2l" 
event={"ID":"e794c1d4-b3b8-48cc-97ad-75861e8021e6","Type":"ContainerStarted","Data":"4aa9e1cf8ca138ad5a9e495b3dfbee0efe64fad7d264121e5665bf1d25a1077e"} Nov 23 14:44:11 crc kubenswrapper[5050]: I1123 14:44:11.430637 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-n5v2l" event={"ID":"e794c1d4-b3b8-48cc-97ad-75861e8021e6","Type":"ContainerStarted","Data":"a599614cc0cb7c3457949d7cd08c4113fb8f2f3d3de2f7d034ecafc7c164c499"} Nov 23 14:44:11 crc kubenswrapper[5050]: I1123 14:44:11.430653 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-n5v2l" event={"ID":"e794c1d4-b3b8-48cc-97ad-75861e8021e6","Type":"ContainerStarted","Data":"e8ae6ff43b810ca9dce70246806430c07c4668aab41befaf8a3328253ed7ba99"} Nov 23 14:44:11 crc kubenswrapper[5050]: I1123 14:44:11.446032 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 23 14:44:11 crc kubenswrapper[5050]: E1123 14:44:11.446279 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-23 14:44:11.946220263 +0000 UTC m=+147.113216748 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 23 14:44:11 crc kubenswrapper[5050]: I1123 14:44:11.446487 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rw2cv\" (UID: \"9c29734c-66bd-4ac3-b26a-b5349d786018\") " pod="openshift-image-registry/image-registry-697d97f7c8-rw2cv" Nov 23 14:44:11 crc kubenswrapper[5050]: E1123 14:44:11.449084 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-23 14:44:11.949074763 +0000 UTC m=+147.116071248 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rw2cv" (UID: "9c29734c-66bd-4ac3-b26a-b5349d786018") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 23 14:44:11 crc kubenswrapper[5050]: I1123 14:44:11.456494 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-canary/ingress-canary-fkkng" podStartSLOduration=7.45647992 podStartE2EDuration="7.45647992s" podCreationTimestamp="2025-11-23 14:44:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 14:44:11.454177266 +0000 UTC m=+146.621173751" watchObservedRunningTime="2025-11-23 14:44:11.45647992 +0000 UTC m=+146.623476405" Nov 23 14:44:11 crc kubenswrapper[5050]: I1123 14:44:11.487710 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-wtbfr" podStartSLOduration=125.487691093 podStartE2EDuration="2m5.487691093s" podCreationTimestamp="2025-11-23 14:42:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 14:44:11.486394637 +0000 UTC m=+146.653391122" watchObservedRunningTime="2025-11-23 14:44:11.487691093 +0000 UTC m=+146.654687578" Nov 23 14:44:12 crc kubenswrapper[5050]: I1123 14:44:11.504027 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-jzg9r" event={"ID":"4c1df8cb-7a6f-46da-9d4e-a1b88f85997e","Type":"ContainerStarted","Data":"29cd1db129f7330efe8d8c55e4f721ba2a550bdaa835407dfa8dd12a4c0911c2"} Nov 23 14:44:12 crc kubenswrapper[5050]: I1123 14:44:11.504085 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-jzg9r" event={"ID":"4c1df8cb-7a6f-46da-9d4e-a1b88f85997e","Type":"ContainerStarted","Data":"22c67d8d80274ae18421ff75c0fb8818e47d45f99cc6699ce5d5b57013daeffe"} Nov 23 14:44:12 crc kubenswrapper[5050]: I1123 14:44:11.508778 5050 patch_prober.go:28] interesting pod/router-default-5444994796-2g8pj container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 23 14:44:12 crc kubenswrapper[5050]: [-]has-synced failed: reason withheld Nov 23 14:44:12 crc kubenswrapper[5050]: [+]process-running ok Nov 23 14:44:12 crc kubenswrapper[5050]: healthz check failed Nov 23 14:44:12 crc kubenswrapper[5050]: I1123 14:44:11.508835 5050 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-2g8pj" podUID="05b57f72-3ec2-41f4-ba15-4bb73c15a1af" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 23 14:44:12 crc kubenswrapper[5050]: I1123 14:44:11.509614 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-wr6q4" event={"ID":"fef4a22f-705c-4dda-8c74-ace62ff4ce68","Type":"ContainerStarted","Data":"03d5c8091255edd0e6893789b73cb6b86e8ba0b5d04219d4eca6e11ed0df292f"} Nov 23 14:44:12 crc 
kubenswrapper[5050]: I1123 14:44:11.510889 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-wr6q4" Nov 23 14:44:12 crc kubenswrapper[5050]: I1123 14:44:11.513567 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29398470-mm7rl" event={"ID":"ab27cc8e-1d04-43dd-bf34-0cfdb48daba7","Type":"ContainerStarted","Data":"7fbe45687412126d64567ea868effc5a3dab98a0df53a4f3b2c087fa38c6edb6"} Nov 23 14:44:12 crc kubenswrapper[5050]: I1123 14:44:11.521546 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-tcs6j" event={"ID":"400aa1f3-7e05-407e-90dc-ad2e99f54e61","Type":"ContainerStarted","Data":"95aa2539ce24b03f7daacb2b7ee8a33bf894ddd5ee1d9fd37ada0375a35d3595"} Nov 23 14:44:12 crc kubenswrapper[5050]: I1123 14:44:11.535149 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-xwq4h" event={"ID":"6e9d92c3-4f51-481c-adcb-e48f3e671025","Type":"ContainerStarted","Data":"03cb6bb427bfe26f16265395adaf42dfc436f4f4dbb0b9a38ff70035d45c2ce0"} Nov 23 14:44:12 crc kubenswrapper[5050]: I1123 14:44:11.546817 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-p4p66" podStartSLOduration=125.546797038 podStartE2EDuration="2m5.546797038s" podCreationTimestamp="2025-11-23 14:42:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 14:44:11.543645129 +0000 UTC m=+146.710641614" watchObservedRunningTime="2025-11-23 14:44:11.546797038 +0000 UTC m=+146.713793523" Nov 23 14:44:12 crc kubenswrapper[5050]: I1123 14:44:11.548239 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 23 14:44:12 crc kubenswrapper[5050]: E1123 14:44:11.548844 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-23 14:44:12.048826054 +0000 UTC m=+147.215822539 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 23 14:44:12 crc kubenswrapper[5050]: I1123 14:44:11.568794 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-n9kmn" event={"ID":"f568ca0b-0692-482b-a80b-7c6c4f8f80ba","Type":"ContainerStarted","Data":"089959d4f56aec50cdc6a338ae49541ad2354cd2c824eb34529b6a2ed0587294"} Nov 23 14:44:12 crc kubenswrapper[5050]: I1123 14:44:11.573558 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-5wl59" event={"ID":"31ae7f30-0f40-4c17-ad54-33aeefcbeeb7","Type":"ContainerStarted","Data":"5eccfd3f63c47a4f77ba5f1772a52d8fe80f04f528d6aaefa33b477c84c869da"} Nov 23 14:44:12 crc kubenswrapper[5050]: I1123 14:44:11.573593 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-5wl59" event={"ID":"31ae7f30-0f40-4c17-ad54-33aeefcbeeb7","Type":"ContainerStarted","Data":"86bfdcbc276ca39ca8ef9368ce8ded8271764aec9d7a2bac03e5eac2218b87ee"} Nov 23 14:44:12 crc kubenswrapper[5050]: I1123 14:44:11.574608 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-5wl59" Nov 23 14:44:12 crc kubenswrapper[5050]: I1123 14:44:11.575138 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-ngz59" podStartSLOduration=125.57511722 podStartE2EDuration="2m5.57511722s" podCreationTimestamp="2025-11-23 14:42:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 14:44:11.572831186 +0000 UTC m=+146.739827671" watchObservedRunningTime="2025-11-23 14:44:11.57511722 +0000 UTC m=+146.742113705" Nov 23 14:44:12 crc kubenswrapper[5050]: I1123 14:44:11.575998 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-wdqwt" event={"ID":"1af9e06d-2732-4be4-bff7-eee682c1d69d","Type":"ContainerStarted","Data":"510292e643f9a21b20912311ebba363d517634fa1b4b9bf7a0270b7a32c57a6f"} Nov 23 14:44:12 crc kubenswrapper[5050]: I1123 14:44:11.576020 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-wdqwt" event={"ID":"1af9e06d-2732-4be4-bff7-eee682c1d69d","Type":"ContainerStarted","Data":"82b2e932d033f1ad22ad0f07f9fe7359200a3ed7a98fce0201a15812f17e8d6e"} Nov 23 14:44:12 crc kubenswrapper[5050]: I1123 14:44:11.576397 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-dns/dns-default-wdqwt" Nov 23 14:44:12 crc kubenswrapper[5050]: I1123 14:44:11.581392 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-rzj2p" event={"ID":"6f5dcf1c-e463-4bd7-ad34-f8e2dbcd5aac","Type":"ContainerStarted","Data":"7b602ec66f7eee891dcf437086cb3881548b836eddb1de87b8743da267fa9463"} Nov 23 14:44:12 crc kubenswrapper[5050]: I1123 14:44:11.581458 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-multus/multus-admission-controller-857f4d67dd-rzj2p" event={"ID":"6f5dcf1c-e463-4bd7-ad34-f8e2dbcd5aac","Type":"ContainerStarted","Data":"6d7a9429c8195c822ebd2d4a5156b8a4b52aad44e5fce32d309727e8bf30ce00"} Nov 23 14:44:12 crc kubenswrapper[5050]: I1123 14:44:11.590752 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-558db77b4-cvknj" Nov 23 14:44:12 crc kubenswrapper[5050]: I1123 14:44:11.643519 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-fcc5l" podStartSLOduration=125.643492684 podStartE2EDuration="2m5.643492684s" podCreationTimestamp="2025-11-23 14:42:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 14:44:11.605015667 +0000 UTC m=+146.772012142" watchObservedRunningTime="2025-11-23 14:44:11.643492684 +0000 UTC m=+146.810489169" Nov 23 14:44:12 crc kubenswrapper[5050]: I1123 14:44:11.644883 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-ngl6b" podStartSLOduration=125.644874132 podStartE2EDuration="2m5.644874132s" podCreationTimestamp="2025-11-23 14:42:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 14:44:11.64480137 +0000 UTC m=+146.811797855" watchObservedRunningTime="2025-11-23 14:44:11.644874132 +0000 UTC m=+146.811870617" Nov 23 14:44:12 crc kubenswrapper[5050]: I1123 14:44:11.652521 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rw2cv\" (UID: \"9c29734c-66bd-4ac3-b26a-b5349d786018\") " pod="openshift-image-registry/image-registry-697d97f7c8-rw2cv" Nov 23 14:44:12 crc kubenswrapper[5050]: E1123 14:44:11.653717 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-23 14:44:12.153694969 +0000 UTC m=+147.320691454 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rw2cv" (UID: "9c29734c-66bd-4ac3-b26a-b5349d786018") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 23 14:44:12 crc kubenswrapper[5050]: I1123 14:44:11.666856 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-n5v2l" podStartSLOduration=125.666834497 podStartE2EDuration="2m5.666834497s" podCreationTimestamp="2025-11-23 14:42:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 14:44:11.665080718 +0000 UTC m=+146.832077203" watchObservedRunningTime="2025-11-23 14:44:11.666834497 +0000 UTC m=+146.833830972" Nov 23 14:44:12 crc kubenswrapper[5050]: I1123 14:44:11.683740 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-config-operator/openshift-config-operator-7777fb866f-ph6h8" podStartSLOduration=125.68372423 podStartE2EDuration="2m5.68372423s" podCreationTimestamp="2025-11-23 14:42:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 14:44:11.67981115 +0000 UTC m=+146.846807635" watchObservedRunningTime="2025-11-23 14:44:11.68372423 +0000 UTC m=+146.850720715" Nov 23 14:44:12 crc kubenswrapper[5050]: I1123 14:44:11.716933 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-tt6kr" podStartSLOduration=125.716907358 podStartE2EDuration="2m5.716907358s" podCreationTimestamp="2025-11-23 14:42:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 14:44:11.706354933 +0000 UTC m=+146.873351418" watchObservedRunningTime="2025-11-23 14:44:11.716907358 +0000 UTC m=+146.883903843" Nov 23 14:44:12 crc kubenswrapper[5050]: I1123 14:44:11.721925 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-server-tq999" podStartSLOduration=7.721905878 podStartE2EDuration="7.721905878s" podCreationTimestamp="2025-11-23 14:44:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 14:44:11.716766714 +0000 UTC m=+146.883763199" watchObservedRunningTime="2025-11-23 14:44:11.721905878 +0000 UTC m=+146.888902363" Nov 23 14:44:12 crc kubenswrapper[5050]: I1123 14:44:11.744142 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-tt6kr" Nov 23 14:44:12 crc kubenswrapper[5050]: I1123 14:44:11.746802 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-tt6kr" Nov 23 14:44:12 crc kubenswrapper[5050]: I1123 14:44:11.748456 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29398470-mm7rl" podStartSLOduration=126.748428691 podStartE2EDuration="2m6.748428691s" podCreationTimestamp="2025-11-23 14:42:05 
+0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 14:44:11.746888867 +0000 UTC m=+146.913885352" watchObservedRunningTime="2025-11-23 14:44:11.748428691 +0000 UTC m=+146.915425176" Nov 23 14:44:12 crc kubenswrapper[5050]: I1123 14:44:11.755433 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 23 14:44:12 crc kubenswrapper[5050]: E1123 14:44:11.755760 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-23 14:44:12.255726885 +0000 UTC m=+147.422723400 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 23 14:44:12 crc kubenswrapper[5050]: I1123 14:44:11.755867 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rw2cv\" (UID: \"9c29734c-66bd-4ac3-b26a-b5349d786018\") " pod="openshift-image-registry/image-registry-697d97f7c8-rw2cv" Nov 23 14:44:12 crc kubenswrapper[5050]: E1123 14:44:11.760936 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-23 14:44:12.26091681 +0000 UTC m=+147.427913295 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rw2cv" (UID: "9c29734c-66bd-4ac3-b26a-b5349d786018") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 23 14:44:12 crc kubenswrapper[5050]: I1123 14:44:11.797529 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-admission-controller-857f4d67dd-rzj2p" podStartSLOduration=125.797509264 podStartE2EDuration="2m5.797509264s" podCreationTimestamp="2025-11-23 14:42:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 14:44:11.771957049 +0000 UTC m=+146.938953534" watchObservedRunningTime="2025-11-23 14:44:11.797509264 +0000 UTC m=+146.964505749" Nov 23 14:44:12 crc kubenswrapper[5050]: I1123 14:44:11.842284 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-5wl59" podStartSLOduration=125.842265677 podStartE2EDuration="2m5.842265677s" podCreationTimestamp="2025-11-23 14:42:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 14:44:11.818048069 +0000 UTC m=+146.985044554" watchObservedRunningTime="2025-11-23 14:44:11.842265677 +0000 UTC m=+147.009262162" Nov 23 14:44:12 crc kubenswrapper[5050]: I1123 14:44:11.867227 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-wr6q4" podStartSLOduration=125.867209695 podStartE2EDuration="2m5.867209695s" podCreationTimestamp="2025-11-23 14:42:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 14:44:11.844562851 +0000 UTC m=+147.011559336" watchObservedRunningTime="2025-11-23 14:44:11.867209695 +0000 UTC m=+147.034206180" Nov 23 14:44:12 crc kubenswrapper[5050]: I1123 14:44:11.897885 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-xwq4h" podStartSLOduration=125.897860293 podStartE2EDuration="2m5.897860293s" podCreationTimestamp="2025-11-23 14:42:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 14:44:11.868589063 +0000 UTC m=+147.035585548" watchObservedRunningTime="2025-11-23 14:44:11.897860293 +0000 UTC m=+147.064856778" Nov 23 14:44:12 crc kubenswrapper[5050]: I1123 14:44:11.915065 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-tcs6j" podStartSLOduration=125.915048554 podStartE2EDuration="2m5.915048554s" podCreationTimestamp="2025-11-23 14:42:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 14:44:11.898377007 +0000 UTC m=+147.065373512" watchObservedRunningTime="2025-11-23 14:44:11.915048554 +0000 UTC m=+147.082045039" Nov 23 14:44:12 crc kubenswrapper[5050]: I1123 14:44:12.178194 5050 patch_prober.go:28] interesting 
pod/packageserver-d55dfcdfc-5wl59 container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.36:5443/healthz\": dial tcp 10.217.0.36:5443: connect: connection refused" start-of-body=
Nov 23 14:44:12 crc kubenswrapper[5050]: I1123 14:44:12.178261 5050 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-5wl59" podUID="31ae7f30-0f40-4c17-ad54-33aeefcbeeb7" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.36:5443/healthz\": dial tcp 10.217.0.36:5443: connect: connection refused"
Nov 23 14:44:12 crc kubenswrapper[5050]: I1123 14:44:12.180023 5050 patch_prober.go:28] interesting pod/apiserver-7bbb656c7d-tt6kr container/oauth-apiserver namespace/openshift-oauth-apiserver: Startup probe status=failure output="Get \"https://10.217.0.11:8443/livez\": dial tcp 10.217.0.11:8443: connect: connection refused" start-of-body=
Nov 23 14:44:12 crc kubenswrapper[5050]: I1123 14:44:12.180060 5050 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-tt6kr" podUID="74ee62a2-d057-4ddf-8e5f-619cd90645d0" containerName="oauth-apiserver" probeResult="failure" output="Get \"https://10.217.0.11:8443/livez\": dial tcp 10.217.0.11:8443: connect: connection refused"
Nov 23 14:44:12 crc kubenswrapper[5050]: I1123 14:44:12.180162 5050 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-wr6q4 container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.40:8080/healthz\": dial tcp 10.217.0.40:8080: connect: connection refused" start-of-body=
Nov 23 14:44:12 crc kubenswrapper[5050]: I1123 14:44:12.180180 5050 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-wr6q4" podUID="fef4a22f-705c-4dda-8c74-ace62ff4ce68" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.40:8080/healthz\": dial tcp 10.217.0.40:8080: connect: connection refused"
Nov 23 14:44:12 crc kubenswrapper[5050]: I1123 14:44:12.184937 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 23 14:44:12 crc kubenswrapper[5050]: E1123 14:44:12.186692 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-23 14:44:12.686662485 +0000 UTC m=+147.853658960 (durationBeforeRetry 500ms).
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 23 14:44:12 crc kubenswrapper[5050]: I1123 14:44:12.233956 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console-operator/console-operator-58897d9998-h2nqh" Nov 23 14:44:12 crc kubenswrapper[5050]: I1123 14:44:12.262633 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-jzg9r" podStartSLOduration=126.26260645 podStartE2EDuration="2m6.26260645s" podCreationTimestamp="2025-11-23 14:42:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 14:44:12.178730033 +0000 UTC m=+147.345726518" watchObservedRunningTime="2025-11-23 14:44:12.26260645 +0000 UTC m=+147.429602935" Nov 23 14:44:12 crc kubenswrapper[5050]: I1123 14:44:12.267727 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-n9kmn" podStartSLOduration=126.267717154 podStartE2EDuration="2m6.267717154s" podCreationTimestamp="2025-11-23 14:42:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 14:44:12.264432952 +0000 UTC m=+147.431429447" watchObservedRunningTime="2025-11-23 14:44:12.267717154 +0000 UTC m=+147.434713639" Nov 23 14:44:12 crc kubenswrapper[5050]: I1123 14:44:12.290075 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rw2cv\" (UID: \"9c29734c-66bd-4ac3-b26a-b5349d786018\") " pod="openshift-image-registry/image-registry-697d97f7c8-rw2cv" Nov 23 14:44:12 crc kubenswrapper[5050]: E1123 14:44:12.290509 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-23 14:44:12.790495991 +0000 UTC m=+147.957492476 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rw2cv" (UID: "9c29734c-66bd-4ac3-b26a-b5349d786018") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 23 14:44:12 crc kubenswrapper[5050]: I1123 14:44:12.304229 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/dns-default-wdqwt" podStartSLOduration=8.304210715 podStartE2EDuration="8.304210715s" podCreationTimestamp="2025-11-23 14:44:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 14:44:12.298086613 +0000 UTC m=+147.465083088" watchObservedRunningTime="2025-11-23 14:44:12.304210715 +0000 UTC m=+147.471207200" Nov 23 14:44:12 crc kubenswrapper[5050]: I1123 14:44:12.375984 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-b9jzj" Nov 23 14:44:12 crc kubenswrapper[5050]: I1123 14:44:12.391760 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 23 14:44:12 crc kubenswrapper[5050]: E1123 14:44:12.392157 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-23 14:44:12.892137886 +0000 UTC m=+148.059134371 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 23 14:44:12 crc kubenswrapper[5050]: I1123 14:44:12.493076 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rw2cv\" (UID: \"9c29734c-66bd-4ac3-b26a-b5349d786018\") " pod="openshift-image-registry/image-registry-697d97f7c8-rw2cv" Nov 23 14:44:12 crc kubenswrapper[5050]: E1123 14:44:12.494202 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-23 14:44:12.994173921 +0000 UTC m=+148.161170466 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rw2cv" (UID: "9c29734c-66bd-4ac3-b26a-b5349d786018") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 23 14:44:12 crc kubenswrapper[5050]: I1123 14:44:12.506992 5050 patch_prober.go:28] interesting pod/router-default-5444994796-2g8pj container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 23 14:44:12 crc kubenswrapper[5050]: [-]has-synced failed: reason withheld
Nov 23 14:44:12 crc kubenswrapper[5050]: [+]process-running ok
Nov 23 14:44:12 crc kubenswrapper[5050]: healthz check failed
Nov 23 14:44:12 crc kubenswrapper[5050]: I1123 14:44:12.507058 5050 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-2g8pj" podUID="05b57f72-3ec2-41f4-ba15-4bb73c15a1af" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 23 14:44:12 crc kubenswrapper[5050]: I1123 14:44:12.596089 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 23 14:44:12 crc kubenswrapper[5050]: E1123 14:44:12.596461 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-23 14:44:13.096426183 +0000 UTC m=+148.263422658 (durationBeforeRetry 500ms).
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 23 14:44:12 crc kubenswrapper[5050]: I1123 14:44:12.624746 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-jz58r" event={"ID":"2fc00c67-c3ab-4b63-b068-3a41c027f6f3","Type":"ContainerStarted","Data":"c304d254d50e1b5fbe5daaa57caadc4b4ee39bac20d469e0bb7e431ede047a46"} Nov 23 14:44:12 crc kubenswrapper[5050]: I1123 14:44:12.649399 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-svd5r" event={"ID":"005c682b-725d-471a-98ca-08f2760e6603","Type":"ContainerStarted","Data":"fd1688142115dc7ef56cf10634fa3c56ee076b57661e1df53b1e35a28f4e326d"} Nov 23 14:44:12 crc kubenswrapper[5050]: I1123 14:44:12.654635 5050 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-wr6q4 container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.40:8080/healthz\": dial tcp 10.217.0.40:8080: connect: connection refused" start-of-body= Nov 23 14:44:12 crc kubenswrapper[5050]: I1123 14:44:12.654718 5050 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-wr6q4" podUID="fef4a22f-705c-4dda-8c74-ace62ff4ce68" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.40:8080/healthz\": dial tcp 10.217.0.40:8080: connect: connection refused" Nov 23 14:44:12 crc kubenswrapper[5050]: I1123 14:44:12.659927 5050 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-5wl59 container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.36:5443/healthz\": dial tcp 10.217.0.36:5443: connect: connection refused" start-of-body= Nov 23 14:44:12 crc kubenswrapper[5050]: I1123 14:44:12.659973 5050 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-5wl59" podUID="31ae7f30-0f40-4c17-ad54-33aeefcbeeb7" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.36:5443/healthz\": dial tcp 10.217.0.36:5443: connect: connection refused" Nov 23 14:44:12 crc kubenswrapper[5050]: I1123 14:44:12.660056 5050 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-rgjfm container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.29:8443/healthz\": dial tcp 10.217.0.29:8443: connect: connection refused" start-of-body= Nov 23 14:44:12 crc kubenswrapper[5050]: I1123 14:44:12.660073 5050 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-rgjfm" podUID="10bf2f37-5f87-479d-a9b1-9e1bdf5b150a" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.29:8443/healthz\": dial tcp 10.217.0.29:8443: connect: connection refused" Nov 23 14:44:12 crc kubenswrapper[5050]: I1123 14:44:12.680042 5050 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-ph6h8 container/openshift-config-operator 
namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.13:8443/healthz\": dial tcp 10.217.0.13:8443: connect: connection refused" start-of-body= Nov 23 14:44:12 crc kubenswrapper[5050]: I1123 14:44:12.680122 5050 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-ph6h8" podUID="0a4f6fb7-314a-4896-bcba-c11f622a5e3f" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.13:8443/healthz\": dial tcp 10.217.0.13:8443: connect: connection refused" Nov 23 14:44:12 crc kubenswrapper[5050]: I1123 14:44:12.711555 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rw2cv\" (UID: \"9c29734c-66bd-4ac3-b26a-b5349d786018\") " pod="openshift-image-registry/image-registry-697d97f7c8-rw2cv" Nov 23 14:44:12 crc kubenswrapper[5050]: E1123 14:44:12.716602 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-23 14:44:13.216579055 +0000 UTC m=+148.383575730 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rw2cv" (UID: "9c29734c-66bd-4ac3-b26a-b5349d786018") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 23 14:44:12 crc kubenswrapper[5050]: I1123 14:44:12.734058 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-p4p66" Nov 23 14:44:12 crc kubenswrapper[5050]: I1123 14:44:12.815271 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 23 14:44:12 crc kubenswrapper[5050]: E1123 14:44:12.815406 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-23 14:44:13.315383521 +0000 UTC m=+148.482380006 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 23 14:44:12 crc kubenswrapper[5050]: I1123 14:44:12.816226 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rw2cv\" (UID: \"9c29734c-66bd-4ac3-b26a-b5349d786018\") " pod="openshift-image-registry/image-registry-697d97f7c8-rw2cv" Nov 23 14:44:12 crc kubenswrapper[5050]: E1123 14:44:12.816712 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-23 14:44:13.316688607 +0000 UTC m=+148.483685092 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rw2cv" (UID: "9c29734c-66bd-4ac3-b26a-b5349d786018") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 23 14:44:12 crc kubenswrapper[5050]: I1123 14:44:12.918850 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 23 14:44:12 crc kubenswrapper[5050]: E1123 14:44:12.919125 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-23 14:44:13.419103753 +0000 UTC m=+148.586100238 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 23 14:44:13 crc kubenswrapper[5050]: I1123 14:44:13.020914 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rw2cv\" (UID: \"9c29734c-66bd-4ac3-b26a-b5349d786018\") " pod="openshift-image-registry/image-registry-697d97f7c8-rw2cv" Nov 23 14:44:13 crc kubenswrapper[5050]: E1123 14:44:13.021324 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-23 14:44:13.521311004 +0000 UTC m=+148.688307489 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rw2cv" (UID: "9c29734c-66bd-4ac3-b26a-b5349d786018") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 23 14:44:13 crc kubenswrapper[5050]: I1123 14:44:13.059003 5050 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-ph6h8 container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.13:8443/healthz\": dial tcp 10.217.0.13:8443: connect: connection refused" start-of-body= Nov 23 14:44:13 crc kubenswrapper[5050]: I1123 14:44:13.070588 5050 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-ph6h8" podUID="0a4f6fb7-314a-4896-bcba-c11f622a5e3f" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.13:8443/healthz\": dial tcp 10.217.0.13:8443: connect: connection refused" Nov 23 14:44:13 crc kubenswrapper[5050]: I1123 14:44:13.071635 5050 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-ph6h8 container/openshift-config-operator namespace/openshift-config-operator: Liveness probe status=failure output="Get \"https://10.217.0.13:8443/healthz\": dial tcp 10.217.0.13:8443: connect: connection refused" start-of-body= Nov 23 14:44:13 crc kubenswrapper[5050]: I1123 14:44:13.071751 5050 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-ph6h8" podUID="0a4f6fb7-314a-4896-bcba-c11f622a5e3f" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.13:8443/healthz\": dial tcp 10.217.0.13:8443: connect: connection refused" Nov 23 14:44:13 crc kubenswrapper[5050]: I1123 14:44:13.121754 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 23 14:44:13 crc kubenswrapper[5050]: E1123 14:44:13.122098 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-23 14:44:13.622080224 +0000 UTC m=+148.789076699 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 23 14:44:13 crc kubenswrapper[5050]: I1123 14:44:13.223423 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rw2cv\" (UID: \"9c29734c-66bd-4ac3-b26a-b5349d786018\") " pod="openshift-image-registry/image-registry-697d97f7c8-rw2cv" Nov 23 14:44:13 crc kubenswrapper[5050]: E1123 14:44:13.223846 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-23 14:44:13.723827111 +0000 UTC m=+148.890823596 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rw2cv" (UID: "9c29734c-66bd-4ac3-b26a-b5349d786018") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 23 14:44:13 crc kubenswrapper[5050]: I1123 14:44:13.324879 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 23 14:44:13 crc kubenswrapper[5050]: E1123 14:44:13.325096 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-23 14:44:13.825052044 +0000 UTC m=+148.992048529 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 23 14:44:13 crc kubenswrapper[5050]: I1123 14:44:13.325420 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rw2cv\" (UID: \"9c29734c-66bd-4ac3-b26a-b5349d786018\") " pod="openshift-image-registry/image-registry-697d97f7c8-rw2cv" Nov 23 14:44:13 crc kubenswrapper[5050]: E1123 14:44:13.325844 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-23 14:44:13.825833126 +0000 UTC m=+148.992829611 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rw2cv" (UID: "9c29734c-66bd-4ac3-b26a-b5349d786018") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 23 14:44:13 crc kubenswrapper[5050]: I1123 14:44:13.426551 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 23 14:44:13 crc kubenswrapper[5050]: E1123 14:44:13.426746 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-23 14:44:13.926713519 +0000 UTC m=+149.093710004 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 23 14:44:13 crc kubenswrapper[5050]: I1123 14:44:13.427216 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rw2cv\" (UID: \"9c29734c-66bd-4ac3-b26a-b5349d786018\") " pod="openshift-image-registry/image-registry-697d97f7c8-rw2cv" Nov 23 14:44:13 crc kubenswrapper[5050]: E1123 14:44:13.427813 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-23 14:44:13.92778678 +0000 UTC m=+149.094783265 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rw2cv" (UID: "9c29734c-66bd-4ac3-b26a-b5349d786018") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 23 14:44:13 crc kubenswrapper[5050]: I1123 14:44:13.491612 5050 patch_prober.go:28] interesting pod/router-default-5444994796-2g8pj container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 23 14:44:13 crc kubenswrapper[5050]: [-]has-synced failed: reason withheld Nov 23 14:44:13 crc kubenswrapper[5050]: [+]process-running ok Nov 23 14:44:13 crc kubenswrapper[5050]: healthz check failed Nov 23 14:44:13 crc kubenswrapper[5050]: I1123 14:44:13.491678 5050 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-2g8pj" podUID="05b57f72-3ec2-41f4-ba15-4bb73c15a1af" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 23 14:44:13 crc kubenswrapper[5050]: I1123 14:44:13.528407 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 23 14:44:13 crc kubenswrapper[5050]: E1123 14:44:13.528845 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-23 14:44:14.028828327 +0000 UTC m=+149.195824812 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 23 14:44:13 crc kubenswrapper[5050]: I1123 14:44:13.630279 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rw2cv\" (UID: \"9c29734c-66bd-4ac3-b26a-b5349d786018\") " pod="openshift-image-registry/image-registry-697d97f7c8-rw2cv" Nov 23 14:44:13 crc kubenswrapper[5050]: I1123 14:44:13.630355 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 23 14:44:13 crc kubenswrapper[5050]: I1123 14:44:13.630384 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 23 14:44:13 crc kubenswrapper[5050]: E1123 14:44:13.631608 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-23 14:44:14.131591583 +0000 UTC m=+149.298588078 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rw2cv" (UID: "9c29734c-66bd-4ac3-b26a-b5349d786018") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 23 14:44:13 crc kubenswrapper[5050]: I1123 14:44:13.631855 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 23 14:44:13 crc kubenswrapper[5050]: I1123 14:44:13.637338 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 23 14:44:13 crc kubenswrapper[5050]: I1123 14:44:13.656464 5050 generic.go:334] "Generic (PLEG): container finished" podID="ab27cc8e-1d04-43dd-bf34-0cfdb48daba7" containerID="7fbe45687412126d64567ea868effc5a3dab98a0df53a4f3b2c087fa38c6edb6" exitCode=0 Nov 23 14:44:13 crc kubenswrapper[5050]: I1123 14:44:13.656536 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29398470-mm7rl" event={"ID":"ab27cc8e-1d04-43dd-bf34-0cfdb48daba7","Type":"ContainerDied","Data":"7fbe45687412126d64567ea868effc5a3dab98a0df53a4f3b2c087fa38c6edb6"} Nov 23 14:44:13 crc kubenswrapper[5050]: I1123 14:44:13.661373 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-svd5r" event={"ID":"005c682b-725d-471a-98ca-08f2760e6603","Type":"ContainerStarted","Data":"fb93afcf37da8beea53638380c9c2324cca0cf13923cb8bae496132070391f78"} Nov 23 14:44:13 crc kubenswrapper[5050]: I1123 14:44:13.664504 5050 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-wr6q4 container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.40:8080/healthz\": dial tcp 10.217.0.40:8080: connect: connection refused" start-of-body= Nov 23 14:44:13 crc kubenswrapper[5050]: I1123 14:44:13.664579 5050 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-wr6q4" podUID="fef4a22f-705c-4dda-8c74-ace62ff4ce68" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.40:8080/healthz\": dial tcp 10.217.0.40:8080: connect: connection refused" Nov 23 14:44:13 crc kubenswrapper[5050]: I1123 14:44:13.678255 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 23 14:44:13 crc kubenswrapper[5050]: I1123 14:44:13.735576 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 23 14:44:13 crc kubenswrapper[5050]: I1123 14:44:13.735886 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 23 14:44:13 crc kubenswrapper[5050]: I1123 14:44:13.735919 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 23 14:44:13 crc kubenswrapper[5050]: E1123 14:44:13.736789 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-23 14:44:14.236767187 +0000 UTC m=+149.403763672 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 23 14:44:13 crc kubenswrapper[5050]: I1123 14:44:13.751178 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 23 14:44:13 crc kubenswrapper[5050]: I1123 14:44:13.763390 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 23 14:44:13 crc kubenswrapper[5050]: I1123 14:44:13.789461 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver/apiserver-76f77b778f-svd5r" podStartSLOduration=127.78942536 podStartE2EDuration="2m7.78942536s" podCreationTimestamp="2025-11-23 14:42:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 14:44:13.785142731 +0000 UTC m=+148.952139216" 
watchObservedRunningTime="2025-11-23 14:44:13.78942536 +0000 UTC m=+148.956421845" Nov 23 14:44:13 crc kubenswrapper[5050]: I1123 14:44:13.840652 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rw2cv\" (UID: \"9c29734c-66bd-4ac3-b26a-b5349d786018\") " pod="openshift-image-registry/image-registry-697d97f7c8-rw2cv" Nov 23 14:44:13 crc kubenswrapper[5050]: E1123 14:44:13.841535 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-23 14:44:14.341519198 +0000 UTC m=+149.508515683 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rw2cv" (UID: "9c29734c-66bd-4ac3-b26a-b5349d786018") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 23 14:44:13 crc kubenswrapper[5050]: I1123 14:44:13.878793 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 23 14:44:13 crc kubenswrapper[5050]: I1123 14:44:13.942253 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 23 14:44:13 crc kubenswrapper[5050]: E1123 14:44:13.942508 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-23 14:44:14.442466873 +0000 UTC m=+149.609463358 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 23 14:44:13 crc kubenswrapper[5050]: I1123 14:44:13.942615 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rw2cv\" (UID: \"9c29734c-66bd-4ac3-b26a-b5349d786018\") " pod="openshift-image-registry/image-registry-697d97f7c8-rw2cv" Nov 23 14:44:13 crc kubenswrapper[5050]: E1123 14:44:13.942949 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
No retries permitted until 2025-11-23 14:44:14.442934397 +0000 UTC m=+149.609930882 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rw2cv" (UID: "9c29734c-66bd-4ac3-b26a-b5349d786018") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 23 14:44:13 crc kubenswrapper[5050]: I1123 14:44:13.990843 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 23 14:44:14 crc kubenswrapper[5050]: I1123 14:44:14.045998 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 23 14:44:14 crc kubenswrapper[5050]: E1123 14:44:14.046862 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-23 14:44:14.546837674 +0000 UTC m=+149.713834159 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 23 14:44:14 crc kubenswrapper[5050]: I1123 14:44:14.151495 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rw2cv\" (UID: \"9c29734c-66bd-4ac3-b26a-b5349d786018\") " pod="openshift-image-registry/image-registry-697d97f7c8-rw2cv" Nov 23 14:44:14 crc kubenswrapper[5050]: E1123 14:44:14.151920 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-23 14:44:14.651905094 +0000 UTC m=+149.818901579 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rw2cv" (UID: "9c29734c-66bd-4ac3-b26a-b5349d786018") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 23 14:44:14 crc kubenswrapper[5050]: I1123 14:44:14.259118 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 23 14:44:14 crc kubenswrapper[5050]: E1123 14:44:14.259474 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-23 14:44:14.759433023 +0000 UTC m=+149.926429498 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 23 14:44:14 crc kubenswrapper[5050]: I1123 14:44:14.363101 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rw2cv\" (UID: \"9c29734c-66bd-4ac3-b26a-b5349d786018\") " pod="openshift-image-registry/image-registry-697d97f7c8-rw2cv" Nov 23 14:44:14 crc kubenswrapper[5050]: E1123 14:44:14.363582 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-23 14:44:14.863563087 +0000 UTC m=+150.030559572 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rw2cv" (UID: "9c29734c-66bd-4ac3-b26a-b5349d786018") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 23 14:44:14 crc kubenswrapper[5050]: I1123 14:44:14.390813 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-5wl59" Nov 23 14:44:14 crc kubenswrapper[5050]: I1123 14:44:14.472660 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 23 14:44:14 crc kubenswrapper[5050]: E1123 14:44:14.473018 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-23 14:44:14.97299601 +0000 UTC m=+150.139992495 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 23 14:44:14 crc kubenswrapper[5050]: I1123 14:44:14.495770 5050 patch_prober.go:28] interesting pod/router-default-5444994796-2g8pj container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 23 14:44:14 crc kubenswrapper[5050]: [-]has-synced failed: reason withheld Nov 23 14:44:14 crc kubenswrapper[5050]: [+]process-running ok Nov 23 14:44:14 crc kubenswrapper[5050]: healthz check failed Nov 23 14:44:14 crc kubenswrapper[5050]: I1123 14:44:14.495842 5050 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-2g8pj" podUID="05b57f72-3ec2-41f4-ba15-4bb73c15a1af" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 23 14:44:14 crc kubenswrapper[5050]: I1123 14:44:14.578896 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rw2cv\" (UID: \"9c29734c-66bd-4ac3-b26a-b5349d786018\") " pod="openshift-image-registry/image-registry-697d97f7c8-rw2cv" Nov 23 14:44:14 crc kubenswrapper[5050]: E1123 14:44:14.579366 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-23 14:44:15.079345096 +0000 UTC m=+150.246341581 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rw2cv" (UID: "9c29734c-66bd-4ac3-b26a-b5349d786018") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 23 14:44:14 crc kubenswrapper[5050]: I1123 14:44:14.677612 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"ac5c2cf1d9700d3f1b7d234fb5466ba09c6a949d03e8af5705ad7639b84f3134"} Nov 23 14:44:14 crc kubenswrapper[5050]: I1123 14:44:14.679546 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-jz58r" event={"ID":"2fc00c67-c3ab-4b63-b068-3a41c027f6f3","Type":"ContainerStarted","Data":"fee1d94c199b7f84eac560144864820239340896390ecada10bd3fc0df6da1f6"} Nov 23 14:44:14 crc kubenswrapper[5050]: I1123 14:44:14.680019 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 23 14:44:14 crc kubenswrapper[5050]: E1123 14:44:14.680373 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-23 14:44:15.180356483 +0000 UTC m=+150.347352958 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 23 14:44:14 crc kubenswrapper[5050]: I1123 14:44:14.691360 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"a93bb65fe4ec2cef2e5f62492c30e0725244f1c9a8aaaa78bfd65e78dfd7d640"} Nov 23 14:44:14 crc kubenswrapper[5050]: I1123 14:44:14.783426 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rw2cv\" (UID: \"9c29734c-66bd-4ac3-b26a-b5349d786018\") " pod="openshift-image-registry/image-registry-697d97f7c8-rw2cv" Nov 23 14:44:14 crc kubenswrapper[5050]: E1123 14:44:14.785121 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-23 14:44:15.285102175 +0000 UTC m=+150.452098660 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rw2cv" (UID: "9c29734c-66bd-4ac3-b26a-b5349d786018") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 23 14:44:14 crc kubenswrapper[5050]: I1123 14:44:14.864074 5050 plugin_watcher.go:194] "Adding socket path or updating timestamp to desired state cache" path="/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock" Nov 23 14:44:14 crc kubenswrapper[5050]: I1123 14:44:14.886100 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 23 14:44:14 crc kubenswrapper[5050]: E1123 14:44:14.886458 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-23 14:44:15.38641517 +0000 UTC m=+150.553411645 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 23 14:44:14 crc kubenswrapper[5050]: I1123 14:44:14.900216 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-n9s79"] Nov 23 14:44:14 crc kubenswrapper[5050]: I1123 14:44:14.903609 5050 util.go:30] "No sandbox for pod can be found. 
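The plugin_watcher entry at 14:44:14.864 is the turning point: the hostpath provisioner's registrar has created its registration socket under /var/lib/kubelet/plugins_registry, and kubelet's plugin watcher has picked it up. Upstream kubelet discovers these sockets with an fsnotify watch on that directory and then runs a gRPC registration handshake over each one; the sketch below shows only the discovery half, under that assumption (the real watcher also handles deletes, retries, and the handshake itself):

    // Minimal sketch of plugin-socket discovery in a plugins_registry
    // directory, assuming an fsnotify-style watcher as kubelet's
    // pluginwatcher uses.
    package main

    import (
        "log"
        "path/filepath"

        "github.com/fsnotify/fsnotify"
    )

    func main() {
        w, err := fsnotify.NewWatcher()
        if err != nil {
            log.Fatal(err)
        }
        defer w.Close()

        dir := "/var/lib/kubelet/plugins_registry"
        if err := w.Add(dir); err != nil {
            log.Fatal(err)
        }

        for {
            select {
            case ev := <-w.Events:
                // A CREATE of *.sock corresponds to the log line "Adding
                // socket path or updating timestamp to desired state cache".
                if ev.Op&fsnotify.Create != 0 && filepath.Ext(ev.Name) == ".sock" {
                    log.Printf("found plugin socket %s; would start registration handshake", ev.Name)
                }
            case err := <-w.Errors:
                log.Printf("watch error: %v", err)
            }
        }
    }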
Need to start a new one" pod="openshift-marketplace/community-operators-n9s79" Nov 23 14:44:14 crc kubenswrapper[5050]: I1123 14:44:14.910235 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Nov 23 14:44:14 crc kubenswrapper[5050]: I1123 14:44:14.928271 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-n9s79"] Nov 23 14:44:14 crc kubenswrapper[5050]: I1123 14:44:14.988553 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/666a5515-92ff-44ef-8e85-f7205f823416-utilities\") pod \"community-operators-n9s79\" (UID: \"666a5515-92ff-44ef-8e85-f7205f823416\") " pod="openshift-marketplace/community-operators-n9s79" Nov 23 14:44:14 crc kubenswrapper[5050]: I1123 14:44:14.988603 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6vmb4\" (UniqueName: \"kubernetes.io/projected/666a5515-92ff-44ef-8e85-f7205f823416-kube-api-access-6vmb4\") pod \"community-operators-n9s79\" (UID: \"666a5515-92ff-44ef-8e85-f7205f823416\") " pod="openshift-marketplace/community-operators-n9s79" Nov 23 14:44:14 crc kubenswrapper[5050]: I1123 14:44:14.988623 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/666a5515-92ff-44ef-8e85-f7205f823416-catalog-content\") pod \"community-operators-n9s79\" (UID: \"666a5515-92ff-44ef-8e85-f7205f823416\") " pod="openshift-marketplace/community-operators-n9s79" Nov 23 14:44:14 crc kubenswrapper[5050]: I1123 14:44:14.988667 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rw2cv\" (UID: \"9c29734c-66bd-4ac3-b26a-b5349d786018\") " pod="openshift-image-registry/image-registry-697d97f7c8-rw2cv" Nov 23 14:44:14 crc kubenswrapper[5050]: E1123 14:44:14.989022 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-23 14:44:15.489009211 +0000 UTC m=+150.656005696 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rw2cv" (UID: "9c29734c-66bd-4ac3-b26a-b5349d786018") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 23 14:44:15 crc kubenswrapper[5050]: I1123 14:44:15.066061 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29398470-mm7rl" Nov 23 14:44:15 crc kubenswrapper[5050]: I1123 14:44:15.074002 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-tpll6"] Nov 23 14:44:15 crc kubenswrapper[5050]: E1123 14:44:15.074432 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ab27cc8e-1d04-43dd-bf34-0cfdb48daba7" containerName="collect-profiles" Nov 23 14:44:15 crc kubenswrapper[5050]: I1123 14:44:15.074528 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="ab27cc8e-1d04-43dd-bf34-0cfdb48daba7" containerName="collect-profiles" Nov 23 14:44:15 crc kubenswrapper[5050]: I1123 14:44:15.074698 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="ab27cc8e-1d04-43dd-bf34-0cfdb48daba7" containerName="collect-profiles" Nov 23 14:44:15 crc kubenswrapper[5050]: I1123 14:44:15.075469 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-tpll6" Nov 23 14:44:15 crc kubenswrapper[5050]: I1123 14:44:15.078578 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Nov 23 14:44:15 crc kubenswrapper[5050]: I1123 14:44:15.090833 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 23 14:44:15 crc kubenswrapper[5050]: E1123 14:44:15.091107 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-23 14:44:15.591081998 +0000 UTC m=+150.758078473 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 23 14:44:15 crc kubenswrapper[5050]: I1123 14:44:15.091149 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/666a5515-92ff-44ef-8e85-f7205f823416-utilities\") pod \"community-operators-n9s79\" (UID: \"666a5515-92ff-44ef-8e85-f7205f823416\") " pod="openshift-marketplace/community-operators-n9s79" Nov 23 14:44:15 crc kubenswrapper[5050]: I1123 14:44:15.091223 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6vmb4\" (UniqueName: \"kubernetes.io/projected/666a5515-92ff-44ef-8e85-f7205f823416-kube-api-access-6vmb4\") pod \"community-operators-n9s79\" (UID: \"666a5515-92ff-44ef-8e85-f7205f823416\") " pod="openshift-marketplace/community-operators-n9s79" Nov 23 14:44:15 crc kubenswrapper[5050]: I1123 14:44:15.091267 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/666a5515-92ff-44ef-8e85-f7205f823416-catalog-content\") pod \"community-operators-n9s79\" (UID: \"666a5515-92ff-44ef-8e85-f7205f823416\") " pod="openshift-marketplace/community-operators-n9s79" Nov 23 14:44:15 crc kubenswrapper[5050]: I1123 14:44:15.091303 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rw2cv\" (UID: \"9c29734c-66bd-4ac3-b26a-b5349d786018\") " pod="openshift-image-registry/image-registry-697d97f7c8-rw2cv" Nov 23 14:44:15 crc kubenswrapper[5050]: E1123 14:44:15.091818 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-23 14:44:15.591811398 +0000 UTC m=+150.758807883 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rw2cv" (UID: "9c29734c-66bd-4ac3-b26a-b5349d786018") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 23 14:44:15 crc kubenswrapper[5050]: I1123 14:44:15.092320 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/666a5515-92ff-44ef-8e85-f7205f823416-catalog-content\") pod \"community-operators-n9s79\" (UID: \"666a5515-92ff-44ef-8e85-f7205f823416\") " pod="openshift-marketplace/community-operators-n9s79" Nov 23 14:44:15 crc kubenswrapper[5050]: I1123 14:44:15.092397 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/666a5515-92ff-44ef-8e85-f7205f823416-utilities\") pod \"community-operators-n9s79\" (UID: \"666a5515-92ff-44ef-8e85-f7205f823416\") " pod="openshift-marketplace/community-operators-n9s79" Nov 23 14:44:15 crc kubenswrapper[5050]: I1123 14:44:15.104696 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-tpll6"] Nov 23 14:44:15 crc kubenswrapper[5050]: I1123 14:44:15.145524 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6vmb4\" (UniqueName: \"kubernetes.io/projected/666a5515-92ff-44ef-8e85-f7205f823416-kube-api-access-6vmb4\") pod \"community-operators-n9s79\" (UID: \"666a5515-92ff-44ef-8e85-f7205f823416\") " pod="openshift-marketplace/community-operators-n9s79" Nov 23 14:44:15 crc kubenswrapper[5050]: I1123 14:44:15.192490 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l9f57\" (UniqueName: \"kubernetes.io/projected/ab27cc8e-1d04-43dd-bf34-0cfdb48daba7-kube-api-access-l9f57\") pod \"ab27cc8e-1d04-43dd-bf34-0cfdb48daba7\" (UID: \"ab27cc8e-1d04-43dd-bf34-0cfdb48daba7\") " Nov 23 14:44:15 crc kubenswrapper[5050]: I1123 14:44:15.192570 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ab27cc8e-1d04-43dd-bf34-0cfdb48daba7-config-volume\") pod \"ab27cc8e-1d04-43dd-bf34-0cfdb48daba7\" (UID: \"ab27cc8e-1d04-43dd-bf34-0cfdb48daba7\") " Nov 23 14:44:15 crc kubenswrapper[5050]: I1123 14:44:15.192668 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ab27cc8e-1d04-43dd-bf34-0cfdb48daba7-secret-volume\") pod \"ab27cc8e-1d04-43dd-bf34-0cfdb48daba7\" (UID: \"ab27cc8e-1d04-43dd-bf34-0cfdb48daba7\") " Nov 23 14:44:15 crc kubenswrapper[5050]: I1123 14:44:15.192908 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 23 14:44:15 crc kubenswrapper[5050]: I1123 14:44:15.193084 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c57083c1-0f6f-4e07-aa17-218073eaa9c0-catalog-content\") pod 
\"certified-operators-tpll6\" (UID: \"c57083c1-0f6f-4e07-aa17-218073eaa9c0\") " pod="openshift-marketplace/certified-operators-tpll6" Nov 23 14:44:15 crc kubenswrapper[5050]: I1123 14:44:15.193138 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qdh75\" (UniqueName: \"kubernetes.io/projected/c57083c1-0f6f-4e07-aa17-218073eaa9c0-kube-api-access-qdh75\") pod \"certified-operators-tpll6\" (UID: \"c57083c1-0f6f-4e07-aa17-218073eaa9c0\") " pod="openshift-marketplace/certified-operators-tpll6" Nov 23 14:44:15 crc kubenswrapper[5050]: I1123 14:44:15.193160 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c57083c1-0f6f-4e07-aa17-218073eaa9c0-utilities\") pod \"certified-operators-tpll6\" (UID: \"c57083c1-0f6f-4e07-aa17-218073eaa9c0\") " pod="openshift-marketplace/certified-operators-tpll6" Nov 23 14:44:15 crc kubenswrapper[5050]: E1123 14:44:15.193545 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-23 14:44:15.693508604 +0000 UTC m=+150.860505079 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 23 14:44:15 crc kubenswrapper[5050]: I1123 14:44:15.193977 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ab27cc8e-1d04-43dd-bf34-0cfdb48daba7-config-volume" (OuterVolumeSpecName: "config-volume") pod "ab27cc8e-1d04-43dd-bf34-0cfdb48daba7" (UID: "ab27cc8e-1d04-43dd-bf34-0cfdb48daba7"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 14:44:15 crc kubenswrapper[5050]: I1123 14:44:15.200618 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ab27cc8e-1d04-43dd-bf34-0cfdb48daba7-kube-api-access-l9f57" (OuterVolumeSpecName: "kube-api-access-l9f57") pod "ab27cc8e-1d04-43dd-bf34-0cfdb48daba7" (UID: "ab27cc8e-1d04-43dd-bf34-0cfdb48daba7"). InnerVolumeSpecName "kube-api-access-l9f57". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 14:44:15 crc kubenswrapper[5050]: I1123 14:44:15.210117 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ab27cc8e-1d04-43dd-bf34-0cfdb48daba7-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "ab27cc8e-1d04-43dd-bf34-0cfdb48daba7" (UID: "ab27cc8e-1d04-43dd-bf34-0cfdb48daba7"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 14:44:15 crc kubenswrapper[5050]: I1123 14:44:15.241758 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-n9s79" Nov 23 14:44:15 crc kubenswrapper[5050]: I1123 14:44:15.277458 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-zz4fr"] Nov 23 14:44:15 crc kubenswrapper[5050]: I1123 14:44:15.278645 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-zz4fr" Nov 23 14:44:15 crc kubenswrapper[5050]: I1123 14:44:15.291388 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-zz4fr"] Nov 23 14:44:15 crc kubenswrapper[5050]: I1123 14:44:15.294248 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c57083c1-0f6f-4e07-aa17-218073eaa9c0-catalog-content\") pod \"certified-operators-tpll6\" (UID: \"c57083c1-0f6f-4e07-aa17-218073eaa9c0\") " pod="openshift-marketplace/certified-operators-tpll6" Nov 23 14:44:15 crc kubenswrapper[5050]: I1123 14:44:15.294321 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qdh75\" (UniqueName: \"kubernetes.io/projected/c57083c1-0f6f-4e07-aa17-218073eaa9c0-kube-api-access-qdh75\") pod \"certified-operators-tpll6\" (UID: \"c57083c1-0f6f-4e07-aa17-218073eaa9c0\") " pod="openshift-marketplace/certified-operators-tpll6" Nov 23 14:44:15 crc kubenswrapper[5050]: I1123 14:44:15.294349 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c57083c1-0f6f-4e07-aa17-218073eaa9c0-utilities\") pod \"certified-operators-tpll6\" (UID: \"c57083c1-0f6f-4e07-aa17-218073eaa9c0\") " pod="openshift-marketplace/certified-operators-tpll6" Nov 23 14:44:15 crc kubenswrapper[5050]: I1123 14:44:15.294399 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rw2cv\" (UID: \"9c29734c-66bd-4ac3-b26a-b5349d786018\") " pod="openshift-image-registry/image-registry-697d97f7c8-rw2cv" Nov 23 14:44:15 crc kubenswrapper[5050]: I1123 14:44:15.294508 5050 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ab27cc8e-1d04-43dd-bf34-0cfdb48daba7-config-volume\") on node \"crc\" DevicePath \"\"" Nov 23 14:44:15 crc kubenswrapper[5050]: I1123 14:44:15.294532 5050 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ab27cc8e-1d04-43dd-bf34-0cfdb48daba7-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 23 14:44:15 crc kubenswrapper[5050]: I1123 14:44:15.294542 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l9f57\" (UniqueName: \"kubernetes.io/projected/ab27cc8e-1d04-43dd-bf34-0cfdb48daba7-kube-api-access-l9f57\") on node \"crc\" DevicePath \"\"" Nov 23 14:44:15 crc kubenswrapper[5050]: E1123 14:44:15.294920 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-23 14:44:15.794897602 +0000 UTC m=+150.961894087 (durationBeforeRetry 500ms). 
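Interleaved with the CSI retries, the reconciler_common entries trace kubelet's volume reconciler working through the marketplace catalog pods and the finished collect-profiles pod: VerifyControllerAttachedVolume pulls a newly desired volume into the bookkeeping, MountVolume and SetUp drive it to mounted (the empty-dir and projected volumes succeed immediately because they need no external driver), while UnmountVolume and the "Volume detached" lines walk the reverse path. A rough sketch of that desired-state versus actual-state loop, with invented types (kubelet's real reconciler lives in pkg/kubelet/volumemanager):

    // Hedged sketch of a desired/actual-state volume reconcile loop.
    package main

    import "fmt"

    type volumeState int

    const (
        detached volumeState = iota
        attached
        mounted
    )

    func reconcile(desired map[string]bool, actual map[string]volumeState) {
        // Unmount anything no longer desired ("UnmountVolume started ...");
        // a fuller version would then detach it as well.
        for name, state := range actual {
            if !desired[name] && state == mounted {
                fmt.Println("UnmountVolume started for", name)
                actual[name] = attached
            }
        }
        // Mount anything desired but not yet mounted
        // ("VerifyControllerAttachedVolume started ...", then "MountVolume started ...").
        for name := range desired {
            switch actual[name] {
            case detached:
                fmt.Println("VerifyControllerAttachedVolume started for", name)
                actual[name] = attached
            case attached:
                fmt.Println("MountVolume started for", name)
                actual[name] = mounted
            }
        }
    }

    func main() {
        desired := map[string]bool{"catalog-content": true, "utilities": true}
        actual := map[string]volumeState{"pvc-657094db": mounted}
        for i := 0; i < 3; i++ {
            reconcile(desired, actual)
        }
    }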
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rw2cv" (UID: "9c29734c-66bd-4ac3-b26a-b5349d786018") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 23 14:44:15 crc kubenswrapper[5050]: I1123 14:44:15.296658 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c57083c1-0f6f-4e07-aa17-218073eaa9c0-utilities\") pod \"certified-operators-tpll6\" (UID: \"c57083c1-0f6f-4e07-aa17-218073eaa9c0\") " pod="openshift-marketplace/certified-operators-tpll6" Nov 23 14:44:15 crc kubenswrapper[5050]: I1123 14:44:15.297258 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c57083c1-0f6f-4e07-aa17-218073eaa9c0-catalog-content\") pod \"certified-operators-tpll6\" (UID: \"c57083c1-0f6f-4e07-aa17-218073eaa9c0\") " pod="openshift-marketplace/certified-operators-tpll6" Nov 23 14:44:15 crc kubenswrapper[5050]: I1123 14:44:15.324191 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qdh75\" (UniqueName: \"kubernetes.io/projected/c57083c1-0f6f-4e07-aa17-218073eaa9c0-kube-api-access-qdh75\") pod \"certified-operators-tpll6\" (UID: \"c57083c1-0f6f-4e07-aa17-218073eaa9c0\") " pod="openshift-marketplace/certified-operators-tpll6" Nov 23 14:44:15 crc kubenswrapper[5050]: I1123 14:44:15.395031 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 23 14:44:15 crc kubenswrapper[5050]: E1123 14:44:15.395239 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-23 14:44:15.895190869 +0000 UTC m=+151.062187364 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 23 14:44:15 crc kubenswrapper[5050]: I1123 14:44:15.395303 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4m7cl\" (UniqueName: \"kubernetes.io/projected/65bfd579-9076-4956-bd2b-e1c0c93775b5-kube-api-access-4m7cl\") pod \"community-operators-zz4fr\" (UID: \"65bfd579-9076-4956-bd2b-e1c0c93775b5\") " pod="openshift-marketplace/community-operators-zz4fr" Nov 23 14:44:15 crc kubenswrapper[5050]: I1123 14:44:15.395490 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rw2cv\" (UID: \"9c29734c-66bd-4ac3-b26a-b5349d786018\") " pod="openshift-image-registry/image-registry-697d97f7c8-rw2cv" Nov 23 14:44:15 crc kubenswrapper[5050]: I1123 14:44:15.395576 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/65bfd579-9076-4956-bd2b-e1c0c93775b5-catalog-content\") pod \"community-operators-zz4fr\" (UID: \"65bfd579-9076-4956-bd2b-e1c0c93775b5\") " pod="openshift-marketplace/community-operators-zz4fr" Nov 23 14:44:15 crc kubenswrapper[5050]: I1123 14:44:15.395614 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/65bfd579-9076-4956-bd2b-e1c0c93775b5-utilities\") pod \"community-operators-zz4fr\" (UID: \"65bfd579-9076-4956-bd2b-e1c0c93775b5\") " pod="openshift-marketplace/community-operators-zz4fr" Nov 23 14:44:15 crc kubenswrapper[5050]: E1123 14:44:15.395879 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-23 14:44:15.895864648 +0000 UTC m=+151.062861123 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rw2cv" (UID: "9c29734c-66bd-4ac3-b26a-b5349d786018") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 23 14:44:15 crc kubenswrapper[5050]: I1123 14:44:15.424575 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-tpll6" Nov 23 14:44:15 crc kubenswrapper[5050]: I1123 14:44:15.477736 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-gcpxp"] Nov 23 14:44:15 crc kubenswrapper[5050]: I1123 14:44:15.486611 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-gcpxp" Nov 23 14:44:15 crc kubenswrapper[5050]: I1123 14:44:15.492009 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-gcpxp"] Nov 23 14:44:15 crc kubenswrapper[5050]: I1123 14:44:15.499602 5050 patch_prober.go:28] interesting pod/router-default-5444994796-2g8pj container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 23 14:44:15 crc kubenswrapper[5050]: [-]has-synced failed: reason withheld Nov 23 14:44:15 crc kubenswrapper[5050]: [+]process-running ok Nov 23 14:44:15 crc kubenswrapper[5050]: healthz check failed Nov 23 14:44:15 crc kubenswrapper[5050]: I1123 14:44:15.499674 5050 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-2g8pj" podUID="05b57f72-3ec2-41f4-ba15-4bb73c15a1af" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 23 14:44:15 crc kubenswrapper[5050]: I1123 14:44:15.499868 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 23 14:44:15 crc kubenswrapper[5050]: I1123 14:44:15.500127 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4m7cl\" (UniqueName: \"kubernetes.io/projected/65bfd579-9076-4956-bd2b-e1c0c93775b5-kube-api-access-4m7cl\") pod \"community-operators-zz4fr\" (UID: \"65bfd579-9076-4956-bd2b-e1c0c93775b5\") " pod="openshift-marketplace/community-operators-zz4fr" Nov 23 14:44:15 crc kubenswrapper[5050]: E1123 14:44:15.500255 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-23 14:44:16.000217408 +0000 UTC m=+151.167213893 (durationBeforeRetry 500ms). 
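The router's startup probe output follows the usual Kubernetes healthz convention: one [+] or [-] line per sub-check, failing reasons withheld, and HTTP 500 until every check passes. Here backend-http and has-synced are still failing while process-running is fine, so the probe keeps reporting statuscode 500 until the router finishes syncing its backends. An aggregated handler in that style is easy to sketch (check names taken from the log; the handler itself is illustrative, not the router's code):

    // Sketch of an aggregated healthz endpoint in the style of the
    // startup-probe output above: one [+]/[-] line per check, HTTP 500
    // while any check fails.
    package main

    import (
        "fmt"
        "net/http"
    )

    type check struct {
        name string
        ok   func() bool
    }

    func healthz(checks []check) http.HandlerFunc {
        return func(w http.ResponseWriter, r *http.Request) {
            body, failed := "", false
            for _, c := range checks {
                if c.ok() {
                    body += fmt.Sprintf("[+]%s ok\n", c.name)
                } else {
                    body += fmt.Sprintf("[-]%s failed: reason withheld\n", c.name)
                    failed = true
                }
            }
            if failed {
                w.WriteHeader(http.StatusInternalServerError)
                fmt.Fprint(w, body+"healthz check failed\n")
                return
            }
            fmt.Fprint(w, body+"ok\n")
        }
    }

    func main() {
        checks := []check{
            {"backend-http", func() bool { return false }},
            {"has-synced", func() bool { return false }},
            {"process-running", func() bool { return true }},
        }
        http.Handle("/healthz", healthz(checks))
        _ = http.ListenAndServe(":8080", nil)
    }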
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 23 14:44:15 crc kubenswrapper[5050]: I1123 14:44:15.500338 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/65bfd579-9076-4956-bd2b-e1c0c93775b5-catalog-content\") pod \"community-operators-zz4fr\" (UID: \"65bfd579-9076-4956-bd2b-e1c0c93775b5\") " pod="openshift-marketplace/community-operators-zz4fr" Nov 23 14:44:15 crc kubenswrapper[5050]: I1123 14:44:15.500367 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/65bfd579-9076-4956-bd2b-e1c0c93775b5-utilities\") pod \"community-operators-zz4fr\" (UID: \"65bfd579-9076-4956-bd2b-e1c0c93775b5\") " pod="openshift-marketplace/community-operators-zz4fr" Nov 23 14:44:15 crc kubenswrapper[5050]: I1123 14:44:15.501295 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/65bfd579-9076-4956-bd2b-e1c0c93775b5-catalog-content\") pod \"community-operators-zz4fr\" (UID: \"65bfd579-9076-4956-bd2b-e1c0c93775b5\") " pod="openshift-marketplace/community-operators-zz4fr" Nov 23 14:44:15 crc kubenswrapper[5050]: I1123 14:44:15.501396 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/65bfd579-9076-4956-bd2b-e1c0c93775b5-utilities\") pod \"community-operators-zz4fr\" (UID: \"65bfd579-9076-4956-bd2b-e1c0c93775b5\") " pod="openshift-marketplace/community-operators-zz4fr" Nov 23 14:44:15 crc kubenswrapper[5050]: I1123 14:44:15.532209 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4m7cl\" (UniqueName: \"kubernetes.io/projected/65bfd579-9076-4956-bd2b-e1c0c93775b5-kube-api-access-4m7cl\") pod \"community-operators-zz4fr\" (UID: \"65bfd579-9076-4956-bd2b-e1c0c93775b5\") " pod="openshift-marketplace/community-operators-zz4fr" Nov 23 14:44:15 crc kubenswrapper[5050]: I1123 14:44:15.602972 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rw2cv\" (UID: \"9c29734c-66bd-4ac3-b26a-b5349d786018\") " pod="openshift-image-registry/image-registry-697d97f7c8-rw2cv" Nov 23 14:44:15 crc kubenswrapper[5050]: I1123 14:44:15.603031 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4bfl9\" (UniqueName: \"kubernetes.io/projected/fe09ca9a-811b-4126-a3bf-a43b0edbb475-kube-api-access-4bfl9\") pod \"certified-operators-gcpxp\" (UID: \"fe09ca9a-811b-4126-a3bf-a43b0edbb475\") " pod="openshift-marketplace/certified-operators-gcpxp" Nov 23 14:44:15 crc kubenswrapper[5050]: I1123 14:44:15.603061 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fe09ca9a-811b-4126-a3bf-a43b0edbb475-utilities\") pod 
\"certified-operators-gcpxp\" (UID: \"fe09ca9a-811b-4126-a3bf-a43b0edbb475\") " pod="openshift-marketplace/certified-operators-gcpxp" Nov 23 14:44:15 crc kubenswrapper[5050]: I1123 14:44:15.603084 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fe09ca9a-811b-4126-a3bf-a43b0edbb475-catalog-content\") pod \"certified-operators-gcpxp\" (UID: \"fe09ca9a-811b-4126-a3bf-a43b0edbb475\") " pod="openshift-marketplace/certified-operators-gcpxp" Nov 23 14:44:15 crc kubenswrapper[5050]: E1123 14:44:15.603474 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-23 14:44:16.103458927 +0000 UTC m=+151.270455412 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rw2cv" (UID: "9c29734c-66bd-4ac3-b26a-b5349d786018") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 23 14:44:15 crc kubenswrapper[5050]: I1123 14:44:15.603712 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-zz4fr" Nov 23 14:44:15 crc kubenswrapper[5050]: I1123 14:44:15.699706 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"a67ee0f6dc1c7bc978b9a8cbcc5f9ef865f2ac55b56bb4d559e90d39da90716f"} Nov 23 14:44:15 crc kubenswrapper[5050]: I1123 14:44:15.703672 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"fb38dc935c5a626630cc216a3c2c37f0e870b18aad232dda3d1bc8c4a4c36a5a"} Nov 23 14:44:15 crc kubenswrapper[5050]: I1123 14:44:15.703710 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"198049e4057fcf193bc01a85de4efb4b64ae36181ca8f122a85b0a59d6a0fd49"} Nov 23 14:44:15 crc kubenswrapper[5050]: I1123 14:44:15.704207 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 23 14:44:15 crc kubenswrapper[5050]: I1123 14:44:15.707398 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 23 14:44:15 crc kubenswrapper[5050]: I1123 14:44:15.707673 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4bfl9\" (UniqueName: \"kubernetes.io/projected/fe09ca9a-811b-4126-a3bf-a43b0edbb475-kube-api-access-4bfl9\") pod \"certified-operators-gcpxp\" (UID: \"fe09ca9a-811b-4126-a3bf-a43b0edbb475\") " 
pod="openshift-marketplace/certified-operators-gcpxp" Nov 23 14:44:15 crc kubenswrapper[5050]: I1123 14:44:15.707709 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fe09ca9a-811b-4126-a3bf-a43b0edbb475-utilities\") pod \"certified-operators-gcpxp\" (UID: \"fe09ca9a-811b-4126-a3bf-a43b0edbb475\") " pod="openshift-marketplace/certified-operators-gcpxp" Nov 23 14:44:15 crc kubenswrapper[5050]: I1123 14:44:15.707734 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fe09ca9a-811b-4126-a3bf-a43b0edbb475-catalog-content\") pod \"certified-operators-gcpxp\" (UID: \"fe09ca9a-811b-4126-a3bf-a43b0edbb475\") " pod="openshift-marketplace/certified-operators-gcpxp" Nov 23 14:44:15 crc kubenswrapper[5050]: I1123 14:44:15.708266 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fe09ca9a-811b-4126-a3bf-a43b0edbb475-catalog-content\") pod \"certified-operators-gcpxp\" (UID: \"fe09ca9a-811b-4126-a3bf-a43b0edbb475\") " pod="openshift-marketplace/certified-operators-gcpxp" Nov 23 14:44:15 crc kubenswrapper[5050]: E1123 14:44:15.708343 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-23 14:44:16.208323502 +0000 UTC m=+151.375319987 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 23 14:44:15 crc kubenswrapper[5050]: I1123 14:44:15.710521 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fe09ca9a-811b-4126-a3bf-a43b0edbb475-utilities\") pod \"certified-operators-gcpxp\" (UID: \"fe09ca9a-811b-4126-a3bf-a43b0edbb475\") " pod="openshift-marketplace/certified-operators-gcpxp" Nov 23 14:44:15 crc kubenswrapper[5050]: I1123 14:44:15.737108 5050 reconciler.go:161] "OperationExecutor.RegisterPlugin started" plugin={"SocketPath":"/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock","Timestamp":"2025-11-23T14:44:14.864412084Z","Handler":null,"Name":""} Nov 23 14:44:15 crc kubenswrapper[5050]: I1123 14:44:15.747216 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-jz58r" event={"ID":"2fc00c67-c3ab-4b63-b068-3a41c027f6f3","Type":"ContainerStarted","Data":"8ba3cd72d53024f1f768dcbb57c3460f4fdeae2978cf09fbf9233a637e301c38"} Nov 23 14:44:15 crc kubenswrapper[5050]: I1123 14:44:15.747259 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-jz58r" event={"ID":"2fc00c67-c3ab-4b63-b068-3a41c027f6f3","Type":"ContainerStarted","Data":"8407958f1a59839d80b62f5e1dc231a443a763e78bb70e095e435303d83d96fd"} Nov 23 14:44:15 crc kubenswrapper[5050]: I1123 14:44:15.750244 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"c5ad744cef98df122fd51c82efc377ecd946a0031a053068189d09274200d8af"} Nov 23 14:44:15 crc kubenswrapper[5050]: I1123 14:44:15.752822 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29398470-mm7rl" event={"ID":"ab27cc8e-1d04-43dd-bf34-0cfdb48daba7","Type":"ContainerDied","Data":"b2975a3133e86cc7798cb39cf637183a7b9b38947b9bf9f9c9a9e9f6f39c5f37"} Nov 23 14:44:15 crc kubenswrapper[5050]: I1123 14:44:15.752845 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b2975a3133e86cc7798cb39cf637183a7b9b38947b9bf9f9c9a9e9f6f39c5f37" Nov 23 14:44:15 crc kubenswrapper[5050]: I1123 14:44:15.753145 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29398470-mm7rl" Nov 23 14:44:15 crc kubenswrapper[5050]: I1123 14:44:15.755017 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4bfl9\" (UniqueName: \"kubernetes.io/projected/fe09ca9a-811b-4126-a3bf-a43b0edbb475-kube-api-access-4bfl9\") pod \"certified-operators-gcpxp\" (UID: \"fe09ca9a-811b-4126-a3bf-a43b0edbb475\") " pod="openshift-marketplace/certified-operators-gcpxp" Nov 23 14:44:15 crc kubenswrapper[5050]: I1123 14:44:15.758743 5050 csi_plugin.go:100] kubernetes.io/csi: Trying to validate a new CSI Driver with name: kubevirt.io.hostpath-provisioner endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock versions: 1.0.0 Nov 23 14:44:15 crc kubenswrapper[5050]: I1123 14:44:15.758816 5050 csi_plugin.go:113] kubernetes.io/csi: Register new plugin with name: kubevirt.io.hostpath-provisioner at endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock Nov 23 14:44:15 crc kubenswrapper[5050]: I1123 14:44:15.813767 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rw2cv\" (UID: \"9c29734c-66bd-4ac3-b26a-b5349d786018\") " pod="openshift-image-registry/image-registry-697d97f7c8-rw2cv" Nov 23 14:44:15 crc kubenswrapper[5050]: I1123 14:44:15.814951 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="hostpath-provisioner/csi-hostpathplugin-jz58r" podStartSLOduration=11.814917965 podStartE2EDuration="11.814917965s" podCreationTimestamp="2025-11-23 14:44:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 14:44:15.807981671 +0000 UTC m=+150.974978156" watchObservedRunningTime="2025-11-23 14:44:15.814917965 +0000 UTC m=+150.981914450" Nov 23 14:44:15 crc kubenswrapper[5050]: I1123 14:44:15.821011 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-n9s79"] Nov 23 14:44:15 crc kubenswrapper[5050]: I1123 14:44:15.821768 5050 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
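At 14:44:15.758 csi_plugin.go validates and registers kubevirt.io.hostpath-provisioner at /var/lib/kubelet/plugins/csi-hostpath/csi.sock, and the long-failing mount proceeds at once. The "STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice..." entry means the driver does not advertise a node staging step, so kubelet skips NodeStageVolume and goes straight to NodePublishVolume. A sketch of the capability response that produces this behavior, using the CSI spec's Go bindings (assuming a recent version that generates UnimplementedNodeServer; the nodeServer receiver is hypothetical):

    // Sketch of the capability exchange behind "STAGE_UNSTAGE_VOLUME
    // capability not set. Skipping MountDevice...". A node plugin that
    // returns no STAGE_UNSTAGE_VOLUME capability tells kubelet there is no
    // device-global staging step, so the attacher skips MountDevice.
    package main

    import (
        "context"

        csi "github.com/container-storage-interface/spec/lib/go/csi"
    )

    type nodeServer struct {
        csi.UnimplementedNodeServer
    }

    func (ns *nodeServer) NodeGetCapabilities(ctx context.Context, req *csi.NodeGetCapabilitiesRequest) (*csi.NodeGetCapabilitiesResponse, error) {
        // An empty capability list: no STAGE_UNSTAGE_VOLUME advertised.
        return &csi.NodeGetCapabilitiesResponse{}, nil
    }

    func main() {
        // Compile-time check that the sketch satisfies the NodeServer interface.
        var _ csi.NodeServer = &nodeServer{}
    }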
Nov 23 14:44:15 crc kubenswrapper[5050]: I1123 14:44:15.821799 5050 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rw2cv\" (UID: \"9c29734c-66bd-4ac3-b26a-b5349d786018\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount\"" pod="openshift-image-registry/image-registry-697d97f7c8-rw2cv" Nov 23 14:44:15 crc kubenswrapper[5050]: I1123 14:44:15.824808 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-gcpxp" Nov 23 14:44:15 crc kubenswrapper[5050]: I1123 14:44:15.885503 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rw2cv\" (UID: \"9c29734c-66bd-4ac3-b26a-b5349d786018\") " pod="openshift-image-registry/image-registry-697d97f7c8-rw2cv" Nov 23 14:44:15 crc kubenswrapper[5050]: I1123 14:44:15.901732 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-rw2cv" Nov 23 14:44:15 crc kubenswrapper[5050]: I1123 14:44:15.918510 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 23 14:44:15 crc kubenswrapper[5050]: I1123 14:44:15.939554 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-tpll6"] Nov 23 14:44:16 crc kubenswrapper[5050]: I1123 14:44:16.016921 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-zz4fr"] Nov 23 14:44:16 crc kubenswrapper[5050]: I1123 14:44:16.028939 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". 
PluginName "kubernetes.io/csi", VolumeGidValue "" Nov 23 14:44:16 crc kubenswrapper[5050]: I1123 14:44:16.051970 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-config-operator/openshift-config-operator-7777fb866f-ph6h8" Nov 23 14:44:16 crc kubenswrapper[5050]: W1123 14:44:16.075430 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod65bfd579_9076_4956_bd2b_e1c0c93775b5.slice/crio-1b4b44cca077f7edde1b56926751acb7ac87a92280231b0ad1b808eb6a860710 WatchSource:0}: Error finding container 1b4b44cca077f7edde1b56926751acb7ac87a92280231b0ad1b808eb6a860710: Status 404 returned error can't find the container with id 1b4b44cca077f7edde1b56926751acb7ac87a92280231b0ad1b808eb6a860710 Nov 23 14:44:16 crc kubenswrapper[5050]: I1123 14:44:16.333907 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-rw2cv"] Nov 23 14:44:16 crc kubenswrapper[5050]: I1123 14:44:16.426879 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-gcpxp"] Nov 23 14:44:16 crc kubenswrapper[5050]: W1123 14:44:16.445536 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfe09ca9a_811b_4126_a3bf_a43b0edbb475.slice/crio-3c6fc9f46cadb9047cd86707395965780442ebe36b85a8ec691d3524d495cada WatchSource:0}: Error finding container 3c6fc9f46cadb9047cd86707395965780442ebe36b85a8ec691d3524d495cada: Status 404 returned error can't find the container with id 3c6fc9f46cadb9047cd86707395965780442ebe36b85a8ec691d3524d495cada Nov 23 14:44:16 crc kubenswrapper[5050]: I1123 14:44:16.492195 5050 patch_prober.go:28] interesting pod/router-default-5444994796-2g8pj container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 23 14:44:16 crc kubenswrapper[5050]: [-]has-synced failed: reason withheld Nov 23 14:44:16 crc kubenswrapper[5050]: [+]process-running ok Nov 23 14:44:16 crc kubenswrapper[5050]: healthz check failed Nov 23 14:44:16 crc kubenswrapper[5050]: I1123 14:44:16.492261 5050 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-2g8pj" podUID="05b57f72-3ec2-41f4-ba15-4bb73c15a1af" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 23 14:44:16 crc kubenswrapper[5050]: I1123 14:44:16.638125 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Nov 23 14:44:16 crc kubenswrapper[5050]: I1123 14:44:16.639513 5050 util.go:30] "No sandbox for pod can be found. 
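The "Failed to process watch event ... Status 404" warnings here, like the RecentStats cache misses that follow, look like start-up races rather than real faults: a container can be created or torn down between the moment cAdvisor sees its cgroup and the moment its stats are read, so the stats provider returns what it has and reports the misses together as "partial failures" instead of failing the whole query. Illustrative only, with hypothetical container IDs:

    // Why such errors surface as "partial failures": collect per-container
    // stats, skip the ones that vanished, and report the misses together.
    package main

    import (
        "errors"
        "fmt"
    )

    var errNotInCache = errors.New("RecentStats: unable to find data in memory cache")

    func containerStats(id string) (string, error) {
        if id == "crio-86e1ef12" { // hypothetical just-started container
            return "", errNotInCache
        }
        return "cpu=5m mem=12Mi", nil
    }

    func main() {
        ids := []string{"crio-1b4b44cc", "crio-86e1ef12"}
        var partial []error
        for _, id := range ids {
            s, err := containerStats(id)
            if err != nil {
                partial = append(partial, fmt.Errorf("%q: %w", id, err))
                continue
            }
            fmt.Println(id, s)
        }
        if len(partial) > 0 {
            fmt.Println("partial failures:", errors.Join(partial...))
        }
    }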
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 23 14:44:16 crc kubenswrapper[5050]: I1123 14:44:16.641824 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager"/"installer-sa-dockercfg-kjl2n" Nov 23 14:44:16 crc kubenswrapper[5050]: I1123 14:44:16.642083 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager"/"kube-root-ca.crt" Nov 23 14:44:16 crc kubenswrapper[5050]: I1123 14:44:16.649284 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Nov 23 14:44:16 crc kubenswrapper[5050]: E1123 14:44:16.671935 5050 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfe09ca9a_811b_4126_a3bf_a43b0edbb475.slice/crio-86e1ef12c5c5fb5f82ce14b40fb93e2c0766bedb9b430b321e6e81b9e83ee2c6.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfe09ca9a_811b_4126_a3bf_a43b0edbb475.slice/crio-conmon-86e1ef12c5c5fb5f82ce14b40fb93e2c0766bedb9b430b321e6e81b9e83ee2c6.scope\": RecentStats: unable to find data in memory cache]" Nov 23 14:44:16 crc kubenswrapper[5050]: I1123 14:44:16.733794 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/c0dcdbba-d3a5-47a2-a3e7-24acffe17074-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"c0dcdbba-d3a5-47a2-a3e7-24acffe17074\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 23 14:44:16 crc kubenswrapper[5050]: I1123 14:44:16.733845 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/c0dcdbba-d3a5-47a2-a3e7-24acffe17074-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"c0dcdbba-d3a5-47a2-a3e7-24acffe17074\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 23 14:44:16 crc kubenswrapper[5050]: I1123 14:44:16.750797 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-tt6kr" Nov 23 14:44:16 crc kubenswrapper[5050]: I1123 14:44:16.760434 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-tt6kr" Nov 23 14:44:16 crc kubenswrapper[5050]: I1123 14:44:16.760892 5050 generic.go:334] "Generic (PLEG): container finished" podID="666a5515-92ff-44ef-8e85-f7205f823416" containerID="59fd1311c7d3c139db3ba5bfaba5d3cf1c80672ebbc4ba24a95b5a6069e7245d" exitCode=0 Nov 23 14:44:16 crc kubenswrapper[5050]: I1123 14:44:16.760999 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-n9s79" event={"ID":"666a5515-92ff-44ef-8e85-f7205f823416","Type":"ContainerDied","Data":"59fd1311c7d3c139db3ba5bfaba5d3cf1c80672ebbc4ba24a95b5a6069e7245d"} Nov 23 14:44:16 crc kubenswrapper[5050]: I1123 14:44:16.761044 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-n9s79" event={"ID":"666a5515-92ff-44ef-8e85-f7205f823416","Type":"ContainerStarted","Data":"4172b5725c18c0e47f60b0042bb7783d83c2793ae55b15c91d80413311df54e7"} Nov 23 14:44:16 crc kubenswrapper[5050]: I1123 14:44:16.765398 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-image-registry/image-registry-697d97f7c8-rw2cv" event={"ID":"9c29734c-66bd-4ac3-b26a-b5349d786018","Type":"ContainerStarted","Data":"de687d78dbe8543db96ece8c3cb26fe2d4e2f9c7d538c59a18a7882f3cf241d1"} Nov 23 14:44:16 crc kubenswrapper[5050]: I1123 14:44:16.765429 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-rw2cv" event={"ID":"9c29734c-66bd-4ac3-b26a-b5349d786018","Type":"ContainerStarted","Data":"adde299ff832bf08bcc6ac95f9084f648dc3fbdae25bf1c14f90f0363cd6845e"} Nov 23 14:44:16 crc kubenswrapper[5050]: I1123 14:44:16.765700 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-697d97f7c8-rw2cv" Nov 23 14:44:16 crc kubenswrapper[5050]: I1123 14:44:16.765795 5050 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 23 14:44:16 crc kubenswrapper[5050]: I1123 14:44:16.769174 5050 generic.go:334] "Generic (PLEG): container finished" podID="fe09ca9a-811b-4126-a3bf-a43b0edbb475" containerID="86e1ef12c5c5fb5f82ce14b40fb93e2c0766bedb9b430b321e6e81b9e83ee2c6" exitCode=0 Nov 23 14:44:16 crc kubenswrapper[5050]: I1123 14:44:16.769319 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gcpxp" event={"ID":"fe09ca9a-811b-4126-a3bf-a43b0edbb475","Type":"ContainerDied","Data":"86e1ef12c5c5fb5f82ce14b40fb93e2c0766bedb9b430b321e6e81b9e83ee2c6"} Nov 23 14:44:16 crc kubenswrapper[5050]: I1123 14:44:16.769348 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gcpxp" event={"ID":"fe09ca9a-811b-4126-a3bf-a43b0edbb475","Type":"ContainerStarted","Data":"3c6fc9f46cadb9047cd86707395965780442ebe36b85a8ec691d3524d495cada"} Nov 23 14:44:16 crc kubenswrapper[5050]: I1123 14:44:16.774031 5050 generic.go:334] "Generic (PLEG): container finished" podID="65bfd579-9076-4956-bd2b-e1c0c93775b5" containerID="95f21d52727a6f926ee944dd93e9ac8110802f07d506fcfd95c74640a65d2d33" exitCode=0 Nov 23 14:44:16 crc kubenswrapper[5050]: I1123 14:44:16.774091 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zz4fr" event={"ID":"65bfd579-9076-4956-bd2b-e1c0c93775b5","Type":"ContainerDied","Data":"95f21d52727a6f926ee944dd93e9ac8110802f07d506fcfd95c74640a65d2d33"} Nov 23 14:44:16 crc kubenswrapper[5050]: I1123 14:44:16.774112 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zz4fr" event={"ID":"65bfd579-9076-4956-bd2b-e1c0c93775b5","Type":"ContainerStarted","Data":"1b4b44cca077f7edde1b56926751acb7ac87a92280231b0ad1b808eb6a860710"} Nov 23 14:44:16 crc kubenswrapper[5050]: I1123 14:44:16.778950 5050 generic.go:334] "Generic (PLEG): container finished" podID="c57083c1-0f6f-4e07-aa17-218073eaa9c0" containerID="d57b19fbc42c4856fb288efc6b0f797be2cece55dfc509511b167203ab58ef84" exitCode=0 Nov 23 14:44:16 crc kubenswrapper[5050]: I1123 14:44:16.779053 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tpll6" event={"ID":"c57083c1-0f6f-4e07-aa17-218073eaa9c0","Type":"ContainerDied","Data":"d57b19fbc42c4856fb288efc6b0f797be2cece55dfc509511b167203ab58ef84"} Nov 23 14:44:16 crc kubenswrapper[5050]: I1123 14:44:16.779127 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tpll6" 
event={"ID":"c57083c1-0f6f-4e07-aa17-218073eaa9c0","Type":"ContainerStarted","Data":"50686f234ada4a501532d7891e53c0491293fa9a1f6446e5064b377ec0db3310"} Nov 23 14:44:16 crc kubenswrapper[5050]: I1123 14:44:16.834932 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/c0dcdbba-d3a5-47a2-a3e7-24acffe17074-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"c0dcdbba-d3a5-47a2-a3e7-24acffe17074\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 23 14:44:16 crc kubenswrapper[5050]: I1123 14:44:16.834990 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/c0dcdbba-d3a5-47a2-a3e7-24acffe17074-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"c0dcdbba-d3a5-47a2-a3e7-24acffe17074\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 23 14:44:16 crc kubenswrapper[5050]: I1123 14:44:16.836515 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/c0dcdbba-d3a5-47a2-a3e7-24acffe17074-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"c0dcdbba-d3a5-47a2-a3e7-24acffe17074\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 23 14:44:16 crc kubenswrapper[5050]: I1123 14:44:16.859163 5050 patch_prober.go:28] interesting pod/downloads-7954f5f757-gvb4q container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.6:8080/\": dial tcp 10.217.0.6:8080: connect: connection refused" start-of-body= Nov 23 14:44:16 crc kubenswrapper[5050]: I1123 14:44:16.859237 5050 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-gvb4q" podUID="16e600fc-0484-4c1c-8800-bb3958374bad" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.6:8080/\": dial tcp 10.217.0.6:8080: connect: connection refused" Nov 23 14:44:16 crc kubenswrapper[5050]: I1123 14:44:16.859161 5050 patch_prober.go:28] interesting pod/downloads-7954f5f757-gvb4q container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.6:8080/\": dial tcp 10.217.0.6:8080: connect: connection refused" start-of-body= Nov 23 14:44:16 crc kubenswrapper[5050]: I1123 14:44:16.859355 5050 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-gvb4q" podUID="16e600fc-0484-4c1c-8800-bb3958374bad" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.6:8080/\": dial tcp 10.217.0.6:8080: connect: connection refused" Nov 23 14:44:16 crc kubenswrapper[5050]: I1123 14:44:16.859575 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/c0dcdbba-d3a5-47a2-a3e7-24acffe17074-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"c0dcdbba-d3a5-47a2-a3e7-24acffe17074\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 23 14:44:16 crc kubenswrapper[5050]: I1123 14:44:16.888738 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-697d97f7c8-rw2cv" podStartSLOduration=130.888717157 podStartE2EDuration="2m10.888717157s" podCreationTimestamp="2025-11-23 14:42:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" 
observedRunningTime="2025-11-23 14:44:16.886233917 +0000 UTC m=+152.053230402" watchObservedRunningTime="2025-11-23 14:44:16.888717157 +0000 UTC m=+152.055713642" Nov 23 14:44:16 crc kubenswrapper[5050]: I1123 14:44:16.996648 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 23 14:44:17 crc kubenswrapper[5050]: I1123 14:44:17.087778 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-7dz9b"] Nov 23 14:44:17 crc kubenswrapper[5050]: I1123 14:44:17.089374 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-7dz9b" Nov 23 14:44:17 crc kubenswrapper[5050]: I1123 14:44:17.092966 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Nov 23 14:44:17 crc kubenswrapper[5050]: I1123 14:44:17.102972 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-7dz9b"] Nov 23 14:44:17 crc kubenswrapper[5050]: I1123 14:44:17.126101 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-f9d7485db-b646l" Nov 23 14:44:17 crc kubenswrapper[5050]: I1123 14:44:17.126207 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-f9d7485db-b646l" Nov 23 14:44:17 crc kubenswrapper[5050]: I1123 14:44:17.128055 5050 patch_prober.go:28] interesting pod/console-f9d7485db-b646l container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.24:8443/health\": dial tcp 10.217.0.24:8443: connect: connection refused" start-of-body= Nov 23 14:44:17 crc kubenswrapper[5050]: I1123 14:44:17.128130 5050 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-b646l" podUID="629f02dd-753b-46d2-b808-812468f6c9c5" containerName="console" probeResult="failure" output="Get \"https://10.217.0.24:8443/health\": dial tcp 10.217.0.24:8443: connect: connection refused" Nov 23 14:44:17 crc kubenswrapper[5050]: I1123 14:44:17.139249 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a408f71a-76c2-4c88-9b67-a08257da28eb-catalog-content\") pod \"redhat-marketplace-7dz9b\" (UID: \"a408f71a-76c2-4c88-9b67-a08257da28eb\") " pod="openshift-marketplace/redhat-marketplace-7dz9b" Nov 23 14:44:17 crc kubenswrapper[5050]: I1123 14:44:17.139310 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-htvz6\" (UniqueName: \"kubernetes.io/projected/a408f71a-76c2-4c88-9b67-a08257da28eb-kube-api-access-htvz6\") pod \"redhat-marketplace-7dz9b\" (UID: \"a408f71a-76c2-4c88-9b67-a08257da28eb\") " pod="openshift-marketplace/redhat-marketplace-7dz9b" Nov 23 14:44:17 crc kubenswrapper[5050]: I1123 14:44:17.139352 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a408f71a-76c2-4c88-9b67-a08257da28eb-utilities\") pod \"redhat-marketplace-7dz9b\" (UID: \"a408f71a-76c2-4c88-9b67-a08257da28eb\") " pod="openshift-marketplace/redhat-marketplace-7dz9b" Nov 23 14:44:17 crc kubenswrapper[5050]: I1123 14:44:17.240527 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" 
(UniqueName: \"kubernetes.io/empty-dir/a408f71a-76c2-4c88-9b67-a08257da28eb-catalog-content\") pod \"redhat-marketplace-7dz9b\" (UID: \"a408f71a-76c2-4c88-9b67-a08257da28eb\") " pod="openshift-marketplace/redhat-marketplace-7dz9b" Nov 23 14:44:17 crc kubenswrapper[5050]: I1123 14:44:17.240576 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-htvz6\" (UniqueName: \"kubernetes.io/projected/a408f71a-76c2-4c88-9b67-a08257da28eb-kube-api-access-htvz6\") pod \"redhat-marketplace-7dz9b\" (UID: \"a408f71a-76c2-4c88-9b67-a08257da28eb\") " pod="openshift-marketplace/redhat-marketplace-7dz9b" Nov 23 14:44:17 crc kubenswrapper[5050]: I1123 14:44:17.240645 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a408f71a-76c2-4c88-9b67-a08257da28eb-utilities\") pod \"redhat-marketplace-7dz9b\" (UID: \"a408f71a-76c2-4c88-9b67-a08257da28eb\") " pod="openshift-marketplace/redhat-marketplace-7dz9b" Nov 23 14:44:17 crc kubenswrapper[5050]: I1123 14:44:17.241062 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a408f71a-76c2-4c88-9b67-a08257da28eb-utilities\") pod \"redhat-marketplace-7dz9b\" (UID: \"a408f71a-76c2-4c88-9b67-a08257da28eb\") " pod="openshift-marketplace/redhat-marketplace-7dz9b" Nov 23 14:44:17 crc kubenswrapper[5050]: I1123 14:44:17.241281 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a408f71a-76c2-4c88-9b67-a08257da28eb-catalog-content\") pod \"redhat-marketplace-7dz9b\" (UID: \"a408f71a-76c2-4c88-9b67-a08257da28eb\") " pod="openshift-marketplace/redhat-marketplace-7dz9b" Nov 23 14:44:17 crc kubenswrapper[5050]: I1123 14:44:17.264134 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-htvz6\" (UniqueName: \"kubernetes.io/projected/a408f71a-76c2-4c88-9b67-a08257da28eb-kube-api-access-htvz6\") pod \"redhat-marketplace-7dz9b\" (UID: \"a408f71a-76c2-4c88-9b67-a08257da28eb\") " pod="openshift-marketplace/redhat-marketplace-7dz9b" Nov 23 14:44:17 crc kubenswrapper[5050]: I1123 14:44:17.329337 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Nov 23 14:44:17 crc kubenswrapper[5050]: I1123 14:44:17.411323 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-7dz9b" Nov 23 14:44:17 crc kubenswrapper[5050]: I1123 14:44:17.447668 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-apiserver/apiserver-76f77b778f-svd5r" Nov 23 14:44:17 crc kubenswrapper[5050]: I1123 14:44:17.447749 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-apiserver/apiserver-76f77b778f-svd5r" Nov 23 14:44:17 crc kubenswrapper[5050]: I1123 14:44:17.454304 5050 patch_prober.go:28] interesting pod/apiserver-76f77b778f-svd5r container/openshift-apiserver namespace/openshift-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok Nov 23 14:44:17 crc kubenswrapper[5050]: [+]log ok Nov 23 14:44:17 crc kubenswrapper[5050]: [+]etcd ok Nov 23 14:44:17 crc kubenswrapper[5050]: [+]poststarthook/start-apiserver-admission-initializer ok Nov 23 14:44:17 crc kubenswrapper[5050]: [+]poststarthook/generic-apiserver-start-informers ok Nov 23 14:44:17 crc kubenswrapper[5050]: [+]poststarthook/max-in-flight-filter ok Nov 23 14:44:17 crc kubenswrapper[5050]: [+]poststarthook/storage-object-count-tracker-hook ok Nov 23 14:44:17 crc kubenswrapper[5050]: [+]poststarthook/image.openshift.io-apiserver-caches ok Nov 23 14:44:17 crc kubenswrapper[5050]: [-]poststarthook/authorization.openshift.io-bootstrapclusterroles failed: reason withheld Nov 23 14:44:17 crc kubenswrapper[5050]: [+]poststarthook/authorization.openshift.io-ensurenodebootstrap-sa ok Nov 23 14:44:17 crc kubenswrapper[5050]: [+]poststarthook/project.openshift.io-projectcache ok Nov 23 14:44:17 crc kubenswrapper[5050]: [+]poststarthook/project.openshift.io-projectauthorizationcache ok Nov 23 14:44:17 crc kubenswrapper[5050]: [+]poststarthook/openshift.io-startinformers ok Nov 23 14:44:17 crc kubenswrapper[5050]: [+]poststarthook/openshift.io-restmapperupdater ok Nov 23 14:44:17 crc kubenswrapper[5050]: [+]poststarthook/quota.openshift.io-clusterquotamapping ok Nov 23 14:44:17 crc kubenswrapper[5050]: livez check failed Nov 23 14:44:17 crc kubenswrapper[5050]: I1123 14:44:17.454398 5050 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-apiserver/apiserver-76f77b778f-svd5r" podUID="005c682b-725d-471a-98ca-08f2760e6603" containerName="openshift-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 23 14:44:17 crc kubenswrapper[5050]: I1123 14:44:17.475412 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-t6xmn"] Nov 23 14:44:17 crc kubenswrapper[5050]: I1123 14:44:17.476522 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-t6xmn" Nov 23 14:44:17 crc kubenswrapper[5050]: I1123 14:44:17.486986 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ingress/router-default-5444994796-2g8pj" Nov 23 14:44:17 crc kubenswrapper[5050]: I1123 14:44:17.489390 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-t6xmn"] Nov 23 14:44:17 crc kubenswrapper[5050]: I1123 14:44:17.494704 5050 patch_prober.go:28] interesting pod/router-default-5444994796-2g8pj container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 23 14:44:17 crc kubenswrapper[5050]: [-]has-synced failed: reason withheld Nov 23 14:44:17 crc kubenswrapper[5050]: [+]process-running ok Nov 23 14:44:17 crc kubenswrapper[5050]: healthz check failed Nov 23 14:44:17 crc kubenswrapper[5050]: I1123 14:44:17.494768 5050 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-2g8pj" podUID="05b57f72-3ec2-41f4-ba15-4bb73c15a1af" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 23 14:44:17 crc kubenswrapper[5050]: I1123 14:44:17.545557 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cndhk\" (UniqueName: \"kubernetes.io/projected/20491310-05b1-4f61-9574-163a5463be0e-kube-api-access-cndhk\") pod \"redhat-marketplace-t6xmn\" (UID: \"20491310-05b1-4f61-9574-163a5463be0e\") " pod="openshift-marketplace/redhat-marketplace-t6xmn" Nov 23 14:44:17 crc kubenswrapper[5050]: I1123 14:44:17.545656 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/20491310-05b1-4f61-9574-163a5463be0e-catalog-content\") pod \"redhat-marketplace-t6xmn\" (UID: \"20491310-05b1-4f61-9574-163a5463be0e\") " pod="openshift-marketplace/redhat-marketplace-t6xmn" Nov 23 14:44:17 crc kubenswrapper[5050]: I1123 14:44:17.545678 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/20491310-05b1-4f61-9574-163a5463be0e-utilities\") pod \"redhat-marketplace-t6xmn\" (UID: \"20491310-05b1-4f61-9574-163a5463be0e\") " pod="openshift-marketplace/redhat-marketplace-t6xmn" Nov 23 14:44:17 crc kubenswrapper[5050]: I1123 14:44:17.582296 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f668bae-612b-4b75-9490-919e737c6a3b" path="/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes" Nov 23 14:44:17 crc kubenswrapper[5050]: I1123 14:44:17.647366 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cndhk\" (UniqueName: \"kubernetes.io/projected/20491310-05b1-4f61-9574-163a5463be0e-kube-api-access-cndhk\") pod \"redhat-marketplace-t6xmn\" (UID: \"20491310-05b1-4f61-9574-163a5463be0e\") " pod="openshift-marketplace/redhat-marketplace-t6xmn" Nov 23 14:44:17 crc kubenswrapper[5050]: I1123 14:44:17.647425 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/20491310-05b1-4f61-9574-163a5463be0e-catalog-content\") pod \"redhat-marketplace-t6xmn\" (UID: \"20491310-05b1-4f61-9574-163a5463be0e\") " 
pod="openshift-marketplace/redhat-marketplace-t6xmn" Nov 23 14:44:17 crc kubenswrapper[5050]: I1123 14:44:17.647471 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/20491310-05b1-4f61-9574-163a5463be0e-utilities\") pod \"redhat-marketplace-t6xmn\" (UID: \"20491310-05b1-4f61-9574-163a5463be0e\") " pod="openshift-marketplace/redhat-marketplace-t6xmn" Nov 23 14:44:17 crc kubenswrapper[5050]: I1123 14:44:17.648572 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/20491310-05b1-4f61-9574-163a5463be0e-utilities\") pod \"redhat-marketplace-t6xmn\" (UID: \"20491310-05b1-4f61-9574-163a5463be0e\") " pod="openshift-marketplace/redhat-marketplace-t6xmn" Nov 23 14:44:17 crc kubenswrapper[5050]: I1123 14:44:17.650468 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/20491310-05b1-4f61-9574-163a5463be0e-catalog-content\") pod \"redhat-marketplace-t6xmn\" (UID: \"20491310-05b1-4f61-9574-163a5463be0e\") " pod="openshift-marketplace/redhat-marketplace-t6xmn" Nov 23 14:44:17 crc kubenswrapper[5050]: I1123 14:44:17.693937 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-7dz9b"] Nov 23 14:44:17 crc kubenswrapper[5050]: I1123 14:44:17.699633 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cndhk\" (UniqueName: \"kubernetes.io/projected/20491310-05b1-4f61-9574-163a5463be0e-kube-api-access-cndhk\") pod \"redhat-marketplace-t6xmn\" (UID: \"20491310-05b1-4f61-9574-163a5463be0e\") " pod="openshift-marketplace/redhat-marketplace-t6xmn" Nov 23 14:44:17 crc kubenswrapper[5050]: W1123 14:44:17.713802 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda408f71a_76c2_4c88_9b67_a08257da28eb.slice/crio-663ff4a50afca895907b1c4a0a7a3c625d1ecbe4a06296e3c7a38a1715842020 WatchSource:0}: Error finding container 663ff4a50afca895907b1c4a0a7a3c625d1ecbe4a06296e3c7a38a1715842020: Status 404 returned error can't find the container with id 663ff4a50afca895907b1c4a0a7a3c625d1ecbe4a06296e3c7a38a1715842020 Nov 23 14:44:17 crc kubenswrapper[5050]: I1123 14:44:17.791670 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7dz9b" event={"ID":"a408f71a-76c2-4c88-9b67-a08257da28eb","Type":"ContainerStarted","Data":"663ff4a50afca895907b1c4a0a7a3c625d1ecbe4a06296e3c7a38a1715842020"} Nov 23 14:44:17 crc kubenswrapper[5050]: I1123 14:44:17.794203 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"c0dcdbba-d3a5-47a2-a3e7-24acffe17074","Type":"ContainerStarted","Data":"91b70aa3fcf542157a5e81a20d406dcdb73e73344e43fddad653b42a03f82619"} Nov 23 14:44:17 crc kubenswrapper[5050]: I1123 14:44:17.799035 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-t6xmn" Nov 23 14:44:17 crc kubenswrapper[5050]: I1123 14:44:17.864436 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-rgjfm" Nov 23 14:44:17 crc kubenswrapper[5050]: I1123 14:44:17.935106 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-wr6q4" Nov 23 14:44:18 crc kubenswrapper[5050]: I1123 14:44:18.077173 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-vpr78"] Nov 23 14:44:18 crc kubenswrapper[5050]: I1123 14:44:18.086786 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-vpr78" Nov 23 14:44:18 crc kubenswrapper[5050]: I1123 14:44:18.098236 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Nov 23 14:44:18 crc kubenswrapper[5050]: I1123 14:44:18.098314 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-vpr78"] Nov 23 14:44:18 crc kubenswrapper[5050]: I1123 14:44:18.168978 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wrtjh\" (UniqueName: \"kubernetes.io/projected/085c7e8e-434a-4bfa-b629-59f2191bc2e9-kube-api-access-wrtjh\") pod \"redhat-operators-vpr78\" (UID: \"085c7e8e-434a-4bfa-b629-59f2191bc2e9\") " pod="openshift-marketplace/redhat-operators-vpr78" Nov 23 14:44:18 crc kubenswrapper[5050]: I1123 14:44:18.169043 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/085c7e8e-434a-4bfa-b629-59f2191bc2e9-utilities\") pod \"redhat-operators-vpr78\" (UID: \"085c7e8e-434a-4bfa-b629-59f2191bc2e9\") " pod="openshift-marketplace/redhat-operators-vpr78" Nov 23 14:44:18 crc kubenswrapper[5050]: I1123 14:44:18.169159 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/085c7e8e-434a-4bfa-b629-59f2191bc2e9-catalog-content\") pod \"redhat-operators-vpr78\" (UID: \"085c7e8e-434a-4bfa-b629-59f2191bc2e9\") " pod="openshift-marketplace/redhat-operators-vpr78" Nov 23 14:44:18 crc kubenswrapper[5050]: I1123 14:44:18.272818 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wrtjh\" (UniqueName: \"kubernetes.io/projected/085c7e8e-434a-4bfa-b629-59f2191bc2e9-kube-api-access-wrtjh\") pod \"redhat-operators-vpr78\" (UID: \"085c7e8e-434a-4bfa-b629-59f2191bc2e9\") " pod="openshift-marketplace/redhat-operators-vpr78" Nov 23 14:44:18 crc kubenswrapper[5050]: I1123 14:44:18.272878 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/085c7e8e-434a-4bfa-b629-59f2191bc2e9-utilities\") pod \"redhat-operators-vpr78\" (UID: \"085c7e8e-434a-4bfa-b629-59f2191bc2e9\") " pod="openshift-marketplace/redhat-operators-vpr78" Nov 23 14:44:18 crc kubenswrapper[5050]: I1123 14:44:18.272936 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/085c7e8e-434a-4bfa-b629-59f2191bc2e9-catalog-content\") pod \"redhat-operators-vpr78\" (UID: 
\"085c7e8e-434a-4bfa-b629-59f2191bc2e9\") " pod="openshift-marketplace/redhat-operators-vpr78" Nov 23 14:44:18 crc kubenswrapper[5050]: I1123 14:44:18.273634 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/085c7e8e-434a-4bfa-b629-59f2191bc2e9-catalog-content\") pod \"redhat-operators-vpr78\" (UID: \"085c7e8e-434a-4bfa-b629-59f2191bc2e9\") " pod="openshift-marketplace/redhat-operators-vpr78" Nov 23 14:44:18 crc kubenswrapper[5050]: I1123 14:44:18.273635 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/085c7e8e-434a-4bfa-b629-59f2191bc2e9-utilities\") pod \"redhat-operators-vpr78\" (UID: \"085c7e8e-434a-4bfa-b629-59f2191bc2e9\") " pod="openshift-marketplace/redhat-operators-vpr78" Nov 23 14:44:18 crc kubenswrapper[5050]: I1123 14:44:18.291646 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wrtjh\" (UniqueName: \"kubernetes.io/projected/085c7e8e-434a-4bfa-b629-59f2191bc2e9-kube-api-access-wrtjh\") pod \"redhat-operators-vpr78\" (UID: \"085c7e8e-434a-4bfa-b629-59f2191bc2e9\") " pod="openshift-marketplace/redhat-operators-vpr78" Nov 23 14:44:18 crc kubenswrapper[5050]: I1123 14:44:18.384722 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-t6xmn"] Nov 23 14:44:18 crc kubenswrapper[5050]: W1123 14:44:18.440402 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod20491310_05b1_4f61_9574_163a5463be0e.slice/crio-aa50adc1c4d7e5de82c598c8d204b6c0a0f00fb93cd6a922502961f48330f21b WatchSource:0}: Error finding container aa50adc1c4d7e5de82c598c8d204b6c0a0f00fb93cd6a922502961f48330f21b: Status 404 returned error can't find the container with id aa50adc1c4d7e5de82c598c8d204b6c0a0f00fb93cd6a922502961f48330f21b Nov 23 14:44:18 crc kubenswrapper[5050]: I1123 14:44:18.469532 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-vpr78" Nov 23 14:44:18 crc kubenswrapper[5050]: I1123 14:44:18.477167 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-ppggf"] Nov 23 14:44:18 crc kubenswrapper[5050]: I1123 14:44:18.479004 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-ppggf" Nov 23 14:44:18 crc kubenswrapper[5050]: I1123 14:44:18.502282 5050 patch_prober.go:28] interesting pod/router-default-5444994796-2g8pj container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 23 14:44:18 crc kubenswrapper[5050]: [-]has-synced failed: reason withheld Nov 23 14:44:18 crc kubenswrapper[5050]: [+]process-running ok Nov 23 14:44:18 crc kubenswrapper[5050]: healthz check failed Nov 23 14:44:18 crc kubenswrapper[5050]: I1123 14:44:18.502368 5050 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-2g8pj" podUID="05b57f72-3ec2-41f4-ba15-4bb73c15a1af" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 23 14:44:18 crc kubenswrapper[5050]: I1123 14:44:18.503992 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-ppggf"] Nov 23 14:44:18 crc kubenswrapper[5050]: I1123 14:44:18.583146 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wxv86\" (UniqueName: \"kubernetes.io/projected/f89a2bf0-1a38-420d-9dba-d1a797364724-kube-api-access-wxv86\") pod \"redhat-operators-ppggf\" (UID: \"f89a2bf0-1a38-420d-9dba-d1a797364724\") " pod="openshift-marketplace/redhat-operators-ppggf" Nov 23 14:44:18 crc kubenswrapper[5050]: I1123 14:44:18.583212 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f89a2bf0-1a38-420d-9dba-d1a797364724-catalog-content\") pod \"redhat-operators-ppggf\" (UID: \"f89a2bf0-1a38-420d-9dba-d1a797364724\") " pod="openshift-marketplace/redhat-operators-ppggf" Nov 23 14:44:18 crc kubenswrapper[5050]: I1123 14:44:18.583984 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f89a2bf0-1a38-420d-9dba-d1a797364724-utilities\") pod \"redhat-operators-ppggf\" (UID: \"f89a2bf0-1a38-420d-9dba-d1a797364724\") " pod="openshift-marketplace/redhat-operators-ppggf" Nov 23 14:44:18 crc kubenswrapper[5050]: I1123 14:44:18.685662 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wxv86\" (UniqueName: \"kubernetes.io/projected/f89a2bf0-1a38-420d-9dba-d1a797364724-kube-api-access-wxv86\") pod \"redhat-operators-ppggf\" (UID: \"f89a2bf0-1a38-420d-9dba-d1a797364724\") " pod="openshift-marketplace/redhat-operators-ppggf" Nov 23 14:44:18 crc kubenswrapper[5050]: I1123 14:44:18.686048 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f89a2bf0-1a38-420d-9dba-d1a797364724-catalog-content\") pod \"redhat-operators-ppggf\" (UID: \"f89a2bf0-1a38-420d-9dba-d1a797364724\") " pod="openshift-marketplace/redhat-operators-ppggf" Nov 23 14:44:18 crc kubenswrapper[5050]: I1123 14:44:18.686174 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f89a2bf0-1a38-420d-9dba-d1a797364724-utilities\") pod \"redhat-operators-ppggf\" (UID: \"f89a2bf0-1a38-420d-9dba-d1a797364724\") " pod="openshift-marketplace/redhat-operators-ppggf" Nov 23 14:44:18 crc kubenswrapper[5050]: I1123 14:44:18.686591 5050 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f89a2bf0-1a38-420d-9dba-d1a797364724-utilities\") pod \"redhat-operators-ppggf\" (UID: \"f89a2bf0-1a38-420d-9dba-d1a797364724\") " pod="openshift-marketplace/redhat-operators-ppggf" Nov 23 14:44:18 crc kubenswrapper[5050]: I1123 14:44:18.686654 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f89a2bf0-1a38-420d-9dba-d1a797364724-catalog-content\") pod \"redhat-operators-ppggf\" (UID: \"f89a2bf0-1a38-420d-9dba-d1a797364724\") " pod="openshift-marketplace/redhat-operators-ppggf" Nov 23 14:44:18 crc kubenswrapper[5050]: I1123 14:44:18.718251 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wxv86\" (UniqueName: \"kubernetes.io/projected/f89a2bf0-1a38-420d-9dba-d1a797364724-kube-api-access-wxv86\") pod \"redhat-operators-ppggf\" (UID: \"f89a2bf0-1a38-420d-9dba-d1a797364724\") " pod="openshift-marketplace/redhat-operators-ppggf" Nov 23 14:44:18 crc kubenswrapper[5050]: I1123 14:44:18.805124 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-t6xmn" event={"ID":"20491310-05b1-4f61-9574-163a5463be0e","Type":"ContainerStarted","Data":"aa50adc1c4d7e5de82c598c8d204b6c0a0f00fb93cd6a922502961f48330f21b"} Nov 23 14:44:18 crc kubenswrapper[5050]: I1123 14:44:18.808740 5050 generic.go:334] "Generic (PLEG): container finished" podID="c0dcdbba-d3a5-47a2-a3e7-24acffe17074" containerID="f9205bb5b53cbcee7b53e5a97cb495910321a47af1b4e7063a7241421ab5b27c" exitCode=0 Nov 23 14:44:18 crc kubenswrapper[5050]: I1123 14:44:18.808806 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"c0dcdbba-d3a5-47a2-a3e7-24acffe17074","Type":"ContainerDied","Data":"f9205bb5b53cbcee7b53e5a97cb495910321a47af1b4e7063a7241421ab5b27c"} Nov 23 14:44:18 crc kubenswrapper[5050]: I1123 14:44:18.811497 5050 generic.go:334] "Generic (PLEG): container finished" podID="a408f71a-76c2-4c88-9b67-a08257da28eb" containerID="f2cd6510174cb6aa7413144286a84cf1ddd5146b921742d13021aca7fe58c412" exitCode=0 Nov 23 14:44:18 crc kubenswrapper[5050]: I1123 14:44:18.811530 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7dz9b" event={"ID":"a408f71a-76c2-4c88-9b67-a08257da28eb","Type":"ContainerDied","Data":"f2cd6510174cb6aa7413144286a84cf1ddd5146b921742d13021aca7fe58c412"} Nov 23 14:44:18 crc kubenswrapper[5050]: I1123 14:44:18.815951 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-ppggf" Nov 23 14:44:18 crc kubenswrapper[5050]: I1123 14:44:18.900572 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-vpr78"] Nov 23 14:44:19 crc kubenswrapper[5050]: I1123 14:44:19.232917 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-ppggf"] Nov 23 14:44:19 crc kubenswrapper[5050]: I1123 14:44:19.324019 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-dns/dns-default-wdqwt" Nov 23 14:44:19 crc kubenswrapper[5050]: I1123 14:44:19.491320 5050 patch_prober.go:28] interesting pod/router-default-5444994796-2g8pj container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 23 14:44:19 crc kubenswrapper[5050]: [-]has-synced failed: reason withheld Nov 23 14:44:19 crc kubenswrapper[5050]: [+]process-running ok Nov 23 14:44:19 crc kubenswrapper[5050]: healthz check failed Nov 23 14:44:19 crc kubenswrapper[5050]: I1123 14:44:19.491398 5050 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-2g8pj" podUID="05b57f72-3ec2-41f4-ba15-4bb73c15a1af" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 23 14:44:19 crc kubenswrapper[5050]: I1123 14:44:19.837121 5050 generic.go:334] "Generic (PLEG): container finished" podID="085c7e8e-434a-4bfa-b629-59f2191bc2e9" containerID="8911b3a870cbc75cd9e32880d18a25eb6e3d3f118517cd5b15087a9811b36ad5" exitCode=0 Nov 23 14:44:19 crc kubenswrapper[5050]: I1123 14:44:19.837296 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vpr78" event={"ID":"085c7e8e-434a-4bfa-b629-59f2191bc2e9","Type":"ContainerDied","Data":"8911b3a870cbc75cd9e32880d18a25eb6e3d3f118517cd5b15087a9811b36ad5"} Nov 23 14:44:19 crc kubenswrapper[5050]: I1123 14:44:19.837765 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vpr78" event={"ID":"085c7e8e-434a-4bfa-b629-59f2191bc2e9","Type":"ContainerStarted","Data":"c2189e1b7605af172c0543a8f8d3d01a4cb19febd2c928e49f0529c78fdb50bb"} Nov 23 14:44:19 crc kubenswrapper[5050]: I1123 14:44:19.841125 5050 generic.go:334] "Generic (PLEG): container finished" podID="f89a2bf0-1a38-420d-9dba-d1a797364724" containerID="9580398a5276d41a87eb211f9b29cc92ef376f4d3668907426b39cad6a148a1b" exitCode=0 Nov 23 14:44:19 crc kubenswrapper[5050]: I1123 14:44:19.841225 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ppggf" event={"ID":"f89a2bf0-1a38-420d-9dba-d1a797364724","Type":"ContainerDied","Data":"9580398a5276d41a87eb211f9b29cc92ef376f4d3668907426b39cad6a148a1b"} Nov 23 14:44:19 crc kubenswrapper[5050]: I1123 14:44:19.841334 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ppggf" event={"ID":"f89a2bf0-1a38-420d-9dba-d1a797364724","Type":"ContainerStarted","Data":"e52bf925a29fb71fd4b18cdc5c1dc175bc284b1f78bc142906202d9b0a5e736f"} Nov 23 14:44:19 crc kubenswrapper[5050]: I1123 14:44:19.847680 5050 generic.go:334] "Generic (PLEG): container finished" podID="20491310-05b1-4f61-9574-163a5463be0e" containerID="a0abcb7182d465be0a10d785a373ed85d2d1463f53d0cd6f588483da4ea90b9c" exitCode=0 Nov 23 14:44:19 crc kubenswrapper[5050]: I1123 14:44:19.847765 5050 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-t6xmn" event={"ID":"20491310-05b1-4f61-9574-163a5463be0e","Type":"ContainerDied","Data":"a0abcb7182d465be0a10d785a373ed85d2d1463f53d0cd6f588483da4ea90b9c"} Nov 23 14:44:19 crc kubenswrapper[5050]: I1123 14:44:19.977153 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Nov 23 14:44:19 crc kubenswrapper[5050]: I1123 14:44:19.991801 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 23 14:44:19 crc kubenswrapper[5050]: I1123 14:44:19.996864 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Nov 23 14:44:19 crc kubenswrapper[5050]: I1123 14:44:19.997421 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Nov 23 14:44:20 crc kubenswrapper[5050]: I1123 14:44:20.000869 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Nov 23 14:44:20 crc kubenswrapper[5050]: I1123 14:44:20.010560 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/a78c1086-e3cf-408b-95fa-bdf7b8eb417b-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"a78c1086-e3cf-408b-95fa-bdf7b8eb417b\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 23 14:44:20 crc kubenswrapper[5050]: I1123 14:44:20.010777 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/a78c1086-e3cf-408b-95fa-bdf7b8eb417b-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"a78c1086-e3cf-408b-95fa-bdf7b8eb417b\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 23 14:44:20 crc kubenswrapper[5050]: I1123 14:44:20.111959 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/a78c1086-e3cf-408b-95fa-bdf7b8eb417b-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"a78c1086-e3cf-408b-95fa-bdf7b8eb417b\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 23 14:44:20 crc kubenswrapper[5050]: I1123 14:44:20.112106 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/a78c1086-e3cf-408b-95fa-bdf7b8eb417b-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"a78c1086-e3cf-408b-95fa-bdf7b8eb417b\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 23 14:44:20 crc kubenswrapper[5050]: I1123 14:44:20.112205 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/a78c1086-e3cf-408b-95fa-bdf7b8eb417b-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"a78c1086-e3cf-408b-95fa-bdf7b8eb417b\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 23 14:44:20 crc kubenswrapper[5050]: I1123 14:44:20.135001 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/a78c1086-e3cf-408b-95fa-bdf7b8eb417b-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"a78c1086-e3cf-408b-95fa-bdf7b8eb417b\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 23 14:44:20 crc kubenswrapper[5050]: I1123 14:44:20.192646 5050 
util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 23 14:44:20 crc kubenswrapper[5050]: I1123 14:44:20.217992 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/c0dcdbba-d3a5-47a2-a3e7-24acffe17074-kube-api-access\") pod \"c0dcdbba-d3a5-47a2-a3e7-24acffe17074\" (UID: \"c0dcdbba-d3a5-47a2-a3e7-24acffe17074\") " Nov 23 14:44:20 crc kubenswrapper[5050]: I1123 14:44:20.218141 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/c0dcdbba-d3a5-47a2-a3e7-24acffe17074-kubelet-dir\") pod \"c0dcdbba-d3a5-47a2-a3e7-24acffe17074\" (UID: \"c0dcdbba-d3a5-47a2-a3e7-24acffe17074\") " Nov 23 14:44:20 crc kubenswrapper[5050]: I1123 14:44:20.218682 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c0dcdbba-d3a5-47a2-a3e7-24acffe17074-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "c0dcdbba-d3a5-47a2-a3e7-24acffe17074" (UID: "c0dcdbba-d3a5-47a2-a3e7-24acffe17074"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 23 14:44:20 crc kubenswrapper[5050]: I1123 14:44:20.249813 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c0dcdbba-d3a5-47a2-a3e7-24acffe17074-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "c0dcdbba-d3a5-47a2-a3e7-24acffe17074" (UID: "c0dcdbba-d3a5-47a2-a3e7-24acffe17074"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 14:44:20 crc kubenswrapper[5050]: I1123 14:44:20.318315 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 23 14:44:20 crc kubenswrapper[5050]: I1123 14:44:20.319296 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/c0dcdbba-d3a5-47a2-a3e7-24acffe17074-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 23 14:44:20 crc kubenswrapper[5050]: I1123 14:44:20.319318 5050 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/c0dcdbba-d3a5-47a2-a3e7-24acffe17074-kubelet-dir\") on node \"crc\" DevicePath \"\"" Nov 23 14:44:20 crc kubenswrapper[5050]: I1123 14:44:20.491762 5050 patch_prober.go:28] interesting pod/router-default-5444994796-2g8pj container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 23 14:44:20 crc kubenswrapper[5050]: [-]has-synced failed: reason withheld Nov 23 14:44:20 crc kubenswrapper[5050]: [+]process-running ok Nov 23 14:44:20 crc kubenswrapper[5050]: healthz check failed Nov 23 14:44:20 crc kubenswrapper[5050]: I1123 14:44:20.491812 5050 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-2g8pj" podUID="05b57f72-3ec2-41f4-ba15-4bb73c15a1af" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 23 14:44:20 crc kubenswrapper[5050]: I1123 14:44:20.850256 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Nov 23 14:44:20 crc kubenswrapper[5050]: I1123 14:44:20.882110 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 23 14:44:20 crc kubenswrapper[5050]: I1123 14:44:20.882262 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"c0dcdbba-d3a5-47a2-a3e7-24acffe17074","Type":"ContainerDied","Data":"91b70aa3fcf542157a5e81a20d406dcdb73e73344e43fddad653b42a03f82619"} Nov 23 14:44:20 crc kubenswrapper[5050]: I1123 14:44:20.882310 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="91b70aa3fcf542157a5e81a20d406dcdb73e73344e43fddad653b42a03f82619" Nov 23 14:44:21 crc kubenswrapper[5050]: I1123 14:44:21.490691 5050 patch_prober.go:28] interesting pod/router-default-5444994796-2g8pj container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 23 14:44:21 crc kubenswrapper[5050]: [-]has-synced failed: reason withheld Nov 23 14:44:21 crc kubenswrapper[5050]: [+]process-running ok Nov 23 14:44:21 crc kubenswrapper[5050]: healthz check failed Nov 23 14:44:21 crc kubenswrapper[5050]: I1123 14:44:21.491416 5050 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-2g8pj" podUID="05b57f72-3ec2-41f4-ba15-4bb73c15a1af" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 23 14:44:21 crc kubenswrapper[5050]: I1123 14:44:21.901102 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"a78c1086-e3cf-408b-95fa-bdf7b8eb417b","Type":"ContainerStarted","Data":"9d0687f786a83bcef03fe8d9f823bac05b4c76092ae95acd6305ff2184c4345e"} Nov 23 14:44:21 crc 
kubenswrapper[5050]: I1123 14:44:21.901169 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"a78c1086-e3cf-408b-95fa-bdf7b8eb417b","Type":"ContainerStarted","Data":"8465b06069d242f78bbb63ad180827b4b450b35a0f5adc02c7d9d96fc61d99e5"} Nov 23 14:44:22 crc kubenswrapper[5050]: I1123 14:44:22.452631 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-apiserver/apiserver-76f77b778f-svd5r" Nov 23 14:44:22 crc kubenswrapper[5050]: I1123 14:44:22.459564 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-apiserver/apiserver-76f77b778f-svd5r" Nov 23 14:44:22 crc kubenswrapper[5050]: I1123 14:44:22.492775 5050 patch_prober.go:28] interesting pod/router-default-5444994796-2g8pj container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 23 14:44:22 crc kubenswrapper[5050]: [-]has-synced failed: reason withheld Nov 23 14:44:22 crc kubenswrapper[5050]: [+]process-running ok Nov 23 14:44:22 crc kubenswrapper[5050]: healthz check failed Nov 23 14:44:22 crc kubenswrapper[5050]: I1123 14:44:22.492833 5050 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-2g8pj" podUID="05b57f72-3ec2-41f4-ba15-4bb73c15a1af" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 23 14:44:22 crc kubenswrapper[5050]: I1123 14:44:22.914673 5050 generic.go:334] "Generic (PLEG): container finished" podID="a78c1086-e3cf-408b-95fa-bdf7b8eb417b" containerID="9d0687f786a83bcef03fe8d9f823bac05b4c76092ae95acd6305ff2184c4345e" exitCode=0 Nov 23 14:44:22 crc kubenswrapper[5050]: I1123 14:44:22.914784 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"a78c1086-e3cf-408b-95fa-bdf7b8eb417b","Type":"ContainerDied","Data":"9d0687f786a83bcef03fe8d9f823bac05b4c76092ae95acd6305ff2184c4345e"} Nov 23 14:44:23 crc kubenswrapper[5050]: I1123 14:44:23.490402 5050 patch_prober.go:28] interesting pod/router-default-5444994796-2g8pj container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 23 14:44:23 crc kubenswrapper[5050]: [-]has-synced failed: reason withheld Nov 23 14:44:23 crc kubenswrapper[5050]: [+]process-running ok Nov 23 14:44:23 crc kubenswrapper[5050]: healthz check failed Nov 23 14:44:23 crc kubenswrapper[5050]: I1123 14:44:23.490542 5050 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-2g8pj" podUID="05b57f72-3ec2-41f4-ba15-4bb73c15a1af" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 23 14:44:24 crc kubenswrapper[5050]: I1123 14:44:24.490143 5050 patch_prober.go:28] interesting pod/router-default-5444994796-2g8pj container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 23 14:44:24 crc kubenswrapper[5050]: [-]has-synced failed: reason withheld Nov 23 14:44:24 crc kubenswrapper[5050]: [+]process-running ok Nov 23 14:44:24 crc kubenswrapper[5050]: healthz check failed Nov 23 14:44:24 crc kubenswrapper[5050]: I1123 14:44:24.490670 5050 prober.go:107] "Probe failed" probeType="Startup" 
pod="openshift-ingress/router-default-5444994796-2g8pj" podUID="05b57f72-3ec2-41f4-ba15-4bb73c15a1af" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 23 14:44:25 crc kubenswrapper[5050]: I1123 14:44:25.489574 5050 patch_prober.go:28] interesting pod/router-default-5444994796-2g8pj container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 23 14:44:25 crc kubenswrapper[5050]: [-]has-synced failed: reason withheld Nov 23 14:44:25 crc kubenswrapper[5050]: [+]process-running ok Nov 23 14:44:25 crc kubenswrapper[5050]: healthz check failed Nov 23 14:44:25 crc kubenswrapper[5050]: I1123 14:44:25.489640 5050 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-2g8pj" podUID="05b57f72-3ec2-41f4-ba15-4bb73c15a1af" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 23 14:44:26 crc kubenswrapper[5050]: I1123 14:44:26.489819 5050 patch_prober.go:28] interesting pod/router-default-5444994796-2g8pj container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 23 14:44:26 crc kubenswrapper[5050]: [-]has-synced failed: reason withheld Nov 23 14:44:26 crc kubenswrapper[5050]: [+]process-running ok Nov 23 14:44:26 crc kubenswrapper[5050]: healthz check failed Nov 23 14:44:26 crc kubenswrapper[5050]: I1123 14:44:26.489917 5050 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-2g8pj" podUID="05b57f72-3ec2-41f4-ba15-4bb73c15a1af" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 23 14:44:26 crc kubenswrapper[5050]: I1123 14:44:26.878087 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/downloads-7954f5f757-gvb4q" Nov 23 14:44:27 crc kubenswrapper[5050]: I1123 14:44:27.126109 5050 patch_prober.go:28] interesting pod/console-f9d7485db-b646l container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.24:8443/health\": dial tcp 10.217.0.24:8443: connect: connection refused" start-of-body= Nov 23 14:44:27 crc kubenswrapper[5050]: I1123 14:44:27.126189 5050 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-b646l" podUID="629f02dd-753b-46d2-b808-812468f6c9c5" containerName="console" probeResult="failure" output="Get \"https://10.217.0.24:8443/health\": dial tcp 10.217.0.24:8443: connect: connection refused" Nov 23 14:44:27 crc kubenswrapper[5050]: I1123 14:44:27.489606 5050 patch_prober.go:28] interesting pod/router-default-5444994796-2g8pj container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 23 14:44:27 crc kubenswrapper[5050]: [-]has-synced failed: reason withheld Nov 23 14:44:27 crc kubenswrapper[5050]: [+]process-running ok Nov 23 14:44:27 crc kubenswrapper[5050]: healthz check failed Nov 23 14:44:27 crc kubenswrapper[5050]: I1123 14:44:27.489740 5050 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-2g8pj" podUID="05b57f72-3ec2-41f4-ba15-4bb73c15a1af" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 
500" Nov 23 14:44:28 crc kubenswrapper[5050]: I1123 14:44:28.357185 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/cc69bd19-1f49-486e-a510-d5b8461fb172-metrics-certs\") pod \"network-metrics-daemon-gtj96\" (UID: \"cc69bd19-1f49-486e-a510-d5b8461fb172\") " pod="openshift-multus/network-metrics-daemon-gtj96" Nov 23 14:44:28 crc kubenswrapper[5050]: I1123 14:44:28.370556 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/cc69bd19-1f49-486e-a510-d5b8461fb172-metrics-certs\") pod \"network-metrics-daemon-gtj96\" (UID: \"cc69bd19-1f49-486e-a510-d5b8461fb172\") " pod="openshift-multus/network-metrics-daemon-gtj96" Nov 23 14:44:28 crc kubenswrapper[5050]: I1123 14:44:28.490921 5050 patch_prober.go:28] interesting pod/router-default-5444994796-2g8pj container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 23 14:44:28 crc kubenswrapper[5050]: [-]has-synced failed: reason withheld Nov 23 14:44:28 crc kubenswrapper[5050]: [+]process-running ok Nov 23 14:44:28 crc kubenswrapper[5050]: healthz check failed Nov 23 14:44:28 crc kubenswrapper[5050]: I1123 14:44:28.491001 5050 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-2g8pj" podUID="05b57f72-3ec2-41f4-ba15-4bb73c15a1af" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 23 14:44:28 crc kubenswrapper[5050]: I1123 14:44:28.594556 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gtj96" Nov 23 14:44:29 crc kubenswrapper[5050]: I1123 14:44:29.224865 5050 patch_prober.go:28] interesting pod/machine-config-daemon-hlrlq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 23 14:44:29 crc kubenswrapper[5050]: I1123 14:44:29.225382 5050 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 23 14:44:29 crc kubenswrapper[5050]: I1123 14:44:29.491614 5050 patch_prober.go:28] interesting pod/router-default-5444994796-2g8pj container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 23 14:44:29 crc kubenswrapper[5050]: [-]has-synced failed: reason withheld Nov 23 14:44:29 crc kubenswrapper[5050]: [+]process-running ok Nov 23 14:44:29 crc kubenswrapper[5050]: healthz check failed Nov 23 14:44:29 crc kubenswrapper[5050]: I1123 14:44:29.491693 5050 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-2g8pj" podUID="05b57f72-3ec2-41f4-ba15-4bb73c15a1af" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 23 14:44:29 crc kubenswrapper[5050]: I1123 14:44:29.839645 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 23 14:44:29 crc kubenswrapper[5050]: I1123 14:44:29.880067 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/a78c1086-e3cf-408b-95fa-bdf7b8eb417b-kubelet-dir\") pod \"a78c1086-e3cf-408b-95fa-bdf7b8eb417b\" (UID: \"a78c1086-e3cf-408b-95fa-bdf7b8eb417b\") " Nov 23 14:44:29 crc kubenswrapper[5050]: I1123 14:44:29.880357 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a78c1086-e3cf-408b-95fa-bdf7b8eb417b-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "a78c1086-e3cf-408b-95fa-bdf7b8eb417b" (UID: "a78c1086-e3cf-408b-95fa-bdf7b8eb417b"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 23 14:44:29 crc kubenswrapper[5050]: I1123 14:44:29.880942 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/a78c1086-e3cf-408b-95fa-bdf7b8eb417b-kube-api-access\") pod \"a78c1086-e3cf-408b-95fa-bdf7b8eb417b\" (UID: \"a78c1086-e3cf-408b-95fa-bdf7b8eb417b\") " Nov 23 14:44:29 crc kubenswrapper[5050]: I1123 14:44:29.881378 5050 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/a78c1086-e3cf-408b-95fa-bdf7b8eb417b-kubelet-dir\") on node \"crc\" DevicePath \"\"" Nov 23 14:44:29 crc kubenswrapper[5050]: I1123 14:44:29.897880 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a78c1086-e3cf-408b-95fa-bdf7b8eb417b-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "a78c1086-e3cf-408b-95fa-bdf7b8eb417b" (UID: "a78c1086-e3cf-408b-95fa-bdf7b8eb417b"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 14:44:29 crc kubenswrapper[5050]: I1123 14:44:29.982491 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/a78c1086-e3cf-408b-95fa-bdf7b8eb417b-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 23 14:44:29 crc kubenswrapper[5050]: I1123 14:44:29.986020 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-gtj96"] Nov 23 14:44:29 crc kubenswrapper[5050]: I1123 14:44:29.990857 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"a78c1086-e3cf-408b-95fa-bdf7b8eb417b","Type":"ContainerDied","Data":"8465b06069d242f78bbb63ad180827b4b450b35a0f5adc02c7d9d96fc61d99e5"} Nov 23 14:44:29 crc kubenswrapper[5050]: I1123 14:44:29.990902 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8465b06069d242f78bbb63ad180827b4b450b35a0f5adc02c7d9d96fc61d99e5" Nov 23 14:44:29 crc kubenswrapper[5050]: I1123 14:44:29.990981 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 23 14:44:30 crc kubenswrapper[5050]: I1123 14:44:30.490685 5050 patch_prober.go:28] interesting pod/router-default-5444994796-2g8pj container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 23 14:44:30 crc kubenswrapper[5050]: [-]has-synced failed: reason withheld Nov 23 14:44:30 crc kubenswrapper[5050]: [+]process-running ok Nov 23 14:44:30 crc kubenswrapper[5050]: healthz check failed Nov 23 14:44:30 crc kubenswrapper[5050]: I1123 14:44:30.490764 5050 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-2g8pj" podUID="05b57f72-3ec2-41f4-ba15-4bb73c15a1af" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 23 14:44:31 crc kubenswrapper[5050]: I1123 14:44:31.490714 5050 patch_prober.go:28] interesting pod/router-default-5444994796-2g8pj container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 23 14:44:31 crc kubenswrapper[5050]: [+]has-synced ok Nov 23 14:44:31 crc kubenswrapper[5050]: [+]process-running ok Nov 23 14:44:31 crc kubenswrapper[5050]: healthz check failed Nov 23 14:44:31 crc kubenswrapper[5050]: I1123 14:44:31.490806 5050 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-2g8pj" podUID="05b57f72-3ec2-41f4-ba15-4bb73c15a1af" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 23 14:44:32 crc kubenswrapper[5050]: I1123 14:44:32.498017 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-ingress/router-default-5444994796-2g8pj" Nov 23 14:44:32 crc kubenswrapper[5050]: I1123 14:44:32.502970 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ingress/router-default-5444994796-2g8pj" Nov 23 14:44:34 crc kubenswrapper[5050]: I1123 14:44:34.020249 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-gtj96" event={"ID":"cc69bd19-1f49-486e-a510-d5b8461fb172","Type":"ContainerStarted","Data":"807a263f8567208c88e764313977a1bad814492e48a35140ae1ec79accc2756d"} Nov 23 14:44:35 crc kubenswrapper[5050]: I1123 14:44:35.908960 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-697d97f7c8-rw2cv" Nov 23 14:44:37 crc kubenswrapper[5050]: I1123 14:44:37.131486 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-f9d7485db-b646l" Nov 23 14:44:37 crc kubenswrapper[5050]: I1123 14:44:37.137034 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-f9d7485db-b646l" Nov 23 14:44:40 crc kubenswrapper[5050]: E1123 14:44:40.387382 5050 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18" Nov 23 14:44:40 crc kubenswrapper[5050]: E1123 14:44:40.387947 5050 kuberuntime_manager.go:1274] "Unhandled Error" err="init container 
&Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-htvz6,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-7dz9b_openshift-marketplace(a408f71a-76c2-4c88-9b67-a08257da28eb): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 23 14:44:40 crc kubenswrapper[5050]: E1123 14:44:40.390311 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-7dz9b" podUID="a408f71a-76c2-4c88-9b67-a08257da28eb" Nov 23 14:44:47 crc kubenswrapper[5050]: E1123 14:44:47.676101 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-7dz9b" podUID="a408f71a-76c2-4c88-9b67-a08257da28eb" Nov 23 14:44:47 crc kubenswrapper[5050]: I1123 14:44:47.889363 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-qjw5q" Nov 23 14:44:50 crc kubenswrapper[5050]: E1123 14:44:50.838727 5050 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Nov 23 14:44:50 crc kubenswrapper[5050]: E1123 14:44:50.839005 5050 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-6vmb4,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-n9s79_openshift-marketplace(666a5515-92ff-44ef-8e85-f7205f823416): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 23 14:44:50 crc kubenswrapper[5050]: E1123 14:44:50.840473 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-n9s79" podUID="666a5515-92ff-44ef-8e85-f7205f823416" Nov 23 14:44:52 crc kubenswrapper[5050]: E1123 14:44:52.865008 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-n9s79" podUID="666a5515-92ff-44ef-8e85-f7205f823416" Nov 23 14:44:52 crc kubenswrapper[5050]: E1123 14:44:52.960546 5050 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Nov 23 14:44:52 crc kubenswrapper[5050]: E1123 14:44:52.960791 5050 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
Nov 23 14:44:52 crc kubenswrapper[5050]: E1123 14:44:52.963264 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-zz4fr" podUID="65bfd579-9076-4956-bd2b-e1c0c93775b5"
Nov 23 14:44:53 crc kubenswrapper[5050]: I1123 14:44:53.995954 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 23 14:44:54 crc kubenswrapper[5050]: E1123 14:44:54.403731 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-zz4fr" podUID="65bfd579-9076-4956-bd2b-e1c0c93775b5"
Nov 23 14:44:54 crc kubenswrapper[5050]: E1123 14:44:54.444709 5050 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18"
Nov 23 14:44:54 crc kubenswrapper[5050]: E1123 14:44:54.444911 5050 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-wxv86,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-ppggf_openshift-marketplace(f89a2bf0-1a38-420d-9dba-d1a797364724): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Nov 23 14:44:54 crc kubenswrapper[5050]: E1123 14:44:54.446478 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-ppggf" podUID="f89a2bf0-1a38-420d-9dba-d1a797364724"
Nov 23 14:44:54 crc kubenswrapper[5050]: E1123 14:44:54.520205 5050 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18"
Nov 23 14:44:54 crc kubenswrapper[5050]: E1123 14:44:54.520471 5050 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-qdh75,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-tpll6_openshift-marketplace(c57083c1-0f6f-4e07-aa17-218073eaa9c0): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Nov 23 14:44:54 crc kubenswrapper[5050]: E1123 14:44:54.522382 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-tpll6" podUID="c57083c1-0f6f-4e07-aa17-218073eaa9c0"
Nov 23 14:44:54 crc kubenswrapper[5050]: E1123 14:44:54.539702 5050 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18"
Nov 23 14:44:54 crc kubenswrapper[5050]: E1123 14:44:54.539859 5050 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-4bfl9,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-gcpxp_openshift-marketplace(fe09ca9a-811b-4126-a3bf-a43b0edbb475): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Nov 23 14:44:54 crc kubenswrapper[5050]: E1123 14:44:54.541344 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-gcpxp" podUID="fe09ca9a-811b-4126-a3bf-a43b0edbb475"
Nov 23 14:44:55 crc kubenswrapper[5050]: I1123 14:44:55.174633 5050 generic.go:334] "Generic (PLEG): container finished" podID="20491310-05b1-4f61-9574-163a5463be0e" containerID="0fda1d257dec07053430a2ec86c07ba51b18489bebe849f4562ff54d71a97c55" exitCode=0
Nov 23 14:44:55 crc kubenswrapper[5050]: I1123 14:44:55.174711 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-t6xmn" event={"ID":"20491310-05b1-4f61-9574-163a5463be0e","Type":"ContainerDied","Data":"0fda1d257dec07053430a2ec86c07ba51b18489bebe849f4562ff54d71a97c55"}
Nov 23 14:44:55 crc kubenswrapper[5050]: I1123 14:44:55.180726 5050 generic.go:334] "Generic (PLEG): container finished" podID="085c7e8e-434a-4bfa-b629-59f2191bc2e9" containerID="3e520e7710257ec2fa279f927149d3d2cc091679172a5d88d4a30d65dd1182eb" exitCode=0
Nov 23 14:44:55 crc kubenswrapper[5050]: I1123 14:44:55.180790 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vpr78" event={"ID":"085c7e8e-434a-4bfa-b629-59f2191bc2e9","Type":"ContainerDied","Data":"3e520e7710257ec2fa279f927149d3d2cc091679172a5d88d4a30d65dd1182eb"}
Nov 23 14:44:55 crc kubenswrapper[5050]: I1123 14:44:55.194508 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-gtj96" event={"ID":"cc69bd19-1f49-486e-a510-d5b8461fb172","Type":"ContainerStarted","Data":"8d7e571740e6fbb5a090c1b83046900968b790e0a4a06d122edbe2f1789dcf7d"}
Nov 23 14:44:55 crc kubenswrapper[5050]: I1123 14:44:55.194602 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-gtj96" event={"ID":"cc69bd19-1f49-486e-a510-d5b8461fb172","Type":"ContainerStarted","Data":"ec0659ade3ef6f73176a5598783ccc90db127a0388162b5435e3f000b8e5615f"}
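The ErrImagePull entries above give way to ImagePullBackOff: after each failed pull, kubelet waits before retrying, roughly doubling the delay up to a cap (the documented defaults are a 10s initial step capped at 5m; this cluster's exact values are not shown in the log). A hedged Go sketch of that back-off pattern; pullImage is a hypothetical stand-in for the real CRI call:

    package main

    import (
    	"errors"
    	"fmt"
    	"time"
    )

    // pullImage stands in for the CRI PullImage RPC that is failing above.
    func pullImage(ref string) error {
    	return errors.New("rpc error: code = Canceled desc = copying system image from manifest list")
    }

    func main() {
    	const ref = "registry.redhat.io/redhat/community-operator-index:v4.18"
    	delay := 10 * time.Second        // assumed initial back-off step
    	const maxDelay = 5 * time.Minute // assumed cap; later retries stay here
    	for attempt := 1; attempt <= 5; attempt++ {
    		if err := pullImage(ref); err == nil {
    			return
    		}
    		fmt.Printf("Back-off pulling image %q; retrying in %s\n", ref, delay)
    		time.Sleep(delay)
    		if delay *= 2; delay > maxDelay {
    			delay = maxDelay
    		}
    	}
    }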
for pod" pod="openshift-multus/network-metrics-daemon-gtj96" event={"ID":"cc69bd19-1f49-486e-a510-d5b8461fb172","Type":"ContainerStarted","Data":"ec0659ade3ef6f73176a5598783ccc90db127a0388162b5435e3f000b8e5615f"} Nov 23 14:44:55 crc kubenswrapper[5050]: E1123 14:44:55.202755 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-tpll6" podUID="c57083c1-0f6f-4e07-aa17-218073eaa9c0" Nov 23 14:44:55 crc kubenswrapper[5050]: E1123 14:44:55.202846 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-gcpxp" podUID="fe09ca9a-811b-4126-a3bf-a43b0edbb475" Nov 23 14:44:55 crc kubenswrapper[5050]: I1123 14:44:55.265977 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/network-metrics-daemon-gtj96" podStartSLOduration=169.265947117 podStartE2EDuration="2m49.265947117s" podCreationTimestamp="2025-11-23 14:42:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 14:44:55.265465454 +0000 UTC m=+190.432461949" watchObservedRunningTime="2025-11-23 14:44:55.265947117 +0000 UTC m=+190.432943602" Nov 23 14:44:56 crc kubenswrapper[5050]: I1123 14:44:56.204662 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-t6xmn" event={"ID":"20491310-05b1-4f61-9574-163a5463be0e","Type":"ContainerStarted","Data":"dfdd96177e2968ce33524d283310b64515aa7cb4f21f331a2d63897d8330e211"} Nov 23 14:44:56 crc kubenswrapper[5050]: I1123 14:44:56.208469 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vpr78" event={"ID":"085c7e8e-434a-4bfa-b629-59f2191bc2e9","Type":"ContainerStarted","Data":"e15fdfb5b6f09ba8053589fb059eadfc64ecad68cf69e66583789f27b8602cda"} Nov 23 14:44:56 crc kubenswrapper[5050]: I1123 14:44:56.227470 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-t6xmn" podStartSLOduration=3.382409001 podStartE2EDuration="39.227413425s" podCreationTimestamp="2025-11-23 14:44:17 +0000 UTC" firstStartedPulling="2025-11-23 14:44:19.850098564 +0000 UTC m=+155.017095049" lastFinishedPulling="2025-11-23 14:44:55.695102988 +0000 UTC m=+190.862099473" observedRunningTime="2025-11-23 14:44:56.225903063 +0000 UTC m=+191.392899608" watchObservedRunningTime="2025-11-23 14:44:56.227413425 +0000 UTC m=+191.394409940" Nov 23 14:44:56 crc kubenswrapper[5050]: I1123 14:44:56.249990 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-vpr78" podStartSLOduration=2.4581886920000002 podStartE2EDuration="38.249954406s" podCreationTimestamp="2025-11-23 14:44:18 +0000 UTC" firstStartedPulling="2025-11-23 14:44:19.84317002 +0000 UTC m=+155.010166505" lastFinishedPulling="2025-11-23 14:44:55.634935704 +0000 UTC m=+190.801932219" observedRunningTime="2025-11-23 14:44:56.244003809 +0000 UTC m=+191.411000324" watchObservedRunningTime="2025-11-23 14:44:56.249954406 +0000 UTC m=+191.416950921" Nov 23 14:44:57 crc kubenswrapper[5050]: I1123 14:44:57.799291 
5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-t6xmn" Nov 23 14:44:57 crc kubenswrapper[5050]: I1123 14:44:57.801833 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-t6xmn" Nov 23 14:44:57 crc kubenswrapper[5050]: I1123 14:44:57.976565 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-t6xmn" Nov 23 14:44:58 crc kubenswrapper[5050]: I1123 14:44:58.470599 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-vpr78" Nov 23 14:44:58 crc kubenswrapper[5050]: I1123 14:44:58.471040 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-vpr78" Nov 23 14:44:59 crc kubenswrapper[5050]: I1123 14:44:59.224267 5050 patch_prober.go:28] interesting pod/machine-config-daemon-hlrlq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 23 14:44:59 crc kubenswrapper[5050]: I1123 14:44:59.224820 5050 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 23 14:44:59 crc kubenswrapper[5050]: I1123 14:44:59.530522 5050 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-vpr78" podUID="085c7e8e-434a-4bfa-b629-59f2191bc2e9" containerName="registry-server" probeResult="failure" output=< Nov 23 14:44:59 crc kubenswrapper[5050]: timeout: failed to connect service ":50051" within 1s Nov 23 14:44:59 crc kubenswrapper[5050]: > Nov 23 14:45:00 crc kubenswrapper[5050]: I1123 14:45:00.143277 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29398485-p4nwd"] Nov 23 14:45:00 crc kubenswrapper[5050]: E1123 14:45:00.144035 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c0dcdbba-d3a5-47a2-a3e7-24acffe17074" containerName="pruner" Nov 23 14:45:00 crc kubenswrapper[5050]: I1123 14:45:00.144054 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="c0dcdbba-d3a5-47a2-a3e7-24acffe17074" containerName="pruner" Nov 23 14:45:00 crc kubenswrapper[5050]: E1123 14:45:00.144064 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a78c1086-e3cf-408b-95fa-bdf7b8eb417b" containerName="pruner" Nov 23 14:45:00 crc kubenswrapper[5050]: I1123 14:45:00.144072 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="a78c1086-e3cf-408b-95fa-bdf7b8eb417b" containerName="pruner" Nov 23 14:45:00 crc kubenswrapper[5050]: I1123 14:45:00.144207 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="a78c1086-e3cf-408b-95fa-bdf7b8eb417b" containerName="pruner" Nov 23 14:45:00 crc kubenswrapper[5050]: I1123 14:45:00.144221 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="c0dcdbba-d3a5-47a2-a3e7-24acffe17074" containerName="pruner" Nov 23 14:45:00 crc kubenswrapper[5050]: I1123 14:45:00.151227 5050 util.go:30] "No sandbox for pod can be found. 
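The vpr78 failure above comes from the catalog pod's startup probe against the registry-server gRPC port; "timeout: failed to connect service \":50051\" within 1s" is the output printed when the connection deadline expires before the server listens. A rough Go equivalent of the connect-with-deadline step (addresses and wording are illustrative; a real gRPC health probe would follow the dial with a health Check RPC):

    package main

    import (
    	"fmt"
    	"net"
    	"time"
    )

    func main() {
    	addr := "127.0.0.1:50051" // registry-server's gRPC port from the entries above
    	conn, err := net.DialTimeout("tcp", addr, time.Second)
    	if err != nil {
    		// Matches the shape of the probe output while the server is absent.
    		fmt.Printf("timeout: failed to connect service %q within 1s\n", ":50051")
    		return
    	}
    	conn.Close()
    	fmt.Println("connected; a full health probe would now issue a gRPC Check RPC")
    }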
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29398485-p4nwd" Nov 23 14:45:00 crc kubenswrapper[5050]: I1123 14:45:00.154113 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 23 14:45:00 crc kubenswrapper[5050]: I1123 14:45:00.159341 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 23 14:45:00 crc kubenswrapper[5050]: I1123 14:45:00.164504 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29398485-p4nwd"] Nov 23 14:45:00 crc kubenswrapper[5050]: I1123 14:45:00.222839 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/c9ec5edf-785a-4b69-bf12-857345ceb036-secret-volume\") pod \"collect-profiles-29398485-p4nwd\" (UID: \"c9ec5edf-785a-4b69-bf12-857345ceb036\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29398485-p4nwd" Nov 23 14:45:00 crc kubenswrapper[5050]: I1123 14:45:00.223043 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m9cfg\" (UniqueName: \"kubernetes.io/projected/c9ec5edf-785a-4b69-bf12-857345ceb036-kube-api-access-m9cfg\") pod \"collect-profiles-29398485-p4nwd\" (UID: \"c9ec5edf-785a-4b69-bf12-857345ceb036\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29398485-p4nwd" Nov 23 14:45:00 crc kubenswrapper[5050]: I1123 14:45:00.223115 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/c9ec5edf-785a-4b69-bf12-857345ceb036-config-volume\") pod \"collect-profiles-29398485-p4nwd\" (UID: \"c9ec5edf-785a-4b69-bf12-857345ceb036\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29398485-p4nwd" Nov 23 14:45:00 crc kubenswrapper[5050]: I1123 14:45:00.234851 5050 generic.go:334] "Generic (PLEG): container finished" podID="a408f71a-76c2-4c88-9b67-a08257da28eb" containerID="334a33cefa4534c0d81b9693c7e6fea419c5a02d6388f55be64585d162f9e659" exitCode=0 Nov 23 14:45:00 crc kubenswrapper[5050]: I1123 14:45:00.234909 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7dz9b" event={"ID":"a408f71a-76c2-4c88-9b67-a08257da28eb","Type":"ContainerDied","Data":"334a33cefa4534c0d81b9693c7e6fea419c5a02d6388f55be64585d162f9e659"} Nov 23 14:45:00 crc kubenswrapper[5050]: I1123 14:45:00.325810 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/c9ec5edf-785a-4b69-bf12-857345ceb036-secret-volume\") pod \"collect-profiles-29398485-p4nwd\" (UID: \"c9ec5edf-785a-4b69-bf12-857345ceb036\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29398485-p4nwd" Nov 23 14:45:00 crc kubenswrapper[5050]: I1123 14:45:00.326046 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m9cfg\" (UniqueName: \"kubernetes.io/projected/c9ec5edf-785a-4b69-bf12-857345ceb036-kube-api-access-m9cfg\") pod \"collect-profiles-29398485-p4nwd\" (UID: \"c9ec5edf-785a-4b69-bf12-857345ceb036\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29398485-p4nwd" Nov 23 14:45:00 crc kubenswrapper[5050]: I1123 14:45:00.326126 5050 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/c9ec5edf-785a-4b69-bf12-857345ceb036-config-volume\") pod \"collect-profiles-29398485-p4nwd\" (UID: \"c9ec5edf-785a-4b69-bf12-857345ceb036\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29398485-p4nwd" Nov 23 14:45:00 crc kubenswrapper[5050]: I1123 14:45:00.327596 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/c9ec5edf-785a-4b69-bf12-857345ceb036-config-volume\") pod \"collect-profiles-29398485-p4nwd\" (UID: \"c9ec5edf-785a-4b69-bf12-857345ceb036\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29398485-p4nwd" Nov 23 14:45:00 crc kubenswrapper[5050]: I1123 14:45:00.335619 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/c9ec5edf-785a-4b69-bf12-857345ceb036-secret-volume\") pod \"collect-profiles-29398485-p4nwd\" (UID: \"c9ec5edf-785a-4b69-bf12-857345ceb036\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29398485-p4nwd" Nov 23 14:45:00 crc kubenswrapper[5050]: I1123 14:45:00.347708 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m9cfg\" (UniqueName: \"kubernetes.io/projected/c9ec5edf-785a-4b69-bf12-857345ceb036-kube-api-access-m9cfg\") pod \"collect-profiles-29398485-p4nwd\" (UID: \"c9ec5edf-785a-4b69-bf12-857345ceb036\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29398485-p4nwd" Nov 23 14:45:00 crc kubenswrapper[5050]: I1123 14:45:00.470032 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29398485-p4nwd" Nov 23 14:45:00 crc kubenswrapper[5050]: I1123 14:45:00.900211 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29398485-p4nwd"] Nov 23 14:45:00 crc kubenswrapper[5050]: W1123 14:45:00.908691 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc9ec5edf_785a_4b69_bf12_857345ceb036.slice/crio-0e30f4a6e0663ef2ffce693c91124d38a3853079c93524b54ee15dcaaf21728a WatchSource:0}: Error finding container 0e30f4a6e0663ef2ffce693c91124d38a3853079c93524b54ee15dcaaf21728a: Status 404 returned error can't find the container with id 0e30f4a6e0663ef2ffce693c91124d38a3853079c93524b54ee15dcaaf21728a Nov 23 14:45:01 crc kubenswrapper[5050]: I1123 14:45:01.244781 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7dz9b" event={"ID":"a408f71a-76c2-4c88-9b67-a08257da28eb","Type":"ContainerStarted","Data":"3085f65409adad610cc9a5992ccb91e34cb001dbedd474fed92a98b9e3e1f81a"} Nov 23 14:45:01 crc kubenswrapper[5050]: I1123 14:45:01.248979 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29398485-p4nwd" event={"ID":"c9ec5edf-785a-4b69-bf12-857345ceb036","Type":"ContainerStarted","Data":"1669a05290c08d7db83bd2181ec08bc5120f0d9f5f5c1362d28a3b73177398c0"} Nov 23 14:45:01 crc kubenswrapper[5050]: I1123 14:45:01.249045 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29398485-p4nwd" event={"ID":"c9ec5edf-785a-4b69-bf12-857345ceb036","Type":"ContainerStarted","Data":"0e30f4a6e0663ef2ffce693c91124d38a3853079c93524b54ee15dcaaf21728a"} Nov 23 14:45:01 crc 
Nov 23 14:45:01 crc kubenswrapper[5050]: I1123 14:45:01.272559 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-7dz9b" podStartSLOduration=2.152809329 podStartE2EDuration="44.272526936s" podCreationTimestamp="2025-11-23 14:44:17 +0000 UTC" firstStartedPulling="2025-11-23 14:44:18.817918127 +0000 UTC m=+153.984914612" lastFinishedPulling="2025-11-23 14:45:00.937635734 +0000 UTC m=+196.104632219" observedRunningTime="2025-11-23 14:45:01.270020456 +0000 UTC m=+196.437016951" watchObservedRunningTime="2025-11-23 14:45:01.272526936 +0000 UTC m=+196.439523441"
Nov 23 14:45:01 crc kubenswrapper[5050]: I1123 14:45:01.293884 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29398485-p4nwd" podStartSLOduration=1.293862943 podStartE2EDuration="1.293862943s" podCreationTimestamp="2025-11-23 14:45:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 14:45:01.289000187 +0000 UTC m=+196.455996682" watchObservedRunningTime="2025-11-23 14:45:01.293862943 +0000 UTC m=+196.460859428"
Nov 23 14:45:01 crc kubenswrapper[5050]: I1123 14:45:01.838252 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-cvknj"]
Nov 23 14:45:02 crc kubenswrapper[5050]: I1123 14:45:02.256087 5050 generic.go:334] "Generic (PLEG): container finished" podID="c9ec5edf-785a-4b69-bf12-857345ceb036" containerID="1669a05290c08d7db83bd2181ec08bc5120f0d9f5f5c1362d28a3b73177398c0" exitCode=0
Nov 23 14:45:02 crc kubenswrapper[5050]: I1123 14:45:02.256187 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29398485-p4nwd" event={"ID":"c9ec5edf-785a-4b69-bf12-857345ceb036","Type":"ContainerDied","Data":"1669a05290c08d7db83bd2181ec08bc5120f0d9f5f5c1362d28a3b73177398c0"}
Nov 23 14:45:03 crc kubenswrapper[5050]: I1123 14:45:03.610547 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29398485-p4nwd"
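The pod_startup_latency_tracker entries are plain time arithmetic: podStartE2EDuration is observedRunningTime minus podCreationTimestamp, and podStartSLOduration subtracts the image-pull window (lastFinishedPulling minus firstStartedPulling). The redhat-marketplace-7dz9b numbers above check out: 44.27s end-to-end minus a 42.12s pull leaves the logged 2.15s SLO figure. A small Go verification using timestamps copied from that entry:

    package main

    import (
    	"fmt"
    	"time"
    )

    const layout = "2006-01-02 15:04:05.999999999 -0700 MST"

    func mustParse(s string) time.Time {
    	t, err := time.Parse(layout, s)
    	if err != nil {
    		panic(err)
    	}
    	return t
    }

    func main() {
    	// Timestamps from the redhat-marketplace-7dz9b entry above.
    	firstPull := mustParse("2025-11-23 14:44:18.817918127 +0000 UTC")
    	lastPull := mustParse("2025-11-23 14:45:00.937635734 +0000 UTC")
    	e2e := 44272526936 * time.Nanosecond // podStartE2EDuration="44.272526936s"

    	pull := lastPull.Sub(firstPull)
    	fmt.Println(pull)       // 42.119717607s spent pulling the image
    	fmt.Println(e2e - pull) // 2.152809329s, the logged podStartSLOduration
    }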
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29398485-p4nwd" Nov 23 14:45:03 crc kubenswrapper[5050]: I1123 14:45:03.775563 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/c9ec5edf-785a-4b69-bf12-857345ceb036-secret-volume\") pod \"c9ec5edf-785a-4b69-bf12-857345ceb036\" (UID: \"c9ec5edf-785a-4b69-bf12-857345ceb036\") " Nov 23 14:45:03 crc kubenswrapper[5050]: I1123 14:45:03.776254 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/c9ec5edf-785a-4b69-bf12-857345ceb036-config-volume\") pod \"c9ec5edf-785a-4b69-bf12-857345ceb036\" (UID: \"c9ec5edf-785a-4b69-bf12-857345ceb036\") " Nov 23 14:45:03 crc kubenswrapper[5050]: I1123 14:45:03.776316 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m9cfg\" (UniqueName: \"kubernetes.io/projected/c9ec5edf-785a-4b69-bf12-857345ceb036-kube-api-access-m9cfg\") pod \"c9ec5edf-785a-4b69-bf12-857345ceb036\" (UID: \"c9ec5edf-785a-4b69-bf12-857345ceb036\") " Nov 23 14:45:03 crc kubenswrapper[5050]: I1123 14:45:03.777198 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c9ec5edf-785a-4b69-bf12-857345ceb036-config-volume" (OuterVolumeSpecName: "config-volume") pod "c9ec5edf-785a-4b69-bf12-857345ceb036" (UID: "c9ec5edf-785a-4b69-bf12-857345ceb036"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 14:45:03 crc kubenswrapper[5050]: I1123 14:45:03.783688 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c9ec5edf-785a-4b69-bf12-857345ceb036-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "c9ec5edf-785a-4b69-bf12-857345ceb036" (UID: "c9ec5edf-785a-4b69-bf12-857345ceb036"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 14:45:03 crc kubenswrapper[5050]: I1123 14:45:03.786567 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c9ec5edf-785a-4b69-bf12-857345ceb036-kube-api-access-m9cfg" (OuterVolumeSpecName: "kube-api-access-m9cfg") pod "c9ec5edf-785a-4b69-bf12-857345ceb036" (UID: "c9ec5edf-785a-4b69-bf12-857345ceb036"). InnerVolumeSpecName "kube-api-access-m9cfg". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 14:45:03 crc kubenswrapper[5050]: I1123 14:45:03.877584 5050 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/c9ec5edf-785a-4b69-bf12-857345ceb036-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 23 14:45:03 crc kubenswrapper[5050]: I1123 14:45:03.877623 5050 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/c9ec5edf-785a-4b69-bf12-857345ceb036-config-volume\") on node \"crc\" DevicePath \"\"" Nov 23 14:45:03 crc kubenswrapper[5050]: I1123 14:45:03.877637 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m9cfg\" (UniqueName: \"kubernetes.io/projected/c9ec5edf-785a-4b69-bf12-857345ceb036-kube-api-access-m9cfg\") on node \"crc\" DevicePath \"\"" Nov 23 14:45:04 crc kubenswrapper[5050]: I1123 14:45:04.277865 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29398485-p4nwd" event={"ID":"c9ec5edf-785a-4b69-bf12-857345ceb036","Type":"ContainerDied","Data":"0e30f4a6e0663ef2ffce693c91124d38a3853079c93524b54ee15dcaaf21728a"} Nov 23 14:45:04 crc kubenswrapper[5050]: I1123 14:45:04.277914 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0e30f4a6e0663ef2ffce693c91124d38a3853079c93524b54ee15dcaaf21728a" Nov 23 14:45:04 crc kubenswrapper[5050]: I1123 14:45:04.277955 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29398485-p4nwd" Nov 23 14:45:07 crc kubenswrapper[5050]: I1123 14:45:07.412741 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-7dz9b" Nov 23 14:45:07 crc kubenswrapper[5050]: I1123 14:45:07.413240 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-7dz9b" Nov 23 14:45:07 crc kubenswrapper[5050]: I1123 14:45:07.467476 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-7dz9b" Nov 23 14:45:07 crc kubenswrapper[5050]: I1123 14:45:07.844612 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-t6xmn" Nov 23 14:45:08 crc kubenswrapper[5050]: I1123 14:45:08.361887 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-7dz9b" Nov 23 14:45:08 crc kubenswrapper[5050]: I1123 14:45:08.516763 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-vpr78" Nov 23 14:45:08 crc kubenswrapper[5050]: I1123 14:45:08.567880 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-vpr78" Nov 23 14:45:10 crc kubenswrapper[5050]: I1123 14:45:10.201301 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-t6xmn"] Nov 23 14:45:10 crc kubenswrapper[5050]: I1123 14:45:10.202591 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-t6xmn" podUID="20491310-05b1-4f61-9574-163a5463be0e" containerName="registry-server" containerID="cri-o://dfdd96177e2968ce33524d283310b64515aa7cb4f21f331a2d63897d8330e211" gracePeriod=2 Nov 23 14:45:10 crc kubenswrapper[5050]: 
Nov 23 14:45:10 crc kubenswrapper[5050]: I1123 14:45:10.324776 5050 generic.go:334] "Generic (PLEG): container finished" podID="fe09ca9a-811b-4126-a3bf-a43b0edbb475" containerID="28bfdb0bb4104972b4c28510b1f1e62c1387b24595e403f9381f73e66b13c46f" exitCode=0
Nov 23 14:45:10 crc kubenswrapper[5050]: I1123 14:45:10.324868 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gcpxp" event={"ID":"fe09ca9a-811b-4126-a3bf-a43b0edbb475","Type":"ContainerDied","Data":"28bfdb0bb4104972b4c28510b1f1e62c1387b24595e403f9381f73e66b13c46f"}
Nov 23 14:45:10 crc kubenswrapper[5050]: I1123 14:45:10.331331 5050 generic.go:334] "Generic (PLEG): container finished" podID="65bfd579-9076-4956-bd2b-e1c0c93775b5" containerID="582009a24f1bc05ef5512383d3d8da4a8e99e9af0e8ba0d573cf2106dcec96ba" exitCode=0
Nov 23 14:45:10 crc kubenswrapper[5050]: I1123 14:45:10.331411 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zz4fr" event={"ID":"65bfd579-9076-4956-bd2b-e1c0c93775b5","Type":"ContainerDied","Data":"582009a24f1bc05ef5512383d3d8da4a8e99e9af0e8ba0d573cf2106dcec96ba"}
Nov 23 14:45:10 crc kubenswrapper[5050]: I1123 14:45:10.336892 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ppggf" event={"ID":"f89a2bf0-1a38-420d-9dba-d1a797364724","Type":"ContainerStarted","Data":"1bd9a7d112cb602d0ef4778f90a13e3e5ccd5d6f75a9c1e8d04abd5306626133"}
Nov 23 14:45:10 crc kubenswrapper[5050]: I1123 14:45:10.340976 5050 generic.go:334] "Generic (PLEG): container finished" podID="666a5515-92ff-44ef-8e85-f7205f823416" containerID="498486eb7d78da24d749e0f1d0707cc71d162d9363e354a83b684af58f5cd1b6" exitCode=0
Nov 23 14:45:10 crc kubenswrapper[5050]: I1123 14:45:10.341012 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-n9s79" event={"ID":"666a5515-92ff-44ef-8e85-f7205f823416","Type":"ContainerDied","Data":"498486eb7d78da24d749e0f1d0707cc71d162d9363e354a83b684af58f5cd1b6"}
Nov 23 14:45:10 crc kubenswrapper[5050]: I1123 14:45:10.562630 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-t6xmn"
Nov 23 14:45:10 crc kubenswrapper[5050]: I1123 14:45:10.676843 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/20491310-05b1-4f61-9574-163a5463be0e-catalog-content\") pod \"20491310-05b1-4f61-9574-163a5463be0e\" (UID: \"20491310-05b1-4f61-9574-163a5463be0e\") "
Nov 23 14:45:10 crc kubenswrapper[5050]: I1123 14:45:10.676974 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cndhk\" (UniqueName: \"kubernetes.io/projected/20491310-05b1-4f61-9574-163a5463be0e-kube-api-access-cndhk\") pod \"20491310-05b1-4f61-9574-163a5463be0e\" (UID: \"20491310-05b1-4f61-9574-163a5463be0e\") "
Nov 23 14:45:10 crc kubenswrapper[5050]: I1123 14:45:10.677084 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/20491310-05b1-4f61-9574-163a5463be0e-utilities\") pod \"20491310-05b1-4f61-9574-163a5463be0e\" (UID: \"20491310-05b1-4f61-9574-163a5463be0e\") "
Nov 23 14:45:10 crc kubenswrapper[5050]: I1123 14:45:10.677859 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/20491310-05b1-4f61-9574-163a5463be0e-utilities" (OuterVolumeSpecName: "utilities") pod "20491310-05b1-4f61-9574-163a5463be0e" (UID: "20491310-05b1-4f61-9574-163a5463be0e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 23 14:45:10 crc kubenswrapper[5050]: I1123 14:45:10.686871 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20491310-05b1-4f61-9574-163a5463be0e-kube-api-access-cndhk" (OuterVolumeSpecName: "kube-api-access-cndhk") pod "20491310-05b1-4f61-9574-163a5463be0e" (UID: "20491310-05b1-4f61-9574-163a5463be0e"). InnerVolumeSpecName "kube-api-access-cndhk". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 23 14:45:10 crc kubenswrapper[5050]: I1123 14:45:10.697934 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/20491310-05b1-4f61-9574-163a5463be0e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "20491310-05b1-4f61-9574-163a5463be0e" (UID: "20491310-05b1-4f61-9574-163a5463be0e"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 23 14:45:10 crc kubenswrapper[5050]: I1123 14:45:10.784810 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cndhk\" (UniqueName: \"kubernetes.io/projected/20491310-05b1-4f61-9574-163a5463be0e-kube-api-access-cndhk\") on node \"crc\" DevicePath \"\""
Nov 23 14:45:10 crc kubenswrapper[5050]: I1123 14:45:10.784851 5050 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/20491310-05b1-4f61-9574-163a5463be0e-utilities\") on node \"crc\" DevicePath \"\""
Nov 23 14:45:10 crc kubenswrapper[5050]: I1123 14:45:10.784864 5050 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/20491310-05b1-4f61-9574-163a5463be0e-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 23 14:45:11 crc kubenswrapper[5050]: I1123 14:45:11.350485 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-n9s79" event={"ID":"666a5515-92ff-44ef-8e85-f7205f823416","Type":"ContainerStarted","Data":"25f735542c1bd1e83ab5944a3ee7501458c50cc2c7d146b70581a081ace8bad0"}
Nov 23 14:45:11 crc kubenswrapper[5050]: I1123 14:45:11.354189 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gcpxp" event={"ID":"fe09ca9a-811b-4126-a3bf-a43b0edbb475","Type":"ContainerStarted","Data":"ad4a174116aaa0e006ea066dd82f0e7b17f59a5c302be7203d65341fb90d9d2f"}
Nov 23 14:45:11 crc kubenswrapper[5050]: I1123 14:45:11.359521 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zz4fr" event={"ID":"65bfd579-9076-4956-bd2b-e1c0c93775b5","Type":"ContainerStarted","Data":"ad373a5b486e70d543cf1fbac140806016ba4fbd41b8e700b160011c3299924f"}
Nov 23 14:45:11 crc kubenswrapper[5050]: I1123 14:45:11.368976 5050 generic.go:334] "Generic (PLEG): container finished" podID="f89a2bf0-1a38-420d-9dba-d1a797364724" containerID="1bd9a7d112cb602d0ef4778f90a13e3e5ccd5d6f75a9c1e8d04abd5306626133" exitCode=0
Nov 23 14:45:11 crc kubenswrapper[5050]: I1123 14:45:11.369034 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ppggf" event={"ID":"f89a2bf0-1a38-420d-9dba-d1a797364724","Type":"ContainerDied","Data":"1bd9a7d112cb602d0ef4778f90a13e3e5ccd5d6f75a9c1e8d04abd5306626133"}
Nov 23 14:45:11 crc kubenswrapper[5050]: I1123 14:45:11.377469 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-n9s79" podStartSLOduration=3.169604106 podStartE2EDuration="57.377426991s" podCreationTimestamp="2025-11-23 14:44:14 +0000 UTC" firstStartedPulling="2025-11-23 14:44:16.765495158 +0000 UTC m=+151.932491643" lastFinishedPulling="2025-11-23 14:45:10.973318043 +0000 UTC m=+206.140314528" observedRunningTime="2025-11-23 14:45:11.374025566 +0000 UTC m=+206.541022051" watchObservedRunningTime="2025-11-23 14:45:11.377426991 +0000 UTC m=+206.544423476"
Nov 23 14:45:11 crc kubenswrapper[5050]: I1123 14:45:11.377753 5050 generic.go:334] "Generic (PLEG): container finished" podID="20491310-05b1-4f61-9574-163a5463be0e" containerID="dfdd96177e2968ce33524d283310b64515aa7cb4f21f331a2d63897d8330e211" exitCode=0
Nov 23 14:45:11 crc kubenswrapper[5050]: I1123 14:45:11.377978 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-t6xmn" event={"ID":"20491310-05b1-4f61-9574-163a5463be0e","Type":"ContainerDied","Data":"dfdd96177e2968ce33524d283310b64515aa7cb4f21f331a2d63897d8330e211"}
event={"ID":"20491310-05b1-4f61-9574-163a5463be0e","Type":"ContainerDied","Data":"dfdd96177e2968ce33524d283310b64515aa7cb4f21f331a2d63897d8330e211"} Nov 23 14:45:11 crc kubenswrapper[5050]: I1123 14:45:11.378000 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-t6xmn" event={"ID":"20491310-05b1-4f61-9574-163a5463be0e","Type":"ContainerDied","Data":"aa50adc1c4d7e5de82c598c8d204b6c0a0f00fb93cd6a922502961f48330f21b"} Nov 23 14:45:11 crc kubenswrapper[5050]: I1123 14:45:11.378021 5050 scope.go:117] "RemoveContainer" containerID="dfdd96177e2968ce33524d283310b64515aa7cb4f21f331a2d63897d8330e211" Nov 23 14:45:11 crc kubenswrapper[5050]: I1123 14:45:11.378027 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-t6xmn" Nov 23 14:45:11 crc kubenswrapper[5050]: I1123 14:45:11.381999 5050 generic.go:334] "Generic (PLEG): container finished" podID="c57083c1-0f6f-4e07-aa17-218073eaa9c0" containerID="4465eaae7961fc5e10a757db9c37ae8b6ba0715cc67d3733dcdb5338aba5be7a" exitCode=0 Nov 23 14:45:11 crc kubenswrapper[5050]: I1123 14:45:11.382072 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tpll6" event={"ID":"c57083c1-0f6f-4e07-aa17-218073eaa9c0","Type":"ContainerDied","Data":"4465eaae7961fc5e10a757db9c37ae8b6ba0715cc67d3733dcdb5338aba5be7a"} Nov 23 14:45:11 crc kubenswrapper[5050]: I1123 14:45:11.399756 5050 scope.go:117] "RemoveContainer" containerID="0fda1d257dec07053430a2ec86c07ba51b18489bebe849f4562ff54d71a97c55" Nov 23 14:45:11 crc kubenswrapper[5050]: I1123 14:45:11.436793 5050 scope.go:117] "RemoveContainer" containerID="a0abcb7182d465be0a10d785a373ed85d2d1463f53d0cd6f588483da4ea90b9c" Nov 23 14:45:11 crc kubenswrapper[5050]: I1123 14:45:11.459164 5050 scope.go:117] "RemoveContainer" containerID="dfdd96177e2968ce33524d283310b64515aa7cb4f21f331a2d63897d8330e211" Nov 23 14:45:11 crc kubenswrapper[5050]: I1123 14:45:11.461057 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-gcpxp" podStartSLOduration=2.479565472 podStartE2EDuration="56.461015411s" podCreationTimestamp="2025-11-23 14:44:15 +0000 UTC" firstStartedPulling="2025-11-23 14:44:16.77271334 +0000 UTC m=+151.939709825" lastFinishedPulling="2025-11-23 14:45:10.754163259 +0000 UTC m=+205.921159764" observedRunningTime="2025-11-23 14:45:11.4299159 +0000 UTC m=+206.596912385" watchObservedRunningTime="2025-11-23 14:45:11.461015411 +0000 UTC m=+206.628011886" Nov 23 14:45:11 crc kubenswrapper[5050]: I1123 14:45:11.463196 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-zz4fr" podStartSLOduration=2.51491644 podStartE2EDuration="56.463189221s" podCreationTimestamp="2025-11-23 14:44:15 +0000 UTC" firstStartedPulling="2025-11-23 14:44:16.775537789 +0000 UTC m=+151.942534274" lastFinishedPulling="2025-11-23 14:45:10.72381057 +0000 UTC m=+205.890807055" observedRunningTime="2025-11-23 14:45:11.45778065 +0000 UTC m=+206.624777145" watchObservedRunningTime="2025-11-23 14:45:11.463189221 +0000 UTC m=+206.630185706" Nov 23 14:45:11 crc kubenswrapper[5050]: E1123 14:45:11.465286 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dfdd96177e2968ce33524d283310b64515aa7cb4f21f331a2d63897d8330e211\": container with ID starting with 
dfdd96177e2968ce33524d283310b64515aa7cb4f21f331a2d63897d8330e211 not found: ID does not exist" containerID="dfdd96177e2968ce33524d283310b64515aa7cb4f21f331a2d63897d8330e211" Nov 23 14:45:11 crc kubenswrapper[5050]: I1123 14:45:11.465347 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dfdd96177e2968ce33524d283310b64515aa7cb4f21f331a2d63897d8330e211"} err="failed to get container status \"dfdd96177e2968ce33524d283310b64515aa7cb4f21f331a2d63897d8330e211\": rpc error: code = NotFound desc = could not find container \"dfdd96177e2968ce33524d283310b64515aa7cb4f21f331a2d63897d8330e211\": container with ID starting with dfdd96177e2968ce33524d283310b64515aa7cb4f21f331a2d63897d8330e211 not found: ID does not exist" Nov 23 14:45:11 crc kubenswrapper[5050]: I1123 14:45:11.465414 5050 scope.go:117] "RemoveContainer" containerID="0fda1d257dec07053430a2ec86c07ba51b18489bebe849f4562ff54d71a97c55" Nov 23 14:45:11 crc kubenswrapper[5050]: E1123 14:45:11.467099 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0fda1d257dec07053430a2ec86c07ba51b18489bebe849f4562ff54d71a97c55\": container with ID starting with 0fda1d257dec07053430a2ec86c07ba51b18489bebe849f4562ff54d71a97c55 not found: ID does not exist" containerID="0fda1d257dec07053430a2ec86c07ba51b18489bebe849f4562ff54d71a97c55" Nov 23 14:45:11 crc kubenswrapper[5050]: I1123 14:45:11.467134 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0fda1d257dec07053430a2ec86c07ba51b18489bebe849f4562ff54d71a97c55"} err="failed to get container status \"0fda1d257dec07053430a2ec86c07ba51b18489bebe849f4562ff54d71a97c55\": rpc error: code = NotFound desc = could not find container \"0fda1d257dec07053430a2ec86c07ba51b18489bebe849f4562ff54d71a97c55\": container with ID starting with 0fda1d257dec07053430a2ec86c07ba51b18489bebe849f4562ff54d71a97c55 not found: ID does not exist" Nov 23 14:45:11 crc kubenswrapper[5050]: I1123 14:45:11.467157 5050 scope.go:117] "RemoveContainer" containerID="a0abcb7182d465be0a10d785a373ed85d2d1463f53d0cd6f588483da4ea90b9c" Nov 23 14:45:11 crc kubenswrapper[5050]: E1123 14:45:11.467694 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a0abcb7182d465be0a10d785a373ed85d2d1463f53d0cd6f588483da4ea90b9c\": container with ID starting with a0abcb7182d465be0a10d785a373ed85d2d1463f53d0cd6f588483da4ea90b9c not found: ID does not exist" containerID="a0abcb7182d465be0a10d785a373ed85d2d1463f53d0cd6f588483da4ea90b9c" Nov 23 14:45:11 crc kubenswrapper[5050]: I1123 14:45:11.467737 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a0abcb7182d465be0a10d785a373ed85d2d1463f53d0cd6f588483da4ea90b9c"} err="failed to get container status \"a0abcb7182d465be0a10d785a373ed85d2d1463f53d0cd6f588483da4ea90b9c\": rpc error: code = NotFound desc = could not find container \"a0abcb7182d465be0a10d785a373ed85d2d1463f53d0cd6f588483da4ea90b9c\": container with ID starting with a0abcb7182d465be0a10d785a373ed85d2d1463f53d0cd6f588483da4ea90b9c not found: ID does not exist" Nov 23 14:45:11 crc kubenswrapper[5050]: I1123 14:45:11.490493 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-t6xmn"] Nov 23 14:45:11 crc kubenswrapper[5050]: I1123 14:45:11.493503 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" 
pods=["openshift-marketplace/redhat-marketplace-t6xmn"] Nov 23 14:45:11 crc kubenswrapper[5050]: I1123 14:45:11.559738 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20491310-05b1-4f61-9574-163a5463be0e" path="/var/lib/kubelet/pods/20491310-05b1-4f61-9574-163a5463be0e/volumes" Nov 23 14:45:12 crc kubenswrapper[5050]: I1123 14:45:12.389344 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ppggf" event={"ID":"f89a2bf0-1a38-420d-9dba-d1a797364724","Type":"ContainerStarted","Data":"595b4f52ee3f3a29cfad947340eff5d52dba39b072b419063b53512696ee3efa"} Nov 23 14:45:12 crc kubenswrapper[5050]: I1123 14:45:12.411252 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tpll6" event={"ID":"c57083c1-0f6f-4e07-aa17-218073eaa9c0","Type":"ContainerStarted","Data":"eb07d0ee5705478f18b2b80e68d5725374d7b7a34e1e4782c9e6aad64b6a0be8"} Nov 23 14:45:12 crc kubenswrapper[5050]: I1123 14:45:12.430623 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-ppggf" podStartSLOduration=3.482098197 podStartE2EDuration="54.430598375s" podCreationTimestamp="2025-11-23 14:44:18 +0000 UTC" firstStartedPulling="2025-11-23 14:44:20.887285741 +0000 UTC m=+156.054282226" lastFinishedPulling="2025-11-23 14:45:11.835785899 +0000 UTC m=+207.002782404" observedRunningTime="2025-11-23 14:45:12.424605518 +0000 UTC m=+207.591602013" watchObservedRunningTime="2025-11-23 14:45:12.430598375 +0000 UTC m=+207.597594860" Nov 23 14:45:12 crc kubenswrapper[5050]: I1123 14:45:12.446810 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-tpll6" podStartSLOduration=2.34618279 podStartE2EDuration="57.446787269s" podCreationTimestamp="2025-11-23 14:44:15 +0000 UTC" firstStartedPulling="2025-11-23 14:44:16.780419946 +0000 UTC m=+151.947416431" lastFinishedPulling="2025-11-23 14:45:11.881024425 +0000 UTC m=+207.048020910" observedRunningTime="2025-11-23 14:45:12.442331014 +0000 UTC m=+207.609327489" watchObservedRunningTime="2025-11-23 14:45:12.446787269 +0000 UTC m=+207.613783754" Nov 23 14:45:15 crc kubenswrapper[5050]: I1123 14:45:15.243187 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-n9s79" Nov 23 14:45:15 crc kubenswrapper[5050]: I1123 14:45:15.243704 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-n9s79" Nov 23 14:45:15 crc kubenswrapper[5050]: I1123 14:45:15.293382 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-n9s79" Nov 23 14:45:15 crc kubenswrapper[5050]: I1123 14:45:15.426414 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-tpll6" Nov 23 14:45:15 crc kubenswrapper[5050]: I1123 14:45:15.426525 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-tpll6" Nov 23 14:45:15 crc kubenswrapper[5050]: I1123 14:45:15.468804 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-tpll6" Nov 23 14:45:15 crc kubenswrapper[5050]: I1123 14:45:15.605123 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-zz4fr" Nov 23 
14:45:15 crc kubenswrapper[5050]: I1123 14:45:15.605207 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-zz4fr" Nov 23 14:45:15 crc kubenswrapper[5050]: I1123 14:45:15.647454 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-zz4fr" Nov 23 14:45:15 crc kubenswrapper[5050]: I1123 14:45:15.825841 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-gcpxp" Nov 23 14:45:15 crc kubenswrapper[5050]: I1123 14:45:15.825912 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-gcpxp" Nov 23 14:45:15 crc kubenswrapper[5050]: I1123 14:45:15.868701 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-gcpxp" Nov 23 14:45:16 crc kubenswrapper[5050]: I1123 14:45:16.509475 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-zz4fr" Nov 23 14:45:16 crc kubenswrapper[5050]: I1123 14:45:16.538153 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-gcpxp" Nov 23 14:45:18 crc kubenswrapper[5050]: I1123 14:45:18.603487 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-zz4fr"] Nov 23 14:45:18 crc kubenswrapper[5050]: I1123 14:45:18.603833 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-zz4fr" podUID="65bfd579-9076-4956-bd2b-e1c0c93775b5" containerName="registry-server" containerID="cri-o://ad373a5b486e70d543cf1fbac140806016ba4fbd41b8e700b160011c3299924f" gracePeriod=2 Nov 23 14:45:18 crc kubenswrapper[5050]: I1123 14:45:18.816897 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-ppggf" Nov 23 14:45:18 crc kubenswrapper[5050]: I1123 14:45:18.817402 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-ppggf" Nov 23 14:45:18 crc kubenswrapper[5050]: I1123 14:45:18.887422 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-ppggf" Nov 23 14:45:19 crc kubenswrapper[5050]: I1123 14:45:19.472223 5050 generic.go:334] "Generic (PLEG): container finished" podID="65bfd579-9076-4956-bd2b-e1c0c93775b5" containerID="ad373a5b486e70d543cf1fbac140806016ba4fbd41b8e700b160011c3299924f" exitCode=0 Nov 23 14:45:19 crc kubenswrapper[5050]: I1123 14:45:19.473998 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zz4fr" event={"ID":"65bfd579-9076-4956-bd2b-e1c0c93775b5","Type":"ContainerDied","Data":"ad373a5b486e70d543cf1fbac140806016ba4fbd41b8e700b160011c3299924f"} Nov 23 14:45:19 crc kubenswrapper[5050]: I1123 14:45:19.527331 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-ppggf" Nov 23 14:45:19 crc kubenswrapper[5050]: I1123 14:45:19.606605 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-gcpxp"] Nov 23 14:45:19 crc kubenswrapper[5050]: I1123 14:45:19.607355 5050 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openshift-marketplace/certified-operators-gcpxp" podUID="fe09ca9a-811b-4126-a3bf-a43b0edbb475" containerName="registry-server" containerID="cri-o://ad4a174116aaa0e006ea066dd82f0e7b17f59a5c302be7203d65341fb90d9d2f" gracePeriod=2 Nov 23 14:45:19 crc kubenswrapper[5050]: I1123 14:45:19.770026 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-zz4fr" Nov 23 14:45:19 crc kubenswrapper[5050]: I1123 14:45:19.872464 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4m7cl\" (UniqueName: \"kubernetes.io/projected/65bfd579-9076-4956-bd2b-e1c0c93775b5-kube-api-access-4m7cl\") pod \"65bfd579-9076-4956-bd2b-e1c0c93775b5\" (UID: \"65bfd579-9076-4956-bd2b-e1c0c93775b5\") " Nov 23 14:45:19 crc kubenswrapper[5050]: I1123 14:45:19.872536 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/65bfd579-9076-4956-bd2b-e1c0c93775b5-catalog-content\") pod \"65bfd579-9076-4956-bd2b-e1c0c93775b5\" (UID: \"65bfd579-9076-4956-bd2b-e1c0c93775b5\") " Nov 23 14:45:19 crc kubenswrapper[5050]: I1123 14:45:19.872631 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/65bfd579-9076-4956-bd2b-e1c0c93775b5-utilities\") pod \"65bfd579-9076-4956-bd2b-e1c0c93775b5\" (UID: \"65bfd579-9076-4956-bd2b-e1c0c93775b5\") " Nov 23 14:45:19 crc kubenswrapper[5050]: I1123 14:45:19.874483 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/65bfd579-9076-4956-bd2b-e1c0c93775b5-utilities" (OuterVolumeSpecName: "utilities") pod "65bfd579-9076-4956-bd2b-e1c0c93775b5" (UID: "65bfd579-9076-4956-bd2b-e1c0c93775b5"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 14:45:19 crc kubenswrapper[5050]: I1123 14:45:19.896816 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/65bfd579-9076-4956-bd2b-e1c0c93775b5-kube-api-access-4m7cl" (OuterVolumeSpecName: "kube-api-access-4m7cl") pod "65bfd579-9076-4956-bd2b-e1c0c93775b5" (UID: "65bfd579-9076-4956-bd2b-e1c0c93775b5"). InnerVolumeSpecName "kube-api-access-4m7cl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 14:45:19 crc kubenswrapper[5050]: I1123 14:45:19.949708 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/65bfd579-9076-4956-bd2b-e1c0c93775b5-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "65bfd579-9076-4956-bd2b-e1c0c93775b5" (UID: "65bfd579-9076-4956-bd2b-e1c0c93775b5"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 14:45:19 crc kubenswrapper[5050]: I1123 14:45:19.974650 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4m7cl\" (UniqueName: \"kubernetes.io/projected/65bfd579-9076-4956-bd2b-e1c0c93775b5-kube-api-access-4m7cl\") on node \"crc\" DevicePath \"\"" Nov 23 14:45:19 crc kubenswrapper[5050]: I1123 14:45:19.974696 5050 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/65bfd579-9076-4956-bd2b-e1c0c93775b5-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 23 14:45:19 crc kubenswrapper[5050]: I1123 14:45:19.974714 5050 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/65bfd579-9076-4956-bd2b-e1c0c93775b5-utilities\") on node \"crc\" DevicePath \"\"" Nov 23 14:45:20 crc kubenswrapper[5050]: I1123 14:45:20.059392 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-gcpxp" Nov 23 14:45:20 crc kubenswrapper[5050]: I1123 14:45:20.077571 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fe09ca9a-811b-4126-a3bf-a43b0edbb475-utilities\") pod \"fe09ca9a-811b-4126-a3bf-a43b0edbb475\" (UID: \"fe09ca9a-811b-4126-a3bf-a43b0edbb475\") " Nov 23 14:45:20 crc kubenswrapper[5050]: I1123 14:45:20.077653 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fe09ca9a-811b-4126-a3bf-a43b0edbb475-catalog-content\") pod \"fe09ca9a-811b-4126-a3bf-a43b0edbb475\" (UID: \"fe09ca9a-811b-4126-a3bf-a43b0edbb475\") " Nov 23 14:45:20 crc kubenswrapper[5050]: I1123 14:45:20.077724 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4bfl9\" (UniqueName: \"kubernetes.io/projected/fe09ca9a-811b-4126-a3bf-a43b0edbb475-kube-api-access-4bfl9\") pod \"fe09ca9a-811b-4126-a3bf-a43b0edbb475\" (UID: \"fe09ca9a-811b-4126-a3bf-a43b0edbb475\") " Nov 23 14:45:20 crc kubenswrapper[5050]: I1123 14:45:20.081812 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fe09ca9a-811b-4126-a3bf-a43b0edbb475-utilities" (OuterVolumeSpecName: "utilities") pod "fe09ca9a-811b-4126-a3bf-a43b0edbb475" (UID: "fe09ca9a-811b-4126-a3bf-a43b0edbb475"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 14:45:20 crc kubenswrapper[5050]: I1123 14:45:20.088492 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fe09ca9a-811b-4126-a3bf-a43b0edbb475-kube-api-access-4bfl9" (OuterVolumeSpecName: "kube-api-access-4bfl9") pod "fe09ca9a-811b-4126-a3bf-a43b0edbb475" (UID: "fe09ca9a-811b-4126-a3bf-a43b0edbb475"). InnerVolumeSpecName "kube-api-access-4bfl9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 14:45:20 crc kubenswrapper[5050]: I1123 14:45:20.138174 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fe09ca9a-811b-4126-a3bf-a43b0edbb475-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "fe09ca9a-811b-4126-a3bf-a43b0edbb475" (UID: "fe09ca9a-811b-4126-a3bf-a43b0edbb475"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 14:45:20 crc kubenswrapper[5050]: I1123 14:45:20.178847 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4bfl9\" (UniqueName: \"kubernetes.io/projected/fe09ca9a-811b-4126-a3bf-a43b0edbb475-kube-api-access-4bfl9\") on node \"crc\" DevicePath \"\"" Nov 23 14:45:20 crc kubenswrapper[5050]: I1123 14:45:20.178910 5050 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fe09ca9a-811b-4126-a3bf-a43b0edbb475-utilities\") on node \"crc\" DevicePath \"\"" Nov 23 14:45:20 crc kubenswrapper[5050]: I1123 14:45:20.178924 5050 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fe09ca9a-811b-4126-a3bf-a43b0edbb475-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 23 14:45:20 crc kubenswrapper[5050]: I1123 14:45:20.479005 5050 generic.go:334] "Generic (PLEG): container finished" podID="fe09ca9a-811b-4126-a3bf-a43b0edbb475" containerID="ad4a174116aaa0e006ea066dd82f0e7b17f59a5c302be7203d65341fb90d9d2f" exitCode=0 Nov 23 14:45:20 crc kubenswrapper[5050]: I1123 14:45:20.479063 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gcpxp" event={"ID":"fe09ca9a-811b-4126-a3bf-a43b0edbb475","Type":"ContainerDied","Data":"ad4a174116aaa0e006ea066dd82f0e7b17f59a5c302be7203d65341fb90d9d2f"} Nov 23 14:45:20 crc kubenswrapper[5050]: I1123 14:45:20.479128 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gcpxp" event={"ID":"fe09ca9a-811b-4126-a3bf-a43b0edbb475","Type":"ContainerDied","Data":"3c6fc9f46cadb9047cd86707395965780442ebe36b85a8ec691d3524d495cada"} Nov 23 14:45:20 crc kubenswrapper[5050]: I1123 14:45:20.479153 5050 scope.go:117] "RemoveContainer" containerID="ad4a174116aaa0e006ea066dd82f0e7b17f59a5c302be7203d65341fb90d9d2f" Nov 23 14:45:20 crc kubenswrapper[5050]: I1123 14:45:20.479470 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-gcpxp" Nov 23 14:45:20 crc kubenswrapper[5050]: I1123 14:45:20.482986 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zz4fr" event={"ID":"65bfd579-9076-4956-bd2b-e1c0c93775b5","Type":"ContainerDied","Data":"1b4b44cca077f7edde1b56926751acb7ac87a92280231b0ad1b808eb6a860710"} Nov 23 14:45:20 crc kubenswrapper[5050]: I1123 14:45:20.483088 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-zz4fr" Nov 23 14:45:20 crc kubenswrapper[5050]: I1123 14:45:20.506490 5050 scope.go:117] "RemoveContainer" containerID="28bfdb0bb4104972b4c28510b1f1e62c1387b24595e403f9381f73e66b13c46f" Nov 23 14:45:20 crc kubenswrapper[5050]: I1123 14:45:20.545789 5050 scope.go:117] "RemoveContainer" containerID="86e1ef12c5c5fb5f82ce14b40fb93e2c0766bedb9b430b321e6e81b9e83ee2c6" Nov 23 14:45:20 crc kubenswrapper[5050]: I1123 14:45:20.550942 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-gcpxp"] Nov 23 14:45:20 crc kubenswrapper[5050]: I1123 14:45:20.565987 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-gcpxp"] Nov 23 14:45:20 crc kubenswrapper[5050]: I1123 14:45:20.586698 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-zz4fr"] Nov 23 14:45:20 crc kubenswrapper[5050]: I1123 14:45:20.593769 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-zz4fr"] Nov 23 14:45:20 crc kubenswrapper[5050]: I1123 14:45:20.609256 5050 scope.go:117] "RemoveContainer" containerID="ad4a174116aaa0e006ea066dd82f0e7b17f59a5c302be7203d65341fb90d9d2f" Nov 23 14:45:20 crc kubenswrapper[5050]: E1123 14:45:20.610609 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ad4a174116aaa0e006ea066dd82f0e7b17f59a5c302be7203d65341fb90d9d2f\": container with ID starting with ad4a174116aaa0e006ea066dd82f0e7b17f59a5c302be7203d65341fb90d9d2f not found: ID does not exist" containerID="ad4a174116aaa0e006ea066dd82f0e7b17f59a5c302be7203d65341fb90d9d2f" Nov 23 14:45:20 crc kubenswrapper[5050]: I1123 14:45:20.610652 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ad4a174116aaa0e006ea066dd82f0e7b17f59a5c302be7203d65341fb90d9d2f"} err="failed to get container status \"ad4a174116aaa0e006ea066dd82f0e7b17f59a5c302be7203d65341fb90d9d2f\": rpc error: code = NotFound desc = could not find container \"ad4a174116aaa0e006ea066dd82f0e7b17f59a5c302be7203d65341fb90d9d2f\": container with ID starting with ad4a174116aaa0e006ea066dd82f0e7b17f59a5c302be7203d65341fb90d9d2f not found: ID does not exist" Nov 23 14:45:20 crc kubenswrapper[5050]: I1123 14:45:20.610678 5050 scope.go:117] "RemoveContainer" containerID="28bfdb0bb4104972b4c28510b1f1e62c1387b24595e403f9381f73e66b13c46f" Nov 23 14:45:20 crc kubenswrapper[5050]: E1123 14:45:20.617623 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"28bfdb0bb4104972b4c28510b1f1e62c1387b24595e403f9381f73e66b13c46f\": container with ID starting with 28bfdb0bb4104972b4c28510b1f1e62c1387b24595e403f9381f73e66b13c46f not found: ID does not exist" containerID="28bfdb0bb4104972b4c28510b1f1e62c1387b24595e403f9381f73e66b13c46f" Nov 23 14:45:20 crc kubenswrapper[5050]: I1123 14:45:20.617681 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"28bfdb0bb4104972b4c28510b1f1e62c1387b24595e403f9381f73e66b13c46f"} err="failed to get container status \"28bfdb0bb4104972b4c28510b1f1e62c1387b24595e403f9381f73e66b13c46f\": rpc error: code = NotFound desc = could not find container \"28bfdb0bb4104972b4c28510b1f1e62c1387b24595e403f9381f73e66b13c46f\": container with ID starting with 
28bfdb0bb4104972b4c28510b1f1e62c1387b24595e403f9381f73e66b13c46f not found: ID does not exist" Nov 23 14:45:20 crc kubenswrapper[5050]: I1123 14:45:20.617715 5050 scope.go:117] "RemoveContainer" containerID="86e1ef12c5c5fb5f82ce14b40fb93e2c0766bedb9b430b321e6e81b9e83ee2c6" Nov 23 14:45:20 crc kubenswrapper[5050]: E1123 14:45:20.618049 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"86e1ef12c5c5fb5f82ce14b40fb93e2c0766bedb9b430b321e6e81b9e83ee2c6\": container with ID starting with 86e1ef12c5c5fb5f82ce14b40fb93e2c0766bedb9b430b321e6e81b9e83ee2c6 not found: ID does not exist" containerID="86e1ef12c5c5fb5f82ce14b40fb93e2c0766bedb9b430b321e6e81b9e83ee2c6" Nov 23 14:45:20 crc kubenswrapper[5050]: I1123 14:45:20.618071 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"86e1ef12c5c5fb5f82ce14b40fb93e2c0766bedb9b430b321e6e81b9e83ee2c6"} err="failed to get container status \"86e1ef12c5c5fb5f82ce14b40fb93e2c0766bedb9b430b321e6e81b9e83ee2c6\": rpc error: code = NotFound desc = could not find container \"86e1ef12c5c5fb5f82ce14b40fb93e2c0766bedb9b430b321e6e81b9e83ee2c6\": container with ID starting with 86e1ef12c5c5fb5f82ce14b40fb93e2c0766bedb9b430b321e6e81b9e83ee2c6 not found: ID does not exist" Nov 23 14:45:20 crc kubenswrapper[5050]: I1123 14:45:20.618085 5050 scope.go:117] "RemoveContainer" containerID="ad373a5b486e70d543cf1fbac140806016ba4fbd41b8e700b160011c3299924f" Nov 23 14:45:20 crc kubenswrapper[5050]: I1123 14:45:20.644992 5050 scope.go:117] "RemoveContainer" containerID="582009a24f1bc05ef5512383d3d8da4a8e99e9af0e8ba0d573cf2106dcec96ba" Nov 23 14:45:20 crc kubenswrapper[5050]: I1123 14:45:20.662765 5050 scope.go:117] "RemoveContainer" containerID="95f21d52727a6f926ee944dd93e9ac8110802f07d506fcfd95c74640a65d2d33" Nov 23 14:45:21 crc kubenswrapper[5050]: I1123 14:45:21.564869 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="65bfd579-9076-4956-bd2b-e1c0c93775b5" path="/var/lib/kubelet/pods/65bfd579-9076-4956-bd2b-e1c0c93775b5/volumes" Nov 23 14:45:21 crc kubenswrapper[5050]: I1123 14:45:21.566786 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fe09ca9a-811b-4126-a3bf-a43b0edbb475" path="/var/lib/kubelet/pods/fe09ca9a-811b-4126-a3bf-a43b0edbb475/volumes" Nov 23 14:45:22 crc kubenswrapper[5050]: I1123 14:45:22.006927 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-ppggf"] Nov 23 14:45:22 crc kubenswrapper[5050]: I1123 14:45:22.008227 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-ppggf" podUID="f89a2bf0-1a38-420d-9dba-d1a797364724" containerName="registry-server" containerID="cri-o://595b4f52ee3f3a29cfad947340eff5d52dba39b072b419063b53512696ee3efa" gracePeriod=2 Nov 23 14:45:22 crc kubenswrapper[5050]: I1123 14:45:22.476724 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-ppggf" Nov 23 14:45:22 crc kubenswrapper[5050]: I1123 14:45:22.502859 5050 generic.go:334] "Generic (PLEG): container finished" podID="f89a2bf0-1a38-420d-9dba-d1a797364724" containerID="595b4f52ee3f3a29cfad947340eff5d52dba39b072b419063b53512696ee3efa" exitCode=0 Nov 23 14:45:22 crc kubenswrapper[5050]: I1123 14:45:22.502989 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ppggf" event={"ID":"f89a2bf0-1a38-420d-9dba-d1a797364724","Type":"ContainerDied","Data":"595b4f52ee3f3a29cfad947340eff5d52dba39b072b419063b53512696ee3efa"} Nov 23 14:45:22 crc kubenswrapper[5050]: I1123 14:45:22.503044 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ppggf" event={"ID":"f89a2bf0-1a38-420d-9dba-d1a797364724","Type":"ContainerDied","Data":"e52bf925a29fb71fd4b18cdc5c1dc175bc284b1f78bc142906202d9b0a5e736f"} Nov 23 14:45:22 crc kubenswrapper[5050]: I1123 14:45:22.503084 5050 scope.go:117] "RemoveContainer" containerID="595b4f52ee3f3a29cfad947340eff5d52dba39b072b419063b53512696ee3efa" Nov 23 14:45:22 crc kubenswrapper[5050]: I1123 14:45:22.503740 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-ppggf" Nov 23 14:45:22 crc kubenswrapper[5050]: I1123 14:45:22.530172 5050 scope.go:117] "RemoveContainer" containerID="1bd9a7d112cb602d0ef4778f90a13e3e5ccd5d6f75a9c1e8d04abd5306626133" Nov 23 14:45:22 crc kubenswrapper[5050]: I1123 14:45:22.555667 5050 scope.go:117] "RemoveContainer" containerID="9580398a5276d41a87eb211f9b29cc92ef376f4d3668907426b39cad6a148a1b" Nov 23 14:45:22 crc kubenswrapper[5050]: I1123 14:45:22.572261 5050 scope.go:117] "RemoveContainer" containerID="595b4f52ee3f3a29cfad947340eff5d52dba39b072b419063b53512696ee3efa" Nov 23 14:45:22 crc kubenswrapper[5050]: E1123 14:45:22.572991 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"595b4f52ee3f3a29cfad947340eff5d52dba39b072b419063b53512696ee3efa\": container with ID starting with 595b4f52ee3f3a29cfad947340eff5d52dba39b072b419063b53512696ee3efa not found: ID does not exist" containerID="595b4f52ee3f3a29cfad947340eff5d52dba39b072b419063b53512696ee3efa" Nov 23 14:45:22 crc kubenswrapper[5050]: I1123 14:45:22.573069 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"595b4f52ee3f3a29cfad947340eff5d52dba39b072b419063b53512696ee3efa"} err="failed to get container status \"595b4f52ee3f3a29cfad947340eff5d52dba39b072b419063b53512696ee3efa\": rpc error: code = NotFound desc = could not find container \"595b4f52ee3f3a29cfad947340eff5d52dba39b072b419063b53512696ee3efa\": container with ID starting with 595b4f52ee3f3a29cfad947340eff5d52dba39b072b419063b53512696ee3efa not found: ID does not exist" Nov 23 14:45:22 crc kubenswrapper[5050]: I1123 14:45:22.573118 5050 scope.go:117] "RemoveContainer" containerID="1bd9a7d112cb602d0ef4778f90a13e3e5ccd5d6f75a9c1e8d04abd5306626133" Nov 23 14:45:22 crc kubenswrapper[5050]: E1123 14:45:22.573495 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1bd9a7d112cb602d0ef4778f90a13e3e5ccd5d6f75a9c1e8d04abd5306626133\": container with ID starting with 1bd9a7d112cb602d0ef4778f90a13e3e5ccd5d6f75a9c1e8d04abd5306626133 not found: ID does not exist" 
containerID="1bd9a7d112cb602d0ef4778f90a13e3e5ccd5d6f75a9c1e8d04abd5306626133" Nov 23 14:45:22 crc kubenswrapper[5050]: I1123 14:45:22.573540 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1bd9a7d112cb602d0ef4778f90a13e3e5ccd5d6f75a9c1e8d04abd5306626133"} err="failed to get container status \"1bd9a7d112cb602d0ef4778f90a13e3e5ccd5d6f75a9c1e8d04abd5306626133\": rpc error: code = NotFound desc = could not find container \"1bd9a7d112cb602d0ef4778f90a13e3e5ccd5d6f75a9c1e8d04abd5306626133\": container with ID starting with 1bd9a7d112cb602d0ef4778f90a13e3e5ccd5d6f75a9c1e8d04abd5306626133 not found: ID does not exist" Nov 23 14:45:22 crc kubenswrapper[5050]: I1123 14:45:22.573570 5050 scope.go:117] "RemoveContainer" containerID="9580398a5276d41a87eb211f9b29cc92ef376f4d3668907426b39cad6a148a1b" Nov 23 14:45:22 crc kubenswrapper[5050]: E1123 14:45:22.573994 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9580398a5276d41a87eb211f9b29cc92ef376f4d3668907426b39cad6a148a1b\": container with ID starting with 9580398a5276d41a87eb211f9b29cc92ef376f4d3668907426b39cad6a148a1b not found: ID does not exist" containerID="9580398a5276d41a87eb211f9b29cc92ef376f4d3668907426b39cad6a148a1b" Nov 23 14:45:22 crc kubenswrapper[5050]: I1123 14:45:22.574118 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9580398a5276d41a87eb211f9b29cc92ef376f4d3668907426b39cad6a148a1b"} err="failed to get container status \"9580398a5276d41a87eb211f9b29cc92ef376f4d3668907426b39cad6a148a1b\": rpc error: code = NotFound desc = could not find container \"9580398a5276d41a87eb211f9b29cc92ef376f4d3668907426b39cad6a148a1b\": container with ID starting with 9580398a5276d41a87eb211f9b29cc92ef376f4d3668907426b39cad6a148a1b not found: ID does not exist" Nov 23 14:45:22 crc kubenswrapper[5050]: I1123 14:45:22.629903 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f89a2bf0-1a38-420d-9dba-d1a797364724-utilities\") pod \"f89a2bf0-1a38-420d-9dba-d1a797364724\" (UID: \"f89a2bf0-1a38-420d-9dba-d1a797364724\") " Nov 23 14:45:22 crc kubenswrapper[5050]: I1123 14:45:22.630106 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f89a2bf0-1a38-420d-9dba-d1a797364724-catalog-content\") pod \"f89a2bf0-1a38-420d-9dba-d1a797364724\" (UID: \"f89a2bf0-1a38-420d-9dba-d1a797364724\") " Nov 23 14:45:22 crc kubenswrapper[5050]: I1123 14:45:22.630251 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wxv86\" (UniqueName: \"kubernetes.io/projected/f89a2bf0-1a38-420d-9dba-d1a797364724-kube-api-access-wxv86\") pod \"f89a2bf0-1a38-420d-9dba-d1a797364724\" (UID: \"f89a2bf0-1a38-420d-9dba-d1a797364724\") " Nov 23 14:45:22 crc kubenswrapper[5050]: I1123 14:45:22.630800 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f89a2bf0-1a38-420d-9dba-d1a797364724-utilities" (OuterVolumeSpecName: "utilities") pod "f89a2bf0-1a38-420d-9dba-d1a797364724" (UID: "f89a2bf0-1a38-420d-9dba-d1a797364724"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 14:45:22 crc kubenswrapper[5050]: I1123 14:45:22.639181 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f89a2bf0-1a38-420d-9dba-d1a797364724-kube-api-access-wxv86" (OuterVolumeSpecName: "kube-api-access-wxv86") pod "f89a2bf0-1a38-420d-9dba-d1a797364724" (UID: "f89a2bf0-1a38-420d-9dba-d1a797364724"). InnerVolumeSpecName "kube-api-access-wxv86". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 14:45:22 crc kubenswrapper[5050]: I1123 14:45:22.732857 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wxv86\" (UniqueName: \"kubernetes.io/projected/f89a2bf0-1a38-420d-9dba-d1a797364724-kube-api-access-wxv86\") on node \"crc\" DevicePath \"\"" Nov 23 14:45:22 crc kubenswrapper[5050]: I1123 14:45:22.732902 5050 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f89a2bf0-1a38-420d-9dba-d1a797364724-utilities\") on node \"crc\" DevicePath \"\"" Nov 23 14:45:22 crc kubenswrapper[5050]: I1123 14:45:22.744749 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f89a2bf0-1a38-420d-9dba-d1a797364724-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f89a2bf0-1a38-420d-9dba-d1a797364724" (UID: "f89a2bf0-1a38-420d-9dba-d1a797364724"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 14:45:22 crc kubenswrapper[5050]: I1123 14:45:22.834057 5050 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f89a2bf0-1a38-420d-9dba-d1a797364724-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 23 14:45:22 crc kubenswrapper[5050]: I1123 14:45:22.836758 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-ppggf"] Nov 23 14:45:22 crc kubenswrapper[5050]: I1123 14:45:22.839149 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-ppggf"] Nov 23 14:45:23 crc kubenswrapper[5050]: I1123 14:45:23.560988 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f89a2bf0-1a38-420d-9dba-d1a797364724" path="/var/lib/kubelet/pods/f89a2bf0-1a38-420d-9dba-d1a797364724/volumes" Nov 23 14:45:25 crc kubenswrapper[5050]: I1123 14:45:25.312094 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-n9s79" Nov 23 14:45:25 crc kubenswrapper[5050]: I1123 14:45:25.465253 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-tpll6" Nov 23 14:45:26 crc kubenswrapper[5050]: I1123 14:45:26.869270 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-authentication/oauth-openshift-558db77b4-cvknj" podUID="5c3f6b64-15bf-4147-be4e-414a2569cb58" containerName="oauth-openshift" containerID="cri-o://7330fc53de879bb0854e2a368af2a31c33bd1bbda86c7c2442129bc4bbdd39ee" gracePeriod=15 Nov 23 14:45:26 crc kubenswrapper[5050]: I1123 14:45:26.975286 5050 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-cvknj container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.10:6443/healthz\": dial tcp 10.217.0.10:6443: connect: connection refused" start-of-body= Nov 23 14:45:26 crc kubenswrapper[5050]: I1123 14:45:26.975382 5050 
prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-cvknj" podUID="5c3f6b64-15bf-4147-be4e-414a2569cb58" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.10:6443/healthz\": dial tcp 10.217.0.10:6443: connect: connection refused" Nov 23 14:45:27 crc kubenswrapper[5050]: I1123 14:45:27.316490 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-cvknj" Nov 23 14:45:27 crc kubenswrapper[5050]: I1123 14:45:27.502814 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/5c3f6b64-15bf-4147-be4e-414a2569cb58-v4-0-config-system-cliconfig\") pod \"5c3f6b64-15bf-4147-be4e-414a2569cb58\" (UID: \"5c3f6b64-15bf-4147-be4e-414a2569cb58\") " Nov 23 14:45:27 crc kubenswrapper[5050]: I1123 14:45:27.502869 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/5c3f6b64-15bf-4147-be4e-414a2569cb58-v4-0-config-user-template-login\") pod \"5c3f6b64-15bf-4147-be4e-414a2569cb58\" (UID: \"5c3f6b64-15bf-4147-be4e-414a2569cb58\") " Nov 23 14:45:27 crc kubenswrapper[5050]: I1123 14:45:27.502903 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/5c3f6b64-15bf-4147-be4e-414a2569cb58-v4-0-config-system-session\") pod \"5c3f6b64-15bf-4147-be4e-414a2569cb58\" (UID: \"5c3f6b64-15bf-4147-be4e-414a2569cb58\") " Nov 23 14:45:27 crc kubenswrapper[5050]: I1123 14:45:27.502957 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-crxcs\" (UniqueName: \"kubernetes.io/projected/5c3f6b64-15bf-4147-be4e-414a2569cb58-kube-api-access-crxcs\") pod \"5c3f6b64-15bf-4147-be4e-414a2569cb58\" (UID: \"5c3f6b64-15bf-4147-be4e-414a2569cb58\") " Nov 23 14:45:27 crc kubenswrapper[5050]: I1123 14:45:27.503015 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/5c3f6b64-15bf-4147-be4e-414a2569cb58-v4-0-config-system-service-ca\") pod \"5c3f6b64-15bf-4147-be4e-414a2569cb58\" (UID: \"5c3f6b64-15bf-4147-be4e-414a2569cb58\") " Nov 23 14:45:27 crc kubenswrapper[5050]: I1123 14:45:27.503036 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/5c3f6b64-15bf-4147-be4e-414a2569cb58-v4-0-config-user-template-error\") pod \"5c3f6b64-15bf-4147-be4e-414a2569cb58\" (UID: \"5c3f6b64-15bf-4147-be4e-414a2569cb58\") " Nov 23 14:45:27 crc kubenswrapper[5050]: I1123 14:45:27.503080 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/5c3f6b64-15bf-4147-be4e-414a2569cb58-v4-0-config-user-template-provider-selection\") pod \"5c3f6b64-15bf-4147-be4e-414a2569cb58\" (UID: \"5c3f6b64-15bf-4147-be4e-414a2569cb58\") " Nov 23 14:45:27 crc kubenswrapper[5050]: I1123 14:45:27.503111 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/5c3f6b64-15bf-4147-be4e-414a2569cb58-v4-0-config-system-ocp-branding-template\") pod 
\"5c3f6b64-15bf-4147-be4e-414a2569cb58\" (UID: \"5c3f6b64-15bf-4147-be4e-414a2569cb58\") " Nov 23 14:45:27 crc kubenswrapper[5050]: I1123 14:45:27.503144 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5c3f6b64-15bf-4147-be4e-414a2569cb58-v4-0-config-system-trusted-ca-bundle\") pod \"5c3f6b64-15bf-4147-be4e-414a2569cb58\" (UID: \"5c3f6b64-15bf-4147-be4e-414a2569cb58\") " Nov 23 14:45:27 crc kubenswrapper[5050]: I1123 14:45:27.503167 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/5c3f6b64-15bf-4147-be4e-414a2569cb58-audit-policies\") pod \"5c3f6b64-15bf-4147-be4e-414a2569cb58\" (UID: \"5c3f6b64-15bf-4147-be4e-414a2569cb58\") " Nov 23 14:45:27 crc kubenswrapper[5050]: I1123 14:45:27.503200 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/5c3f6b64-15bf-4147-be4e-414a2569cb58-v4-0-config-user-idp-0-file-data\") pod \"5c3f6b64-15bf-4147-be4e-414a2569cb58\" (UID: \"5c3f6b64-15bf-4147-be4e-414a2569cb58\") " Nov 23 14:45:27 crc kubenswrapper[5050]: I1123 14:45:27.503224 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/5c3f6b64-15bf-4147-be4e-414a2569cb58-v4-0-config-system-router-certs\") pod \"5c3f6b64-15bf-4147-be4e-414a2569cb58\" (UID: \"5c3f6b64-15bf-4147-be4e-414a2569cb58\") " Nov 23 14:45:27 crc kubenswrapper[5050]: I1123 14:45:27.503252 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/5c3f6b64-15bf-4147-be4e-414a2569cb58-audit-dir\") pod \"5c3f6b64-15bf-4147-be4e-414a2569cb58\" (UID: \"5c3f6b64-15bf-4147-be4e-414a2569cb58\") " Nov 23 14:45:27 crc kubenswrapper[5050]: I1123 14:45:27.503277 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/5c3f6b64-15bf-4147-be4e-414a2569cb58-v4-0-config-system-serving-cert\") pod \"5c3f6b64-15bf-4147-be4e-414a2569cb58\" (UID: \"5c3f6b64-15bf-4147-be4e-414a2569cb58\") " Nov 23 14:45:27 crc kubenswrapper[5050]: I1123 14:45:27.504273 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5c3f6b64-15bf-4147-be4e-414a2569cb58-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "5c3f6b64-15bf-4147-be4e-414a2569cb58" (UID: "5c3f6b64-15bf-4147-be4e-414a2569cb58"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 14:45:27 crc kubenswrapper[5050]: I1123 14:45:27.504783 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5c3f6b64-15bf-4147-be4e-414a2569cb58-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "5c3f6b64-15bf-4147-be4e-414a2569cb58" (UID: "5c3f6b64-15bf-4147-be4e-414a2569cb58"). InnerVolumeSpecName "v4-0-config-system-service-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 14:45:27 crc kubenswrapper[5050]: I1123 14:45:27.504830 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5c3f6b64-15bf-4147-be4e-414a2569cb58-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "5c3f6b64-15bf-4147-be4e-414a2569cb58" (UID: "5c3f6b64-15bf-4147-be4e-414a2569cb58"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 23 14:45:27 crc kubenswrapper[5050]: I1123 14:45:27.504792 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5c3f6b64-15bf-4147-be4e-414a2569cb58-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "5c3f6b64-15bf-4147-be4e-414a2569cb58" (UID: "5c3f6b64-15bf-4147-be4e-414a2569cb58"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 14:45:27 crc kubenswrapper[5050]: I1123 14:45:27.505202 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5c3f6b64-15bf-4147-be4e-414a2569cb58-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "5c3f6b64-15bf-4147-be4e-414a2569cb58" (UID: "5c3f6b64-15bf-4147-be4e-414a2569cb58"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 14:45:27 crc kubenswrapper[5050]: I1123 14:45:27.510239 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5c3f6b64-15bf-4147-be4e-414a2569cb58-kube-api-access-crxcs" (OuterVolumeSpecName: "kube-api-access-crxcs") pod "5c3f6b64-15bf-4147-be4e-414a2569cb58" (UID: "5c3f6b64-15bf-4147-be4e-414a2569cb58"). InnerVolumeSpecName "kube-api-access-crxcs". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 14:45:27 crc kubenswrapper[5050]: I1123 14:45:27.515945 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5c3f6b64-15bf-4147-be4e-414a2569cb58-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "5c3f6b64-15bf-4147-be4e-414a2569cb58" (UID: "5c3f6b64-15bf-4147-be4e-414a2569cb58"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 14:45:27 crc kubenswrapper[5050]: I1123 14:45:27.516284 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5c3f6b64-15bf-4147-be4e-414a2569cb58-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "5c3f6b64-15bf-4147-be4e-414a2569cb58" (UID: "5c3f6b64-15bf-4147-be4e-414a2569cb58"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 14:45:27 crc kubenswrapper[5050]: I1123 14:45:27.516858 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5c3f6b64-15bf-4147-be4e-414a2569cb58-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "5c3f6b64-15bf-4147-be4e-414a2569cb58" (UID: "5c3f6b64-15bf-4147-be4e-414a2569cb58"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 14:45:27 crc kubenswrapper[5050]: I1123 14:45:27.518021 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5c3f6b64-15bf-4147-be4e-414a2569cb58-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "5c3f6b64-15bf-4147-be4e-414a2569cb58" (UID: "5c3f6b64-15bf-4147-be4e-414a2569cb58"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 14:45:27 crc kubenswrapper[5050]: I1123 14:45:27.519694 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5c3f6b64-15bf-4147-be4e-414a2569cb58-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "5c3f6b64-15bf-4147-be4e-414a2569cb58" (UID: "5c3f6b64-15bf-4147-be4e-414a2569cb58"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 14:45:27 crc kubenswrapper[5050]: I1123 14:45:27.520105 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5c3f6b64-15bf-4147-be4e-414a2569cb58-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "5c3f6b64-15bf-4147-be4e-414a2569cb58" (UID: "5c3f6b64-15bf-4147-be4e-414a2569cb58"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 14:45:27 crc kubenswrapper[5050]: I1123 14:45:27.520139 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5c3f6b64-15bf-4147-be4e-414a2569cb58-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "5c3f6b64-15bf-4147-be4e-414a2569cb58" (UID: "5c3f6b64-15bf-4147-be4e-414a2569cb58"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 14:45:27 crc kubenswrapper[5050]: I1123 14:45:27.520233 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5c3f6b64-15bf-4147-be4e-414a2569cb58-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "5c3f6b64-15bf-4147-be4e-414a2569cb58" (UID: "5c3f6b64-15bf-4147-be4e-414a2569cb58"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 14:45:27 crc kubenswrapper[5050]: I1123 14:45:27.540833 5050 generic.go:334] "Generic (PLEG): container finished" podID="5c3f6b64-15bf-4147-be4e-414a2569cb58" containerID="7330fc53de879bb0854e2a368af2a31c33bd1bbda86c7c2442129bc4bbdd39ee" exitCode=0 Nov 23 14:45:27 crc kubenswrapper[5050]: I1123 14:45:27.540915 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-cvknj" event={"ID":"5c3f6b64-15bf-4147-be4e-414a2569cb58","Type":"ContainerDied","Data":"7330fc53de879bb0854e2a368af2a31c33bd1bbda86c7c2442129bc4bbdd39ee"} Nov 23 14:45:27 crc kubenswrapper[5050]: I1123 14:45:27.540991 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-cvknj"
Nov 23 14:45:27 crc kubenswrapper[5050]: I1123 14:45:27.541423 5050 scope.go:117] "RemoveContainer" containerID="7330fc53de879bb0854e2a368af2a31c33bd1bbda86c7c2442129bc4bbdd39ee"
Nov 23 14:45:27 crc kubenswrapper[5050]: I1123 14:45:27.541318 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-cvknj" event={"ID":"5c3f6b64-15bf-4147-be4e-414a2569cb58","Type":"ContainerDied","Data":"6370bc4ed9c6cadc9e5d9fea45cb47d44dd25fc05bd084c59ee0b3695e1bc514"}
Nov 23 14:45:27 crc kubenswrapper[5050]: I1123 14:45:27.563057 5050 scope.go:117] "RemoveContainer" containerID="7330fc53de879bb0854e2a368af2a31c33bd1bbda86c7c2442129bc4bbdd39ee"
Nov 23 14:45:27 crc kubenswrapper[5050]: E1123 14:45:27.564248 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7330fc53de879bb0854e2a368af2a31c33bd1bbda86c7c2442129bc4bbdd39ee\": container with ID starting with 7330fc53de879bb0854e2a368af2a31c33bd1bbda86c7c2442129bc4bbdd39ee not found: ID does not exist" containerID="7330fc53de879bb0854e2a368af2a31c33bd1bbda86c7c2442129bc4bbdd39ee"
Nov 23 14:45:27 crc kubenswrapper[5050]: I1123 14:45:27.564299 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7330fc53de879bb0854e2a368af2a31c33bd1bbda86c7c2442129bc4bbdd39ee"} err="failed to get container status \"7330fc53de879bb0854e2a368af2a31c33bd1bbda86c7c2442129bc4bbdd39ee\": rpc error: code = NotFound desc = could not find container \"7330fc53de879bb0854e2a368af2a31c33bd1bbda86c7c2442129bc4bbdd39ee\": container with ID starting with 7330fc53de879bb0854e2a368af2a31c33bd1bbda86c7c2442129bc4bbdd39ee not found: ID does not exist"
Nov 23 14:45:27 crc kubenswrapper[5050]: I1123 14:45:27.584236 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-cvknj"]
Nov 23 14:45:27 crc kubenswrapper[5050]: I1123 14:45:27.586652 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-cvknj"]
Nov 23 14:45:27 crc kubenswrapper[5050]: I1123 14:45:27.604371 5050 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/5c3f6b64-15bf-4147-be4e-414a2569cb58-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\""
Nov 23 14:45:27 crc kubenswrapper[5050]: I1123 14:45:27.604509 5050 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/5c3f6b64-15bf-4147-be4e-414a2569cb58-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\""
Nov 23 14:45:27 crc kubenswrapper[5050]: I1123 14:45:27.604572 5050 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/5c3f6b64-15bf-4147-be4e-414a2569cb58-v4-0-config-system-session\") on node \"crc\" DevicePath \"\""
Nov 23 14:45:27 crc kubenswrapper[5050]: I1123 14:45:27.604678 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-crxcs\" (UniqueName: \"kubernetes.io/projected/5c3f6b64-15bf-4147-be4e-414a2569cb58-kube-api-access-crxcs\") on node \"crc\" DevicePath \"\""
Nov 23 14:45:27 crc kubenswrapper[5050]: I1123 14:45:27.604741 5050 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/5c3f6b64-15bf-4147-be4e-414a2569cb58-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\""
Nov 23 14:45:27 crc kubenswrapper[5050]: I1123 14:45:27.604807 5050 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/5c3f6b64-15bf-4147-be4e-414a2569cb58-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\""
Nov 23 14:45:27 crc kubenswrapper[5050]: I1123 14:45:27.604864 5050 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/5c3f6b64-15bf-4147-be4e-414a2569cb58-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\""
Nov 23 14:45:27 crc kubenswrapper[5050]: I1123 14:45:27.604932 5050 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/5c3f6b64-15bf-4147-be4e-414a2569cb58-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\""
Nov 23 14:45:27 crc kubenswrapper[5050]: I1123 14:45:27.605003 5050 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5c3f6b64-15bf-4147-be4e-414a2569cb58-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 23 14:45:27 crc kubenswrapper[5050]: I1123 14:45:27.605064 5050 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/5c3f6b64-15bf-4147-be4e-414a2569cb58-audit-policies\") on node \"crc\" DevicePath \"\""
Nov 23 14:45:27 crc kubenswrapper[5050]: I1123 14:45:27.605126 5050 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/5c3f6b64-15bf-4147-be4e-414a2569cb58-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\""
Nov 23 14:45:27 crc kubenswrapper[5050]: I1123 14:45:27.605187 5050 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/5c3f6b64-15bf-4147-be4e-414a2569cb58-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\""
Nov 23 14:45:27 crc kubenswrapper[5050]: I1123 14:45:27.605243 5050 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/5c3f6b64-15bf-4147-be4e-414a2569cb58-audit-dir\") on node \"crc\" DevicePath \"\""
Nov 23 14:45:27 crc kubenswrapper[5050]: I1123 14:45:27.605304 5050 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/5c3f6b64-15bf-4147-be4e-414a2569cb58-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 23 14:45:28 crc kubenswrapper[5050]: I1123 14:45:28.384673 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-7494c98dcc-jgdkg"]
Nov 23 14:45:28 crc kubenswrapper[5050]: E1123 14:45:28.384876 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5c3f6b64-15bf-4147-be4e-414a2569cb58" containerName="oauth-openshift"
Nov 23 14:45:28 crc kubenswrapper[5050]: I1123 14:45:28.384889 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="5c3f6b64-15bf-4147-be4e-414a2569cb58" containerName="oauth-openshift"
Nov 23 14:45:28 crc kubenswrapper[5050]: E1123 14:45:28.384901 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="20491310-05b1-4f61-9574-163a5463be0e" containerName="extract-content"
Nov 23 14:45:28 crc kubenswrapper[5050]: I1123 14:45:28.384907 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="20491310-05b1-4f61-9574-163a5463be0e" containerName="extract-content"
Nov 23 14:45:28 crc kubenswrapper[5050]: E1123 14:45:28.384917 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fe09ca9a-811b-4126-a3bf-a43b0edbb475" containerName="extract-content"
Nov 23 14:45:28 crc kubenswrapper[5050]: I1123 14:45:28.384924 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="fe09ca9a-811b-4126-a3bf-a43b0edbb475" containerName="extract-content"
Nov 23 14:45:28 crc kubenswrapper[5050]: E1123 14:45:28.384933 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fe09ca9a-811b-4126-a3bf-a43b0edbb475" containerName="extract-utilities"
Nov 23 14:45:28 crc kubenswrapper[5050]: I1123 14:45:28.384940 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="fe09ca9a-811b-4126-a3bf-a43b0edbb475" containerName="extract-utilities"
Nov 23 14:45:28 crc kubenswrapper[5050]: E1123 14:45:28.384950 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f89a2bf0-1a38-420d-9dba-d1a797364724" containerName="extract-content"
Nov 23 14:45:28 crc kubenswrapper[5050]: I1123 14:45:28.384955 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="f89a2bf0-1a38-420d-9dba-d1a797364724" containerName="extract-content"
Nov 23 14:45:28 crc kubenswrapper[5050]: E1123 14:45:28.384964 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="65bfd579-9076-4956-bd2b-e1c0c93775b5" containerName="extract-content"
Nov 23 14:45:28 crc kubenswrapper[5050]: I1123 14:45:28.384970 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="65bfd579-9076-4956-bd2b-e1c0c93775b5" containerName="extract-content"
Nov 23 14:45:28 crc kubenswrapper[5050]: E1123 14:45:28.384978 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="65bfd579-9076-4956-bd2b-e1c0c93775b5" containerName="registry-server"
Nov 23 14:45:28 crc kubenswrapper[5050]: I1123 14:45:28.384983 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="65bfd579-9076-4956-bd2b-e1c0c93775b5" containerName="registry-server"
Nov 23 14:45:28 crc kubenswrapper[5050]: E1123 14:45:28.384994 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="65bfd579-9076-4956-bd2b-e1c0c93775b5" containerName="extract-utilities"
Nov 23 14:45:28 crc kubenswrapper[5050]: I1123 14:45:28.385000 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="65bfd579-9076-4956-bd2b-e1c0c93775b5" containerName="extract-utilities"
Nov 23 14:45:28 crc kubenswrapper[5050]: E1123 14:45:28.385007 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f89a2bf0-1a38-420d-9dba-d1a797364724" containerName="extract-utilities"
Nov 23 14:45:28 crc kubenswrapper[5050]: I1123 14:45:28.385013 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="f89a2bf0-1a38-420d-9dba-d1a797364724" containerName="extract-utilities"
Nov 23 14:45:28 crc kubenswrapper[5050]: E1123 14:45:28.385023 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c9ec5edf-785a-4b69-bf12-857345ceb036" containerName="collect-profiles"
Nov 23 14:45:28 crc kubenswrapper[5050]: I1123 14:45:28.385029 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="c9ec5edf-785a-4b69-bf12-857345ceb036" containerName="collect-profiles"
Nov 23 14:45:28 crc kubenswrapper[5050]: E1123 14:45:28.385037 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="20491310-05b1-4f61-9574-163a5463be0e" containerName="extract-utilities"
Nov 23 14:45:28 crc kubenswrapper[5050]: I1123 14:45:28.385042 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="20491310-05b1-4f61-9574-163a5463be0e" containerName="extract-utilities"
Nov 23 14:45:28 crc kubenswrapper[5050]: E1123 14:45:28.385050 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fe09ca9a-811b-4126-a3bf-a43b0edbb475" containerName="registry-server"
Nov 23 14:45:28 crc kubenswrapper[5050]: I1123 14:45:28.385055 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="fe09ca9a-811b-4126-a3bf-a43b0edbb475" containerName="registry-server"
Nov 23 14:45:28 crc kubenswrapper[5050]: E1123 14:45:28.385062 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f89a2bf0-1a38-420d-9dba-d1a797364724" containerName="registry-server"
Nov 23 14:45:28 crc kubenswrapper[5050]: I1123 14:45:28.385068 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="f89a2bf0-1a38-420d-9dba-d1a797364724" containerName="registry-server"
Nov 23 14:45:28 crc kubenswrapper[5050]: E1123 14:45:28.385078 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="20491310-05b1-4f61-9574-163a5463be0e" containerName="registry-server"
Nov 23 14:45:28 crc kubenswrapper[5050]: I1123 14:45:28.385084 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="20491310-05b1-4f61-9574-163a5463be0e" containerName="registry-server"
Nov 23 14:45:28 crc kubenswrapper[5050]: I1123 14:45:28.385176 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="65bfd579-9076-4956-bd2b-e1c0c93775b5" containerName="registry-server"
Nov 23 14:45:28 crc kubenswrapper[5050]: I1123 14:45:28.385187 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="fe09ca9a-811b-4126-a3bf-a43b0edbb475" containerName="registry-server"
Nov 23 14:45:28 crc kubenswrapper[5050]: I1123 14:45:28.385197 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="f89a2bf0-1a38-420d-9dba-d1a797364724" containerName="registry-server"
Nov 23 14:45:28 crc kubenswrapper[5050]: I1123 14:45:28.385205 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="c9ec5edf-785a-4b69-bf12-857345ceb036" containerName="collect-profiles"
Nov 23 14:45:28 crc kubenswrapper[5050]: I1123 14:45:28.385216 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="5c3f6b64-15bf-4147-be4e-414a2569cb58" containerName="oauth-openshift"
Nov 23 14:45:28 crc kubenswrapper[5050]: I1123 14:45:28.385226 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="20491310-05b1-4f61-9574-163a5463be0e" containerName="registry-server"
Nov 23 14:45:28 crc kubenswrapper[5050]: I1123 14:45:28.385580 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-7494c98dcc-jgdkg"
Nov 23 14:45:28 crc kubenswrapper[5050]: I1123 14:45:28.387851 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert"
Nov 23 14:45:28 crc kubenswrapper[5050]: I1123 14:45:28.388169 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error"
Nov 23 14:45:28 crc kubenswrapper[5050]: I1123 14:45:28.388175 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session"
Nov 23 14:45:28 crc kubenswrapper[5050]: I1123 14:45:28.389415 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt"
Nov 23 14:45:28 crc kubenswrapper[5050]: I1123 14:45:28.389576 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca"
Nov 23 14:45:28 crc kubenswrapper[5050]: I1123 14:45:28.389957 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc"
Nov 23 14:45:28 crc kubenswrapper[5050]: I1123 14:45:28.390145 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs"
Nov 23 14:45:28 crc kubenswrapper[5050]: I1123 14:45:28.391002 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection"
Nov 23 14:45:28 crc kubenswrapper[5050]: I1123 14:45:28.391064 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit"
Nov 23 14:45:28 crc kubenswrapper[5050]: I1123 14:45:28.391296 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt"
Nov 23 14:45:28 crc kubenswrapper[5050]: I1123 14:45:28.392735 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig"
Nov 23 14:45:28 crc kubenswrapper[5050]: I1123 14:45:28.393156 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data"
Nov 23 14:45:28 crc kubenswrapper[5050]: I1123 14:45:28.398718 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login"
Nov 23 14:45:28 crc kubenswrapper[5050]: I1123 14:45:28.408050 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-7494c98dcc-jgdkg"]
Nov 23 14:45:28 crc kubenswrapper[5050]: I1123 14:45:28.409712 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle"
Nov 23 14:45:28 crc kubenswrapper[5050]: I1123 14:45:28.432592 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template"
Nov 23 14:45:28 crc kubenswrapper[5050]: I1123 14:45:28.434971 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/7f4c924c-0293-48c3-b588-0b5db0a2141f-v4-0-config-system-serving-cert\") pod \"oauth-openshift-7494c98dcc-jgdkg\" (UID: \"7f4c924c-0293-48c3-b588-0b5db0a2141f\") " pod="openshift-authentication/oauth-openshift-7494c98dcc-jgdkg"
Nov 23 14:45:28 crc kubenswrapper[5050]: I1123 14:45:28.435121 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/7f4c924c-0293-48c3-b588-0b5db0a2141f-v4-0-config-system-service-ca\") pod \"oauth-openshift-7494c98dcc-jgdkg\" (UID: \"7f4c924c-0293-48c3-b588-0b5db0a2141f\") " pod="openshift-authentication/oauth-openshift-7494c98dcc-jgdkg"
Nov 23 14:45:28 crc kubenswrapper[5050]: I1123 14:45:28.435384 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/7f4c924c-0293-48c3-b588-0b5db0a2141f-v4-0-config-system-session\") pod \"oauth-openshift-7494c98dcc-jgdkg\" (UID: \"7f4c924c-0293-48c3-b588-0b5db0a2141f\") " pod="openshift-authentication/oauth-openshift-7494c98dcc-jgdkg"
Nov 23 14:45:28 crc kubenswrapper[5050]: I1123 14:45:28.435515 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/7f4c924c-0293-48c3-b588-0b5db0a2141f-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-7494c98dcc-jgdkg\" (UID: \"7f4c924c-0293-48c3-b588-0b5db0a2141f\") " pod="openshift-authentication/oauth-openshift-7494c98dcc-jgdkg"
Nov 23 14:45:28 crc kubenswrapper[5050]: I1123 14:45:28.435647 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/7f4c924c-0293-48c3-b588-0b5db0a2141f-v4-0-config-system-router-certs\") pod \"oauth-openshift-7494c98dcc-jgdkg\" (UID: \"7f4c924c-0293-48c3-b588-0b5db0a2141f\") " pod="openshift-authentication/oauth-openshift-7494c98dcc-jgdkg"
Nov 23 14:45:28 crc kubenswrapper[5050]: I1123 14:45:28.435748 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/7f4c924c-0293-48c3-b588-0b5db0a2141f-audit-dir\") pod \"oauth-openshift-7494c98dcc-jgdkg\" (UID: \"7f4c924c-0293-48c3-b588-0b5db0a2141f\") " pod="openshift-authentication/oauth-openshift-7494c98dcc-jgdkg"
Nov 23 14:45:28 crc kubenswrapper[5050]: I1123 14:45:28.435879 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/7f4c924c-0293-48c3-b588-0b5db0a2141f-v4-0-config-user-template-login\") pod \"oauth-openshift-7494c98dcc-jgdkg\" (UID: \"7f4c924c-0293-48c3-b588-0b5db0a2141f\") " pod="openshift-authentication/oauth-openshift-7494c98dcc-jgdkg"
Nov 23 14:45:28 crc kubenswrapper[5050]: I1123 14:45:28.435991 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/7f4c924c-0293-48c3-b588-0b5db0a2141f-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-7494c98dcc-jgdkg\" (UID: \"7f4c924c-0293-48c3-b588-0b5db0a2141f\") " pod="openshift-authentication/oauth-openshift-7494c98dcc-jgdkg"
Nov 23 14:45:28 crc kubenswrapper[5050]: I1123 14:45:28.436079 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/7f4c924c-0293-48c3-b588-0b5db0a2141f-audit-policies\") pod \"oauth-openshift-7494c98dcc-jgdkg\" (UID: \"7f4c924c-0293-48c3-b588-0b5db0a2141f\") " pod="openshift-authentication/oauth-openshift-7494c98dcc-jgdkg"
Nov 23 14:45:28 crc kubenswrapper[5050]: I1123 14:45:28.436161 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-twkrb\" (UniqueName: \"kubernetes.io/projected/7f4c924c-0293-48c3-b588-0b5db0a2141f-kube-api-access-twkrb\") pod \"oauth-openshift-7494c98dcc-jgdkg\" (UID: \"7f4c924c-0293-48c3-b588-0b5db0a2141f\") " pod="openshift-authentication/oauth-openshift-7494c98dcc-jgdkg"
Nov 23 14:45:28 crc kubenswrapper[5050]: I1123 14:45:28.436259 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/7f4c924c-0293-48c3-b588-0b5db0a2141f-v4-0-config-user-template-error\") pod \"oauth-openshift-7494c98dcc-jgdkg\" (UID: \"7f4c924c-0293-48c3-b588-0b5db0a2141f\") " pod="openshift-authentication/oauth-openshift-7494c98dcc-jgdkg"
Nov 23 14:45:28 crc kubenswrapper[5050]: I1123 14:45:28.436346 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/7f4c924c-0293-48c3-b588-0b5db0a2141f-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-7494c98dcc-jgdkg\" (UID: \"7f4c924c-0293-48c3-b588-0b5db0a2141f\") " pod="openshift-authentication/oauth-openshift-7494c98dcc-jgdkg"
Nov 23 14:45:28 crc kubenswrapper[5050]: I1123 14:45:28.436494 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/7f4c924c-0293-48c3-b588-0b5db0a2141f-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-7494c98dcc-jgdkg\" (UID: \"7f4c924c-0293-48c3-b588-0b5db0a2141f\") " pod="openshift-authentication/oauth-openshift-7494c98dcc-jgdkg"
Nov 23 14:45:28 crc kubenswrapper[5050]: I1123 14:45:28.436600 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/7f4c924c-0293-48c3-b588-0b5db0a2141f-v4-0-config-system-cliconfig\") pod \"oauth-openshift-7494c98dcc-jgdkg\" (UID: \"7f4c924c-0293-48c3-b588-0b5db0a2141f\") " pod="openshift-authentication/oauth-openshift-7494c98dcc-jgdkg"
Nov 23 14:45:28 crc kubenswrapper[5050]: I1123 14:45:28.538290 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/7f4c924c-0293-48c3-b588-0b5db0a2141f-v4-0-config-system-session\") pod \"oauth-openshift-7494c98dcc-jgdkg\" (UID: \"7f4c924c-0293-48c3-b588-0b5db0a2141f\") " pod="openshift-authentication/oauth-openshift-7494c98dcc-jgdkg"
Nov 23 14:45:28 crc kubenswrapper[5050]: I1123 14:45:28.538350 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/7f4c924c-0293-48c3-b588-0b5db0a2141f-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-7494c98dcc-jgdkg\" (UID: \"7f4c924c-0293-48c3-b588-0b5db0a2141f\") " pod="openshift-authentication/oauth-openshift-7494c98dcc-jgdkg"
Nov 23 14:45:28 crc kubenswrapper[5050]: I1123 14:45:28.538385 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/7f4c924c-0293-48c3-b588-0b5db0a2141f-v4-0-config-system-router-certs\") pod \"oauth-openshift-7494c98dcc-jgdkg\" (UID: \"7f4c924c-0293-48c3-b588-0b5db0a2141f\") " pod="openshift-authentication/oauth-openshift-7494c98dcc-jgdkg"
Nov 23 14:45:28 crc kubenswrapper[5050]: I1123 14:45:28.538417 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/7f4c924c-0293-48c3-b588-0b5db0a2141f-audit-dir\") pod \"oauth-openshift-7494c98dcc-jgdkg\" (UID: \"7f4c924c-0293-48c3-b588-0b5db0a2141f\") " pod="openshift-authentication/oauth-openshift-7494c98dcc-jgdkg"
Nov 23 14:45:28 crc kubenswrapper[5050]: I1123 14:45:28.538482 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/7f4c924c-0293-48c3-b588-0b5db0a2141f-v4-0-config-user-template-login\") pod \"oauth-openshift-7494c98dcc-jgdkg\" (UID: \"7f4c924c-0293-48c3-b588-0b5db0a2141f\") " pod="openshift-authentication/oauth-openshift-7494c98dcc-jgdkg"
Nov 23 14:45:28 crc kubenswrapper[5050]: I1123 14:45:28.538511 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/7f4c924c-0293-48c3-b588-0b5db0a2141f-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-7494c98dcc-jgdkg\" (UID: \"7f4c924c-0293-48c3-b588-0b5db0a2141f\") " pod="openshift-authentication/oauth-openshift-7494c98dcc-jgdkg"
Nov 23 14:45:28 crc kubenswrapper[5050]: I1123 14:45:28.538544 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/7f4c924c-0293-48c3-b588-0b5db0a2141f-audit-policies\") pod \"oauth-openshift-7494c98dcc-jgdkg\" (UID: \"7f4c924c-0293-48c3-b588-0b5db0a2141f\") " pod="openshift-authentication/oauth-openshift-7494c98dcc-jgdkg"
Nov 23 14:45:28 crc kubenswrapper[5050]: I1123 14:45:28.538577 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-twkrb\" (UniqueName: \"kubernetes.io/projected/7f4c924c-0293-48c3-b588-0b5db0a2141f-kube-api-access-twkrb\") pod \"oauth-openshift-7494c98dcc-jgdkg\" (UID: \"7f4c924c-0293-48c3-b588-0b5db0a2141f\") " pod="openshift-authentication/oauth-openshift-7494c98dcc-jgdkg"
Nov 23 14:45:28 crc kubenswrapper[5050]: I1123 14:45:28.538615 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/7f4c924c-0293-48c3-b588-0b5db0a2141f-v4-0-config-user-template-error\") pod \"oauth-openshift-7494c98dcc-jgdkg\" (UID: \"7f4c924c-0293-48c3-b588-0b5db0a2141f\") " pod="openshift-authentication/oauth-openshift-7494c98dcc-jgdkg"
Nov 23 14:45:28 crc kubenswrapper[5050]: I1123 14:45:28.538642 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/7f4c924c-0293-48c3-b588-0b5db0a2141f-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-7494c98dcc-jgdkg\" (UID: \"7f4c924c-0293-48c3-b588-0b5db0a2141f\") " pod="openshift-authentication/oauth-openshift-7494c98dcc-jgdkg"
Nov 23 14:45:28 crc kubenswrapper[5050]: I1123 14:45:28.538677 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/7f4c924c-0293-48c3-b588-0b5db0a2141f-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-7494c98dcc-jgdkg\" (UID: \"7f4c924c-0293-48c3-b588-0b5db0a2141f\") " pod="openshift-authentication/oauth-openshift-7494c98dcc-jgdkg"
Nov 23 14:45:28 crc kubenswrapper[5050]: I1123 14:45:28.538712 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/7f4c924c-0293-48c3-b588-0b5db0a2141f-v4-0-config-system-cliconfig\") pod \"oauth-openshift-7494c98dcc-jgdkg\" (UID: \"7f4c924c-0293-48c3-b588-0b5db0a2141f\") " pod="openshift-authentication/oauth-openshift-7494c98dcc-jgdkg"
Nov 23 14:45:28 crc kubenswrapper[5050]: I1123 14:45:28.538763 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/7f4c924c-0293-48c3-b588-0b5db0a2141f-v4-0-config-system-serving-cert\") pod \"oauth-openshift-7494c98dcc-jgdkg\" (UID: \"7f4c924c-0293-48c3-b588-0b5db0a2141f\") " pod="openshift-authentication/oauth-openshift-7494c98dcc-jgdkg"
Nov 23 14:45:28 crc kubenswrapper[5050]: I1123 14:45:28.538797 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/7f4c924c-0293-48c3-b588-0b5db0a2141f-v4-0-config-system-service-ca\") pod \"oauth-openshift-7494c98dcc-jgdkg\" (UID: \"7f4c924c-0293-48c3-b588-0b5db0a2141f\") " pod="openshift-authentication/oauth-openshift-7494c98dcc-jgdkg"
Nov 23 14:45:28 crc kubenswrapper[5050]: I1123 14:45:28.539628 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/7f4c924c-0293-48c3-b588-0b5db0a2141f-audit-dir\") pod \"oauth-openshift-7494c98dcc-jgdkg\" (UID: \"7f4c924c-0293-48c3-b588-0b5db0a2141f\") " pod="openshift-authentication/oauth-openshift-7494c98dcc-jgdkg"
Nov 23 14:45:28 crc kubenswrapper[5050]: I1123 14:45:28.539960 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/7f4c924c-0293-48c3-b588-0b5db0a2141f-v4-0-config-system-service-ca\") pod \"oauth-openshift-7494c98dcc-jgdkg\" (UID: \"7f4c924c-0293-48c3-b588-0b5db0a2141f\") " pod="openshift-authentication/oauth-openshift-7494c98dcc-jgdkg"
Nov 23 14:45:28 crc kubenswrapper[5050]: I1123 14:45:28.540122 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/7f4c924c-0293-48c3-b588-0b5db0a2141f-audit-policies\") pod \"oauth-openshift-7494c98dcc-jgdkg\" (UID: \"7f4c924c-0293-48c3-b588-0b5db0a2141f\") " pod="openshift-authentication/oauth-openshift-7494c98dcc-jgdkg"
Nov 23 14:45:28 crc kubenswrapper[5050]: I1123 14:45:28.540548 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/7f4c924c-0293-48c3-b588-0b5db0a2141f-v4-0-config-system-cliconfig\") pod \"oauth-openshift-7494c98dcc-jgdkg\" (UID: \"7f4c924c-0293-48c3-b588-0b5db0a2141f\") " pod="openshift-authentication/oauth-openshift-7494c98dcc-jgdkg"
Nov 23 14:45:28 crc kubenswrapper[5050]: I1123 14:45:28.540659 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/7f4c924c-0293-48c3-b588-0b5db0a2141f-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-7494c98dcc-jgdkg\" (UID: \"7f4c924c-0293-48c3-b588-0b5db0a2141f\") " pod="openshift-authentication/oauth-openshift-7494c98dcc-jgdkg"
Nov 23 14:45:28 crc kubenswrapper[5050]: I1123 14:45:28.547011 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/7f4c924c-0293-48c3-b588-0b5db0a2141f-v4-0-config-user-template-error\") pod \"oauth-openshift-7494c98dcc-jgdkg\" (UID: \"7f4c924c-0293-48c3-b588-0b5db0a2141f\") " pod="openshift-authentication/oauth-openshift-7494c98dcc-jgdkg"
Nov 23 14:45:28 crc kubenswrapper[5050]: I1123 14:45:28.548986 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/7f4c924c-0293-48c3-b588-0b5db0a2141f-v4-0-config-system-serving-cert\") pod \"oauth-openshift-7494c98dcc-jgdkg\" (UID: \"7f4c924c-0293-48c3-b588-0b5db0a2141f\") " pod="openshift-authentication/oauth-openshift-7494c98dcc-jgdkg"
Nov 23 14:45:28 crc kubenswrapper[5050]: I1123 14:45:28.549767 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/7f4c924c-0293-48c3-b588-0b5db0a2141f-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-7494c98dcc-jgdkg\" (UID: \"7f4c924c-0293-48c3-b588-0b5db0a2141f\") " pod="openshift-authentication/oauth-openshift-7494c98dcc-jgdkg"
Nov 23 14:45:28 crc kubenswrapper[5050]: I1123 14:45:28.550067 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/7f4c924c-0293-48c3-b588-0b5db0a2141f-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-7494c98dcc-jgdkg\" (UID: \"7f4c924c-0293-48c3-b588-0b5db0a2141f\") " pod="openshift-authentication/oauth-openshift-7494c98dcc-jgdkg"
Nov 23 14:45:28 crc kubenswrapper[5050]: I1123 14:45:28.550098 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/7f4c924c-0293-48c3-b588-0b5db0a2141f-v4-0-config-user-template-login\") pod \"oauth-openshift-7494c98dcc-jgdkg\" (UID: \"7f4c924c-0293-48c3-b588-0b5db0a2141f\") " pod="openshift-authentication/oauth-openshift-7494c98dcc-jgdkg"
Nov 23 14:45:28 crc kubenswrapper[5050]: I1123 14:45:28.550122 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/7f4c924c-0293-48c3-b588-0b5db0a2141f-v4-0-config-system-router-certs\") pod \"oauth-openshift-7494c98dcc-jgdkg\" (UID: \"7f4c924c-0293-48c3-b588-0b5db0a2141f\") " pod="openshift-authentication/oauth-openshift-7494c98dcc-jgdkg"
Nov 23 14:45:28 crc kubenswrapper[5050]: I1123 14:45:28.550338 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/7f4c924c-0293-48c3-b588-0b5db0a2141f-v4-0-config-system-session\") pod \"oauth-openshift-7494c98dcc-jgdkg\" (UID: \"7f4c924c-0293-48c3-b588-0b5db0a2141f\") " pod="openshift-authentication/oauth-openshift-7494c98dcc-jgdkg"
Nov 23 14:45:28 crc kubenswrapper[5050]: I1123 14:45:28.550514 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/7f4c924c-0293-48c3-b588-0b5db0a2141f-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-7494c98dcc-jgdkg\" (UID: \"7f4c924c-0293-48c3-b588-0b5db0a2141f\") " pod="openshift-authentication/oauth-openshift-7494c98dcc-jgdkg"
Nov 23 14:45:28 crc kubenswrapper[5050]: I1123 14:45:28.565174 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-twkrb\" (UniqueName: \"kubernetes.io/projected/7f4c924c-0293-48c3-b588-0b5db0a2141f-kube-api-access-twkrb\") pod \"oauth-openshift-7494c98dcc-jgdkg\" (UID: \"7f4c924c-0293-48c3-b588-0b5db0a2141f\") " pod="openshift-authentication/oauth-openshift-7494c98dcc-jgdkg"
Nov 23 14:45:28 crc kubenswrapper[5050]: I1123 14:45:28.702618 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-7494c98dcc-jgdkg"
Nov 23 14:45:28 crc kubenswrapper[5050]: I1123 14:45:28.925871 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-7494c98dcc-jgdkg"]
Nov 23 14:45:29 crc kubenswrapper[5050]: I1123 14:45:29.224936 5050 patch_prober.go:28] interesting pod/machine-config-daemon-hlrlq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 23 14:45:29 crc kubenswrapper[5050]: I1123 14:45:29.225013 5050 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 23 14:45:29 crc kubenswrapper[5050]: I1123 14:45:29.225083 5050 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq"
Nov 23 14:45:29 crc kubenswrapper[5050]: I1123 14:45:29.225962 5050 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"b97978773311507f492064330c0ebbaf7862255a02347599c52ce6bbd13d8922"} pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 23 14:45:29 crc kubenswrapper[5050]: I1123 14:45:29.226044 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" containerID="cri-o://b97978773311507f492064330c0ebbaf7862255a02347599c52ce6bbd13d8922" gracePeriod=600
Nov 23 14:45:29 crc kubenswrapper[5050]: I1123 14:45:29.555821 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5c3f6b64-15bf-4147-be4e-414a2569cb58" path="/var/lib/kubelet/pods/5c3f6b64-15bf-4147-be4e-414a2569cb58/volumes"
Nov 23 14:45:29 crc kubenswrapper[5050]: I1123 14:45:29.564524 5050 generic.go:334] "Generic (PLEG): container finished" podID="1d998909-9470-47ef-87e8-b34f0473682f" containerID="b97978773311507f492064330c0ebbaf7862255a02347599c52ce6bbd13d8922" exitCode=0
Nov 23 14:45:29 crc kubenswrapper[5050]: I1123 14:45:29.564609 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" event={"ID":"1d998909-9470-47ef-87e8-b34f0473682f","Type":"ContainerDied","Data":"b97978773311507f492064330c0ebbaf7862255a02347599c52ce6bbd13d8922"}
Nov 23 14:45:29 crc kubenswrapper[5050]: I1123 14:45:29.564678 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" event={"ID":"1d998909-9470-47ef-87e8-b34f0473682f","Type":"ContainerStarted","Data":"86f3ccfe0d53f78ac9dca993751796d574dec20f49c1a0cf613c3239d4e75023"}
Nov 23 14:45:29 crc kubenswrapper[5050]: I1123 14:45:29.566493 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-7494c98dcc-jgdkg" event={"ID":"7f4c924c-0293-48c3-b588-0b5db0a2141f","Type":"ContainerStarted","Data":"879c559ed2f3964e1456c1cb43204a86666b277011b31dfd074974baefc39256"}
Nov 23 14:45:29 crc kubenswrapper[5050]: I1123 14:45:29.566531 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-7494c98dcc-jgdkg" event={"ID":"7f4c924c-0293-48c3-b588-0b5db0a2141f","Type":"ContainerStarted","Data":"d5a089871fba490ef1f7456e909cbb696bc0abc431a0351b3f8f8566d98186fc"}
Nov 23 14:45:29 crc kubenswrapper[5050]: I1123 14:45:29.566808 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-7494c98dcc-jgdkg"
Nov 23 14:45:29 crc kubenswrapper[5050]: I1123 14:45:29.606934 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-7494c98dcc-jgdkg" podStartSLOduration=28.6069131 podStartE2EDuration="28.6069131s" podCreationTimestamp="2025-11-23 14:45:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 14:45:29.606750906 +0000 UTC m=+224.773747391" watchObservedRunningTime="2025-11-23 14:45:29.6069131 +0000 UTC m=+224.773909575"
Nov 23 14:45:29 crc kubenswrapper[5050]: I1123 14:45:29.851356 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-7494c98dcc-jgdkg"
Nov 23 14:45:43 crc kubenswrapper[5050]: I1123 14:45:43.145098 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-tpll6"]
Nov 23 14:45:43 crc kubenswrapper[5050]: I1123 14:45:43.146202 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-tpll6" podUID="c57083c1-0f6f-4e07-aa17-218073eaa9c0" containerName="registry-server" containerID="cri-o://eb07d0ee5705478f18b2b80e68d5725374d7b7a34e1e4782c9e6aad64b6a0be8" gracePeriod=30
Nov 23 14:45:43 crc kubenswrapper[5050]: I1123 14:45:43.158197 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-n9s79"]
Nov 23 14:45:43 crc kubenswrapper[5050]: I1123 14:45:43.158460 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-n9s79" podUID="666a5515-92ff-44ef-8e85-f7205f823416" containerName="registry-server" containerID="cri-o://25f735542c1bd1e83ab5944a3ee7501458c50cc2c7d146b70581a081ace8bad0" gracePeriod=30
Nov 23 14:45:43 crc kubenswrapper[5050]: I1123 14:45:43.169553 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-wr6q4"]
Nov 23 14:45:43 crc kubenswrapper[5050]: I1123 14:45:43.169772 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/marketplace-operator-79b997595-wr6q4" podUID="fef4a22f-705c-4dda-8c74-ace62ff4ce68" containerName="marketplace-operator" containerID="cri-o://03d5c8091255edd0e6893789b73cb6b86e8ba0b5d04219d4eca6e11ed0df292f" gracePeriod=30
Nov 23 14:45:43 crc kubenswrapper[5050]: I1123 14:45:43.177379 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-7dz9b"]
Nov 23 14:45:43 crc kubenswrapper[5050]: I1123 14:45:43.178635 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-7dz9b" podUID="a408f71a-76c2-4c88-9b67-a08257da28eb" containerName="registry-server" containerID="cri-o://3085f65409adad610cc9a5992ccb91e34cb001dbedd474fed92a98b9e3e1f81a" gracePeriod=30
Nov 23 14:45:43 crc kubenswrapper[5050]: I1123 14:45:43.184854 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-vpr78"]
Nov 23 14:45:43 crc kubenswrapper[5050]: I1123 14:45:43.191926 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-vpr78" podUID="085c7e8e-434a-4bfa-b629-59f2191bc2e9" containerName="registry-server" containerID="cri-o://e15fdfb5b6f09ba8053589fb059eadfc64ecad68cf69e66583789f27b8602cda" gracePeriod=30
Nov 23 14:45:43 crc kubenswrapper[5050]: I1123 14:45:43.194583 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-kdvnh"]
Nov 23 14:45:43 crc kubenswrapper[5050]: I1123 14:45:43.195311 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-kdvnh"
Nov 23 14:45:43 crc kubenswrapper[5050]: I1123 14:45:43.218222 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-kdvnh"]
Nov 23 14:45:43 crc kubenswrapper[5050]: I1123 14:45:43.385670 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/6c1a2056-be78-4ef3-b451-867a8b230645-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-kdvnh\" (UID: \"6c1a2056-be78-4ef3-b451-867a8b230645\") " pod="openshift-marketplace/marketplace-operator-79b997595-kdvnh"
Nov 23 14:45:43 crc kubenswrapper[5050]: I1123 14:45:43.385713 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-plxqz\" (UniqueName: \"kubernetes.io/projected/6c1a2056-be78-4ef3-b451-867a8b230645-kube-api-access-plxqz\") pod \"marketplace-operator-79b997595-kdvnh\" (UID: \"6c1a2056-be78-4ef3-b451-867a8b230645\") " pod="openshift-marketplace/marketplace-operator-79b997595-kdvnh"
Nov 23 14:45:43 crc kubenswrapper[5050]: I1123 14:45:43.385734 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/6c1a2056-be78-4ef3-b451-867a8b230645-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-kdvnh\" (UID: \"6c1a2056-be78-4ef3-b451-867a8b230645\") " pod="openshift-marketplace/marketplace-operator-79b997595-kdvnh"
Nov 23 14:45:43 crc kubenswrapper[5050]: I1123 14:45:43.487924 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/6c1a2056-be78-4ef3-b451-867a8b230645-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-kdvnh\" (UID: \"6c1a2056-be78-4ef3-b451-867a8b230645\") " pod="openshift-marketplace/marketplace-operator-79b997595-kdvnh"
Nov 23 14:45:43 crc kubenswrapper[5050]: I1123 14:45:43.487984 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-plxqz\" (UniqueName: \"kubernetes.io/projected/6c1a2056-be78-4ef3-b451-867a8b230645-kube-api-access-plxqz\") pod \"marketplace-operator-79b997595-kdvnh\" (UID: \"6c1a2056-be78-4ef3-b451-867a8b230645\") " pod="openshift-marketplace/marketplace-operator-79b997595-kdvnh"
Nov 23 14:45:43 crc kubenswrapper[5050]: I1123 14:45:43.488027 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/6c1a2056-be78-4ef3-b451-867a8b230645-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-kdvnh\" (UID: \"6c1a2056-be78-4ef3-b451-867a8b230645\") " pod="openshift-marketplace/marketplace-operator-79b997595-kdvnh"
Nov 23 14:45:43 crc kubenswrapper[5050]: I1123 14:45:43.489597 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/6c1a2056-be78-4ef3-b451-867a8b230645-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-kdvnh\" (UID: \"6c1a2056-be78-4ef3-b451-867a8b230645\") " pod="openshift-marketplace/marketplace-operator-79b997595-kdvnh"
Nov 23 14:45:43 crc kubenswrapper[5050]: I1123 14:45:43.501949 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/6c1a2056-be78-4ef3-b451-867a8b230645-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-kdvnh\" (UID: \"6c1a2056-be78-4ef3-b451-867a8b230645\") " pod="openshift-marketplace/marketplace-operator-79b997595-kdvnh"
Nov 23 14:45:43 crc kubenswrapper[5050]: I1123 14:45:43.506544 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-plxqz\" (UniqueName: \"kubernetes.io/projected/6c1a2056-be78-4ef3-b451-867a8b230645-kube-api-access-plxqz\") pod \"marketplace-operator-79b997595-kdvnh\" (UID: \"6c1a2056-be78-4ef3-b451-867a8b230645\") " pod="openshift-marketplace/marketplace-operator-79b997595-kdvnh"
Nov 23 14:45:43 crc kubenswrapper[5050]: I1123 14:45:43.514298 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-kdvnh"
Nov 23 14:45:43 crc kubenswrapper[5050]: I1123 14:45:43.665492 5050 generic.go:334] "Generic (PLEG): container finished" podID="fef4a22f-705c-4dda-8c74-ace62ff4ce68" containerID="03d5c8091255edd0e6893789b73cb6b86e8ba0b5d04219d4eca6e11ed0df292f" exitCode=0
Nov 23 14:45:43 crc kubenswrapper[5050]: I1123 14:45:43.665724 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-wr6q4" event={"ID":"fef4a22f-705c-4dda-8c74-ace62ff4ce68","Type":"ContainerDied","Data":"03d5c8091255edd0e6893789b73cb6b86e8ba0b5d04219d4eca6e11ed0df292f"}
Nov 23 14:45:43 crc kubenswrapper[5050]: I1123 14:45:43.673113 5050 generic.go:334] "Generic (PLEG): container finished" podID="c57083c1-0f6f-4e07-aa17-218073eaa9c0" containerID="eb07d0ee5705478f18b2b80e68d5725374d7b7a34e1e4782c9e6aad64b6a0be8" exitCode=0
Nov 23 14:45:43 crc kubenswrapper[5050]: I1123 14:45:43.673246 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tpll6" event={"ID":"c57083c1-0f6f-4e07-aa17-218073eaa9c0","Type":"ContainerDied","Data":"eb07d0ee5705478f18b2b80e68d5725374d7b7a34e1e4782c9e6aad64b6a0be8"}
Nov 23 14:45:43 crc kubenswrapper[5050]: I1123 14:45:43.673794 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-n9s79"
Nov 23 14:45:43 crc kubenswrapper[5050]: I1123 14:45:43.686360 5050 generic.go:334] "Generic (PLEG): container finished" podID="666a5515-92ff-44ef-8e85-f7205f823416" containerID="25f735542c1bd1e83ab5944a3ee7501458c50cc2c7d146b70581a081ace8bad0" exitCode=0
Nov 23 14:45:43 crc kubenswrapper[5050]: I1123 14:45:43.686439 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-n9s79" event={"ID":"666a5515-92ff-44ef-8e85-f7205f823416","Type":"ContainerDied","Data":"25f735542c1bd1e83ab5944a3ee7501458c50cc2c7d146b70581a081ace8bad0"}
Nov 23 14:45:43 crc kubenswrapper[5050]: I1123 14:45:43.686498 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-n9s79" event={"ID":"666a5515-92ff-44ef-8e85-f7205f823416","Type":"ContainerDied","Data":"4172b5725c18c0e47f60b0042bb7783d83c2793ae55b15c91d80413311df54e7"}
Nov 23 14:45:43 crc kubenswrapper[5050]: I1123 14:45:43.686517 5050 scope.go:117] "RemoveContainer" containerID="25f735542c1bd1e83ab5944a3ee7501458c50cc2c7d146b70581a081ace8bad0"
Nov 23 14:45:43 crc kubenswrapper[5050]: I1123 14:45:43.708674 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-tpll6"
Nov 23 14:45:43 crc kubenswrapper[5050]: I1123 14:45:43.709010 5050 generic.go:334] "Generic (PLEG): container finished" podID="a408f71a-76c2-4c88-9b67-a08257da28eb" containerID="3085f65409adad610cc9a5992ccb91e34cb001dbedd474fed92a98b9e3e1f81a" exitCode=0
Nov 23 14:45:43 crc kubenswrapper[5050]: I1123 14:45:43.709215 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7dz9b" event={"ID":"a408f71a-76c2-4c88-9b67-a08257da28eb","Type":"ContainerDied","Data":"3085f65409adad610cc9a5992ccb91e34cb001dbedd474fed92a98b9e3e1f81a"}
Nov 23 14:45:43 crc kubenswrapper[5050]: I1123 14:45:43.718548 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-vpr78"
Nov 23 14:45:43 crc kubenswrapper[5050]: I1123 14:45:43.719456 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-7dz9b"
Nov 23 14:45:43 crc kubenswrapper[5050]: I1123 14:45:43.720960 5050 generic.go:334] "Generic (PLEG): container finished" podID="085c7e8e-434a-4bfa-b629-59f2191bc2e9" containerID="e15fdfb5b6f09ba8053589fb059eadfc64ecad68cf69e66583789f27b8602cda" exitCode=0
Nov 23 14:45:43 crc kubenswrapper[5050]: I1123 14:45:43.721018 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vpr78" event={"ID":"085c7e8e-434a-4bfa-b629-59f2191bc2e9","Type":"ContainerDied","Data":"e15fdfb5b6f09ba8053589fb059eadfc64ecad68cf69e66583789f27b8602cda"}
Nov 23 14:45:43 crc kubenswrapper[5050]: I1123 14:45:43.722597 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-wr6q4"
Nov 23 14:45:43 crc kubenswrapper[5050]: I1123 14:45:43.727481 5050 scope.go:117] "RemoveContainer" containerID="498486eb7d78da24d749e0f1d0707cc71d162d9363e354a83b684af58f5cd1b6"
Nov 23 14:45:43 crc kubenswrapper[5050]: I1123 14:45:43.765808 5050 scope.go:117] "RemoveContainer" containerID="59fd1311c7d3c139db3ba5bfaba5d3cf1c80672ebbc4ba24a95b5a6069e7245d"
Nov 23 14:45:43 crc kubenswrapper[5050]: I1123 14:45:43.792463 5050 scope.go:117] "RemoveContainer" containerID="25f735542c1bd1e83ab5944a3ee7501458c50cc2c7d146b70581a081ace8bad0"
Nov 23 14:45:43 crc kubenswrapper[5050]: E1123 14:45:43.793209 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"25f735542c1bd1e83ab5944a3ee7501458c50cc2c7d146b70581a081ace8bad0\": container with ID starting with 25f735542c1bd1e83ab5944a3ee7501458c50cc2c7d146b70581a081ace8bad0 not found: ID does not exist" containerID="25f735542c1bd1e83ab5944a3ee7501458c50cc2c7d146b70581a081ace8bad0"
Nov 23 14:45:43 crc kubenswrapper[5050]: I1123 14:45:43.793266 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"25f735542c1bd1e83ab5944a3ee7501458c50cc2c7d146b70581a081ace8bad0"} err="failed to get container status \"25f735542c1bd1e83ab5944a3ee7501458c50cc2c7d146b70581a081ace8bad0\": rpc error: code = NotFound desc = could not find container \"25f735542c1bd1e83ab5944a3ee7501458c50cc2c7d146b70581a081ace8bad0\": container with ID starting with 25f735542c1bd1e83ab5944a3ee7501458c50cc2c7d146b70581a081ace8bad0 not found: ID does not exist"
Nov 23 14:45:43 crc kubenswrapper[5050]: I1123 14:45:43.793300 5050 scope.go:117] "RemoveContainer" containerID="498486eb7d78da24d749e0f1d0707cc71d162d9363e354a83b684af58f5cd1b6"
Nov 23 14:45:43 crc kubenswrapper[5050]: E1123 14:45:43.793854 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"498486eb7d78da24d749e0f1d0707cc71d162d9363e354a83b684af58f5cd1b6\": container with ID starting with 498486eb7d78da24d749e0f1d0707cc71d162d9363e354a83b684af58f5cd1b6 not found: ID does not exist" containerID="498486eb7d78da24d749e0f1d0707cc71d162d9363e354a83b684af58f5cd1b6"
Nov 23 14:45:43 crc kubenswrapper[5050]: I1123 14:45:43.793889 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"498486eb7d78da24d749e0f1d0707cc71d162d9363e354a83b684af58f5cd1b6"} err="failed to get container status \"498486eb7d78da24d749e0f1d0707cc71d162d9363e354a83b684af58f5cd1b6\": rpc error: code = NotFound desc = could not find container \"498486eb7d78da24d749e0f1d0707cc71d162d9363e354a83b684af58f5cd1b6\": container with ID starting with 498486eb7d78da24d749e0f1d0707cc71d162d9363e354a83b684af58f5cd1b6 not found: ID does not exist"
Nov 23 14:45:43 crc kubenswrapper[5050]: I1123 14:45:43.793915 5050 scope.go:117] "RemoveContainer" containerID="59fd1311c7d3c139db3ba5bfaba5d3cf1c80672ebbc4ba24a95b5a6069e7245d"
Nov 23 14:45:43 crc kubenswrapper[5050]: E1123 14:45:43.794166 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"59fd1311c7d3c139db3ba5bfaba5d3cf1c80672ebbc4ba24a95b5a6069e7245d\": container with ID starting with 59fd1311c7d3c139db3ba5bfaba5d3cf1c80672ebbc4ba24a95b5a6069e7245d not found: ID does not exist" containerID="59fd1311c7d3c139db3ba5bfaba5d3cf1c80672ebbc4ba24a95b5a6069e7245d"
Nov 23 14:45:43 crc kubenswrapper[5050]: I1123 14:45:43.794241 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"59fd1311c7d3c139db3ba5bfaba5d3cf1c80672ebbc4ba24a95b5a6069e7245d"} err="failed to get container status \"59fd1311c7d3c139db3ba5bfaba5d3cf1c80672ebbc4ba24a95b5a6069e7245d\": rpc error: code = NotFound desc = could not find container \"59fd1311c7d3c139db3ba5bfaba5d3cf1c80672ebbc4ba24a95b5a6069e7245d\": container with ID starting with 59fd1311c7d3c139db3ba5bfaba5d3cf1c80672ebbc4ba24a95b5a6069e7245d not found: ID does not exist"
Nov 23 14:45:43 crc kubenswrapper[5050]: I1123 14:45:43.794269 5050 scope.go:117] "RemoveContainer" containerID="e15fdfb5b6f09ba8053589fb059eadfc64ecad68cf69e66583789f27b8602cda"
Nov 23 14:45:43 crc kubenswrapper[5050]: I1123 14:45:43.796162 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/666a5515-92ff-44ef-8e85-f7205f823416-utilities\") pod \"666a5515-92ff-44ef-8e85-f7205f823416\" (UID: \"666a5515-92ff-44ef-8e85-f7205f823416\") "
Nov 23 14:45:43 crc kubenswrapper[5050]: I1123 14:45:43.796330 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6vmb4\" (UniqueName: \"kubernetes.io/projected/666a5515-92ff-44ef-8e85-f7205f823416-kube-api-access-6vmb4\") pod \"666a5515-92ff-44ef-8e85-f7205f823416\" (UID: \"666a5515-92ff-44ef-8e85-f7205f823416\") "
Nov 23 14:45:43 crc kubenswrapper[5050]: I1123 14:45:43.796363 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/666a5515-92ff-44ef-8e85-f7205f823416-catalog-content\") pod \"666a5515-92ff-44ef-8e85-f7205f823416\" (UID: \"666a5515-92ff-44ef-8e85-f7205f823416\") "
Nov 23 14:45:43 crc kubenswrapper[5050]: I1123 14:45:43.798308 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/666a5515-92ff-44ef-8e85-f7205f823416-utilities" (OuterVolumeSpecName: "utilities") pod "666a5515-92ff-44ef-8e85-f7205f823416" (UID: "666a5515-92ff-44ef-8e85-f7205f823416"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 23 14:45:43 crc kubenswrapper[5050]: I1123 14:45:43.803816 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/666a5515-92ff-44ef-8e85-f7205f823416-kube-api-access-6vmb4" (OuterVolumeSpecName: "kube-api-access-6vmb4") pod "666a5515-92ff-44ef-8e85-f7205f823416" (UID: "666a5515-92ff-44ef-8e85-f7205f823416"). InnerVolumeSpecName "kube-api-access-6vmb4". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 23 14:45:43 crc kubenswrapper[5050]: I1123 14:45:43.817610 5050 scope.go:117] "RemoveContainer" containerID="3e520e7710257ec2fa279f927149d3d2cc091679172a5d88d4a30d65dd1182eb"
Nov 23 14:45:43 crc kubenswrapper[5050]: I1123 14:45:43.837477 5050 scope.go:117] "RemoveContainer" containerID="8911b3a870cbc75cd9e32880d18a25eb6e3d3f118517cd5b15087a9811b36ad5"
Nov 23 14:45:43 crc kubenswrapper[5050]: I1123 14:45:43.854318 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/666a5515-92ff-44ef-8e85-f7205f823416-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "666a5515-92ff-44ef-8e85-f7205f823416" (UID: "666a5515-92ff-44ef-8e85-f7205f823416"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 23 14:45:43 crc kubenswrapper[5050]: I1123 14:45:43.897591 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xw44v\" (UniqueName: \"kubernetes.io/projected/fef4a22f-705c-4dda-8c74-ace62ff4ce68-kube-api-access-xw44v\") pod \"fef4a22f-705c-4dda-8c74-ace62ff4ce68\" (UID: \"fef4a22f-705c-4dda-8c74-ace62ff4ce68\") "
Nov 23 14:45:43 crc kubenswrapper[5050]: I1123 14:45:43.897673 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qdh75\" (UniqueName: \"kubernetes.io/projected/c57083c1-0f6f-4e07-aa17-218073eaa9c0-kube-api-access-qdh75\") pod \"c57083c1-0f6f-4e07-aa17-218073eaa9c0\" (UID: \"c57083c1-0f6f-4e07-aa17-218073eaa9c0\") "
Nov 23 14:45:43 crc kubenswrapper[5050]: I1123 14:45:43.897720 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/085c7e8e-434a-4bfa-b629-59f2191bc2e9-catalog-content\") pod \"085c7e8e-434a-4bfa-b629-59f2191bc2e9\" (UID: \"085c7e8e-434a-4bfa-b629-59f2191bc2e9\") "
Nov 23 14:45:43 crc kubenswrapper[5050]: I1123 14:45:43.897771 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-htvz6\" (UniqueName: \"kubernetes.io/projected/a408f71a-76c2-4c88-9b67-a08257da28eb-kube-api-access-htvz6\") pod \"a408f71a-76c2-4c88-9b67-a08257da28eb\" (UID: \"a408f71a-76c2-4c88-9b67-a08257da28eb\") "
Nov 23 14:45:43 crc kubenswrapper[5050]: I1123 14:45:43.897809 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a408f71a-76c2-4c88-9b67-a08257da28eb-utilities\") pod \"a408f71a-76c2-4c88-9b67-a08257da28eb\" (UID: \"a408f71a-76c2-4c88-9b67-a08257da28eb\") "
Nov 23 14:45:43 crc kubenswrapper[5050]: I1123 14:45:43.897844 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/fef4a22f-705c-4dda-8c74-ace62ff4ce68-marketplace-operator-metrics\") pod \"fef4a22f-705c-4dda-8c74-ace62ff4ce68\" (UID: \"fef4a22f-705c-4dda-8c74-ace62ff4ce68\") "
Nov 23 14:45:43 crc kubenswrapper[5050]: I1123 14:45:43.897881 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c57083c1-0f6f-4e07-aa17-218073eaa9c0-utilities\") pod \"c57083c1-0f6f-4e07-aa17-218073eaa9c0\" (UID: \"c57083c1-0f6f-4e07-aa17-218073eaa9c0\") "
Nov 23 14:45:43 crc kubenswrapper[5050]: I1123 14:45:43.897903 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c57083c1-0f6f-4e07-aa17-218073eaa9c0-catalog-content\") pod \"c57083c1-0f6f-4e07-aa17-218073eaa9c0\" (UID: \"c57083c1-0f6f-4e07-aa17-218073eaa9c0\") "
Nov 23 14:45:43 crc kubenswrapper[5050]: I1123 14:45:43.897949 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/085c7e8e-434a-4bfa-b629-59f2191bc2e9-utilities\") pod \"085c7e8e-434a-4bfa-b629-59f2191bc2e9\" (UID: \"085c7e8e-434a-4bfa-b629-59f2191bc2e9\") "
Nov 23 14:45:43 crc kubenswrapper[5050]: I1123 14:45:43.898058 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wrtjh\" (UniqueName: \"kubernetes.io/projected/085c7e8e-434a-4bfa-b629-59f2191bc2e9-kube-api-access-wrtjh\") pod \"085c7e8e-434a-4bfa-b629-59f2191bc2e9\" (UID: \"085c7e8e-434a-4bfa-b629-59f2191bc2e9\") "
Nov 23 14:45:43 crc kubenswrapper[5050]: I1123 14:45:43.898955 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/085c7e8e-434a-4bfa-b629-59f2191bc2e9-utilities" (OuterVolumeSpecName: "utilities") pod "085c7e8e-434a-4bfa-b629-59f2191bc2e9" (UID: "085c7e8e-434a-4bfa-b629-59f2191bc2e9"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 23 14:45:43 crc kubenswrapper[5050]: I1123 14:45:43.899035 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a408f71a-76c2-4c88-9b67-a08257da28eb-catalog-content\") pod \"a408f71a-76c2-4c88-9b67-a08257da28eb\" (UID: \"a408f71a-76c2-4c88-9b67-a08257da28eb\") "
Nov 23 14:45:43 crc kubenswrapper[5050]: I1123 14:45:43.899077 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/fef4a22f-705c-4dda-8c74-ace62ff4ce68-marketplace-trusted-ca\") pod \"fef4a22f-705c-4dda-8c74-ace62ff4ce68\" (UID: \"fef4a22f-705c-4dda-8c74-ace62ff4ce68\") "
Nov 23 14:45:43 crc kubenswrapper[5050]: I1123 14:45:43.899053 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a408f71a-76c2-4c88-9b67-a08257da28eb-utilities" (OuterVolumeSpecName: "utilities") pod "a408f71a-76c2-4c88-9b67-a08257da28eb" (UID: "a408f71a-76c2-4c88-9b67-a08257da28eb"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 23 14:45:43 crc kubenswrapper[5050]: I1123 14:45:43.899387 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c57083c1-0f6f-4e07-aa17-218073eaa9c0-utilities" (OuterVolumeSpecName: "utilities") pod "c57083c1-0f6f-4e07-aa17-218073eaa9c0" (UID: "c57083c1-0f6f-4e07-aa17-218073eaa9c0"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 23 14:45:43 crc kubenswrapper[5050]: I1123 14:45:43.899637 5050 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/666a5515-92ff-44ef-8e85-f7205f823416-utilities\") on node \"crc\" DevicePath \"\""
Nov 23 14:45:43 crc kubenswrapper[5050]: I1123 14:45:43.899663 5050 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a408f71a-76c2-4c88-9b67-a08257da28eb-utilities\") on node \"crc\" DevicePath \"\""
Nov 23 14:45:43 crc kubenswrapper[5050]: I1123 14:45:43.899674 5050 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c57083c1-0f6f-4e07-aa17-218073eaa9c0-utilities\") on node \"crc\" DevicePath \"\""
Nov 23 14:45:43 crc kubenswrapper[5050]: I1123 14:45:43.899684 5050 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/085c7e8e-434a-4bfa-b629-59f2191bc2e9-utilities\") on node \"crc\" DevicePath \"\""
Nov 23 14:45:43 crc kubenswrapper[5050]: I1123 14:45:43.899694 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6vmb4\" (UniqueName: \"kubernetes.io/projected/666a5515-92ff-44ef-8e85-f7205f823416-kube-api-access-6vmb4\") on node \"crc\" DevicePath \"\""
Nov 23 14:45:43 crc kubenswrapper[5050]: I1123 14:45:43.899707 5050 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/666a5515-92ff-44ef-8e85-f7205f823416-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 23 14:45:43 crc kubenswrapper[5050]: I1123 14:45:43.900426 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fef4a22f-705c-4dda-8c74-ace62ff4ce68-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "fef4a22f-705c-4dda-8c74-ace62ff4ce68" (UID: "fef4a22f-705c-4dda-8c74-ace62ff4ce68"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 23 14:45:43 crc kubenswrapper[5050]: I1123 14:45:43.901613 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/085c7e8e-434a-4bfa-b629-59f2191bc2e9-kube-api-access-wrtjh" (OuterVolumeSpecName: "kube-api-access-wrtjh") pod "085c7e8e-434a-4bfa-b629-59f2191bc2e9" (UID: "085c7e8e-434a-4bfa-b629-59f2191bc2e9"). InnerVolumeSpecName "kube-api-access-wrtjh". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 23 14:45:43 crc kubenswrapper[5050]: I1123 14:45:43.901770 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fef4a22f-705c-4dda-8c74-ace62ff4ce68-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "fef4a22f-705c-4dda-8c74-ace62ff4ce68" (UID: "fef4a22f-705c-4dda-8c74-ace62ff4ce68"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 23 14:45:43 crc kubenswrapper[5050]: I1123 14:45:43.902657 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c57083c1-0f6f-4e07-aa17-218073eaa9c0-kube-api-access-qdh75" (OuterVolumeSpecName: "kube-api-access-qdh75") pod "c57083c1-0f6f-4e07-aa17-218073eaa9c0" (UID: "c57083c1-0f6f-4e07-aa17-218073eaa9c0"). InnerVolumeSpecName "kube-api-access-qdh75". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 23 14:45:43 crc kubenswrapper[5050]: I1123 14:45:43.902996 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fef4a22f-705c-4dda-8c74-ace62ff4ce68-kube-api-access-xw44v" (OuterVolumeSpecName: "kube-api-access-xw44v") pod "fef4a22f-705c-4dda-8c74-ace62ff4ce68" (UID: "fef4a22f-705c-4dda-8c74-ace62ff4ce68"). InnerVolumeSpecName "kube-api-access-xw44v". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 23 14:45:43 crc kubenswrapper[5050]: I1123 14:45:43.916294 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a408f71a-76c2-4c88-9b67-a08257da28eb-kube-api-access-htvz6" (OuterVolumeSpecName: "kube-api-access-htvz6") pod "a408f71a-76c2-4c88-9b67-a08257da28eb" (UID: "a408f71a-76c2-4c88-9b67-a08257da28eb"). InnerVolumeSpecName "kube-api-access-htvz6". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 23 14:45:43 crc kubenswrapper[5050]: I1123 14:45:43.923051 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a408f71a-76c2-4c88-9b67-a08257da28eb-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a408f71a-76c2-4c88-9b67-a08257da28eb" (UID: "a408f71a-76c2-4c88-9b67-a08257da28eb"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 23 14:45:43 crc kubenswrapper[5050]: I1123 14:45:43.961381 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c57083c1-0f6f-4e07-aa17-218073eaa9c0-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c57083c1-0f6f-4e07-aa17-218073eaa9c0" (UID: "c57083c1-0f6f-4e07-aa17-218073eaa9c0"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 14:45:44 crc kubenswrapper[5050]: I1123 14:45:44.001005 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xw44v\" (UniqueName: \"kubernetes.io/projected/fef4a22f-705c-4dda-8c74-ace62ff4ce68-kube-api-access-xw44v\") on node \"crc\" DevicePath \"\"" Nov 23 14:45:44 crc kubenswrapper[5050]: I1123 14:45:44.001044 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qdh75\" (UniqueName: \"kubernetes.io/projected/c57083c1-0f6f-4e07-aa17-218073eaa9c0-kube-api-access-qdh75\") on node \"crc\" DevicePath \"\"" Nov 23 14:45:44 crc kubenswrapper[5050]: I1123 14:45:44.001053 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-htvz6\" (UniqueName: \"kubernetes.io/projected/a408f71a-76c2-4c88-9b67-a08257da28eb-kube-api-access-htvz6\") on node \"crc\" DevicePath \"\"" Nov 23 14:45:44 crc kubenswrapper[5050]: I1123 14:45:44.001062 5050 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/fef4a22f-705c-4dda-8c74-ace62ff4ce68-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Nov 23 14:45:44 crc kubenswrapper[5050]: I1123 14:45:44.001073 5050 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c57083c1-0f6f-4e07-aa17-218073eaa9c0-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 23 14:45:44 crc kubenswrapper[5050]: I1123 14:45:44.001082 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wrtjh\" (UniqueName: \"kubernetes.io/projected/085c7e8e-434a-4bfa-b629-59f2191bc2e9-kube-api-access-wrtjh\") on node \"crc\" DevicePath \"\"" Nov 23 14:45:44 crc kubenswrapper[5050]: I1123 14:45:44.001090 5050 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a408f71a-76c2-4c88-9b67-a08257da28eb-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 23 14:45:44 crc kubenswrapper[5050]: I1123 14:45:44.001097 5050 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/fef4a22f-705c-4dda-8c74-ace62ff4ce68-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 23 14:45:44 crc kubenswrapper[5050]: I1123 14:45:44.031512 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/085c7e8e-434a-4bfa-b629-59f2191bc2e9-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "085c7e8e-434a-4bfa-b629-59f2191bc2e9" (UID: "085c7e8e-434a-4bfa-b629-59f2191bc2e9"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 14:45:44 crc kubenswrapper[5050]: I1123 14:45:44.077410 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-kdvnh"] Nov 23 14:45:44 crc kubenswrapper[5050]: I1123 14:45:44.102206 5050 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/085c7e8e-434a-4bfa-b629-59f2191bc2e9-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 23 14:45:44 crc kubenswrapper[5050]: I1123 14:45:44.728856 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7dz9b" event={"ID":"a408f71a-76c2-4c88-9b67-a08257da28eb","Type":"ContainerDied","Data":"663ff4a50afca895907b1c4a0a7a3c625d1ecbe4a06296e3c7a38a1715842020"} Nov 23 14:45:44 crc kubenswrapper[5050]: I1123 14:45:44.728883 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-7dz9b" Nov 23 14:45:44 crc kubenswrapper[5050]: I1123 14:45:44.729282 5050 scope.go:117] "RemoveContainer" containerID="3085f65409adad610cc9a5992ccb91e34cb001dbedd474fed92a98b9e3e1f81a" Nov 23 14:45:44 crc kubenswrapper[5050]: I1123 14:45:44.731156 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-kdvnh" event={"ID":"6c1a2056-be78-4ef3-b451-867a8b230645","Type":"ContainerStarted","Data":"45a95afc894e47bb37f55c04781a60d2e54a54449583d33dbb4e9756570314ff"} Nov 23 14:45:44 crc kubenswrapper[5050]: I1123 14:45:44.731177 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-kdvnh" event={"ID":"6c1a2056-be78-4ef3-b451-867a8b230645","Type":"ContainerStarted","Data":"fd58f4be1f47cb7045ebb02f48b343492c4418fca176724dd8d68ef44c97faab"} Nov 23 14:45:44 crc kubenswrapper[5050]: I1123 14:45:44.731998 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-kdvnh" Nov 23 14:45:44 crc kubenswrapper[5050]: I1123 14:45:44.733958 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-vpr78" Nov 23 14:45:44 crc kubenswrapper[5050]: I1123 14:45:44.733964 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vpr78" event={"ID":"085c7e8e-434a-4bfa-b629-59f2191bc2e9","Type":"ContainerDied","Data":"c2189e1b7605af172c0543a8f8d3d01a4cb19febd2c928e49f0529c78fdb50bb"} Nov 23 14:45:44 crc kubenswrapper[5050]: I1123 14:45:44.738225 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-wr6q4" Nov 23 14:45:44 crc kubenswrapper[5050]: I1123 14:45:44.738421 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-wr6q4" event={"ID":"fef4a22f-705c-4dda-8c74-ace62ff4ce68","Type":"ContainerDied","Data":"8f1bfae0b30c257aa8fcbbb6436d37e468068c0ba499a4f361c59a19dbc1feaf"} Nov 23 14:45:44 crc kubenswrapper[5050]: I1123 14:45:44.739648 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-kdvnh" Nov 23 14:45:44 crc kubenswrapper[5050]: I1123 14:45:44.748892 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-tpll6" Nov 23 14:45:44 crc kubenswrapper[5050]: I1123 14:45:44.748870 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tpll6" event={"ID":"c57083c1-0f6f-4e07-aa17-218073eaa9c0","Type":"ContainerDied","Data":"50686f234ada4a501532d7891e53c0491293fa9a1f6446e5064b377ec0db3310"} Nov 23 14:45:44 crc kubenswrapper[5050]: I1123 14:45:44.750471 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-n9s79" Nov 23 14:45:44 crc kubenswrapper[5050]: I1123 14:45:44.753907 5050 scope.go:117] "RemoveContainer" containerID="334a33cefa4534c0d81b9693c7e6fea419c5a02d6388f55be64585d162f9e659" Nov 23 14:45:44 crc kubenswrapper[5050]: I1123 14:45:44.766030 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-kdvnh" podStartSLOduration=1.765989321 podStartE2EDuration="1.765989321s" podCreationTimestamp="2025-11-23 14:45:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 14:45:44.763380258 +0000 UTC m=+239.930376733" watchObservedRunningTime="2025-11-23 14:45:44.765989321 +0000 UTC m=+239.932985806" Nov 23 14:45:44 crc kubenswrapper[5050]: I1123 14:45:44.796159 5050 scope.go:117] "RemoveContainer" containerID="f2cd6510174cb6aa7413144286a84cf1ddd5146b921742d13021aca7fe58c412" Nov 23 14:45:44 crc kubenswrapper[5050]: I1123 14:45:44.817963 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-7dz9b"] Nov 23 14:45:44 crc kubenswrapper[5050]: I1123 14:45:44.827927 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-7dz9b"] Nov 23 14:45:44 crc kubenswrapper[5050]: I1123 14:45:44.832663 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-tpll6"] Nov 23 14:45:44 crc kubenswrapper[5050]: I1123 14:45:44.835671 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-tpll6"] Nov 23 14:45:44 crc kubenswrapper[5050]: I1123 14:45:44.850123 5050 scope.go:117] "RemoveContainer" containerID="03d5c8091255edd0e6893789b73cb6b86e8ba0b5d04219d4eca6e11ed0df292f" Nov 23 14:45:44 crc kubenswrapper[5050]: I1123 14:45:44.852094 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-wr6q4"] Nov 23 14:45:44 crc kubenswrapper[5050]: I1123 14:45:44.860572 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-wr6q4"] Nov 23 14:45:44 crc kubenswrapper[5050]: I1123 14:45:44.867390 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-n9s79"] Nov 23 14:45:44 crc kubenswrapper[5050]: I1123 14:45:44.869761 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-n9s79"] Nov 23 14:45:44 crc kubenswrapper[5050]: I1123 14:45:44.871775 5050 scope.go:117] "RemoveContainer" containerID="eb07d0ee5705478f18b2b80e68d5725374d7b7a34e1e4782c9e6aad64b6a0be8" Nov 23 14:45:44 crc kubenswrapper[5050]: I1123 14:45:44.879917 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-vpr78"] Nov 23 14:45:44 crc kubenswrapper[5050]: I1123 14:45:44.882325 5050 
kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-vpr78"] Nov 23 14:45:44 crc kubenswrapper[5050]: I1123 14:45:44.887287 5050 scope.go:117] "RemoveContainer" containerID="4465eaae7961fc5e10a757db9c37ae8b6ba0715cc67d3733dcdb5338aba5be7a" Nov 23 14:45:44 crc kubenswrapper[5050]: I1123 14:45:44.903791 5050 scope.go:117] "RemoveContainer" containerID="d57b19fbc42c4856fb288efc6b0f797be2cece55dfc509511b167203ab58ef84" Nov 23 14:45:45 crc kubenswrapper[5050]: I1123 14:45:45.384186 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-4jl4h"] Nov 23 14:45:45 crc kubenswrapper[5050]: E1123 14:45:45.384641 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c57083c1-0f6f-4e07-aa17-218073eaa9c0" containerName="extract-content" Nov 23 14:45:45 crc kubenswrapper[5050]: I1123 14:45:45.384684 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="c57083c1-0f6f-4e07-aa17-218073eaa9c0" containerName="extract-content" Nov 23 14:45:45 crc kubenswrapper[5050]: E1123 14:45:45.384717 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a408f71a-76c2-4c88-9b67-a08257da28eb" containerName="extract-content" Nov 23 14:45:45 crc kubenswrapper[5050]: I1123 14:45:45.384736 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="a408f71a-76c2-4c88-9b67-a08257da28eb" containerName="extract-content" Nov 23 14:45:45 crc kubenswrapper[5050]: E1123 14:45:45.384759 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a408f71a-76c2-4c88-9b67-a08257da28eb" containerName="extract-utilities" Nov 23 14:45:45 crc kubenswrapper[5050]: I1123 14:45:45.384777 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="a408f71a-76c2-4c88-9b67-a08257da28eb" containerName="extract-utilities" Nov 23 14:45:45 crc kubenswrapper[5050]: E1123 14:45:45.384804 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c57083c1-0f6f-4e07-aa17-218073eaa9c0" containerName="extract-utilities" Nov 23 14:45:45 crc kubenswrapper[5050]: I1123 14:45:45.384821 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="c57083c1-0f6f-4e07-aa17-218073eaa9c0" containerName="extract-utilities" Nov 23 14:45:45 crc kubenswrapper[5050]: E1123 14:45:45.384843 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="666a5515-92ff-44ef-8e85-f7205f823416" containerName="extract-utilities" Nov 23 14:45:45 crc kubenswrapper[5050]: I1123 14:45:45.384858 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="666a5515-92ff-44ef-8e85-f7205f823416" containerName="extract-utilities" Nov 23 14:45:45 crc kubenswrapper[5050]: E1123 14:45:45.384880 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="085c7e8e-434a-4bfa-b629-59f2191bc2e9" containerName="extract-utilities" Nov 23 14:45:45 crc kubenswrapper[5050]: I1123 14:45:45.384897 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="085c7e8e-434a-4bfa-b629-59f2191bc2e9" containerName="extract-utilities" Nov 23 14:45:45 crc kubenswrapper[5050]: E1123 14:45:45.384920 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="666a5515-92ff-44ef-8e85-f7205f823416" containerName="registry-server" Nov 23 14:45:45 crc kubenswrapper[5050]: I1123 14:45:45.384940 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="666a5515-92ff-44ef-8e85-f7205f823416" containerName="registry-server" Nov 23 14:45:45 crc kubenswrapper[5050]: E1123 14:45:45.384967 5050 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="085c7e8e-434a-4bfa-b629-59f2191bc2e9" containerName="extract-content" Nov 23 14:45:45 crc kubenswrapper[5050]: I1123 14:45:45.384981 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="085c7e8e-434a-4bfa-b629-59f2191bc2e9" containerName="extract-content" Nov 23 14:45:45 crc kubenswrapper[5050]: E1123 14:45:45.385001 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fef4a22f-705c-4dda-8c74-ace62ff4ce68" containerName="marketplace-operator" Nov 23 14:45:45 crc kubenswrapper[5050]: I1123 14:45:45.385014 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="fef4a22f-705c-4dda-8c74-ace62ff4ce68" containerName="marketplace-operator" Nov 23 14:45:45 crc kubenswrapper[5050]: E1123 14:45:45.385034 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="085c7e8e-434a-4bfa-b629-59f2191bc2e9" containerName="registry-server" Nov 23 14:45:45 crc kubenswrapper[5050]: I1123 14:45:45.385046 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="085c7e8e-434a-4bfa-b629-59f2191bc2e9" containerName="registry-server" Nov 23 14:45:45 crc kubenswrapper[5050]: E1123 14:45:45.385063 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="666a5515-92ff-44ef-8e85-f7205f823416" containerName="extract-content" Nov 23 14:45:45 crc kubenswrapper[5050]: I1123 14:45:45.385075 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="666a5515-92ff-44ef-8e85-f7205f823416" containerName="extract-content" Nov 23 14:45:45 crc kubenswrapper[5050]: E1123 14:45:45.385090 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a408f71a-76c2-4c88-9b67-a08257da28eb" containerName="registry-server" Nov 23 14:45:45 crc kubenswrapper[5050]: I1123 14:45:45.385102 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="a408f71a-76c2-4c88-9b67-a08257da28eb" containerName="registry-server" Nov 23 14:45:45 crc kubenswrapper[5050]: E1123 14:45:45.385119 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c57083c1-0f6f-4e07-aa17-218073eaa9c0" containerName="registry-server" Nov 23 14:45:45 crc kubenswrapper[5050]: I1123 14:45:45.385132 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="c57083c1-0f6f-4e07-aa17-218073eaa9c0" containerName="registry-server" Nov 23 14:45:45 crc kubenswrapper[5050]: I1123 14:45:45.385330 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="fef4a22f-705c-4dda-8c74-ace62ff4ce68" containerName="marketplace-operator" Nov 23 14:45:45 crc kubenswrapper[5050]: I1123 14:45:45.385354 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="085c7e8e-434a-4bfa-b629-59f2191bc2e9" containerName="registry-server" Nov 23 14:45:45 crc kubenswrapper[5050]: I1123 14:45:45.385379 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="a408f71a-76c2-4c88-9b67-a08257da28eb" containerName="registry-server" Nov 23 14:45:45 crc kubenswrapper[5050]: I1123 14:45:45.385397 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="c57083c1-0f6f-4e07-aa17-218073eaa9c0" containerName="registry-server" Nov 23 14:45:45 crc kubenswrapper[5050]: I1123 14:45:45.385414 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="666a5515-92ff-44ef-8e85-f7205f823416" containerName="registry-server" Nov 23 14:45:45 crc kubenswrapper[5050]: I1123 14:45:45.386920 5050 util.go:30] "No sandbox for pod can be found. 
Nov 23 14:45:45 crc kubenswrapper[5050]: I1123 14:45:45.389215 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb"
Nov 23 14:45:45 crc kubenswrapper[5050]: I1123 14:45:45.409232 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-4jl4h"]
Nov 23 14:45:45 crc kubenswrapper[5050]: I1123 14:45:45.420260 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6f8a262c-2ff5-45aa-bd0a-d1c948021ca8-catalog-content\") pod \"redhat-marketplace-4jl4h\" (UID: \"6f8a262c-2ff5-45aa-bd0a-d1c948021ca8\") " pod="openshift-marketplace/redhat-marketplace-4jl4h"
Nov 23 14:45:45 crc kubenswrapper[5050]: I1123 14:45:45.420400 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c69b4\" (UniqueName: \"kubernetes.io/projected/6f8a262c-2ff5-45aa-bd0a-d1c948021ca8-kube-api-access-c69b4\") pod \"redhat-marketplace-4jl4h\" (UID: \"6f8a262c-2ff5-45aa-bd0a-d1c948021ca8\") " pod="openshift-marketplace/redhat-marketplace-4jl4h"
Nov 23 14:45:45 crc kubenswrapper[5050]: I1123 14:45:45.420509 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6f8a262c-2ff5-45aa-bd0a-d1c948021ca8-utilities\") pod \"redhat-marketplace-4jl4h\" (UID: \"6f8a262c-2ff5-45aa-bd0a-d1c948021ca8\") " pod="openshift-marketplace/redhat-marketplace-4jl4h"
Nov 23 14:45:45 crc kubenswrapper[5050]: I1123 14:45:45.521187 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6f8a262c-2ff5-45aa-bd0a-d1c948021ca8-utilities\") pod \"redhat-marketplace-4jl4h\" (UID: \"6f8a262c-2ff5-45aa-bd0a-d1c948021ca8\") " pod="openshift-marketplace/redhat-marketplace-4jl4h"
Nov 23 14:45:45 crc kubenswrapper[5050]: I1123 14:45:45.521626 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6f8a262c-2ff5-45aa-bd0a-d1c948021ca8-catalog-content\") pod \"redhat-marketplace-4jl4h\" (UID: \"6f8a262c-2ff5-45aa-bd0a-d1c948021ca8\") " pod="openshift-marketplace/redhat-marketplace-4jl4h"
Nov 23 14:45:45 crc kubenswrapper[5050]: I1123 14:45:45.521774 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c69b4\" (UniqueName: \"kubernetes.io/projected/6f8a262c-2ff5-45aa-bd0a-d1c948021ca8-kube-api-access-c69b4\") pod \"redhat-marketplace-4jl4h\" (UID: \"6f8a262c-2ff5-45aa-bd0a-d1c948021ca8\") " pod="openshift-marketplace/redhat-marketplace-4jl4h"
Nov 23 14:45:45 crc kubenswrapper[5050]: I1123 14:45:45.521871 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6f8a262c-2ff5-45aa-bd0a-d1c948021ca8-utilities\") pod \"redhat-marketplace-4jl4h\" (UID: \"6f8a262c-2ff5-45aa-bd0a-d1c948021ca8\") " pod="openshift-marketplace/redhat-marketplace-4jl4h"
Nov 23 14:45:45 crc kubenswrapper[5050]: I1123 14:45:45.522327 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6f8a262c-2ff5-45aa-bd0a-d1c948021ca8-catalog-content\") pod \"redhat-marketplace-4jl4h\" (UID: \"6f8a262c-2ff5-45aa-bd0a-d1c948021ca8\") " pod="openshift-marketplace/redhat-marketplace-4jl4h"
Nov 23 14:45:45 crc kubenswrapper[5050]: I1123 14:45:45.543570 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c69b4\" (UniqueName: \"kubernetes.io/projected/6f8a262c-2ff5-45aa-bd0a-d1c948021ca8-kube-api-access-c69b4\") pod \"redhat-marketplace-4jl4h\" (UID: \"6f8a262c-2ff5-45aa-bd0a-d1c948021ca8\") " pod="openshift-marketplace/redhat-marketplace-4jl4h"
Nov 23 14:45:45 crc kubenswrapper[5050]: I1123 14:45:45.582748 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="085c7e8e-434a-4bfa-b629-59f2191bc2e9" path="/var/lib/kubelet/pods/085c7e8e-434a-4bfa-b629-59f2191bc2e9/volumes"
Nov 23 14:45:45 crc kubenswrapper[5050]: I1123 14:45:45.589142 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="666a5515-92ff-44ef-8e85-f7205f823416" path="/var/lib/kubelet/pods/666a5515-92ff-44ef-8e85-f7205f823416/volumes"
Nov 23 14:45:45 crc kubenswrapper[5050]: I1123 14:45:45.589820 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a408f71a-76c2-4c88-9b67-a08257da28eb" path="/var/lib/kubelet/pods/a408f71a-76c2-4c88-9b67-a08257da28eb/volumes"
Nov 23 14:45:45 crc kubenswrapper[5050]: I1123 14:45:45.590956 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c57083c1-0f6f-4e07-aa17-218073eaa9c0" path="/var/lib/kubelet/pods/c57083c1-0f6f-4e07-aa17-218073eaa9c0/volumes"
Nov 23 14:45:45 crc kubenswrapper[5050]: I1123 14:45:45.591666 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fef4a22f-705c-4dda-8c74-ace62ff4ce68" path="/var/lib/kubelet/pods/fef4a22f-705c-4dda-8c74-ace62ff4ce68/volumes"
Nov 23 14:45:45 crc kubenswrapper[5050]: I1123 14:45:45.592113 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-xwz4w"]
Nov 23 14:45:45 crc kubenswrapper[5050]: I1123 14:45:45.595107 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-xwz4w"
Nov 23 14:45:45 crc kubenswrapper[5050]: I1123 14:45:45.597621 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-xwz4w"]
Nov 23 14:45:45 crc kubenswrapper[5050]: I1123 14:45:45.598592 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh"
Nov 23 14:45:45 crc kubenswrapper[5050]: I1123 14:45:45.711032 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb"
Nov 23 14:45:45 crc kubenswrapper[5050]: I1123 14:45:45.718873 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4jl4h"
Nov 23 14:45:45 crc kubenswrapper[5050]: I1123 14:45:45.733929 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8c7ace55-a1c9-441f-8150-fb262b63bf29-utilities\") pod \"redhat-operators-xwz4w\" (UID: \"8c7ace55-a1c9-441f-8150-fb262b63bf29\") " pod="openshift-marketplace/redhat-operators-xwz4w"
Nov 23 14:45:45 crc kubenswrapper[5050]: I1123 14:45:45.734011 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8c7ace55-a1c9-441f-8150-fb262b63bf29-catalog-content\") pod \"redhat-operators-xwz4w\" (UID: \"8c7ace55-a1c9-441f-8150-fb262b63bf29\") " pod="openshift-marketplace/redhat-operators-xwz4w"
Nov 23 14:45:45 crc kubenswrapper[5050]: I1123 14:45:45.734114 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k54q4\" (UniqueName: \"kubernetes.io/projected/8c7ace55-a1c9-441f-8150-fb262b63bf29-kube-api-access-k54q4\") pod \"redhat-operators-xwz4w\" (UID: \"8c7ace55-a1c9-441f-8150-fb262b63bf29\") " pod="openshift-marketplace/redhat-operators-xwz4w"
Nov 23 14:45:45 crc kubenswrapper[5050]: I1123 14:45:45.835574 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8c7ace55-a1c9-441f-8150-fb262b63bf29-utilities\") pod \"redhat-operators-xwz4w\" (UID: \"8c7ace55-a1c9-441f-8150-fb262b63bf29\") " pod="openshift-marketplace/redhat-operators-xwz4w"
Nov 23 14:45:45 crc kubenswrapper[5050]: I1123 14:45:45.836011 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8c7ace55-a1c9-441f-8150-fb262b63bf29-catalog-content\") pod \"redhat-operators-xwz4w\" (UID: \"8c7ace55-a1c9-441f-8150-fb262b63bf29\") " pod="openshift-marketplace/redhat-operators-xwz4w"
Nov 23 14:45:45 crc kubenswrapper[5050]: I1123 14:45:45.836070 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k54q4\" (UniqueName: \"kubernetes.io/projected/8c7ace55-a1c9-441f-8150-fb262b63bf29-kube-api-access-k54q4\") pod \"redhat-operators-xwz4w\" (UID: \"8c7ace55-a1c9-441f-8150-fb262b63bf29\") " pod="openshift-marketplace/redhat-operators-xwz4w"
Nov 23 14:45:45 crc kubenswrapper[5050]: I1123 14:45:45.837227 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8c7ace55-a1c9-441f-8150-fb262b63bf29-utilities\") pod \"redhat-operators-xwz4w\" (UID: \"8c7ace55-a1c9-441f-8150-fb262b63bf29\") " pod="openshift-marketplace/redhat-operators-xwz4w"
Nov 23 14:45:45 crc kubenswrapper[5050]: I1123 14:45:45.837514 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8c7ace55-a1c9-441f-8150-fb262b63bf29-catalog-content\") pod \"redhat-operators-xwz4w\" (UID: \"8c7ace55-a1c9-441f-8150-fb262b63bf29\") " pod="openshift-marketplace/redhat-operators-xwz4w"
Nov 23 14:45:45 crc kubenswrapper[5050]: I1123 14:45:45.857236 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k54q4\" (UniqueName: \"kubernetes.io/projected/8c7ace55-a1c9-441f-8150-fb262b63bf29-kube-api-access-k54q4\") pod \"redhat-operators-xwz4w\" (UID: \"8c7ace55-a1c9-441f-8150-fb262b63bf29\") " pod="openshift-marketplace/redhat-operators-xwz4w"
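
For the emptyDir volumes above, MountVolume.SetUp amounts to creating the per-pod volume directory under the kubelet root, using the same /var/lib/kubelet/pods/<podUID>/volumes/... layout the "Cleaned up orphaned pod volumes dir" entries later remove. A simplified Go sketch under that assumption (the real plugin also handles SELinux labeling and medium=Memory tmpfs mounts; the helper name is invented):

    // Illustrative only: what emptyDir SetUp boils down to on disk.
    package main

    import (
        "fmt"
        "os"
        "path/filepath"
    )

    func emptyDirSetUp(kubeletRoot, podUID, volName string) (string, error) {
        dir := filepath.Join(kubeletRoot, "pods", podUID,
            "volumes", "kubernetes.io~empty-dir", volName)
        // MountVolume.SetUp for emptyDir is essentially "make the directory".
        if err := os.MkdirAll(dir, 0o750); err != nil {
            return "", err
        }
        return dir, nil
    }

    func main() {
        root, _ := os.MkdirTemp("", "kubelet-demo") // stand-in for /var/lib/kubelet
        dir, err := emptyDirSetUp(root, "6f8a262c-2ff5-45aa-bd0a-d1c948021ca8", "utilities")
        if err != nil {
            panic(err)
        }
        fmt.Println("MountVolume.SetUp succeeded for volume \"utilities\":", dir)
    }
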
\"8c7ace55-a1c9-441f-8150-fb262b63bf29\") " pod="openshift-marketplace/redhat-operators-xwz4w" Nov 23 14:45:45 crc kubenswrapper[5050]: I1123 14:45:45.924767 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-xwz4w" Nov 23 14:45:45 crc kubenswrapper[5050]: I1123 14:45:45.965900 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-4jl4h"] Nov 23 14:45:45 crc kubenswrapper[5050]: W1123 14:45:45.975242 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6f8a262c_2ff5_45aa_bd0a_d1c948021ca8.slice/crio-b98bf71c47fd7745b5dd005e44f5cad03e2641d0086f234296afa81e473cd3b3 WatchSource:0}: Error finding container b98bf71c47fd7745b5dd005e44f5cad03e2641d0086f234296afa81e473cd3b3: Status 404 returned error can't find the container with id b98bf71c47fd7745b5dd005e44f5cad03e2641d0086f234296afa81e473cd3b3 Nov 23 14:45:46 crc kubenswrapper[5050]: I1123 14:45:46.354621 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-xwz4w"] Nov 23 14:45:46 crc kubenswrapper[5050]: I1123 14:45:46.763265 5050 generic.go:334] "Generic (PLEG): container finished" podID="8c7ace55-a1c9-441f-8150-fb262b63bf29" containerID="91de7318c5773c434cdc7ff4b81341f44dc70ce18fa0e390e0df498e95dc1769" exitCode=0 Nov 23 14:45:46 crc kubenswrapper[5050]: I1123 14:45:46.763374 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xwz4w" event={"ID":"8c7ace55-a1c9-441f-8150-fb262b63bf29","Type":"ContainerDied","Data":"91de7318c5773c434cdc7ff4b81341f44dc70ce18fa0e390e0df498e95dc1769"} Nov 23 14:45:46 crc kubenswrapper[5050]: I1123 14:45:46.763430 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xwz4w" event={"ID":"8c7ace55-a1c9-441f-8150-fb262b63bf29","Type":"ContainerStarted","Data":"ac6ccff6fc74b214cd33bdb4135e1a5ce47ac1b78a349eeb315810678e6d11a1"} Nov 23 14:45:46 crc kubenswrapper[5050]: I1123 14:45:46.765034 5050 generic.go:334] "Generic (PLEG): container finished" podID="6f8a262c-2ff5-45aa-bd0a-d1c948021ca8" containerID="79dd1ab2f5c6ce6e6b8ba07b5c246ea5100f7a58669dc3a9125d0fa7a6bf8ec2" exitCode=0 Nov 23 14:45:46 crc kubenswrapper[5050]: I1123 14:45:46.765079 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4jl4h" event={"ID":"6f8a262c-2ff5-45aa-bd0a-d1c948021ca8","Type":"ContainerDied","Data":"79dd1ab2f5c6ce6e6b8ba07b5c246ea5100f7a58669dc3a9125d0fa7a6bf8ec2"} Nov 23 14:45:46 crc kubenswrapper[5050]: I1123 14:45:46.765156 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4jl4h" event={"ID":"6f8a262c-2ff5-45aa-bd0a-d1c948021ca8","Type":"ContainerStarted","Data":"b98bf71c47fd7745b5dd005e44f5cad03e2641d0086f234296afa81e473cd3b3"} Nov 23 14:45:47 crc kubenswrapper[5050]: I1123 14:45:47.774982 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-ztszf"] Nov 23 14:45:47 crc kubenswrapper[5050]: I1123 14:45:47.776843 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-ztszf" Nov 23 14:45:47 crc kubenswrapper[5050]: I1123 14:45:47.784017 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Nov 23 14:45:47 crc kubenswrapper[5050]: I1123 14:45:47.788893 5050 generic.go:334] "Generic (PLEG): container finished" podID="6f8a262c-2ff5-45aa-bd0a-d1c948021ca8" containerID="41be407e26a70a20c0e18e966879bd142bcf3f2f3fdd6e5e38c43875aed53d94" exitCode=0 Nov 23 14:45:47 crc kubenswrapper[5050]: I1123 14:45:47.789485 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4jl4h" event={"ID":"6f8a262c-2ff5-45aa-bd0a-d1c948021ca8","Type":"ContainerDied","Data":"41be407e26a70a20c0e18e966879bd142bcf3f2f3fdd6e5e38c43875aed53d94"} Nov 23 14:45:47 crc kubenswrapper[5050]: I1123 14:45:47.791374 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-ztszf"] Nov 23 14:45:47 crc kubenswrapper[5050]: I1123 14:45:47.794000 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xwz4w" event={"ID":"8c7ace55-a1c9-441f-8150-fb262b63bf29","Type":"ContainerStarted","Data":"fbb826c7e8c9673c93c7514f65501986b4212564ea49ca2a37592e6b1bdf9b27"} Nov 23 14:45:47 crc kubenswrapper[5050]: I1123 14:45:47.967603 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jw2xm\" (UniqueName: \"kubernetes.io/projected/e12fe3e5-acc0-402b-aa45-164b108828d8-kube-api-access-jw2xm\") pod \"certified-operators-ztszf\" (UID: \"e12fe3e5-acc0-402b-aa45-164b108828d8\") " pod="openshift-marketplace/certified-operators-ztszf" Nov 23 14:45:47 crc kubenswrapper[5050]: I1123 14:45:47.968346 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e12fe3e5-acc0-402b-aa45-164b108828d8-utilities\") pod \"certified-operators-ztszf\" (UID: \"e12fe3e5-acc0-402b-aa45-164b108828d8\") " pod="openshift-marketplace/certified-operators-ztszf" Nov 23 14:45:47 crc kubenswrapper[5050]: I1123 14:45:47.968413 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e12fe3e5-acc0-402b-aa45-164b108828d8-catalog-content\") pod \"certified-operators-ztszf\" (UID: \"e12fe3e5-acc0-402b-aa45-164b108828d8\") " pod="openshift-marketplace/certified-operators-ztszf" Nov 23 14:45:47 crc kubenswrapper[5050]: I1123 14:45:47.978247 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-tff2j"] Nov 23 14:45:47 crc kubenswrapper[5050]: I1123 14:45:47.979911 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-tff2j" Nov 23 14:45:47 crc kubenswrapper[5050]: I1123 14:45:47.983136 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Nov 23 14:45:48 crc kubenswrapper[5050]: I1123 14:45:48.002330 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-tff2j"] Nov 23 14:45:48 crc kubenswrapper[5050]: I1123 14:45:48.070552 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jw2xm\" (UniqueName: \"kubernetes.io/projected/e12fe3e5-acc0-402b-aa45-164b108828d8-kube-api-access-jw2xm\") pod \"certified-operators-ztszf\" (UID: \"e12fe3e5-acc0-402b-aa45-164b108828d8\") " pod="openshift-marketplace/certified-operators-ztszf" Nov 23 14:45:48 crc kubenswrapper[5050]: I1123 14:45:48.070700 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e12fe3e5-acc0-402b-aa45-164b108828d8-utilities\") pod \"certified-operators-ztszf\" (UID: \"e12fe3e5-acc0-402b-aa45-164b108828d8\") " pod="openshift-marketplace/certified-operators-ztszf" Nov 23 14:45:48 crc kubenswrapper[5050]: I1123 14:45:48.070774 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e12fe3e5-acc0-402b-aa45-164b108828d8-catalog-content\") pod \"certified-operators-ztszf\" (UID: \"e12fe3e5-acc0-402b-aa45-164b108828d8\") " pod="openshift-marketplace/certified-operators-ztszf" Nov 23 14:45:48 crc kubenswrapper[5050]: I1123 14:45:48.072173 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e12fe3e5-acc0-402b-aa45-164b108828d8-utilities\") pod \"certified-operators-ztszf\" (UID: \"e12fe3e5-acc0-402b-aa45-164b108828d8\") " pod="openshift-marketplace/certified-operators-ztszf" Nov 23 14:45:48 crc kubenswrapper[5050]: I1123 14:45:48.072626 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e12fe3e5-acc0-402b-aa45-164b108828d8-catalog-content\") pod \"certified-operators-ztszf\" (UID: \"e12fe3e5-acc0-402b-aa45-164b108828d8\") " pod="openshift-marketplace/certified-operators-ztszf" Nov 23 14:45:48 crc kubenswrapper[5050]: I1123 14:45:48.096158 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jw2xm\" (UniqueName: \"kubernetes.io/projected/e12fe3e5-acc0-402b-aa45-164b108828d8-kube-api-access-jw2xm\") pod \"certified-operators-ztszf\" (UID: \"e12fe3e5-acc0-402b-aa45-164b108828d8\") " pod="openshift-marketplace/certified-operators-ztszf" Nov 23 14:45:48 crc kubenswrapper[5050]: I1123 14:45:48.116012 5050 util.go:30] "No sandbox for pod can be found. 
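
The "Generic (PLEG)" entries in this block come from the pod lifecycle event generator diffing container state between relists: a container previously seen running and now exited yields ContainerDied, and a newly observed running container yields ContainerStarted. A toy relist diff in Go, with invented types, just to make the pattern concrete (the real PLEG is considerably more involved):

    package main

    import "fmt"

    type containerState string

    const (
        running containerState = "running"
        exited  containerState = "exited"
    )

    type event struct{ podID, containerID, kind string }

    // relist compares the previous and current container states for one pod
    // and emits the lifecycle events the diff implies.
    func relist(old, cur map[string]containerState, podID string) []event {
        var events []event
        for id, state := range cur {
            prev, seen := old[id]
            switch {
            case !seen && state == running:
                events = append(events, event{podID, id, "ContainerStarted"})
            case seen && prev == running && state == exited:
                events = append(events, event{podID, id, "ContainerDied"})
            }
        }
        return events
    }

    func main() {
        old := map[string]containerState{"3d4ab232": running}
        cur := map[string]containerState{"3d4ab232": exited, "657da5f9": running}
        for _, e := range relist(old, cur, "openshift-marketplace/certified-operators-ztszf") {
            fmt.Printf("SyncLoop (PLEG): event for pod %s: %s %s\n", e.podID, e.kind, e.containerID)
        }
    }
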
Nov 23 14:45:48 crc kubenswrapper[5050]: I1123 14:45:48.172958 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5e8a1e80-a74e-4298-a915-aef2517690c7-catalog-content\") pod \"community-operators-tff2j\" (UID: \"5e8a1e80-a74e-4298-a915-aef2517690c7\") " pod="openshift-marketplace/community-operators-tff2j"
Nov 23 14:45:48 crc kubenswrapper[5050]: I1123 14:45:48.173047 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5e8a1e80-a74e-4298-a915-aef2517690c7-utilities\") pod \"community-operators-tff2j\" (UID: \"5e8a1e80-a74e-4298-a915-aef2517690c7\") " pod="openshift-marketplace/community-operators-tff2j"
Nov 23 14:45:48 crc kubenswrapper[5050]: I1123 14:45:48.173149 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5tp7s\" (UniqueName: \"kubernetes.io/projected/5e8a1e80-a74e-4298-a915-aef2517690c7-kube-api-access-5tp7s\") pod \"community-operators-tff2j\" (UID: \"5e8a1e80-a74e-4298-a915-aef2517690c7\") " pod="openshift-marketplace/community-operators-tff2j"
Nov 23 14:45:48 crc kubenswrapper[5050]: I1123 14:45:48.274805 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5e8a1e80-a74e-4298-a915-aef2517690c7-catalog-content\") pod \"community-operators-tff2j\" (UID: \"5e8a1e80-a74e-4298-a915-aef2517690c7\") " pod="openshift-marketplace/community-operators-tff2j"
Nov 23 14:45:48 crc kubenswrapper[5050]: I1123 14:45:48.275604 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5e8a1e80-a74e-4298-a915-aef2517690c7-catalog-content\") pod \"community-operators-tff2j\" (UID: \"5e8a1e80-a74e-4298-a915-aef2517690c7\") " pod="openshift-marketplace/community-operators-tff2j"
Nov 23 14:45:48 crc kubenswrapper[5050]: I1123 14:45:48.275665 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5e8a1e80-a74e-4298-a915-aef2517690c7-utilities\") pod \"community-operators-tff2j\" (UID: \"5e8a1e80-a74e-4298-a915-aef2517690c7\") " pod="openshift-marketplace/community-operators-tff2j"
Nov 23 14:45:48 crc kubenswrapper[5050]: I1123 14:45:48.275739 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5tp7s\" (UniqueName: \"kubernetes.io/projected/5e8a1e80-a74e-4298-a915-aef2517690c7-kube-api-access-5tp7s\") pod \"community-operators-tff2j\" (UID: \"5e8a1e80-a74e-4298-a915-aef2517690c7\") " pod="openshift-marketplace/community-operators-tff2j"
Nov 23 14:45:48 crc kubenswrapper[5050]: I1123 14:45:48.276224 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5e8a1e80-a74e-4298-a915-aef2517690c7-utilities\") pod \"community-operators-tff2j\" (UID: \"5e8a1e80-a74e-4298-a915-aef2517690c7\") " pod="openshift-marketplace/community-operators-tff2j"
Nov 23 14:45:48 crc kubenswrapper[5050]: I1123 14:45:48.300024 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5tp7s\" (UniqueName: \"kubernetes.io/projected/5e8a1e80-a74e-4298-a915-aef2517690c7-kube-api-access-5tp7s\") pod \"community-operators-tff2j\" (UID: \"5e8a1e80-a74e-4298-a915-aef2517690c7\") " pod="openshift-marketplace/community-operators-tff2j"
Nov 23 14:45:48 crc kubenswrapper[5050]: I1123 14:45:48.327785 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-tff2j"
Nov 23 14:45:48 crc kubenswrapper[5050]: I1123 14:45:48.345148 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-ztszf"]
Nov 23 14:45:48 crc kubenswrapper[5050]: W1123 14:45:48.352469 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode12fe3e5_acc0_402b_aa45_164b108828d8.slice/crio-657da5f9c2f3e2ae7de8be8eb7b7c8e30abf733b8763bec3139a440844941c57 WatchSource:0}: Error finding container 657da5f9c2f3e2ae7de8be8eb7b7c8e30abf733b8763bec3139a440844941c57: Status 404 returned error can't find the container with id 657da5f9c2f3e2ae7de8be8eb7b7c8e30abf733b8763bec3139a440844941c57
Nov 23 14:45:48 crc kubenswrapper[5050]: I1123 14:45:48.579394 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-tff2j"]
Nov 23 14:45:48 crc kubenswrapper[5050]: W1123 14:45:48.612785 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5e8a1e80_a74e_4298_a915_aef2517690c7.slice/crio-4768f3c6d02bf23199b5fba0dde547e5876248be6b4cb371035876ab7bd1ab08 WatchSource:0}: Error finding container 4768f3c6d02bf23199b5fba0dde547e5876248be6b4cb371035876ab7bd1ab08: Status 404 returned error can't find the container with id 4768f3c6d02bf23199b5fba0dde547e5876248be6b4cb371035876ab7bd1ab08
Nov 23 14:45:48 crc kubenswrapper[5050]: I1123 14:45:48.810111 5050 generic.go:334] "Generic (PLEG): container finished" podID="5e8a1e80-a74e-4298-a915-aef2517690c7" containerID="61cd20f618d509f50b9082f4b02231810e1770b1bd2608bff8a1acc07ace8285" exitCode=0
Nov 23 14:45:48 crc kubenswrapper[5050]: I1123 14:45:48.810619 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tff2j" event={"ID":"5e8a1e80-a74e-4298-a915-aef2517690c7","Type":"ContainerDied","Data":"61cd20f618d509f50b9082f4b02231810e1770b1bd2608bff8a1acc07ace8285"}
Nov 23 14:45:48 crc kubenswrapper[5050]: I1123 14:45:48.810649 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tff2j" event={"ID":"5e8a1e80-a74e-4298-a915-aef2517690c7","Type":"ContainerStarted","Data":"4768f3c6d02bf23199b5fba0dde547e5876248be6b4cb371035876ab7bd1ab08"}
Nov 23 14:45:48 crc kubenswrapper[5050]: I1123 14:45:48.816364 5050 generic.go:334] "Generic (PLEG): container finished" podID="e12fe3e5-acc0-402b-aa45-164b108828d8" containerID="3d4ab232ebc2ea3765ff8fd78401255345706a777e14572bbf1ef844e6629638" exitCode=0
Nov 23 14:45:48 crc kubenswrapper[5050]: I1123 14:45:48.816412 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ztszf" event={"ID":"e12fe3e5-acc0-402b-aa45-164b108828d8","Type":"ContainerDied","Data":"3d4ab232ebc2ea3765ff8fd78401255345706a777e14572bbf1ef844e6629638"}
Nov 23 14:45:48 crc kubenswrapper[5050]: I1123 14:45:48.816434 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ztszf" event={"ID":"e12fe3e5-acc0-402b-aa45-164b108828d8","Type":"ContainerStarted","Data":"657da5f9c2f3e2ae7de8be8eb7b7c8e30abf733b8763bec3139a440844941c57"}
Nov 23 14:45:48 crc kubenswrapper[5050]: I1123 14:45:48.819096 5050 generic.go:334] "Generic (PLEG): container finished" podID="8c7ace55-a1c9-441f-8150-fb262b63bf29" containerID="fbb826c7e8c9673c93c7514f65501986b4212564ea49ca2a37592e6b1bdf9b27" exitCode=0
Nov 23 14:45:48 crc kubenswrapper[5050]: I1123 14:45:48.819171 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xwz4w" event={"ID":"8c7ace55-a1c9-441f-8150-fb262b63bf29","Type":"ContainerDied","Data":"fbb826c7e8c9673c93c7514f65501986b4212564ea49ca2a37592e6b1bdf9b27"}
Nov 23 14:45:49 crc kubenswrapper[5050]: I1123 14:45:49.842548 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xwz4w" event={"ID":"8c7ace55-a1c9-441f-8150-fb262b63bf29","Type":"ContainerStarted","Data":"a574363e78571841b7e3f56434cc5998fb9998d594b6a5cd8174da92b34bf41c"}
Nov 23 14:45:49 crc kubenswrapper[5050]: I1123 14:45:49.848428 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tff2j" event={"ID":"5e8a1e80-a74e-4298-a915-aef2517690c7","Type":"ContainerStarted","Data":"ba843b86ace7189047a9f9adb07e14832601fbcfffe271cf33c1d539fb946725"}
Nov 23 14:45:49 crc kubenswrapper[5050]: I1123 14:45:49.852493 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4jl4h" event={"ID":"6f8a262c-2ff5-45aa-bd0a-d1c948021ca8","Type":"ContainerStarted","Data":"3a27e8289bc247354d4a85bf381aae2f4e7429037e21932b3f396210cd98d6bb"}
Nov 23 14:45:49 crc kubenswrapper[5050]: I1123 14:45:49.855277 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ztszf" event={"ID":"e12fe3e5-acc0-402b-aa45-164b108828d8","Type":"ContainerStarted","Data":"9b8a9d9ccbfc31d57018b197ac5bfe4451719f094aeef21c01ceabf999c5102a"}
Nov 23 14:45:49 crc kubenswrapper[5050]: I1123 14:45:49.874439 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-xwz4w" podStartSLOduration=2.44976746 podStartE2EDuration="4.874405736s" podCreationTimestamp="2025-11-23 14:45:45 +0000 UTC" firstStartedPulling="2025-11-23 14:45:46.765725556 +0000 UTC m=+241.932722041" lastFinishedPulling="2025-11-23 14:45:49.190363792 +0000 UTC m=+244.357360317" observedRunningTime="2025-11-23 14:45:49.874082907 +0000 UTC m=+245.041079402" watchObservedRunningTime="2025-11-23 14:45:49.874405736 +0000 UTC m=+245.041402251"
Nov 23 14:45:49 crc kubenswrapper[5050]: I1123 14:45:49.908108 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-4jl4h" podStartSLOduration=2.995341968 podStartE2EDuration="4.908082658s" podCreationTimestamp="2025-11-23 14:45:45 +0000 UTC" firstStartedPulling="2025-11-23 14:45:46.766367744 +0000 UTC m=+241.933364229" lastFinishedPulling="2025-11-23 14:45:48.679108434 +0000 UTC m=+243.846104919" observedRunningTime="2025-11-23 14:45:49.904907269 +0000 UTC m=+245.071903764" watchObservedRunningTime="2025-11-23 14:45:49.908082658 +0000 UTC m=+245.075079153"
Nov 23 14:45:50 crc kubenswrapper[5050]: I1123 14:45:50.864260 5050 generic.go:334] "Generic (PLEG): container finished" podID="5e8a1e80-a74e-4298-a915-aef2517690c7" containerID="ba843b86ace7189047a9f9adb07e14832601fbcfffe271cf33c1d539fb946725" exitCode=0
Nov 23 14:45:50 crc kubenswrapper[5050]: I1123 14:45:50.864387 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tff2j" event={"ID":"5e8a1e80-a74e-4298-a915-aef2517690c7","Type":"ContainerDied","Data":"ba843b86ace7189047a9f9adb07e14832601fbcfffe271cf33c1d539fb946725"}
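
The startup-latency entries above are internally consistent with a simple relationship: podStartE2EDuration is watchObservedRunningTime minus podCreationTimestamp, and podStartSLOduration appears to be that E2E figure minus the image-pull window measured on the monotonic (m=...) clock. This is a reading of the logged numbers, not a statement of the tracker's implementation; a quick check against the redhat-operators-xwz4w entry:

    // Verifies the arithmetic using the values logged above.
    package main

    import "fmt"

    func main() {
        e2e := 4.874405736                    // podStartE2EDuration, seconds
        pull := 244.357360317 - 241.932722041 // lastFinishedPulling - firstStartedPulling (m= clock)
        fmt.Printf("pull window: %.9f s\n", pull)     // 2.424638276
        fmt.Printf("e2e - pull:  %.9f s\n", e2e-pull) // 2.449767460, matching podStartSLOduration=2.44976746
    }

The redhat-marketplace-4jl4h entry checks out the same way: 4.908082658 - (243.846104919 - 241.933364229) = 2.995341968.
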
event={"ID":"5e8a1e80-a74e-4298-a915-aef2517690c7","Type":"ContainerDied","Data":"ba843b86ace7189047a9f9adb07e14832601fbcfffe271cf33c1d539fb946725"} Nov 23 14:45:50 crc kubenswrapper[5050]: I1123 14:45:50.867086 5050 generic.go:334] "Generic (PLEG): container finished" podID="e12fe3e5-acc0-402b-aa45-164b108828d8" containerID="9b8a9d9ccbfc31d57018b197ac5bfe4451719f094aeef21c01ceabf999c5102a" exitCode=0 Nov 23 14:45:50 crc kubenswrapper[5050]: I1123 14:45:50.867636 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ztszf" event={"ID":"e12fe3e5-acc0-402b-aa45-164b108828d8","Type":"ContainerDied","Data":"9b8a9d9ccbfc31d57018b197ac5bfe4451719f094aeef21c01ceabf999c5102a"} Nov 23 14:45:50 crc kubenswrapper[5050]: I1123 14:45:50.867705 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ztszf" event={"ID":"e12fe3e5-acc0-402b-aa45-164b108828d8","Type":"ContainerStarted","Data":"13bd0fab271e3234ae2ad3062178a0470d5247e8adffc137f0a7ea5ac769c6d9"} Nov 23 14:45:50 crc kubenswrapper[5050]: I1123 14:45:50.907861 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-ztszf" podStartSLOduration=2.339602117 podStartE2EDuration="3.907835436s" podCreationTimestamp="2025-11-23 14:45:47 +0000 UTC" firstStartedPulling="2025-11-23 14:45:48.817851677 +0000 UTC m=+243.984848162" lastFinishedPulling="2025-11-23 14:45:50.386084986 +0000 UTC m=+245.553081481" observedRunningTime="2025-11-23 14:45:50.902878968 +0000 UTC m=+246.069875483" watchObservedRunningTime="2025-11-23 14:45:50.907835436 +0000 UTC m=+246.074831922" Nov 23 14:45:51 crc kubenswrapper[5050]: I1123 14:45:51.877783 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tff2j" event={"ID":"5e8a1e80-a74e-4298-a915-aef2517690c7","Type":"ContainerStarted","Data":"8f99b614fabad3f865fefae9fdefb783fbd8bb5d86ac9c05cd151f905858b9cf"} Nov 23 14:45:51 crc kubenswrapper[5050]: I1123 14:45:51.907901 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-tff2j" podStartSLOduration=2.452606931 podStartE2EDuration="4.907871144s" podCreationTimestamp="2025-11-23 14:45:47 +0000 UTC" firstStartedPulling="2025-11-23 14:45:48.811819728 +0000 UTC m=+243.978816213" lastFinishedPulling="2025-11-23 14:45:51.26708394 +0000 UTC m=+246.434080426" observedRunningTime="2025-11-23 14:45:51.904775427 +0000 UTC m=+247.071771912" watchObservedRunningTime="2025-11-23 14:45:51.907871144 +0000 UTC m=+247.074867619" Nov 23 14:45:55 crc kubenswrapper[5050]: I1123 14:45:55.719433 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-4jl4h" Nov 23 14:45:55 crc kubenswrapper[5050]: I1123 14:45:55.720189 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-4jl4h" Nov 23 14:45:55 crc kubenswrapper[5050]: I1123 14:45:55.803989 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-4jl4h" Nov 23 14:45:55 crc kubenswrapper[5050]: I1123 14:45:55.925841 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-xwz4w" Nov 23 14:45:55 crc kubenswrapper[5050]: I1123 14:45:55.926064 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" 
pod="openshift-marketplace/redhat-operators-xwz4w" Nov 23 14:45:55 crc kubenswrapper[5050]: I1123 14:45:55.950164 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-4jl4h" Nov 23 14:45:55 crc kubenswrapper[5050]: I1123 14:45:55.966559 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-xwz4w" Nov 23 14:45:56 crc kubenswrapper[5050]: I1123 14:45:56.972967 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-xwz4w" Nov 23 14:45:58 crc kubenswrapper[5050]: I1123 14:45:58.116909 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-ztszf" Nov 23 14:45:58 crc kubenswrapper[5050]: I1123 14:45:58.117010 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-ztszf" Nov 23 14:45:58 crc kubenswrapper[5050]: I1123 14:45:58.164867 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-ztszf" Nov 23 14:45:58 crc kubenswrapper[5050]: I1123 14:45:58.329033 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-tff2j" Nov 23 14:45:58 crc kubenswrapper[5050]: I1123 14:45:58.329094 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-tff2j" Nov 23 14:45:58 crc kubenswrapper[5050]: I1123 14:45:58.372240 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-tff2j" Nov 23 14:45:58 crc kubenswrapper[5050]: I1123 14:45:58.974557 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-ztszf" Nov 23 14:45:58 crc kubenswrapper[5050]: I1123 14:45:58.975617 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-tff2j" Nov 23 14:47:29 crc kubenswrapper[5050]: I1123 14:47:29.224383 5050 patch_prober.go:28] interesting pod/machine-config-daemon-hlrlq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 23 14:47:29 crc kubenswrapper[5050]: I1123 14:47:29.225566 5050 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 23 14:47:59 crc kubenswrapper[5050]: I1123 14:47:59.225263 5050 patch_prober.go:28] interesting pod/machine-config-daemon-hlrlq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 23 14:47:59 crc kubenswrapper[5050]: I1123 14:47:59.226177 5050 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" probeResult="failure" output="Get 
\"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 23 14:48:29 crc kubenswrapper[5050]: I1123 14:48:29.225109 5050 patch_prober.go:28] interesting pod/machine-config-daemon-hlrlq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 23 14:48:29 crc kubenswrapper[5050]: I1123 14:48:29.225806 5050 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 23 14:48:29 crc kubenswrapper[5050]: I1123 14:48:29.225875 5050 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" Nov 23 14:48:29 crc kubenswrapper[5050]: I1123 14:48:29.227011 5050 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"86f3ccfe0d53f78ac9dca993751796d574dec20f49c1a0cf613c3239d4e75023"} pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 23 14:48:29 crc kubenswrapper[5050]: I1123 14:48:29.227087 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" containerID="cri-o://86f3ccfe0d53f78ac9dca993751796d574dec20f49c1a0cf613c3239d4e75023" gracePeriod=600 Nov 23 14:48:30 crc kubenswrapper[5050]: I1123 14:48:30.158938 5050 generic.go:334] "Generic (PLEG): container finished" podID="1d998909-9470-47ef-87e8-b34f0473682f" containerID="86f3ccfe0d53f78ac9dca993751796d574dec20f49c1a0cf613c3239d4e75023" exitCode=0 Nov 23 14:48:30 crc kubenswrapper[5050]: I1123 14:48:30.159019 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" event={"ID":"1d998909-9470-47ef-87e8-b34f0473682f","Type":"ContainerDied","Data":"86f3ccfe0d53f78ac9dca993751796d574dec20f49c1a0cf613c3239d4e75023"} Nov 23 14:48:30 crc kubenswrapper[5050]: I1123 14:48:30.159299 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" event={"ID":"1d998909-9470-47ef-87e8-b34f0473682f","Type":"ContainerStarted","Data":"496733c812e2a0aa8f64801990fbfc3b73c223752a44cbf12486cae5f9ea9838"} Nov 23 14:48:30 crc kubenswrapper[5050]: I1123 14:48:30.159323 5050 scope.go:117] "RemoveContainer" containerID="b97978773311507f492064330c0ebbaf7862255a02347599c52ce6bbd13d8922" Nov 23 14:49:23 crc kubenswrapper[5050]: I1123 14:49:23.348878 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-pf2tq"] Nov 23 14:49:23 crc kubenswrapper[5050]: I1123 14:49:23.352919 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-pf2tq" Nov 23 14:49:23 crc kubenswrapper[5050]: I1123 14:49:23.374343 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-pf2tq"] Nov 23 14:49:23 crc kubenswrapper[5050]: I1123 14:49:23.473308 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/07f52b6d-9146-46cc-91ed-4a6c99b09ca4-registry-certificates\") pod \"image-registry-66df7c8f76-pf2tq\" (UID: \"07f52b6d-9146-46cc-91ed-4a6c99b09ca4\") " pod="openshift-image-registry/image-registry-66df7c8f76-pf2tq" Nov 23 14:49:23 crc kubenswrapper[5050]: I1123 14:49:23.473393 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/07f52b6d-9146-46cc-91ed-4a6c99b09ca4-ca-trust-extracted\") pod \"image-registry-66df7c8f76-pf2tq\" (UID: \"07f52b6d-9146-46cc-91ed-4a6c99b09ca4\") " pod="openshift-image-registry/image-registry-66df7c8f76-pf2tq" Nov 23 14:49:23 crc kubenswrapper[5050]: I1123 14:49:23.473421 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/07f52b6d-9146-46cc-91ed-4a6c99b09ca4-bound-sa-token\") pod \"image-registry-66df7c8f76-pf2tq\" (UID: \"07f52b6d-9146-46cc-91ed-4a6c99b09ca4\") " pod="openshift-image-registry/image-registry-66df7c8f76-pf2tq" Nov 23 14:49:23 crc kubenswrapper[5050]: I1123 14:49:23.473496 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-pf2tq\" (UID: \"07f52b6d-9146-46cc-91ed-4a6c99b09ca4\") " pod="openshift-image-registry/image-registry-66df7c8f76-pf2tq" Nov 23 14:49:23 crc kubenswrapper[5050]: I1123 14:49:23.473542 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7p27t\" (UniqueName: \"kubernetes.io/projected/07f52b6d-9146-46cc-91ed-4a6c99b09ca4-kube-api-access-7p27t\") pod \"image-registry-66df7c8f76-pf2tq\" (UID: \"07f52b6d-9146-46cc-91ed-4a6c99b09ca4\") " pod="openshift-image-registry/image-registry-66df7c8f76-pf2tq" Nov 23 14:49:23 crc kubenswrapper[5050]: I1123 14:49:23.473625 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/07f52b6d-9146-46cc-91ed-4a6c99b09ca4-trusted-ca\") pod \"image-registry-66df7c8f76-pf2tq\" (UID: \"07f52b6d-9146-46cc-91ed-4a6c99b09ca4\") " pod="openshift-image-registry/image-registry-66df7c8f76-pf2tq" Nov 23 14:49:23 crc kubenswrapper[5050]: I1123 14:49:23.473666 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/07f52b6d-9146-46cc-91ed-4a6c99b09ca4-registry-tls\") pod \"image-registry-66df7c8f76-pf2tq\" (UID: \"07f52b6d-9146-46cc-91ed-4a6c99b09ca4\") " pod="openshift-image-registry/image-registry-66df7c8f76-pf2tq" Nov 23 14:49:23 crc kubenswrapper[5050]: I1123 14:49:23.473718 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: 
\"kubernetes.io/secret/07f52b6d-9146-46cc-91ed-4a6c99b09ca4-installation-pull-secrets\") pod \"image-registry-66df7c8f76-pf2tq\" (UID: \"07f52b6d-9146-46cc-91ed-4a6c99b09ca4\") " pod="openshift-image-registry/image-registry-66df7c8f76-pf2tq" Nov 23 14:49:23 crc kubenswrapper[5050]: I1123 14:49:23.499854 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-pf2tq\" (UID: \"07f52b6d-9146-46cc-91ed-4a6c99b09ca4\") " pod="openshift-image-registry/image-registry-66df7c8f76-pf2tq" Nov 23 14:49:23 crc kubenswrapper[5050]: I1123 14:49:23.575299 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/07f52b6d-9146-46cc-91ed-4a6c99b09ca4-trusted-ca\") pod \"image-registry-66df7c8f76-pf2tq\" (UID: \"07f52b6d-9146-46cc-91ed-4a6c99b09ca4\") " pod="openshift-image-registry/image-registry-66df7c8f76-pf2tq" Nov 23 14:49:23 crc kubenswrapper[5050]: I1123 14:49:23.575835 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/07f52b6d-9146-46cc-91ed-4a6c99b09ca4-registry-tls\") pod \"image-registry-66df7c8f76-pf2tq\" (UID: \"07f52b6d-9146-46cc-91ed-4a6c99b09ca4\") " pod="openshift-image-registry/image-registry-66df7c8f76-pf2tq" Nov 23 14:49:23 crc kubenswrapper[5050]: I1123 14:49:23.576421 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/07f52b6d-9146-46cc-91ed-4a6c99b09ca4-installation-pull-secrets\") pod \"image-registry-66df7c8f76-pf2tq\" (UID: \"07f52b6d-9146-46cc-91ed-4a6c99b09ca4\") " pod="openshift-image-registry/image-registry-66df7c8f76-pf2tq" Nov 23 14:49:23 crc kubenswrapper[5050]: I1123 14:49:23.577466 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/07f52b6d-9146-46cc-91ed-4a6c99b09ca4-registry-certificates\") pod \"image-registry-66df7c8f76-pf2tq\" (UID: \"07f52b6d-9146-46cc-91ed-4a6c99b09ca4\") " pod="openshift-image-registry/image-registry-66df7c8f76-pf2tq" Nov 23 14:49:23 crc kubenswrapper[5050]: I1123 14:49:23.577640 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/07f52b6d-9146-46cc-91ed-4a6c99b09ca4-ca-trust-extracted\") pod \"image-registry-66df7c8f76-pf2tq\" (UID: \"07f52b6d-9146-46cc-91ed-4a6c99b09ca4\") " pod="openshift-image-registry/image-registry-66df7c8f76-pf2tq" Nov 23 14:49:23 crc kubenswrapper[5050]: I1123 14:49:23.577732 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/07f52b6d-9146-46cc-91ed-4a6c99b09ca4-bound-sa-token\") pod \"image-registry-66df7c8f76-pf2tq\" (UID: \"07f52b6d-9146-46cc-91ed-4a6c99b09ca4\") " pod="openshift-image-registry/image-registry-66df7c8f76-pf2tq" Nov 23 14:49:23 crc kubenswrapper[5050]: I1123 14:49:23.577829 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7p27t\" (UniqueName: \"kubernetes.io/projected/07f52b6d-9146-46cc-91ed-4a6c99b09ca4-kube-api-access-7p27t\") pod \"image-registry-66df7c8f76-pf2tq\" (UID: \"07f52b6d-9146-46cc-91ed-4a6c99b09ca4\") " 
pod="openshift-image-registry/image-registry-66df7c8f76-pf2tq" Nov 23 14:49:23 crc kubenswrapper[5050]: I1123 14:49:23.578267 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/07f52b6d-9146-46cc-91ed-4a6c99b09ca4-trusted-ca\") pod \"image-registry-66df7c8f76-pf2tq\" (UID: \"07f52b6d-9146-46cc-91ed-4a6c99b09ca4\") " pod="openshift-image-registry/image-registry-66df7c8f76-pf2tq" Nov 23 14:49:23 crc kubenswrapper[5050]: I1123 14:49:23.578561 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/07f52b6d-9146-46cc-91ed-4a6c99b09ca4-ca-trust-extracted\") pod \"image-registry-66df7c8f76-pf2tq\" (UID: \"07f52b6d-9146-46cc-91ed-4a6c99b09ca4\") " pod="openshift-image-registry/image-registry-66df7c8f76-pf2tq" Nov 23 14:49:23 crc kubenswrapper[5050]: I1123 14:49:23.580621 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/07f52b6d-9146-46cc-91ed-4a6c99b09ca4-registry-certificates\") pod \"image-registry-66df7c8f76-pf2tq\" (UID: \"07f52b6d-9146-46cc-91ed-4a6c99b09ca4\") " pod="openshift-image-registry/image-registry-66df7c8f76-pf2tq" Nov 23 14:49:23 crc kubenswrapper[5050]: I1123 14:49:23.586707 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/07f52b6d-9146-46cc-91ed-4a6c99b09ca4-registry-tls\") pod \"image-registry-66df7c8f76-pf2tq\" (UID: \"07f52b6d-9146-46cc-91ed-4a6c99b09ca4\") " pod="openshift-image-registry/image-registry-66df7c8f76-pf2tq" Nov 23 14:49:23 crc kubenswrapper[5050]: I1123 14:49:23.588031 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/07f52b6d-9146-46cc-91ed-4a6c99b09ca4-installation-pull-secrets\") pod \"image-registry-66df7c8f76-pf2tq\" (UID: \"07f52b6d-9146-46cc-91ed-4a6c99b09ca4\") " pod="openshift-image-registry/image-registry-66df7c8f76-pf2tq" Nov 23 14:49:23 crc kubenswrapper[5050]: I1123 14:49:23.596894 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7p27t\" (UniqueName: \"kubernetes.io/projected/07f52b6d-9146-46cc-91ed-4a6c99b09ca4-kube-api-access-7p27t\") pod \"image-registry-66df7c8f76-pf2tq\" (UID: \"07f52b6d-9146-46cc-91ed-4a6c99b09ca4\") " pod="openshift-image-registry/image-registry-66df7c8f76-pf2tq" Nov 23 14:49:23 crc kubenswrapper[5050]: I1123 14:49:23.611985 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/07f52b6d-9146-46cc-91ed-4a6c99b09ca4-bound-sa-token\") pod \"image-registry-66df7c8f76-pf2tq\" (UID: \"07f52b6d-9146-46cc-91ed-4a6c99b09ca4\") " pod="openshift-image-registry/image-registry-66df7c8f76-pf2tq" Nov 23 14:49:23 crc kubenswrapper[5050]: I1123 14:49:23.676083 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-pf2tq" Nov 23 14:49:23 crc kubenswrapper[5050]: I1123 14:49:23.990798 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-pf2tq"] Nov 23 14:49:24 crc kubenswrapper[5050]: I1123 14:49:24.586868 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-pf2tq" event={"ID":"07f52b6d-9146-46cc-91ed-4a6c99b09ca4","Type":"ContainerStarted","Data":"7329bde9fe63bfff403baeb05e38bbf22cf0e36fb04794b54a1046e3d96c2496"} Nov 23 14:49:24 crc kubenswrapper[5050]: I1123 14:49:24.587650 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-pf2tq" event={"ID":"07f52b6d-9146-46cc-91ed-4a6c99b09ca4","Type":"ContainerStarted","Data":"ae1f90fb19507f7c711c323b73ddff8eb5b3fab7008f923d8130b74691a6f0b4"} Nov 23 14:49:24 crc kubenswrapper[5050]: I1123 14:49:24.587742 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-66df7c8f76-pf2tq" Nov 23 14:49:24 crc kubenswrapper[5050]: I1123 14:49:24.617987 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-66df7c8f76-pf2tq" podStartSLOduration=1.6179471890000001 podStartE2EDuration="1.617947189s" podCreationTimestamp="2025-11-23 14:49:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 14:49:24.611215583 +0000 UTC m=+459.778212068" watchObservedRunningTime="2025-11-23 14:49:24.617947189 +0000 UTC m=+459.784943724" Nov 23 14:49:43 crc kubenswrapper[5050]: I1123 14:49:43.686175 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-66df7c8f76-pf2tq" Nov 23 14:49:43 crc kubenswrapper[5050]: I1123 14:49:43.773622 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-rw2cv"] Nov 23 14:50:08 crc kubenswrapper[5050]: I1123 14:50:08.833314 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-image-registry/image-registry-697d97f7c8-rw2cv" podUID="9c29734c-66bd-4ac3-b26a-b5349d786018" containerName="registry" containerID="cri-o://de687d78dbe8543db96ece8c3cb26fe2d4e2f9c7d538c59a18a7882f3cf241d1" gracePeriod=30 Nov 23 14:50:09 crc kubenswrapper[5050]: I1123 14:50:09.310111 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-rw2cv" Nov 23 14:50:09 crc kubenswrapper[5050]: I1123 14:50:09.428796 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9c29734c-66bd-4ac3-b26a-b5349d786018-trusted-ca\") pod \"9c29734c-66bd-4ac3-b26a-b5349d786018\" (UID: \"9c29734c-66bd-4ac3-b26a-b5349d786018\") " Nov 23 14:50:09 crc kubenswrapper[5050]: I1123 14:50:09.428939 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/9c29734c-66bd-4ac3-b26a-b5349d786018-registry-tls\") pod \"9c29734c-66bd-4ac3-b26a-b5349d786018\" (UID: \"9c29734c-66bd-4ac3-b26a-b5349d786018\") " Nov 23 14:50:09 crc kubenswrapper[5050]: I1123 14:50:09.429009 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dzqtd\" (UniqueName: \"kubernetes.io/projected/9c29734c-66bd-4ac3-b26a-b5349d786018-kube-api-access-dzqtd\") pod \"9c29734c-66bd-4ac3-b26a-b5349d786018\" (UID: \"9c29734c-66bd-4ac3-b26a-b5349d786018\") " Nov 23 14:50:09 crc kubenswrapper[5050]: I1123 14:50:09.429053 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/9c29734c-66bd-4ac3-b26a-b5349d786018-registry-certificates\") pod \"9c29734c-66bd-4ac3-b26a-b5349d786018\" (UID: \"9c29734c-66bd-4ac3-b26a-b5349d786018\") " Nov 23 14:50:09 crc kubenswrapper[5050]: I1123 14:50:09.430297 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9c29734c-66bd-4ac3-b26a-b5349d786018-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "9c29734c-66bd-4ac3-b26a-b5349d786018" (UID: "9c29734c-66bd-4ac3-b26a-b5349d786018"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 14:50:09 crc kubenswrapper[5050]: I1123 14:50:09.430332 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9c29734c-66bd-4ac3-b26a-b5349d786018-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "9c29734c-66bd-4ac3-b26a-b5349d786018" (UID: "9c29734c-66bd-4ac3-b26a-b5349d786018"). InnerVolumeSpecName "registry-certificates". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 14:50:09 crc kubenswrapper[5050]: I1123 14:50:09.431093 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/9c29734c-66bd-4ac3-b26a-b5349d786018-bound-sa-token\") pod \"9c29734c-66bd-4ac3-b26a-b5349d786018\" (UID: \"9c29734c-66bd-4ac3-b26a-b5349d786018\") " Nov 23 14:50:09 crc kubenswrapper[5050]: I1123 14:50:09.431253 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/9c29734c-66bd-4ac3-b26a-b5349d786018-installation-pull-secrets\") pod \"9c29734c-66bd-4ac3-b26a-b5349d786018\" (UID: \"9c29734c-66bd-4ac3-b26a-b5349d786018\") " Nov 23 14:50:09 crc kubenswrapper[5050]: I1123 14:50:09.432011 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/9c29734c-66bd-4ac3-b26a-b5349d786018-ca-trust-extracted\") pod \"9c29734c-66bd-4ac3-b26a-b5349d786018\" (UID: \"9c29734c-66bd-4ac3-b26a-b5349d786018\") " Nov 23 14:50:09 crc kubenswrapper[5050]: I1123 14:50:09.433303 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-storage\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"9c29734c-66bd-4ac3-b26a-b5349d786018\" (UID: \"9c29734c-66bd-4ac3-b26a-b5349d786018\") " Nov 23 14:50:09 crc kubenswrapper[5050]: I1123 14:50:09.437048 5050 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9c29734c-66bd-4ac3-b26a-b5349d786018-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 23 14:50:09 crc kubenswrapper[5050]: I1123 14:50:09.437178 5050 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/9c29734c-66bd-4ac3-b26a-b5349d786018-registry-certificates\") on node \"crc\" DevicePath \"\"" Nov 23 14:50:09 crc kubenswrapper[5050]: I1123 14:50:09.444607 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9c29734c-66bd-4ac3-b26a-b5349d786018-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "9c29734c-66bd-4ac3-b26a-b5349d786018" (UID: "9c29734c-66bd-4ac3-b26a-b5349d786018"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 14:50:09 crc kubenswrapper[5050]: I1123 14:50:09.447223 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9c29734c-66bd-4ac3-b26a-b5349d786018-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "9c29734c-66bd-4ac3-b26a-b5349d786018" (UID: "9c29734c-66bd-4ac3-b26a-b5349d786018"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 14:50:09 crc kubenswrapper[5050]: I1123 14:50:09.447256 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9c29734c-66bd-4ac3-b26a-b5349d786018-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "9c29734c-66bd-4ac3-b26a-b5349d786018" (UID: "9c29734c-66bd-4ac3-b26a-b5349d786018"). InnerVolumeSpecName "registry-tls". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 14:50:09 crc kubenswrapper[5050]: I1123 14:50:09.449785 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9c29734c-66bd-4ac3-b26a-b5349d786018-kube-api-access-dzqtd" (OuterVolumeSpecName: "kube-api-access-dzqtd") pod "9c29734c-66bd-4ac3-b26a-b5349d786018" (UID: "9c29734c-66bd-4ac3-b26a-b5349d786018"). InnerVolumeSpecName "kube-api-access-dzqtd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 14:50:09 crc kubenswrapper[5050]: I1123 14:50:09.457856 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "registry-storage") pod "9c29734c-66bd-4ac3-b26a-b5349d786018" (UID: "9c29734c-66bd-4ac3-b26a-b5349d786018"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue "" Nov 23 14:50:09 crc kubenswrapper[5050]: I1123 14:50:09.469127 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9c29734c-66bd-4ac3-b26a-b5349d786018-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "9c29734c-66bd-4ac3-b26a-b5349d786018" (UID: "9c29734c-66bd-4ac3-b26a-b5349d786018"). InnerVolumeSpecName "ca-trust-extracted". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 14:50:09 crc kubenswrapper[5050]: I1123 14:50:09.539714 5050 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/9c29734c-66bd-4ac3-b26a-b5349d786018-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 23 14:50:09 crc kubenswrapper[5050]: I1123 14:50:09.539783 5050 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/9c29734c-66bd-4ac3-b26a-b5349d786018-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Nov 23 14:50:09 crc kubenswrapper[5050]: I1123 14:50:09.539806 5050 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/9c29734c-66bd-4ac3-b26a-b5349d786018-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Nov 23 14:50:09 crc kubenswrapper[5050]: I1123 14:50:09.539824 5050 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/9c29734c-66bd-4ac3-b26a-b5349d786018-registry-tls\") on node \"crc\" DevicePath \"\"" Nov 23 14:50:09 crc kubenswrapper[5050]: I1123 14:50:09.539843 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dzqtd\" (UniqueName: \"kubernetes.io/projected/9c29734c-66bd-4ac3-b26a-b5349d786018-kube-api-access-dzqtd\") on node \"crc\" DevicePath \"\"" Nov 23 14:50:09 crc kubenswrapper[5050]: I1123 14:50:09.932648 5050 generic.go:334] "Generic (PLEG): container finished" podID="9c29734c-66bd-4ac3-b26a-b5349d786018" containerID="de687d78dbe8543db96ece8c3cb26fe2d4e2f9c7d538c59a18a7882f3cf241d1" exitCode=0 Nov 23 14:50:09 crc kubenswrapper[5050]: I1123 14:50:09.932742 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-rw2cv" event={"ID":"9c29734c-66bd-4ac3-b26a-b5349d786018","Type":"ContainerDied","Data":"de687d78dbe8543db96ece8c3cb26fe2d4e2f9c7d538c59a18a7882f3cf241d1"} Nov 23 14:50:09 crc kubenswrapper[5050]: I1123 14:50:09.932749 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-rw2cv" Nov 23 14:50:09 crc kubenswrapper[5050]: I1123 14:50:09.932797 5050 scope.go:117] "RemoveContainer" containerID="de687d78dbe8543db96ece8c3cb26fe2d4e2f9c7d538c59a18a7882f3cf241d1" Nov 23 14:50:09 crc kubenswrapper[5050]: I1123 14:50:09.932776 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-rw2cv" event={"ID":"9c29734c-66bd-4ac3-b26a-b5349d786018","Type":"ContainerDied","Data":"adde299ff832bf08bcc6ac95f9084f648dc3fbdae25bf1c14f90f0363cd6845e"} Nov 23 14:50:09 crc kubenswrapper[5050]: I1123 14:50:09.960969 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-rw2cv"] Nov 23 14:50:09 crc kubenswrapper[5050]: I1123 14:50:09.967344 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-rw2cv"] Nov 23 14:50:09 crc kubenswrapper[5050]: I1123 14:50:09.969040 5050 scope.go:117] "RemoveContainer" containerID="de687d78dbe8543db96ece8c3cb26fe2d4e2f9c7d538c59a18a7882f3cf241d1" Nov 23 14:50:09 crc kubenswrapper[5050]: E1123 14:50:09.969867 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"de687d78dbe8543db96ece8c3cb26fe2d4e2f9c7d538c59a18a7882f3cf241d1\": container with ID starting with de687d78dbe8543db96ece8c3cb26fe2d4e2f9c7d538c59a18a7882f3cf241d1 not found: ID does not exist" containerID="de687d78dbe8543db96ece8c3cb26fe2d4e2f9c7d538c59a18a7882f3cf241d1" Nov 23 14:50:09 crc kubenswrapper[5050]: I1123 14:50:09.969941 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"de687d78dbe8543db96ece8c3cb26fe2d4e2f9c7d538c59a18a7882f3cf241d1"} err="failed to get container status \"de687d78dbe8543db96ece8c3cb26fe2d4e2f9c7d538c59a18a7882f3cf241d1\": rpc error: code = NotFound desc = could not find container \"de687d78dbe8543db96ece8c3cb26fe2d4e2f9c7d538c59a18a7882f3cf241d1\": container with ID starting with de687d78dbe8543db96ece8c3cb26fe2d4e2f9c7d538c59a18a7882f3cf241d1 not found: ID does not exist" Nov 23 14:50:11 crc kubenswrapper[5050]: I1123 14:50:11.562774 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9c29734c-66bd-4ac3-b26a-b5349d786018" path="/var/lib/kubelet/pods/9c29734c-66bd-4ac3-b26a-b5349d786018/volumes" Nov 23 14:50:29 crc kubenswrapper[5050]: I1123 14:50:29.224640 5050 patch_prober.go:28] interesting pod/machine-config-daemon-hlrlq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 23 14:50:29 crc kubenswrapper[5050]: I1123 14:50:29.225474 5050 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 23 14:50:59 crc kubenswrapper[5050]: I1123 14:50:59.224856 5050 patch_prober.go:28] interesting pod/machine-config-daemon-hlrlq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 
23 14:50:59 crc kubenswrapper[5050]: I1123 14:50:59.225720 5050 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 23 14:51:29 crc kubenswrapper[5050]: I1123 14:51:29.225223 5050 patch_prober.go:28] interesting pod/machine-config-daemon-hlrlq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 23 14:51:29 crc kubenswrapper[5050]: I1123 14:51:29.226233 5050 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 23 14:51:29 crc kubenswrapper[5050]: I1123 14:51:29.226333 5050 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" Nov 23 14:51:29 crc kubenswrapper[5050]: I1123 14:51:29.227594 5050 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"496733c812e2a0aa8f64801990fbfc3b73c223752a44cbf12486cae5f9ea9838"} pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 23 14:51:29 crc kubenswrapper[5050]: I1123 14:51:29.227727 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" containerID="cri-o://496733c812e2a0aa8f64801990fbfc3b73c223752a44cbf12486cae5f9ea9838" gracePeriod=600 Nov 23 14:51:29 crc kubenswrapper[5050]: I1123 14:51:29.874847 5050 generic.go:334] "Generic (PLEG): container finished" podID="1d998909-9470-47ef-87e8-b34f0473682f" containerID="496733c812e2a0aa8f64801990fbfc3b73c223752a44cbf12486cae5f9ea9838" exitCode=0 Nov 23 14:51:29 crc kubenswrapper[5050]: I1123 14:51:29.874928 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" event={"ID":"1d998909-9470-47ef-87e8-b34f0473682f","Type":"ContainerDied","Data":"496733c812e2a0aa8f64801990fbfc3b73c223752a44cbf12486cae5f9ea9838"} Nov 23 14:51:29 crc kubenswrapper[5050]: I1123 14:51:29.875548 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" event={"ID":"1d998909-9470-47ef-87e8-b34f0473682f","Type":"ContainerStarted","Data":"f192bfe46cd44b6e123a66e929ac2df13733eb956bf55d1780f3aab83a1b4eec"} Nov 23 14:51:29 crc kubenswrapper[5050]: I1123 14:51:29.875588 5050 scope.go:117] "RemoveContainer" containerID="86f3ccfe0d53f78ac9dca993751796d574dec20f49c1a0cf613c3239d4e75023" Nov 23 14:52:23 crc kubenswrapper[5050]: I1123 14:52:23.956891 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-j8fzz"] Nov 23 14:52:23 crc kubenswrapper[5050]: I1123 14:52:23.958188 5050 kuberuntime_container.go:808] "Killing 
container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" podUID="6573c043-542c-47ae-a2ba-f70b8baf60c2" containerName="ovn-controller" containerID="cri-o://34b2a56f4af5df28c458b7f7d4588f81564c734a117bbf4d08e1fccfa43fd387" gracePeriod=30 Nov 23 14:52:23 crc kubenswrapper[5050]: I1123 14:52:23.958237 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" podUID="6573c043-542c-47ae-a2ba-f70b8baf60c2" containerName="nbdb" containerID="cri-o://d9de298aa47ed7038c4f4ed742abb7a13f112d7f417637eb11d31a1f1accf0b1" gracePeriod=30 Nov 23 14:52:23 crc kubenswrapper[5050]: I1123 14:52:23.958332 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" podUID="6573c043-542c-47ae-a2ba-f70b8baf60c2" containerName="ovn-acl-logging" containerID="cri-o://af3da6e6807f5d42cb8607efbabab0c5d472ddb5c02dd1c33f9f0f3ea8eba9c6" gracePeriod=30 Nov 23 14:52:23 crc kubenswrapper[5050]: I1123 14:52:23.958310 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" podUID="6573c043-542c-47ae-a2ba-f70b8baf60c2" containerName="kube-rbac-proxy-node" containerID="cri-o://2d06dfc9e93891495a387aa8a8e24b4029173b25c953dff4d2e80dbb30f4b03e" gracePeriod=30 Nov 23 14:52:23 crc kubenswrapper[5050]: I1123 14:52:23.958369 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" podUID="6573c043-542c-47ae-a2ba-f70b8baf60c2" containerName="northd" containerID="cri-o://cfa445fb3677a09126d124eea1348466a338713c6728a7ef138d9ffa43bec9eb" gracePeriod=30 Nov 23 14:52:23 crc kubenswrapper[5050]: I1123 14:52:23.958312 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" podUID="6573c043-542c-47ae-a2ba-f70b8baf60c2" containerName="kube-rbac-proxy-ovn-metrics" containerID="cri-o://bd601391a5bb150716ee97b765dd442d4bf9929eb01dfd17fdce39f327b4698f" gracePeriod=30 Nov 23 14:52:23 crc kubenswrapper[5050]: I1123 14:52:23.958488 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" podUID="6573c043-542c-47ae-a2ba-f70b8baf60c2" containerName="sbdb" containerID="cri-o://7009f69906eef1a76bebe08606ec9a9b7e61ce6217caaf0b784d0943db4bd51f" gracePeriod=30 Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.024141 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" podUID="6573c043-542c-47ae-a2ba-f70b8baf60c2" containerName="ovnkube-controller" containerID="cri-o://64b99478c6a7e63e8f900194b4f35e1d77c9836c192f4a52a281dd036504cd5f" gracePeriod=30 Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.259207 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-qvjn2_abdac21e-18fc-460d-bd3b-73bed66b8ab9/kube-multus/2.log" Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.260218 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-qvjn2_abdac21e-18fc-460d-bd3b-73bed66b8ab9/kube-multus/1.log" Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.260274 5050 generic.go:334] "Generic (PLEG): container finished" podID="abdac21e-18fc-460d-bd3b-73bed66b8ab9" containerID="c623d3e60ee42251333c8132c3a910eddaf9de12b5abc799178b24d7174e6e5d" exitCode=2 Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 
14:52:24.260341 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-qvjn2" event={"ID":"abdac21e-18fc-460d-bd3b-73bed66b8ab9","Type":"ContainerDied","Data":"c623d3e60ee42251333c8132c3a910eddaf9de12b5abc799178b24d7174e6e5d"} Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.260385 5050 scope.go:117] "RemoveContainer" containerID="db0c98dfdc0723630b1490357e96c25cfe466caf464ee1883509dc0bd2521337" Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.261648 5050 scope.go:117] "RemoveContainer" containerID="c623d3e60ee42251333c8132c3a910eddaf9de12b5abc799178b24d7174e6e5d" Nov 23 14:52:24 crc kubenswrapper[5050]: E1123 14:52:24.262057 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-multus pod=multus-qvjn2_openshift-multus(abdac21e-18fc-460d-bd3b-73bed66b8ab9)\"" pod="openshift-multus/multus-qvjn2" podUID="abdac21e-18fc-460d-bd3b-73bed66b8ab9" Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.265695 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-j8fzz_6573c043-542c-47ae-a2ba-f70b8baf60c2/ovnkube-controller/3.log" Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.268551 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-j8fzz_6573c043-542c-47ae-a2ba-f70b8baf60c2/ovn-acl-logging/0.log" Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.269209 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-j8fzz_6573c043-542c-47ae-a2ba-f70b8baf60c2/ovn-controller/0.log" Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.269732 5050 generic.go:334] "Generic (PLEG): container finished" podID="6573c043-542c-47ae-a2ba-f70b8baf60c2" containerID="64b99478c6a7e63e8f900194b4f35e1d77c9836c192f4a52a281dd036504cd5f" exitCode=0 Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.269775 5050 generic.go:334] "Generic (PLEG): container finished" podID="6573c043-542c-47ae-a2ba-f70b8baf60c2" containerID="7009f69906eef1a76bebe08606ec9a9b7e61ce6217caaf0b784d0943db4bd51f" exitCode=0 Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.269789 5050 generic.go:334] "Generic (PLEG): container finished" podID="6573c043-542c-47ae-a2ba-f70b8baf60c2" containerID="d9de298aa47ed7038c4f4ed742abb7a13f112d7f417637eb11d31a1f1accf0b1" exitCode=0 Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.269800 5050 generic.go:334] "Generic (PLEG): container finished" podID="6573c043-542c-47ae-a2ba-f70b8baf60c2" containerID="bd601391a5bb150716ee97b765dd442d4bf9929eb01dfd17fdce39f327b4698f" exitCode=0 Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.269811 5050 generic.go:334] "Generic (PLEG): container finished" podID="6573c043-542c-47ae-a2ba-f70b8baf60c2" containerID="2d06dfc9e93891495a387aa8a8e24b4029173b25c953dff4d2e80dbb30f4b03e" exitCode=0 Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.269822 5050 generic.go:334] "Generic (PLEG): container finished" podID="6573c043-542c-47ae-a2ba-f70b8baf60c2" containerID="af3da6e6807f5d42cb8607efbabab0c5d472ddb5c02dd1c33f9f0f3ea8eba9c6" exitCode=143 Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.269811 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" 
event={"ID":"6573c043-542c-47ae-a2ba-f70b8baf60c2","Type":"ContainerDied","Data":"64b99478c6a7e63e8f900194b4f35e1d77c9836c192f4a52a281dd036504cd5f"} Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.269871 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" event={"ID":"6573c043-542c-47ae-a2ba-f70b8baf60c2","Type":"ContainerDied","Data":"7009f69906eef1a76bebe08606ec9a9b7e61ce6217caaf0b784d0943db4bd51f"} Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.269888 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" event={"ID":"6573c043-542c-47ae-a2ba-f70b8baf60c2","Type":"ContainerDied","Data":"d9de298aa47ed7038c4f4ed742abb7a13f112d7f417637eb11d31a1f1accf0b1"} Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.269901 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" event={"ID":"6573c043-542c-47ae-a2ba-f70b8baf60c2","Type":"ContainerDied","Data":"bd601391a5bb150716ee97b765dd442d4bf9929eb01dfd17fdce39f327b4698f"} Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.269918 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" event={"ID":"6573c043-542c-47ae-a2ba-f70b8baf60c2","Type":"ContainerDied","Data":"2d06dfc9e93891495a387aa8a8e24b4029173b25c953dff4d2e80dbb30f4b03e"} Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.269930 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" event={"ID":"6573c043-542c-47ae-a2ba-f70b8baf60c2","Type":"ContainerDied","Data":"af3da6e6807f5d42cb8607efbabab0c5d472ddb5c02dd1c33f9f0f3ea8eba9c6"} Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.269945 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" event={"ID":"6573c043-542c-47ae-a2ba-f70b8baf60c2","Type":"ContainerDied","Data":"34b2a56f4af5df28c458b7f7d4588f81564c734a117bbf4d08e1fccfa43fd387"} Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.269830 5050 generic.go:334] "Generic (PLEG): container finished" podID="6573c043-542c-47ae-a2ba-f70b8baf60c2" containerID="34b2a56f4af5df28c458b7f7d4588f81564c734a117bbf4d08e1fccfa43fd387" exitCode=143 Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.345790 5050 scope.go:117] "RemoveContainer" containerID="82a0b0931dae074fdea5a30d5f082d4040553acde383b476d50f275a40eaec82" Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.673973 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-j8fzz_6573c043-542c-47ae-a2ba-f70b8baf60c2/ovn-acl-logging/0.log" Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.675026 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-j8fzz_6573c043-542c-47ae-a2ba-f70b8baf60c2/ovn-controller/0.log" Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.675867 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.742270 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5mtwf\" (UniqueName: \"kubernetes.io/projected/6573c043-542c-47ae-a2ba-f70b8baf60c2-kube-api-access-5mtwf\") pod \"6573c043-542c-47ae-a2ba-f70b8baf60c2\" (UID: \"6573c043-542c-47ae-a2ba-f70b8baf60c2\") " Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.742328 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/6573c043-542c-47ae-a2ba-f70b8baf60c2-etc-openvswitch\") pod \"6573c043-542c-47ae-a2ba-f70b8baf60c2\" (UID: \"6573c043-542c-47ae-a2ba-f70b8baf60c2\") " Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.742394 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/6573c043-542c-47ae-a2ba-f70b8baf60c2-host-run-netns\") pod \"6573c043-542c-47ae-a2ba-f70b8baf60c2\" (UID: \"6573c043-542c-47ae-a2ba-f70b8baf60c2\") " Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.742416 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/6573c043-542c-47ae-a2ba-f70b8baf60c2-log-socket\") pod \"6573c043-542c-47ae-a2ba-f70b8baf60c2\" (UID: \"6573c043-542c-47ae-a2ba-f70b8baf60c2\") " Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.742471 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/6573c043-542c-47ae-a2ba-f70b8baf60c2-node-log\") pod \"6573c043-542c-47ae-a2ba-f70b8baf60c2\" (UID: \"6573c043-542c-47ae-a2ba-f70b8baf60c2\") " Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.742515 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/6573c043-542c-47ae-a2ba-f70b8baf60c2-run-ovn\") pod \"6573c043-542c-47ae-a2ba-f70b8baf60c2\" (UID: \"6573c043-542c-47ae-a2ba-f70b8baf60c2\") " Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.742538 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/6573c043-542c-47ae-a2ba-f70b8baf60c2-systemd-units\") pod \"6573c043-542c-47ae-a2ba-f70b8baf60c2\" (UID: \"6573c043-542c-47ae-a2ba-f70b8baf60c2\") " Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.742572 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/6573c043-542c-47ae-a2ba-f70b8baf60c2-host-var-lib-cni-networks-ovn-kubernetes\") pod \"6573c043-542c-47ae-a2ba-f70b8baf60c2\" (UID: \"6573c043-542c-47ae-a2ba-f70b8baf60c2\") " Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.742604 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6573c043-542c-47ae-a2ba-f70b8baf60c2-ovnkube-config\") pod \"6573c043-542c-47ae-a2ba-f70b8baf60c2\" (UID: \"6573c043-542c-47ae-a2ba-f70b8baf60c2\") " Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.742578 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6573c043-542c-47ae-a2ba-f70b8baf60c2-etc-openvswitch" (OuterVolumeSpecName: "etc-openvswitch") 
pod "6573c043-542c-47ae-a2ba-f70b8baf60c2" (UID: "6573c043-542c-47ae-a2ba-f70b8baf60c2"). InnerVolumeSpecName "etc-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.742653 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/6573c043-542c-47ae-a2ba-f70b8baf60c2-var-lib-openvswitch\") pod \"6573c043-542c-47ae-a2ba-f70b8baf60c2\" (UID: \"6573c043-542c-47ae-a2ba-f70b8baf60c2\") " Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.742683 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6573c043-542c-47ae-a2ba-f70b8baf60c2-ovn-node-metrics-cert\") pod \"6573c043-542c-47ae-a2ba-f70b8baf60c2\" (UID: \"6573c043-542c-47ae-a2ba-f70b8baf60c2\") " Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.742702 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/6573c043-542c-47ae-a2ba-f70b8baf60c2-run-systemd\") pod \"6573c043-542c-47ae-a2ba-f70b8baf60c2\" (UID: \"6573c043-542c-47ae-a2ba-f70b8baf60c2\") " Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.742710 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6573c043-542c-47ae-a2ba-f70b8baf60c2-run-ovn" (OuterVolumeSpecName: "run-ovn") pod "6573c043-542c-47ae-a2ba-f70b8baf60c2" (UID: "6573c043-542c-47ae-a2ba-f70b8baf60c2"). InnerVolumeSpecName "run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.742723 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/6573c043-542c-47ae-a2ba-f70b8baf60c2-run-openvswitch\") pod \"6573c043-542c-47ae-a2ba-f70b8baf60c2\" (UID: \"6573c043-542c-47ae-a2ba-f70b8baf60c2\") " Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.742779 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6573c043-542c-47ae-a2ba-f70b8baf60c2-run-openvswitch" (OuterVolumeSpecName: "run-openvswitch") pod "6573c043-542c-47ae-a2ba-f70b8baf60c2" (UID: "6573c043-542c-47ae-a2ba-f70b8baf60c2"). InnerVolumeSpecName "run-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.742825 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6573c043-542c-47ae-a2ba-f70b8baf60c2-systemd-units" (OuterVolumeSpecName: "systemd-units") pod "6573c043-542c-47ae-a2ba-f70b8baf60c2" (UID: "6573c043-542c-47ae-a2ba-f70b8baf60c2"). InnerVolumeSpecName "systemd-units". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.742829 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/6573c043-542c-47ae-a2ba-f70b8baf60c2-host-run-ovn-kubernetes\") pod \"6573c043-542c-47ae-a2ba-f70b8baf60c2\" (UID: \"6573c043-542c-47ae-a2ba-f70b8baf60c2\") " Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.742859 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6573c043-542c-47ae-a2ba-f70b8baf60c2-host-var-lib-cni-networks-ovn-kubernetes" (OuterVolumeSpecName: "host-var-lib-cni-networks-ovn-kubernetes") pod "6573c043-542c-47ae-a2ba-f70b8baf60c2" (UID: "6573c043-542c-47ae-a2ba-f70b8baf60c2"). InnerVolumeSpecName "host-var-lib-cni-networks-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.742908 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6573c043-542c-47ae-a2ba-f70b8baf60c2-ovnkube-script-lib\") pod \"6573c043-542c-47ae-a2ba-f70b8baf60c2\" (UID: \"6573c043-542c-47ae-a2ba-f70b8baf60c2\") " Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.742962 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6573c043-542c-47ae-a2ba-f70b8baf60c2-env-overrides\") pod \"6573c043-542c-47ae-a2ba-f70b8baf60c2\" (UID: \"6573c043-542c-47ae-a2ba-f70b8baf60c2\") " Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.743033 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/6573c043-542c-47ae-a2ba-f70b8baf60c2-host-cni-netd\") pod \"6573c043-542c-47ae-a2ba-f70b8baf60c2\" (UID: \"6573c043-542c-47ae-a2ba-f70b8baf60c2\") " Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.743078 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/6573c043-542c-47ae-a2ba-f70b8baf60c2-host-slash\") pod \"6573c043-542c-47ae-a2ba-f70b8baf60c2\" (UID: \"6573c043-542c-47ae-a2ba-f70b8baf60c2\") " Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.743128 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/6573c043-542c-47ae-a2ba-f70b8baf60c2-host-kubelet\") pod \"6573c043-542c-47ae-a2ba-f70b8baf60c2\" (UID: \"6573c043-542c-47ae-a2ba-f70b8baf60c2\") " Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.743166 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/6573c043-542c-47ae-a2ba-f70b8baf60c2-host-cni-bin\") pod \"6573c043-542c-47ae-a2ba-f70b8baf60c2\" (UID: \"6573c043-542c-47ae-a2ba-f70b8baf60c2\") " Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.743572 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6573c043-542c-47ae-a2ba-f70b8baf60c2-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "6573c043-542c-47ae-a2ba-f70b8baf60c2" (UID: "6573c043-542c-47ae-a2ba-f70b8baf60c2"). InnerVolumeSpecName "ovnkube-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.743610 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6573c043-542c-47ae-a2ba-f70b8baf60c2-var-lib-openvswitch" (OuterVolumeSpecName: "var-lib-openvswitch") pod "6573c043-542c-47ae-a2ba-f70b8baf60c2" (UID: "6573c043-542c-47ae-a2ba-f70b8baf60c2"). InnerVolumeSpecName "var-lib-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.743942 5050 reconciler_common.go:293] "Volume detached for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/6573c043-542c-47ae-a2ba-f70b8baf60c2-etc-openvswitch\") on node \"crc\" DevicePath \"\"" Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.743976 5050 reconciler_common.go:293] "Volume detached for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/6573c043-542c-47ae-a2ba-f70b8baf60c2-run-ovn\") on node \"crc\" DevicePath \"\"" Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.744001 5050 reconciler_common.go:293] "Volume detached for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/6573c043-542c-47ae-a2ba-f70b8baf60c2-systemd-units\") on node \"crc\" DevicePath \"\"" Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.744033 5050 reconciler_common.go:293] "Volume detached for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/6573c043-542c-47ae-a2ba-f70b8baf60c2-host-var-lib-cni-networks-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.744063 5050 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6573c043-542c-47ae-a2ba-f70b8baf60c2-ovnkube-config\") on node \"crc\" DevicePath \"\"" Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.744086 5050 reconciler_common.go:293] "Volume detached for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/6573c043-542c-47ae-a2ba-f70b8baf60c2-var-lib-openvswitch\") on node \"crc\" DevicePath \"\"" Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.744109 5050 reconciler_common.go:293] "Volume detached for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/6573c043-542c-47ae-a2ba-f70b8baf60c2-run-openvswitch\") on node \"crc\" DevicePath \"\"" Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.744175 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6573c043-542c-47ae-a2ba-f70b8baf60c2-host-run-netns" (OuterVolumeSpecName: "host-run-netns") pod "6573c043-542c-47ae-a2ba-f70b8baf60c2" (UID: "6573c043-542c-47ae-a2ba-f70b8baf60c2"). InnerVolumeSpecName "host-run-netns". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.744231 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6573c043-542c-47ae-a2ba-f70b8baf60c2-log-socket" (OuterVolumeSpecName: "log-socket") pod "6573c043-542c-47ae-a2ba-f70b8baf60c2" (UID: "6573c043-542c-47ae-a2ba-f70b8baf60c2"). InnerVolumeSpecName "log-socket". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.744282 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6573c043-542c-47ae-a2ba-f70b8baf60c2-node-log" (OuterVolumeSpecName: "node-log") pod "6573c043-542c-47ae-a2ba-f70b8baf60c2" (UID: "6573c043-542c-47ae-a2ba-f70b8baf60c2"). InnerVolumeSpecName "node-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.744329 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6573c043-542c-47ae-a2ba-f70b8baf60c2-host-run-ovn-kubernetes" (OuterVolumeSpecName: "host-run-ovn-kubernetes") pod "6573c043-542c-47ae-a2ba-f70b8baf60c2" (UID: "6573c043-542c-47ae-a2ba-f70b8baf60c2"). InnerVolumeSpecName "host-run-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.745187 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6573c043-542c-47ae-a2ba-f70b8baf60c2-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "6573c043-542c-47ae-a2ba-f70b8baf60c2" (UID: "6573c043-542c-47ae-a2ba-f70b8baf60c2"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.745958 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6573c043-542c-47ae-a2ba-f70b8baf60c2-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "6573c043-542c-47ae-a2ba-f70b8baf60c2" (UID: "6573c043-542c-47ae-a2ba-f70b8baf60c2"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.746028 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6573c043-542c-47ae-a2ba-f70b8baf60c2-host-cni-netd" (OuterVolumeSpecName: "host-cni-netd") pod "6573c043-542c-47ae-a2ba-f70b8baf60c2" (UID: "6573c043-542c-47ae-a2ba-f70b8baf60c2"). InnerVolumeSpecName "host-cni-netd". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.746082 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6573c043-542c-47ae-a2ba-f70b8baf60c2-host-slash" (OuterVolumeSpecName: "host-slash") pod "6573c043-542c-47ae-a2ba-f70b8baf60c2" (UID: "6573c043-542c-47ae-a2ba-f70b8baf60c2"). InnerVolumeSpecName "host-slash". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.746132 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6573c043-542c-47ae-a2ba-f70b8baf60c2-host-kubelet" (OuterVolumeSpecName: "host-kubelet") pod "6573c043-542c-47ae-a2ba-f70b8baf60c2" (UID: "6573c043-542c-47ae-a2ba-f70b8baf60c2"). InnerVolumeSpecName "host-kubelet". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.746181 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6573c043-542c-47ae-a2ba-f70b8baf60c2-host-cni-bin" (OuterVolumeSpecName: "host-cni-bin") pod "6573c043-542c-47ae-a2ba-f70b8baf60c2" (UID: "6573c043-542c-47ae-a2ba-f70b8baf60c2"). InnerVolumeSpecName "host-cni-bin". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.753394 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6573c043-542c-47ae-a2ba-f70b8baf60c2-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "6573c043-542c-47ae-a2ba-f70b8baf60c2" (UID: "6573c043-542c-47ae-a2ba-f70b8baf60c2"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.753733 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6573c043-542c-47ae-a2ba-f70b8baf60c2-kube-api-access-5mtwf" (OuterVolumeSpecName: "kube-api-access-5mtwf") pod "6573c043-542c-47ae-a2ba-f70b8baf60c2" (UID: "6573c043-542c-47ae-a2ba-f70b8baf60c2"). InnerVolumeSpecName "kube-api-access-5mtwf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.764079 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-pc5f5"] Nov 23 14:52:24 crc kubenswrapper[5050]: E1123 14:52:24.764511 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6573c043-542c-47ae-a2ba-f70b8baf60c2" containerName="ovnkube-controller" Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.764542 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="6573c043-542c-47ae-a2ba-f70b8baf60c2" containerName="ovnkube-controller" Nov 23 14:52:24 crc kubenswrapper[5050]: E1123 14:52:24.764561 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6573c043-542c-47ae-a2ba-f70b8baf60c2" containerName="kubecfg-setup" Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.764578 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="6573c043-542c-47ae-a2ba-f70b8baf60c2" containerName="kubecfg-setup" Nov 23 14:52:24 crc kubenswrapper[5050]: E1123 14:52:24.764598 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6573c043-542c-47ae-a2ba-f70b8baf60c2" containerName="ovn-controller" Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.764614 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="6573c043-542c-47ae-a2ba-f70b8baf60c2" containerName="ovn-controller" Nov 23 14:52:24 crc kubenswrapper[5050]: E1123 14:52:24.764633 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6573c043-542c-47ae-a2ba-f70b8baf60c2" containerName="kube-rbac-proxy-ovn-metrics" Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.764645 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="6573c043-542c-47ae-a2ba-f70b8baf60c2" containerName="kube-rbac-proxy-ovn-metrics" Nov 23 14:52:24 crc kubenswrapper[5050]: E1123 14:52:24.764664 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6573c043-542c-47ae-a2ba-f70b8baf60c2" containerName="ovnkube-controller" Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.764676 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="6573c043-542c-47ae-a2ba-f70b8baf60c2" containerName="ovnkube-controller" Nov 23 14:52:24 crc kubenswrapper[5050]: E1123 14:52:24.764688 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6573c043-542c-47ae-a2ba-f70b8baf60c2" containerName="northd" Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.764700 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="6573c043-542c-47ae-a2ba-f70b8baf60c2" containerName="northd" Nov 23 14:52:24 
crc kubenswrapper[5050]: E1123 14:52:24.764712 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6573c043-542c-47ae-a2ba-f70b8baf60c2" containerName="sbdb" Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.764724 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="6573c043-542c-47ae-a2ba-f70b8baf60c2" containerName="sbdb" Nov 23 14:52:24 crc kubenswrapper[5050]: E1123 14:52:24.764749 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6573c043-542c-47ae-a2ba-f70b8baf60c2" containerName="ovnkube-controller" Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.764761 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="6573c043-542c-47ae-a2ba-f70b8baf60c2" containerName="ovnkube-controller" Nov 23 14:52:24 crc kubenswrapper[5050]: E1123 14:52:24.764779 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9c29734c-66bd-4ac3-b26a-b5349d786018" containerName="registry" Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.764790 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="9c29734c-66bd-4ac3-b26a-b5349d786018" containerName="registry" Nov 23 14:52:24 crc kubenswrapper[5050]: E1123 14:52:24.764804 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6573c043-542c-47ae-a2ba-f70b8baf60c2" containerName="nbdb" Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.764815 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="6573c043-542c-47ae-a2ba-f70b8baf60c2" containerName="nbdb" Nov 23 14:52:24 crc kubenswrapper[5050]: E1123 14:52:24.764829 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6573c043-542c-47ae-a2ba-f70b8baf60c2" containerName="kube-rbac-proxy-node" Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.764842 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="6573c043-542c-47ae-a2ba-f70b8baf60c2" containerName="kube-rbac-proxy-node" Nov 23 14:52:24 crc kubenswrapper[5050]: E1123 14:52:24.764858 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6573c043-542c-47ae-a2ba-f70b8baf60c2" containerName="ovn-acl-logging" Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.764870 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="6573c043-542c-47ae-a2ba-f70b8baf60c2" containerName="ovn-acl-logging" Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.765030 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="9c29734c-66bd-4ac3-b26a-b5349d786018" containerName="registry" Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.765053 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="6573c043-542c-47ae-a2ba-f70b8baf60c2" containerName="ovnkube-controller" Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.765067 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="6573c043-542c-47ae-a2ba-f70b8baf60c2" containerName="kube-rbac-proxy-ovn-metrics" Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.765085 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="6573c043-542c-47ae-a2ba-f70b8baf60c2" containerName="ovnkube-controller" Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.765099 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="6573c043-542c-47ae-a2ba-f70b8baf60c2" containerName="northd" Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.765113 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="6573c043-542c-47ae-a2ba-f70b8baf60c2" containerName="nbdb" Nov 23 14:52:24 crc 
Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.765147 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="6573c043-542c-47ae-a2ba-f70b8baf60c2" containerName="ovn-acl-logging"
Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.765163 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="6573c043-542c-47ae-a2ba-f70b8baf60c2" containerName="kube-rbac-proxy-node"
Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.765181 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="6573c043-542c-47ae-a2ba-f70b8baf60c2" containerName="ovnkube-controller"
Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.765201 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="6573c043-542c-47ae-a2ba-f70b8baf60c2" containerName="ovn-controller"
Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.765217 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="6573c043-542c-47ae-a2ba-f70b8baf60c2" containerName="sbdb"
Nov 23 14:52:24 crc kubenswrapper[5050]: E1123 14:52:24.765372 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6573c043-542c-47ae-a2ba-f70b8baf60c2" containerName="ovnkube-controller"
Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.765388 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="6573c043-542c-47ae-a2ba-f70b8baf60c2" containerName="ovnkube-controller"
Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.766078 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="6573c043-542c-47ae-a2ba-f70b8baf60c2" containerName="ovnkube-controller"
Nov 23 14:52:24 crc kubenswrapper[5050]: E1123 14:52:24.766253 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6573c043-542c-47ae-a2ba-f70b8baf60c2" containerName="ovnkube-controller"
Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.766268 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="6573c043-542c-47ae-a2ba-f70b8baf60c2" containerName="ovnkube-controller"
Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.768936 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-pc5f5"
Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.780312 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6573c043-542c-47ae-a2ba-f70b8baf60c2-run-systemd" (OuterVolumeSpecName: "run-systemd") pod "6573c043-542c-47ae-a2ba-f70b8baf60c2" (UID: "6573c043-542c-47ae-a2ba-f70b8baf60c2"). InnerVolumeSpecName "run-systemd". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.844959 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/4d9dd1fe-6c22-4c55-8026-744f037fec8a-run-systemd\") pod \"ovnkube-node-pc5f5\" (UID: \"4d9dd1fe-6c22-4c55-8026-744f037fec8a\") " pod="openshift-ovn-kubernetes/ovnkube-node-pc5f5"
Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.845033 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/4d9dd1fe-6c22-4c55-8026-744f037fec8a-ovnkube-script-lib\") pod \"ovnkube-node-pc5f5\" (UID: \"4d9dd1fe-6c22-4c55-8026-744f037fec8a\") " pod="openshift-ovn-kubernetes/ovnkube-node-pc5f5"
Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.845060 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/4d9dd1fe-6c22-4c55-8026-744f037fec8a-ovnkube-config\") pod \"ovnkube-node-pc5f5\" (UID: \"4d9dd1fe-6c22-4c55-8026-744f037fec8a\") " pod="openshift-ovn-kubernetes/ovnkube-node-pc5f5"
Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.845092 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/4d9dd1fe-6c22-4c55-8026-744f037fec8a-log-socket\") pod \"ovnkube-node-pc5f5\" (UID: \"4d9dd1fe-6c22-4c55-8026-744f037fec8a\") " pod="openshift-ovn-kubernetes/ovnkube-node-pc5f5"
Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.845135 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/4d9dd1fe-6c22-4c55-8026-744f037fec8a-env-overrides\") pod \"ovnkube-node-pc5f5\" (UID: \"4d9dd1fe-6c22-4c55-8026-744f037fec8a\") " pod="openshift-ovn-kubernetes/ovnkube-node-pc5f5"
Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.845181 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/4d9dd1fe-6c22-4c55-8026-744f037fec8a-run-ovn\") pod \"ovnkube-node-pc5f5\" (UID: \"4d9dd1fe-6c22-4c55-8026-744f037fec8a\") " pod="openshift-ovn-kubernetes/ovnkube-node-pc5f5"
Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.845204 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nrpnw\" (UniqueName: \"kubernetes.io/projected/4d9dd1fe-6c22-4c55-8026-744f037fec8a-kube-api-access-nrpnw\") pod \"ovnkube-node-pc5f5\" (UID: \"4d9dd1fe-6c22-4c55-8026-744f037fec8a\") " pod="openshift-ovn-kubernetes/ovnkube-node-pc5f5"
Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.845229 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/4d9dd1fe-6c22-4c55-8026-744f037fec8a-host-run-ovn-kubernetes\") pod \"ovnkube-node-pc5f5\" (UID: \"4d9dd1fe-6c22-4c55-8026-744f037fec8a\") " pod="openshift-ovn-kubernetes/ovnkube-node-pc5f5"
Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.845252 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/4d9dd1fe-6c22-4c55-8026-744f037fec8a-host-slash\") pod \"ovnkube-node-pc5f5\" (UID: \"4d9dd1fe-6c22-4c55-8026-744f037fec8a\") " pod="openshift-ovn-kubernetes/ovnkube-node-pc5f5"
Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.845500 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/4d9dd1fe-6c22-4c55-8026-744f037fec8a-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-pc5f5\" (UID: \"4d9dd1fe-6c22-4c55-8026-744f037fec8a\") " pod="openshift-ovn-kubernetes/ovnkube-node-pc5f5"
Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.845613 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/4d9dd1fe-6c22-4c55-8026-744f037fec8a-host-kubelet\") pod \"ovnkube-node-pc5f5\" (UID: \"4d9dd1fe-6c22-4c55-8026-744f037fec8a\") " pod="openshift-ovn-kubernetes/ovnkube-node-pc5f5"
Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.845665 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/4d9dd1fe-6c22-4c55-8026-744f037fec8a-etc-openvswitch\") pod \"ovnkube-node-pc5f5\" (UID: \"4d9dd1fe-6c22-4c55-8026-744f037fec8a\") " pod="openshift-ovn-kubernetes/ovnkube-node-pc5f5"
Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.845709 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/4d9dd1fe-6c22-4c55-8026-744f037fec8a-host-run-netns\") pod \"ovnkube-node-pc5f5\" (UID: \"4d9dd1fe-6c22-4c55-8026-744f037fec8a\") " pod="openshift-ovn-kubernetes/ovnkube-node-pc5f5"
Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.845750 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/4d9dd1fe-6c22-4c55-8026-744f037fec8a-host-cni-netd\") pod \"ovnkube-node-pc5f5\" (UID: \"4d9dd1fe-6c22-4c55-8026-744f037fec8a\") " pod="openshift-ovn-kubernetes/ovnkube-node-pc5f5"
Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.845797 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/4d9dd1fe-6c22-4c55-8026-744f037fec8a-run-openvswitch\") pod \"ovnkube-node-pc5f5\" (UID: \"4d9dd1fe-6c22-4c55-8026-744f037fec8a\") " pod="openshift-ovn-kubernetes/ovnkube-node-pc5f5"
Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.845881 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/4d9dd1fe-6c22-4c55-8026-744f037fec8a-var-lib-openvswitch\") pod \"ovnkube-node-pc5f5\" (UID: \"4d9dd1fe-6c22-4c55-8026-744f037fec8a\") " pod="openshift-ovn-kubernetes/ovnkube-node-pc5f5"
Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.845929 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/4d9dd1fe-6c22-4c55-8026-744f037fec8a-ovn-node-metrics-cert\") pod \"ovnkube-node-pc5f5\" (UID: \"4d9dd1fe-6c22-4c55-8026-744f037fec8a\") " pod="openshift-ovn-kubernetes/ovnkube-node-pc5f5"
Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.845972 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/4d9dd1fe-6c22-4c55-8026-744f037fec8a-node-log\") pod \"ovnkube-node-pc5f5\" (UID: \"4d9dd1fe-6c22-4c55-8026-744f037fec8a\") " pod="openshift-ovn-kubernetes/ovnkube-node-pc5f5"
Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.846011 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/4d9dd1fe-6c22-4c55-8026-744f037fec8a-systemd-units\") pod \"ovnkube-node-pc5f5\" (UID: \"4d9dd1fe-6c22-4c55-8026-744f037fec8a\") " pod="openshift-ovn-kubernetes/ovnkube-node-pc5f5"
Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.846152 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/4d9dd1fe-6c22-4c55-8026-744f037fec8a-host-cni-bin\") pod \"ovnkube-node-pc5f5\" (UID: \"4d9dd1fe-6c22-4c55-8026-744f037fec8a\") " pod="openshift-ovn-kubernetes/ovnkube-node-pc5f5"
Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.846338 5050 reconciler_common.go:293] "Volume detached for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/6573c043-542c-47ae-a2ba-f70b8baf60c2-host-run-netns\") on node \"crc\" DevicePath \"\""
Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.846370 5050 reconciler_common.go:293] "Volume detached for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/6573c043-542c-47ae-a2ba-f70b8baf60c2-log-socket\") on node \"crc\" DevicePath \"\""
Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.846385 5050 reconciler_common.go:293] "Volume detached for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/6573c043-542c-47ae-a2ba-f70b8baf60c2-node-log\") on node \"crc\" DevicePath \"\""
Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.846402 5050 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6573c043-542c-47ae-a2ba-f70b8baf60c2-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\""
Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.846416 5050 reconciler_common.go:293] "Volume detached for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/6573c043-542c-47ae-a2ba-f70b8baf60c2-run-systemd\") on node \"crc\" DevicePath \"\""
Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.846428 5050 reconciler_common.go:293] "Volume detached for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/6573c043-542c-47ae-a2ba-f70b8baf60c2-host-run-ovn-kubernetes\") on node \"crc\" DevicePath \"\""
Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.846457 5050 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6573c043-542c-47ae-a2ba-f70b8baf60c2-ovnkube-script-lib\") on node \"crc\" DevicePath \"\""
Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.846471 5050 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6573c043-542c-47ae-a2ba-f70b8baf60c2-env-overrides\") on node \"crc\" DevicePath \"\""
Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.846484 5050 reconciler_common.go:293] "Volume detached for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/6573c043-542c-47ae-a2ba-f70b8baf60c2-host-cni-netd\") on node \"crc\" DevicePath \"\""
DevicePath \"\"" Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.846496 5050 reconciler_common.go:293] "Volume detached for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/6573c043-542c-47ae-a2ba-f70b8baf60c2-host-slash\") on node \"crc\" DevicePath \"\"" Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.846508 5050 reconciler_common.go:293] "Volume detached for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/6573c043-542c-47ae-a2ba-f70b8baf60c2-host-kubelet\") on node \"crc\" DevicePath \"\"" Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.846520 5050 reconciler_common.go:293] "Volume detached for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/6573c043-542c-47ae-a2ba-f70b8baf60c2-host-cni-bin\") on node \"crc\" DevicePath \"\"" Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.846533 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5mtwf\" (UniqueName: \"kubernetes.io/projected/6573c043-542c-47ae-a2ba-f70b8baf60c2-kube-api-access-5mtwf\") on node \"crc\" DevicePath \"\"" Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.952545 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/4d9dd1fe-6c22-4c55-8026-744f037fec8a-run-ovn\") pod \"ovnkube-node-pc5f5\" (UID: \"4d9dd1fe-6c22-4c55-8026-744f037fec8a\") " pod="openshift-ovn-kubernetes/ovnkube-node-pc5f5" Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.952597 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nrpnw\" (UniqueName: \"kubernetes.io/projected/4d9dd1fe-6c22-4c55-8026-744f037fec8a-kube-api-access-nrpnw\") pod \"ovnkube-node-pc5f5\" (UID: \"4d9dd1fe-6c22-4c55-8026-744f037fec8a\") " pod="openshift-ovn-kubernetes/ovnkube-node-pc5f5" Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.952616 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/4d9dd1fe-6c22-4c55-8026-744f037fec8a-host-slash\") pod \"ovnkube-node-pc5f5\" (UID: \"4d9dd1fe-6c22-4c55-8026-744f037fec8a\") " pod="openshift-ovn-kubernetes/ovnkube-node-pc5f5" Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.952636 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/4d9dd1fe-6c22-4c55-8026-744f037fec8a-host-run-ovn-kubernetes\") pod \"ovnkube-node-pc5f5\" (UID: \"4d9dd1fe-6c22-4c55-8026-744f037fec8a\") " pod="openshift-ovn-kubernetes/ovnkube-node-pc5f5" Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.952672 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/4d9dd1fe-6c22-4c55-8026-744f037fec8a-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-pc5f5\" (UID: \"4d9dd1fe-6c22-4c55-8026-744f037fec8a\") " pod="openshift-ovn-kubernetes/ovnkube-node-pc5f5" Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.952699 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/4d9dd1fe-6c22-4c55-8026-744f037fec8a-host-kubelet\") pod \"ovnkube-node-pc5f5\" (UID: \"4d9dd1fe-6c22-4c55-8026-744f037fec8a\") " pod="openshift-ovn-kubernetes/ovnkube-node-pc5f5" Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.952718 5050 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/4d9dd1fe-6c22-4c55-8026-744f037fec8a-etc-openvswitch\") pod \"ovnkube-node-pc5f5\" (UID: \"4d9dd1fe-6c22-4c55-8026-744f037fec8a\") " pod="openshift-ovn-kubernetes/ovnkube-node-pc5f5" Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.952733 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/4d9dd1fe-6c22-4c55-8026-744f037fec8a-host-run-netns\") pod \"ovnkube-node-pc5f5\" (UID: \"4d9dd1fe-6c22-4c55-8026-744f037fec8a\") " pod="openshift-ovn-kubernetes/ovnkube-node-pc5f5" Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.952750 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/4d9dd1fe-6c22-4c55-8026-744f037fec8a-host-cni-netd\") pod \"ovnkube-node-pc5f5\" (UID: \"4d9dd1fe-6c22-4c55-8026-744f037fec8a\") " pod="openshift-ovn-kubernetes/ovnkube-node-pc5f5" Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.952768 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/4d9dd1fe-6c22-4c55-8026-744f037fec8a-run-openvswitch\") pod \"ovnkube-node-pc5f5\" (UID: \"4d9dd1fe-6c22-4c55-8026-744f037fec8a\") " pod="openshift-ovn-kubernetes/ovnkube-node-pc5f5" Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.952787 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/4d9dd1fe-6c22-4c55-8026-744f037fec8a-ovn-node-metrics-cert\") pod \"ovnkube-node-pc5f5\" (UID: \"4d9dd1fe-6c22-4c55-8026-744f037fec8a\") " pod="openshift-ovn-kubernetes/ovnkube-node-pc5f5" Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.952807 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/4d9dd1fe-6c22-4c55-8026-744f037fec8a-var-lib-openvswitch\") pod \"ovnkube-node-pc5f5\" (UID: \"4d9dd1fe-6c22-4c55-8026-744f037fec8a\") " pod="openshift-ovn-kubernetes/ovnkube-node-pc5f5" Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.952824 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/4d9dd1fe-6c22-4c55-8026-744f037fec8a-node-log\") pod \"ovnkube-node-pc5f5\" (UID: \"4d9dd1fe-6c22-4c55-8026-744f037fec8a\") " pod="openshift-ovn-kubernetes/ovnkube-node-pc5f5" Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.952838 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/4d9dd1fe-6c22-4c55-8026-744f037fec8a-systemd-units\") pod \"ovnkube-node-pc5f5\" (UID: \"4d9dd1fe-6c22-4c55-8026-744f037fec8a\") " pod="openshift-ovn-kubernetes/ovnkube-node-pc5f5" Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.952855 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/4d9dd1fe-6c22-4c55-8026-744f037fec8a-host-cni-bin\") pod \"ovnkube-node-pc5f5\" (UID: \"4d9dd1fe-6c22-4c55-8026-744f037fec8a\") " pod="openshift-ovn-kubernetes/ovnkube-node-pc5f5" Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.952878 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: 
\"kubernetes.io/host-path/4d9dd1fe-6c22-4c55-8026-744f037fec8a-run-systemd\") pod \"ovnkube-node-pc5f5\" (UID: \"4d9dd1fe-6c22-4c55-8026-744f037fec8a\") " pod="openshift-ovn-kubernetes/ovnkube-node-pc5f5" Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.952908 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/4d9dd1fe-6c22-4c55-8026-744f037fec8a-ovnkube-script-lib\") pod \"ovnkube-node-pc5f5\" (UID: \"4d9dd1fe-6c22-4c55-8026-744f037fec8a\") " pod="openshift-ovn-kubernetes/ovnkube-node-pc5f5" Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.952924 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/4d9dd1fe-6c22-4c55-8026-744f037fec8a-ovnkube-config\") pod \"ovnkube-node-pc5f5\" (UID: \"4d9dd1fe-6c22-4c55-8026-744f037fec8a\") " pod="openshift-ovn-kubernetes/ovnkube-node-pc5f5" Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.952945 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/4d9dd1fe-6c22-4c55-8026-744f037fec8a-log-socket\") pod \"ovnkube-node-pc5f5\" (UID: \"4d9dd1fe-6c22-4c55-8026-744f037fec8a\") " pod="openshift-ovn-kubernetes/ovnkube-node-pc5f5" Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.952962 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/4d9dd1fe-6c22-4c55-8026-744f037fec8a-env-overrides\") pod \"ovnkube-node-pc5f5\" (UID: \"4d9dd1fe-6c22-4c55-8026-744f037fec8a\") " pod="openshift-ovn-kubernetes/ovnkube-node-pc5f5" Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.953585 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/4d9dd1fe-6c22-4c55-8026-744f037fec8a-env-overrides\") pod \"ovnkube-node-pc5f5\" (UID: \"4d9dd1fe-6c22-4c55-8026-744f037fec8a\") " pod="openshift-ovn-kubernetes/ovnkube-node-pc5f5" Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.953644 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/4d9dd1fe-6c22-4c55-8026-744f037fec8a-run-ovn\") pod \"ovnkube-node-pc5f5\" (UID: \"4d9dd1fe-6c22-4c55-8026-744f037fec8a\") " pod="openshift-ovn-kubernetes/ovnkube-node-pc5f5" Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.954256 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/4d9dd1fe-6c22-4c55-8026-744f037fec8a-var-lib-openvswitch\") pod \"ovnkube-node-pc5f5\" (UID: \"4d9dd1fe-6c22-4c55-8026-744f037fec8a\") " pod="openshift-ovn-kubernetes/ovnkube-node-pc5f5" Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.954298 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/4d9dd1fe-6c22-4c55-8026-744f037fec8a-node-log\") pod \"ovnkube-node-pc5f5\" (UID: \"4d9dd1fe-6c22-4c55-8026-744f037fec8a\") " pod="openshift-ovn-kubernetes/ovnkube-node-pc5f5" Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.954331 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/4d9dd1fe-6c22-4c55-8026-744f037fec8a-systemd-units\") pod \"ovnkube-node-pc5f5\" (UID: \"4d9dd1fe-6c22-4c55-8026-744f037fec8a\") " 
pod="openshift-ovn-kubernetes/ovnkube-node-pc5f5" Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.954362 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/4d9dd1fe-6c22-4c55-8026-744f037fec8a-host-cni-bin\") pod \"ovnkube-node-pc5f5\" (UID: \"4d9dd1fe-6c22-4c55-8026-744f037fec8a\") " pod="openshift-ovn-kubernetes/ovnkube-node-pc5f5" Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.954393 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/4d9dd1fe-6c22-4c55-8026-744f037fec8a-run-systemd\") pod \"ovnkube-node-pc5f5\" (UID: \"4d9dd1fe-6c22-4c55-8026-744f037fec8a\") " pod="openshift-ovn-kubernetes/ovnkube-node-pc5f5" Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.955376 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/4d9dd1fe-6c22-4c55-8026-744f037fec8a-ovnkube-script-lib\") pod \"ovnkube-node-pc5f5\" (UID: \"4d9dd1fe-6c22-4c55-8026-744f037fec8a\") " pod="openshift-ovn-kubernetes/ovnkube-node-pc5f5" Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.956806 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/4d9dd1fe-6c22-4c55-8026-744f037fec8a-ovnkube-config\") pod \"ovnkube-node-pc5f5\" (UID: \"4d9dd1fe-6c22-4c55-8026-744f037fec8a\") " pod="openshift-ovn-kubernetes/ovnkube-node-pc5f5" Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.956881 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/4d9dd1fe-6c22-4c55-8026-744f037fec8a-log-socket\") pod \"ovnkube-node-pc5f5\" (UID: \"4d9dd1fe-6c22-4c55-8026-744f037fec8a\") " pod="openshift-ovn-kubernetes/ovnkube-node-pc5f5" Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.956954 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/4d9dd1fe-6c22-4c55-8026-744f037fec8a-host-kubelet\") pod \"ovnkube-node-pc5f5\" (UID: \"4d9dd1fe-6c22-4c55-8026-744f037fec8a\") " pod="openshift-ovn-kubernetes/ovnkube-node-pc5f5" Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.956990 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/4d9dd1fe-6c22-4c55-8026-744f037fec8a-host-slash\") pod \"ovnkube-node-pc5f5\" (UID: \"4d9dd1fe-6c22-4c55-8026-744f037fec8a\") " pod="openshift-ovn-kubernetes/ovnkube-node-pc5f5" Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.957047 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/4d9dd1fe-6c22-4c55-8026-744f037fec8a-host-run-ovn-kubernetes\") pod \"ovnkube-node-pc5f5\" (UID: \"4d9dd1fe-6c22-4c55-8026-744f037fec8a\") " pod="openshift-ovn-kubernetes/ovnkube-node-pc5f5" Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.957126 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/4d9dd1fe-6c22-4c55-8026-744f037fec8a-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-pc5f5\" (UID: \"4d9dd1fe-6c22-4c55-8026-744f037fec8a\") " pod="openshift-ovn-kubernetes/ovnkube-node-pc5f5" Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.957161 5050 
Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.957182 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/4d9dd1fe-6c22-4c55-8026-744f037fec8a-etc-openvswitch\") pod \"ovnkube-node-pc5f5\" (UID: \"4d9dd1fe-6c22-4c55-8026-744f037fec8a\") " pod="openshift-ovn-kubernetes/ovnkube-node-pc5f5"
Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.957205 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/4d9dd1fe-6c22-4c55-8026-744f037fec8a-host-cni-netd\") pod \"ovnkube-node-pc5f5\" (UID: \"4d9dd1fe-6c22-4c55-8026-744f037fec8a\") " pod="openshift-ovn-kubernetes/ovnkube-node-pc5f5"
Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.957227 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/4d9dd1fe-6c22-4c55-8026-744f037fec8a-run-openvswitch\") pod \"ovnkube-node-pc5f5\" (UID: \"4d9dd1fe-6c22-4c55-8026-744f037fec8a\") " pod="openshift-ovn-kubernetes/ovnkube-node-pc5f5"
Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.959765 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/4d9dd1fe-6c22-4c55-8026-744f037fec8a-ovn-node-metrics-cert\") pod \"ovnkube-node-pc5f5\" (UID: \"4d9dd1fe-6c22-4c55-8026-744f037fec8a\") " pod="openshift-ovn-kubernetes/ovnkube-node-pc5f5"
Nov 23 14:52:24 crc kubenswrapper[5050]: I1123 14:52:24.979419 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nrpnw\" (UniqueName: \"kubernetes.io/projected/4d9dd1fe-6c22-4c55-8026-744f037fec8a-kube-api-access-nrpnw\") pod \"ovnkube-node-pc5f5\" (UID: \"4d9dd1fe-6c22-4c55-8026-744f037fec8a\") " pod="openshift-ovn-kubernetes/ovnkube-node-pc5f5"
Nov 23 14:52:25 crc kubenswrapper[5050]: I1123 14:52:25.096371 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-pc5f5"
Nov 23 14:52:25 crc kubenswrapper[5050]: I1123 14:52:25.278390 5050 generic.go:334] "Generic (PLEG): container finished" podID="4d9dd1fe-6c22-4c55-8026-744f037fec8a" containerID="4c1156252245522e4003fc0486fe16c1499170a8ccb959aeaccca713ebad4a63" exitCode=0
Nov 23 14:52:25 crc kubenswrapper[5050]: I1123 14:52:25.278424 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-pc5f5" event={"ID":"4d9dd1fe-6c22-4c55-8026-744f037fec8a","Type":"ContainerDied","Data":"4c1156252245522e4003fc0486fe16c1499170a8ccb959aeaccca713ebad4a63"}
Nov 23 14:52:25 crc kubenswrapper[5050]: I1123 14:52:25.278988 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-pc5f5" event={"ID":"4d9dd1fe-6c22-4c55-8026-744f037fec8a","Type":"ContainerStarted","Data":"d4e4b34d6e41ae1ce405adb00c77dc130ecfd808c960cb7c4d6f8dc6ceb7120c"}
Nov 23 14:52:25 crc kubenswrapper[5050]: I1123 14:52:25.283353 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-qvjn2_abdac21e-18fc-460d-bd3b-73bed66b8ab9/kube-multus/2.log"
Nov 23 14:52:25 crc kubenswrapper[5050]: I1123 14:52:25.293030 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-j8fzz_6573c043-542c-47ae-a2ba-f70b8baf60c2/ovn-acl-logging/0.log"
Nov 23 14:52:25 crc kubenswrapper[5050]: I1123 14:52:25.294995 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-j8fzz_6573c043-542c-47ae-a2ba-f70b8baf60c2/ovn-controller/0.log"
Nov 23 14:52:25 crc kubenswrapper[5050]: I1123 14:52:25.295830 5050 generic.go:334] "Generic (PLEG): container finished" podID="6573c043-542c-47ae-a2ba-f70b8baf60c2" containerID="cfa445fb3677a09126d124eea1348466a338713c6728a7ef138d9ffa43bec9eb" exitCode=0
Nov 23 14:52:25 crc kubenswrapper[5050]: I1123 14:52:25.295901 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" event={"ID":"6573c043-542c-47ae-a2ba-f70b8baf60c2","Type":"ContainerDied","Data":"cfa445fb3677a09126d124eea1348466a338713c6728a7ef138d9ffa43bec9eb"}
Nov 23 14:52:25 crc kubenswrapper[5050]: I1123 14:52:25.295983 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz" event={"ID":"6573c043-542c-47ae-a2ba-f70b8baf60c2","Type":"ContainerDied","Data":"9a3221887c76c30560f37a45f62e8988b6432c08dc2eedb0b050b5a6cbff0244"}
Nov 23 14:52:25 crc kubenswrapper[5050]: I1123 14:52:25.296011 5050 scope.go:117] "RemoveContainer" containerID="64b99478c6a7e63e8f900194b4f35e1d77c9836c192f4a52a281dd036504cd5f"
Nov 23 14:52:25 crc kubenswrapper[5050]: I1123 14:52:25.296036 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-j8fzz"
Nov 23 14:52:25 crc kubenswrapper[5050]: I1123 14:52:25.326890 5050 scope.go:117] "RemoveContainer" containerID="7009f69906eef1a76bebe08606ec9a9b7e61ce6217caaf0b784d0943db4bd51f"
Nov 23 14:52:25 crc kubenswrapper[5050]: I1123 14:52:25.360588 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-j8fzz"]
Nov 23 14:52:25 crc kubenswrapper[5050]: I1123 14:52:25.370437 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-j8fzz"]
Nov 23 14:52:25 crc kubenswrapper[5050]: I1123 14:52:25.380946 5050 scope.go:117] "RemoveContainer" containerID="d9de298aa47ed7038c4f4ed742abb7a13f112d7f417637eb11d31a1f1accf0b1"
Nov 23 14:52:25 crc kubenswrapper[5050]: I1123 14:52:25.401459 5050 scope.go:117] "RemoveContainer" containerID="cfa445fb3677a09126d124eea1348466a338713c6728a7ef138d9ffa43bec9eb"
Nov 23 14:52:25 crc kubenswrapper[5050]: I1123 14:52:25.417523 5050 scope.go:117] "RemoveContainer" containerID="bd601391a5bb150716ee97b765dd442d4bf9929eb01dfd17fdce39f327b4698f"
Nov 23 14:52:25 crc kubenswrapper[5050]: I1123 14:52:25.433104 5050 scope.go:117] "RemoveContainer" containerID="2d06dfc9e93891495a387aa8a8e24b4029173b25c953dff4d2e80dbb30f4b03e"
Nov 23 14:52:25 crc kubenswrapper[5050]: I1123 14:52:25.446178 5050 scope.go:117] "RemoveContainer" containerID="af3da6e6807f5d42cb8607efbabab0c5d472ddb5c02dd1c33f9f0f3ea8eba9c6"
Nov 23 14:52:25 crc kubenswrapper[5050]: I1123 14:52:25.460706 5050 scope.go:117] "RemoveContainer" containerID="34b2a56f4af5df28c458b7f7d4588f81564c734a117bbf4d08e1fccfa43fd387"
Nov 23 14:52:25 crc kubenswrapper[5050]: I1123 14:52:25.475664 5050 scope.go:117] "RemoveContainer" containerID="6c68e73aa649285bedc9716826bdf5e41580614fba6de910e2dc1fa5bb524dcc"
Nov 23 14:52:25 crc kubenswrapper[5050]: I1123 14:52:25.504334 5050 scope.go:117] "RemoveContainer" containerID="64b99478c6a7e63e8f900194b4f35e1d77c9836c192f4a52a281dd036504cd5f"
Nov 23 14:52:25 crc kubenswrapper[5050]: E1123 14:52:25.505025 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"64b99478c6a7e63e8f900194b4f35e1d77c9836c192f4a52a281dd036504cd5f\": container with ID starting with 64b99478c6a7e63e8f900194b4f35e1d77c9836c192f4a52a281dd036504cd5f not found: ID does not exist" containerID="64b99478c6a7e63e8f900194b4f35e1d77c9836c192f4a52a281dd036504cd5f"
Nov 23 14:52:25 crc kubenswrapper[5050]: I1123 14:52:25.505089 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"64b99478c6a7e63e8f900194b4f35e1d77c9836c192f4a52a281dd036504cd5f"} err="failed to get container status \"64b99478c6a7e63e8f900194b4f35e1d77c9836c192f4a52a281dd036504cd5f\": rpc error: code = NotFound desc = could not find container \"64b99478c6a7e63e8f900194b4f35e1d77c9836c192f4a52a281dd036504cd5f\": container with ID starting with 64b99478c6a7e63e8f900194b4f35e1d77c9836c192f4a52a281dd036504cd5f not found: ID does not exist"
Nov 23 14:52:25 crc kubenswrapper[5050]: I1123 14:52:25.505129 5050 scope.go:117] "RemoveContainer" containerID="7009f69906eef1a76bebe08606ec9a9b7e61ce6217caaf0b784d0943db4bd51f"
Nov 23 14:52:25 crc kubenswrapper[5050]: E1123 14:52:25.505685 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7009f69906eef1a76bebe08606ec9a9b7e61ce6217caaf0b784d0943db4bd51f\": container with ID starting with 7009f69906eef1a76bebe08606ec9a9b7e61ce6217caaf0b784d0943db4bd51f not found: ID does not exist" containerID="7009f69906eef1a76bebe08606ec9a9b7e61ce6217caaf0b784d0943db4bd51f"
Nov 23 14:52:25 crc kubenswrapper[5050]: I1123 14:52:25.505717 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7009f69906eef1a76bebe08606ec9a9b7e61ce6217caaf0b784d0943db4bd51f"} err="failed to get container status \"7009f69906eef1a76bebe08606ec9a9b7e61ce6217caaf0b784d0943db4bd51f\": rpc error: code = NotFound desc = could not find container \"7009f69906eef1a76bebe08606ec9a9b7e61ce6217caaf0b784d0943db4bd51f\": container with ID starting with 7009f69906eef1a76bebe08606ec9a9b7e61ce6217caaf0b784d0943db4bd51f not found: ID does not exist"
Nov 23 14:52:25 crc kubenswrapper[5050]: I1123 14:52:25.505733 5050 scope.go:117] "RemoveContainer" containerID="d9de298aa47ed7038c4f4ed742abb7a13f112d7f417637eb11d31a1f1accf0b1"
Nov 23 14:52:25 crc kubenswrapper[5050]: E1123 14:52:25.506461 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d9de298aa47ed7038c4f4ed742abb7a13f112d7f417637eb11d31a1f1accf0b1\": container with ID starting with d9de298aa47ed7038c4f4ed742abb7a13f112d7f417637eb11d31a1f1accf0b1 not found: ID does not exist" containerID="d9de298aa47ed7038c4f4ed742abb7a13f112d7f417637eb11d31a1f1accf0b1"
Nov 23 14:52:25 crc kubenswrapper[5050]: I1123 14:52:25.506545 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d9de298aa47ed7038c4f4ed742abb7a13f112d7f417637eb11d31a1f1accf0b1"} err="failed to get container status \"d9de298aa47ed7038c4f4ed742abb7a13f112d7f417637eb11d31a1f1accf0b1\": rpc error: code = NotFound desc = could not find container \"d9de298aa47ed7038c4f4ed742abb7a13f112d7f417637eb11d31a1f1accf0b1\": container with ID starting with d9de298aa47ed7038c4f4ed742abb7a13f112d7f417637eb11d31a1f1accf0b1 not found: ID does not exist"
Nov 23 14:52:25 crc kubenswrapper[5050]: I1123 14:52:25.506599 5050 scope.go:117] "RemoveContainer" containerID="cfa445fb3677a09126d124eea1348466a338713c6728a7ef138d9ffa43bec9eb"
Nov 23 14:52:25 crc kubenswrapper[5050]: E1123 14:52:25.507103 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cfa445fb3677a09126d124eea1348466a338713c6728a7ef138d9ffa43bec9eb\": container with ID starting with cfa445fb3677a09126d124eea1348466a338713c6728a7ef138d9ffa43bec9eb not found: ID does not exist" containerID="cfa445fb3677a09126d124eea1348466a338713c6728a7ef138d9ffa43bec9eb"
Nov 23 14:52:25 crc kubenswrapper[5050]: I1123 14:52:25.507145 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cfa445fb3677a09126d124eea1348466a338713c6728a7ef138d9ffa43bec9eb"} err="failed to get container status \"cfa445fb3677a09126d124eea1348466a338713c6728a7ef138d9ffa43bec9eb\": rpc error: code = NotFound desc = could not find container \"cfa445fb3677a09126d124eea1348466a338713c6728a7ef138d9ffa43bec9eb\": container with ID starting with cfa445fb3677a09126d124eea1348466a338713c6728a7ef138d9ffa43bec9eb not found: ID does not exist"
Nov 23 14:52:25 crc kubenswrapper[5050]: I1123 14:52:25.507191 5050 scope.go:117] "RemoveContainer" containerID="bd601391a5bb150716ee97b765dd442d4bf9929eb01dfd17fdce39f327b4698f"
Nov 23 14:52:25 crc kubenswrapper[5050]: E1123 14:52:25.508198 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bd601391a5bb150716ee97b765dd442d4bf9929eb01dfd17fdce39f327b4698f\": container with ID starting with bd601391a5bb150716ee97b765dd442d4bf9929eb01dfd17fdce39f327b4698f not found: ID does not exist" containerID="bd601391a5bb150716ee97b765dd442d4bf9929eb01dfd17fdce39f327b4698f"
Nov 23 14:52:25 crc kubenswrapper[5050]: I1123 14:52:25.508300 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bd601391a5bb150716ee97b765dd442d4bf9929eb01dfd17fdce39f327b4698f"} err="failed to get container status \"bd601391a5bb150716ee97b765dd442d4bf9929eb01dfd17fdce39f327b4698f\": rpc error: code = NotFound desc = could not find container \"bd601391a5bb150716ee97b765dd442d4bf9929eb01dfd17fdce39f327b4698f\": container with ID starting with bd601391a5bb150716ee97b765dd442d4bf9929eb01dfd17fdce39f327b4698f not found: ID does not exist"
Nov 23 14:52:25 crc kubenswrapper[5050]: I1123 14:52:25.508332 5050 scope.go:117] "RemoveContainer" containerID="2d06dfc9e93891495a387aa8a8e24b4029173b25c953dff4d2e80dbb30f4b03e"
Nov 23 14:52:25 crc kubenswrapper[5050]: E1123 14:52:25.509100 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2d06dfc9e93891495a387aa8a8e24b4029173b25c953dff4d2e80dbb30f4b03e\": container with ID starting with 2d06dfc9e93891495a387aa8a8e24b4029173b25c953dff4d2e80dbb30f4b03e not found: ID does not exist" containerID="2d06dfc9e93891495a387aa8a8e24b4029173b25c953dff4d2e80dbb30f4b03e"
Nov 23 14:52:25 crc kubenswrapper[5050]: I1123 14:52:25.509145 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2d06dfc9e93891495a387aa8a8e24b4029173b25c953dff4d2e80dbb30f4b03e"} err="failed to get container status \"2d06dfc9e93891495a387aa8a8e24b4029173b25c953dff4d2e80dbb30f4b03e\": rpc error: code = NotFound desc = could not find container \"2d06dfc9e93891495a387aa8a8e24b4029173b25c953dff4d2e80dbb30f4b03e\": container with ID starting with 2d06dfc9e93891495a387aa8a8e24b4029173b25c953dff4d2e80dbb30f4b03e not found: ID does not exist"
Nov 23 14:52:25 crc kubenswrapper[5050]: I1123 14:52:25.509176 5050 scope.go:117] "RemoveContainer" containerID="af3da6e6807f5d42cb8607efbabab0c5d472ddb5c02dd1c33f9f0f3ea8eba9c6"
Nov 23 14:52:25 crc kubenswrapper[5050]: E1123 14:52:25.509539 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"af3da6e6807f5d42cb8607efbabab0c5d472ddb5c02dd1c33f9f0f3ea8eba9c6\": container with ID starting with af3da6e6807f5d42cb8607efbabab0c5d472ddb5c02dd1c33f9f0f3ea8eba9c6 not found: ID does not exist" containerID="af3da6e6807f5d42cb8607efbabab0c5d472ddb5c02dd1c33f9f0f3ea8eba9c6"
Nov 23 14:52:25 crc kubenswrapper[5050]: I1123 14:52:25.509594 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"af3da6e6807f5d42cb8607efbabab0c5d472ddb5c02dd1c33f9f0f3ea8eba9c6"} err="failed to get container status \"af3da6e6807f5d42cb8607efbabab0c5d472ddb5c02dd1c33f9f0f3ea8eba9c6\": rpc error: code = NotFound desc = could not find container \"af3da6e6807f5d42cb8607efbabab0c5d472ddb5c02dd1c33f9f0f3ea8eba9c6\": container with ID starting with af3da6e6807f5d42cb8607efbabab0c5d472ddb5c02dd1c33f9f0f3ea8eba9c6 not found: ID does not exist"
Nov 23 14:52:25 crc kubenswrapper[5050]: I1123 14:52:25.509623 5050 scope.go:117] "RemoveContainer" containerID="34b2a56f4af5df28c458b7f7d4588f81564c734a117bbf4d08e1fccfa43fd387"
Nov 23 14:52:25 crc kubenswrapper[5050]: E1123 14:52:25.510119 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"34b2a56f4af5df28c458b7f7d4588f81564c734a117bbf4d08e1fccfa43fd387\": container with ID starting with 34b2a56f4af5df28c458b7f7d4588f81564c734a117bbf4d08e1fccfa43fd387 not found: ID does not exist" containerID="34b2a56f4af5df28c458b7f7d4588f81564c734a117bbf4d08e1fccfa43fd387"
Nov 23 14:52:25 crc kubenswrapper[5050]: I1123 14:52:25.510152 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"34b2a56f4af5df28c458b7f7d4588f81564c734a117bbf4d08e1fccfa43fd387"} err="failed to get container status \"34b2a56f4af5df28c458b7f7d4588f81564c734a117bbf4d08e1fccfa43fd387\": rpc error: code = NotFound desc = could not find container \"34b2a56f4af5df28c458b7f7d4588f81564c734a117bbf4d08e1fccfa43fd387\": container with ID starting with 34b2a56f4af5df28c458b7f7d4588f81564c734a117bbf4d08e1fccfa43fd387 not found: ID does not exist"
Nov 23 14:52:25 crc kubenswrapper[5050]: I1123 14:52:25.510172 5050 scope.go:117] "RemoveContainer" containerID="6c68e73aa649285bedc9716826bdf5e41580614fba6de910e2dc1fa5bb524dcc"
Nov 23 14:52:25 crc kubenswrapper[5050]: E1123 14:52:25.510529 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6c68e73aa649285bedc9716826bdf5e41580614fba6de910e2dc1fa5bb524dcc\": container with ID starting with 6c68e73aa649285bedc9716826bdf5e41580614fba6de910e2dc1fa5bb524dcc not found: ID does not exist" containerID="6c68e73aa649285bedc9716826bdf5e41580614fba6de910e2dc1fa5bb524dcc"
Nov 23 14:52:25 crc kubenswrapper[5050]: I1123 14:52:25.510581 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6c68e73aa649285bedc9716826bdf5e41580614fba6de910e2dc1fa5bb524dcc"} err="failed to get container status \"6c68e73aa649285bedc9716826bdf5e41580614fba6de910e2dc1fa5bb524dcc\": rpc error: code = NotFound desc = could not find container \"6c68e73aa649285bedc9716826bdf5e41580614fba6de910e2dc1fa5bb524dcc\": container with ID starting with 6c68e73aa649285bedc9716826bdf5e41580614fba6de910e2dc1fa5bb524dcc not found: ID does not exist"
Nov 23 14:52:25 crc kubenswrapper[5050]: I1123 14:52:25.560005 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6573c043-542c-47ae-a2ba-f70b8baf60c2" path="/var/lib/kubelet/pods/6573c043-542c-47ae-a2ba-f70b8baf60c2/volumes"
Nov 23 14:52:26 crc kubenswrapper[5050]: I1123 14:52:26.312515 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-pc5f5" event={"ID":"4d9dd1fe-6c22-4c55-8026-744f037fec8a","Type":"ContainerStarted","Data":"711bde3de2c0c62e6d0699b30b55efbd93a78bc4feb12c49974e9910a26aa763"}
Nov 23 14:52:26 crc kubenswrapper[5050]: I1123 14:52:26.312990 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-pc5f5" event={"ID":"4d9dd1fe-6c22-4c55-8026-744f037fec8a","Type":"ContainerStarted","Data":"89711aa6cce39798dfe14b12e9663698f6e7986cf61f28dde6d5db50ee3a2054"}
Nov 23 14:52:26 crc kubenswrapper[5050]: I1123 14:52:26.313009 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-pc5f5" event={"ID":"4d9dd1fe-6c22-4c55-8026-744f037fec8a","Type":"ContainerStarted","Data":"c9a410d643eea6485091091a0782cb8ad1889eddcfd4b2dfa3e38fa82e7efbb1"}
Nov 23 14:52:26 crc kubenswrapper[5050]: I1123 14:52:26.313021 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-pc5f5" event={"ID":"4d9dd1fe-6c22-4c55-8026-744f037fec8a","Type":"ContainerStarted","Data":"54c987b71f0aaad56f86d9e043319452127a5a54bdae332fa2b96c1a152e2153"}
Nov 23 14:52:26 crc kubenswrapper[5050]: I1123 14:52:26.313033 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-pc5f5" event={"ID":"4d9dd1fe-6c22-4c55-8026-744f037fec8a","Type":"ContainerStarted","Data":"4ee66785799eb87d06a15e35c30303a382f9af55ef8067913ca6fdcacf949e9a"}
Nov 23 14:52:26 crc kubenswrapper[5050]: I1123 14:52:26.313044 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-pc5f5" event={"ID":"4d9dd1fe-6c22-4c55-8026-744f037fec8a","Type":"ContainerStarted","Data":"06f011030c68149f66b85e6eab3fc333abf90a818999248cbfb0c4a45965c524"}
Nov 23 14:52:29 crc kubenswrapper[5050]: I1123 14:52:29.342939 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-pc5f5" event={"ID":"4d9dd1fe-6c22-4c55-8026-744f037fec8a","Type":"ContainerStarted","Data":"a5e0067ef76cd78d0dc756a26dfe99f289544f8fa32f7e5adbc491d40fc4cefc"}
Nov 23 14:52:31 crc kubenswrapper[5050]: I1123 14:52:31.360845 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-pc5f5" event={"ID":"4d9dd1fe-6c22-4c55-8026-744f037fec8a","Type":"ContainerStarted","Data":"12f0755e90e15f205c460b52e4520f120913899f858faeaf60e28a7ffaadfdf2"}
Nov 23 14:52:31 crc kubenswrapper[5050]: I1123 14:52:31.361630 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-pc5f5"
Nov 23 14:52:31 crc kubenswrapper[5050]: I1123 14:52:31.361729 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-pc5f5"
Nov 23 14:52:31 crc kubenswrapper[5050]: I1123 14:52:31.361809 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-pc5f5"
Nov 23 14:52:31 crc kubenswrapper[5050]: I1123 14:52:31.401126 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-pc5f5"
Nov 23 14:52:31 crc kubenswrapper[5050]: I1123 14:52:31.404164 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-pc5f5" podStartSLOduration=7.404136867 podStartE2EDuration="7.404136867s" podCreationTimestamp="2025-11-23 14:52:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 14:52:31.400707359 +0000 UTC m=+646.567703884" watchObservedRunningTime="2025-11-23 14:52:31.404136867 +0000 UTC m=+646.571133382"
Nov 23 14:52:31 crc kubenswrapper[5050]: I1123 14:52:31.410766 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-pc5f5"
Nov 23 14:52:32 crc kubenswrapper[5050]: I1123 14:52:32.792623 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["crc-storage/crc-storage-crc-cnd8p"]
Nov 23 14:52:32 crc kubenswrapper[5050]: I1123 14:52:32.794617 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-cnd8p"
Need to start a new one" pod="crc-storage/crc-storage-crc-cnd8p" Nov 23 14:52:32 crc kubenswrapper[5050]: I1123 14:52:32.797521 5050 reflector.go:368] Caches populated for *v1.Secret from object-"crc-storage"/"crc-storage-dockercfg-c7dg6" Nov 23 14:52:32 crc kubenswrapper[5050]: I1123 14:52:32.797811 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"crc-storage" Nov 23 14:52:32 crc kubenswrapper[5050]: I1123 14:52:32.798849 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"openshift-service-ca.crt" Nov 23 14:52:32 crc kubenswrapper[5050]: I1123 14:52:32.799072 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"kube-root-ca.crt" Nov 23 14:52:32 crc kubenswrapper[5050]: I1123 14:52:32.801768 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["crc-storage/crc-storage-crc-cnd8p"] Nov 23 14:52:32 crc kubenswrapper[5050]: I1123 14:52:32.879616 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/a413b4e9-0e2b-4026-a585-cc7931dc856d-crc-storage\") pod \"crc-storage-crc-cnd8p\" (UID: \"a413b4e9-0e2b-4026-a585-cc7931dc856d\") " pod="crc-storage/crc-storage-crc-cnd8p" Nov 23 14:52:32 crc kubenswrapper[5050]: I1123 14:52:32.879727 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/a413b4e9-0e2b-4026-a585-cc7931dc856d-node-mnt\") pod \"crc-storage-crc-cnd8p\" (UID: \"a413b4e9-0e2b-4026-a585-cc7931dc856d\") " pod="crc-storage/crc-storage-crc-cnd8p" Nov 23 14:52:32 crc kubenswrapper[5050]: I1123 14:52:32.879800 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cgjp8\" (UniqueName: \"kubernetes.io/projected/a413b4e9-0e2b-4026-a585-cc7931dc856d-kube-api-access-cgjp8\") pod \"crc-storage-crc-cnd8p\" (UID: \"a413b4e9-0e2b-4026-a585-cc7931dc856d\") " pod="crc-storage/crc-storage-crc-cnd8p" Nov 23 14:52:32 crc kubenswrapper[5050]: I1123 14:52:32.980910 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/a413b4e9-0e2b-4026-a585-cc7931dc856d-crc-storage\") pod \"crc-storage-crc-cnd8p\" (UID: \"a413b4e9-0e2b-4026-a585-cc7931dc856d\") " pod="crc-storage/crc-storage-crc-cnd8p" Nov 23 14:52:32 crc kubenswrapper[5050]: I1123 14:52:32.980987 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/a413b4e9-0e2b-4026-a585-cc7931dc856d-node-mnt\") pod \"crc-storage-crc-cnd8p\" (UID: \"a413b4e9-0e2b-4026-a585-cc7931dc856d\") " pod="crc-storage/crc-storage-crc-cnd8p" Nov 23 14:52:32 crc kubenswrapper[5050]: I1123 14:52:32.981039 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cgjp8\" (UniqueName: \"kubernetes.io/projected/a413b4e9-0e2b-4026-a585-cc7931dc856d-kube-api-access-cgjp8\") pod \"crc-storage-crc-cnd8p\" (UID: \"a413b4e9-0e2b-4026-a585-cc7931dc856d\") " pod="crc-storage/crc-storage-crc-cnd8p" Nov 23 14:52:32 crc kubenswrapper[5050]: I1123 14:52:32.981558 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/a413b4e9-0e2b-4026-a585-cc7931dc856d-node-mnt\") pod \"crc-storage-crc-cnd8p\" (UID: \"a413b4e9-0e2b-4026-a585-cc7931dc856d\") " 
pod="crc-storage/crc-storage-crc-cnd8p" Nov 23 14:52:32 crc kubenswrapper[5050]: I1123 14:52:32.982603 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/a413b4e9-0e2b-4026-a585-cc7931dc856d-crc-storage\") pod \"crc-storage-crc-cnd8p\" (UID: \"a413b4e9-0e2b-4026-a585-cc7931dc856d\") " pod="crc-storage/crc-storage-crc-cnd8p" Nov 23 14:52:33 crc kubenswrapper[5050]: I1123 14:52:33.009542 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cgjp8\" (UniqueName: \"kubernetes.io/projected/a413b4e9-0e2b-4026-a585-cc7931dc856d-kube-api-access-cgjp8\") pod \"crc-storage-crc-cnd8p\" (UID: \"a413b4e9-0e2b-4026-a585-cc7931dc856d\") " pod="crc-storage/crc-storage-crc-cnd8p" Nov 23 14:52:33 crc kubenswrapper[5050]: I1123 14:52:33.110592 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-cnd8p" Nov 23 14:52:33 crc kubenswrapper[5050]: E1123 14:52:33.149809 5050 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-cnd8p_crc-storage_a413b4e9-0e2b-4026-a585-cc7931dc856d_0(056f853d2765391ba41cf46f1fb7a89c4c8cce997ffcb9090dd61c3b5ae8791e): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 23 14:52:33 crc kubenswrapper[5050]: E1123 14:52:33.149980 5050 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-cnd8p_crc-storage_a413b4e9-0e2b-4026-a585-cc7931dc856d_0(056f853d2765391ba41cf46f1fb7a89c4c8cce997ffcb9090dd61c3b5ae8791e): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="crc-storage/crc-storage-crc-cnd8p" Nov 23 14:52:33 crc kubenswrapper[5050]: E1123 14:52:33.150013 5050 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-cnd8p_crc-storage_a413b4e9-0e2b-4026-a585-cc7931dc856d_0(056f853d2765391ba41cf46f1fb7a89c4c8cce997ffcb9090dd61c3b5ae8791e): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="crc-storage/crc-storage-crc-cnd8p" Nov 23 14:52:33 crc kubenswrapper[5050]: E1123 14:52:33.150073 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"crc-storage-crc-cnd8p_crc-storage(a413b4e9-0e2b-4026-a585-cc7931dc856d)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"crc-storage-crc-cnd8p_crc-storage(a413b4e9-0e2b-4026-a585-cc7931dc856d)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-cnd8p_crc-storage_a413b4e9-0e2b-4026-a585-cc7931dc856d_0(056f853d2765391ba41cf46f1fb7a89c4c8cce997ffcb9090dd61c3b5ae8791e): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="crc-storage/crc-storage-crc-cnd8p" podUID="a413b4e9-0e2b-4026-a585-cc7931dc856d" Nov 23 14:52:33 crc kubenswrapper[5050]: I1123 14:52:33.373436 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-cnd8p" Nov 23 14:52:33 crc kubenswrapper[5050]: I1123 14:52:33.374121 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="crc-storage/crc-storage-crc-cnd8p" Nov 23 14:52:33 crc kubenswrapper[5050]: E1123 14:52:33.405940 5050 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-cnd8p_crc-storage_a413b4e9-0e2b-4026-a585-cc7931dc856d_0(bf9b4b8060bbbb0cf171d19a228da920ee5db0f68958826a46f7714c1a66b1e2): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 23 14:52:33 crc kubenswrapper[5050]: E1123 14:52:33.406031 5050 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-cnd8p_crc-storage_a413b4e9-0e2b-4026-a585-cc7931dc856d_0(bf9b4b8060bbbb0cf171d19a228da920ee5db0f68958826a46f7714c1a66b1e2): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="crc-storage/crc-storage-crc-cnd8p" Nov 23 14:52:33 crc kubenswrapper[5050]: E1123 14:52:33.406062 5050 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-cnd8p_crc-storage_a413b4e9-0e2b-4026-a585-cc7931dc856d_0(bf9b4b8060bbbb0cf171d19a228da920ee5db0f68958826a46f7714c1a66b1e2): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="crc-storage/crc-storage-crc-cnd8p" Nov 23 14:52:33 crc kubenswrapper[5050]: E1123 14:52:33.406141 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"crc-storage-crc-cnd8p_crc-storage(a413b4e9-0e2b-4026-a585-cc7931dc856d)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"crc-storage-crc-cnd8p_crc-storage(a413b4e9-0e2b-4026-a585-cc7931dc856d)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-cnd8p_crc-storage_a413b4e9-0e2b-4026-a585-cc7931dc856d_0(bf9b4b8060bbbb0cf171d19a228da920ee5db0f68958826a46f7714c1a66b1e2): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="crc-storage/crc-storage-crc-cnd8p" podUID="a413b4e9-0e2b-4026-a585-cc7931dc856d" Nov 23 14:52:37 crc kubenswrapper[5050]: I1123 14:52:37.549689 5050 scope.go:117] "RemoveContainer" containerID="c623d3e60ee42251333c8132c3a910eddaf9de12b5abc799178b24d7174e6e5d" Nov 23 14:52:37 crc kubenswrapper[5050]: E1123 14:52:37.550744 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-multus pod=multus-qvjn2_openshift-multus(abdac21e-18fc-460d-bd3b-73bed66b8ab9)\"" pod="openshift-multus/multus-qvjn2" podUID="abdac21e-18fc-460d-bd3b-73bed66b8ab9" Nov 23 14:52:45 crc kubenswrapper[5050]: I1123 14:52:45.552573 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-cnd8p" Nov 23 14:52:45 crc kubenswrapper[5050]: I1123 14:52:45.554025 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="crc-storage/crc-storage-crc-cnd8p" Nov 23 14:52:45 crc kubenswrapper[5050]: E1123 14:52:45.594747 5050 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-cnd8p_crc-storage_a413b4e9-0e2b-4026-a585-cc7931dc856d_0(f32470bf20e198a814b5f2d3e0427b359a520daeeda59d5f42a9ebe99483e8a4): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 23 14:52:45 crc kubenswrapper[5050]: E1123 14:52:45.594853 5050 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-cnd8p_crc-storage_a413b4e9-0e2b-4026-a585-cc7931dc856d_0(f32470bf20e198a814b5f2d3e0427b359a520daeeda59d5f42a9ebe99483e8a4): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="crc-storage/crc-storage-crc-cnd8p" Nov 23 14:52:45 crc kubenswrapper[5050]: E1123 14:52:45.594892 5050 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-cnd8p_crc-storage_a413b4e9-0e2b-4026-a585-cc7931dc856d_0(f32470bf20e198a814b5f2d3e0427b359a520daeeda59d5f42a9ebe99483e8a4): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="crc-storage/crc-storage-crc-cnd8p" Nov 23 14:52:45 crc kubenswrapper[5050]: E1123 14:52:45.594965 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"crc-storage-crc-cnd8p_crc-storage(a413b4e9-0e2b-4026-a585-cc7931dc856d)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"crc-storage-crc-cnd8p_crc-storage(a413b4e9-0e2b-4026-a585-cc7931dc856d)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-cnd8p_crc-storage_a413b4e9-0e2b-4026-a585-cc7931dc856d_0(f32470bf20e198a814b5f2d3e0427b359a520daeeda59d5f42a9ebe99483e8a4): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="crc-storage/crc-storage-crc-cnd8p" podUID="a413b4e9-0e2b-4026-a585-cc7931dc856d" Nov 23 14:52:49 crc kubenswrapper[5050]: I1123 14:52:49.548782 5050 scope.go:117] "RemoveContainer" containerID="c623d3e60ee42251333c8132c3a910eddaf9de12b5abc799178b24d7174e6e5d" Nov 23 14:52:50 crc kubenswrapper[5050]: I1123 14:52:50.507805 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-qvjn2_abdac21e-18fc-460d-bd3b-73bed66b8ab9/kube-multus/2.log" Nov 23 14:52:50 crc kubenswrapper[5050]: I1123 14:52:50.508210 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-qvjn2" event={"ID":"abdac21e-18fc-460d-bd3b-73bed66b8ab9","Type":"ContainerStarted","Data":"b115940c6446d4d8d3338bb10e1b5f0f360e7f74d42b86e8ec9e4c75548e83a5"} Nov 23 14:52:55 crc kubenswrapper[5050]: I1123 14:52:55.124358 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-pc5f5" Nov 23 14:53:01 crc kubenswrapper[5050]: I1123 14:53:01.547912 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-cnd8p" Nov 23 14:53:01 crc kubenswrapper[5050]: I1123 14:53:01.548581 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="crc-storage/crc-storage-crc-cnd8p" Nov 23 14:53:01 crc kubenswrapper[5050]: I1123 14:53:01.758174 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["crc-storage/crc-storage-crc-cnd8p"] Nov 23 14:53:01 crc kubenswrapper[5050]: I1123 14:53:01.768078 5050 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 23 14:53:02 crc kubenswrapper[5050]: I1123 14:53:02.587813 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-cnd8p" event={"ID":"a413b4e9-0e2b-4026-a585-cc7931dc856d","Type":"ContainerStarted","Data":"91a02d9f04593332d4e144ec608ff312a39ce0df613a175e54016f7368f22ab3"} Nov 23 14:53:03 crc kubenswrapper[5050]: I1123 14:53:03.595961 5050 generic.go:334] "Generic (PLEG): container finished" podID="a413b4e9-0e2b-4026-a585-cc7931dc856d" containerID="7d37866eed4c545e2928239320928a33397fe13f24921be0726982094684866e" exitCode=0 Nov 23 14:53:03 crc kubenswrapper[5050]: I1123 14:53:03.596065 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-cnd8p" event={"ID":"a413b4e9-0e2b-4026-a585-cc7931dc856d","Type":"ContainerDied","Data":"7d37866eed4c545e2928239320928a33397fe13f24921be0726982094684866e"} Nov 23 14:53:04 crc kubenswrapper[5050]: I1123 14:53:04.967989 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-cnd8p" Nov 23 14:53:05 crc kubenswrapper[5050]: I1123 14:53:05.094184 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/a413b4e9-0e2b-4026-a585-cc7931dc856d-node-mnt\") pod \"a413b4e9-0e2b-4026-a585-cc7931dc856d\" (UID: \"a413b4e9-0e2b-4026-a585-cc7931dc856d\") " Nov 23 14:53:05 crc kubenswrapper[5050]: I1123 14:53:05.094339 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cgjp8\" (UniqueName: \"kubernetes.io/projected/a413b4e9-0e2b-4026-a585-cc7931dc856d-kube-api-access-cgjp8\") pod \"a413b4e9-0e2b-4026-a585-cc7931dc856d\" (UID: \"a413b4e9-0e2b-4026-a585-cc7931dc856d\") " Nov 23 14:53:05 crc kubenswrapper[5050]: I1123 14:53:05.094392 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a413b4e9-0e2b-4026-a585-cc7931dc856d-node-mnt" (OuterVolumeSpecName: "node-mnt") pod "a413b4e9-0e2b-4026-a585-cc7931dc856d" (UID: "a413b4e9-0e2b-4026-a585-cc7931dc856d"). InnerVolumeSpecName "node-mnt". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 23 14:53:05 crc kubenswrapper[5050]: I1123 14:53:05.094541 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/a413b4e9-0e2b-4026-a585-cc7931dc856d-crc-storage\") pod \"a413b4e9-0e2b-4026-a585-cc7931dc856d\" (UID: \"a413b4e9-0e2b-4026-a585-cc7931dc856d\") " Nov 23 14:53:05 crc kubenswrapper[5050]: I1123 14:53:05.095405 5050 reconciler_common.go:293] "Volume detached for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/a413b4e9-0e2b-4026-a585-cc7931dc856d-node-mnt\") on node \"crc\" DevicePath \"\"" Nov 23 14:53:05 crc kubenswrapper[5050]: I1123 14:53:05.103850 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a413b4e9-0e2b-4026-a585-cc7931dc856d-kube-api-access-cgjp8" (OuterVolumeSpecName: "kube-api-access-cgjp8") pod "a413b4e9-0e2b-4026-a585-cc7931dc856d" (UID: "a413b4e9-0e2b-4026-a585-cc7931dc856d"). 
InnerVolumeSpecName "kube-api-access-cgjp8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 14:53:05 crc kubenswrapper[5050]: I1123 14:53:05.113429 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a413b4e9-0e2b-4026-a585-cc7931dc856d-crc-storage" (OuterVolumeSpecName: "crc-storage") pod "a413b4e9-0e2b-4026-a585-cc7931dc856d" (UID: "a413b4e9-0e2b-4026-a585-cc7931dc856d"). InnerVolumeSpecName "crc-storage". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 14:53:05 crc kubenswrapper[5050]: I1123 14:53:05.196775 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cgjp8\" (UniqueName: \"kubernetes.io/projected/a413b4e9-0e2b-4026-a585-cc7931dc856d-kube-api-access-cgjp8\") on node \"crc\" DevicePath \"\"" Nov 23 14:53:05 crc kubenswrapper[5050]: I1123 14:53:05.196857 5050 reconciler_common.go:293] "Volume detached for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/a413b4e9-0e2b-4026-a585-cc7931dc856d-crc-storage\") on node \"crc\" DevicePath \"\"" Nov 23 14:53:05 crc kubenswrapper[5050]: I1123 14:53:05.611298 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-cnd8p" event={"ID":"a413b4e9-0e2b-4026-a585-cc7931dc856d","Type":"ContainerDied","Data":"91a02d9f04593332d4e144ec608ff312a39ce0df613a175e54016f7368f22ab3"} Nov 23 14:53:05 crc kubenswrapper[5050]: I1123 14:53:05.611608 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="91a02d9f04593332d4e144ec608ff312a39ce0df613a175e54016f7368f22ab3" Nov 23 14:53:05 crc kubenswrapper[5050]: I1123 14:53:05.611397 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-cnd8p" Nov 23 14:53:14 crc kubenswrapper[5050]: I1123 14:53:14.115249 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enf8kg"] Nov 23 14:53:14 crc kubenswrapper[5050]: E1123 14:53:14.116200 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a413b4e9-0e2b-4026-a585-cc7931dc856d" containerName="storage" Nov 23 14:53:14 crc kubenswrapper[5050]: I1123 14:53:14.116217 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="a413b4e9-0e2b-4026-a585-cc7931dc856d" containerName="storage" Nov 23 14:53:14 crc kubenswrapper[5050]: I1123 14:53:14.116328 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="a413b4e9-0e2b-4026-a585-cc7931dc856d" containerName="storage" Nov 23 14:53:14 crc kubenswrapper[5050]: I1123 14:53:14.117187 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enf8kg" Nov 23 14:53:14 crc kubenswrapper[5050]: I1123 14:53:14.122092 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Nov 23 14:53:14 crc kubenswrapper[5050]: I1123 14:53:14.129084 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enf8kg"] Nov 23 14:53:14 crc kubenswrapper[5050]: I1123 14:53:14.132067 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/ca5beca1-ae2a-475c-bb17-062dce3850d6-util\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enf8kg\" (UID: \"ca5beca1-ae2a-475c-bb17-062dce3850d6\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enf8kg" Nov 23 14:53:14 crc kubenswrapper[5050]: I1123 14:53:14.132115 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-btc6q\" (UniqueName: \"kubernetes.io/projected/ca5beca1-ae2a-475c-bb17-062dce3850d6-kube-api-access-btc6q\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enf8kg\" (UID: \"ca5beca1-ae2a-475c-bb17-062dce3850d6\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enf8kg" Nov 23 14:53:14 crc kubenswrapper[5050]: I1123 14:53:14.132194 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/ca5beca1-ae2a-475c-bb17-062dce3850d6-bundle\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enf8kg\" (UID: \"ca5beca1-ae2a-475c-bb17-062dce3850d6\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enf8kg" Nov 23 14:53:14 crc kubenswrapper[5050]: I1123 14:53:14.233764 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/ca5beca1-ae2a-475c-bb17-062dce3850d6-bundle\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enf8kg\" (UID: \"ca5beca1-ae2a-475c-bb17-062dce3850d6\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enf8kg" Nov 23 14:53:14 crc kubenswrapper[5050]: I1123 14:53:14.233840 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/ca5beca1-ae2a-475c-bb17-062dce3850d6-util\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enf8kg\" (UID: \"ca5beca1-ae2a-475c-bb17-062dce3850d6\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enf8kg" Nov 23 14:53:14 crc kubenswrapper[5050]: I1123 14:53:14.233871 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-btc6q\" (UniqueName: \"kubernetes.io/projected/ca5beca1-ae2a-475c-bb17-062dce3850d6-kube-api-access-btc6q\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enf8kg\" (UID: \"ca5beca1-ae2a-475c-bb17-062dce3850d6\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enf8kg" Nov 23 14:53:14 crc kubenswrapper[5050]: I1123 14:53:14.234327 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: 
\"kubernetes.io/empty-dir/ca5beca1-ae2a-475c-bb17-062dce3850d6-bundle\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enf8kg\" (UID: \"ca5beca1-ae2a-475c-bb17-062dce3850d6\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enf8kg" Nov 23 14:53:14 crc kubenswrapper[5050]: I1123 14:53:14.234593 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/ca5beca1-ae2a-475c-bb17-062dce3850d6-util\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enf8kg\" (UID: \"ca5beca1-ae2a-475c-bb17-062dce3850d6\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enf8kg" Nov 23 14:53:14 crc kubenswrapper[5050]: I1123 14:53:14.252466 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-btc6q\" (UniqueName: \"kubernetes.io/projected/ca5beca1-ae2a-475c-bb17-062dce3850d6-kube-api-access-btc6q\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enf8kg\" (UID: \"ca5beca1-ae2a-475c-bb17-062dce3850d6\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enf8kg" Nov 23 14:53:14 crc kubenswrapper[5050]: I1123 14:53:14.436384 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enf8kg" Nov 23 14:53:14 crc kubenswrapper[5050]: I1123 14:53:14.658268 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enf8kg"] Nov 23 14:53:15 crc kubenswrapper[5050]: I1123 14:53:15.677485 5050 generic.go:334] "Generic (PLEG): container finished" podID="ca5beca1-ae2a-475c-bb17-062dce3850d6" containerID="ea3f4be5516b1b4df7a03376b2034c4c57a2433d13c51427ba750d14ea03a481" exitCode=0 Nov 23 14:53:15 crc kubenswrapper[5050]: I1123 14:53:15.677611 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enf8kg" event={"ID":"ca5beca1-ae2a-475c-bb17-062dce3850d6","Type":"ContainerDied","Data":"ea3f4be5516b1b4df7a03376b2034c4c57a2433d13c51427ba750d14ea03a481"} Nov 23 14:53:15 crc kubenswrapper[5050]: I1123 14:53:15.677959 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enf8kg" event={"ID":"ca5beca1-ae2a-475c-bb17-062dce3850d6","Type":"ContainerStarted","Data":"b98274bc04b403301dcc341c584e2713cf897921bb44d9af610d75f7ee7c865a"} Nov 23 14:53:17 crc kubenswrapper[5050]: I1123 14:53:17.694319 5050 generic.go:334] "Generic (PLEG): container finished" podID="ca5beca1-ae2a-475c-bb17-062dce3850d6" containerID="397e27384780204584836c342010f7e754d85a008a21f88ed1f043a92531c64d" exitCode=0 Nov 23 14:53:17 crc kubenswrapper[5050]: I1123 14:53:17.694417 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enf8kg" event={"ID":"ca5beca1-ae2a-475c-bb17-062dce3850d6","Type":"ContainerDied","Data":"397e27384780204584836c342010f7e754d85a008a21f88ed1f043a92531c64d"} Nov 23 14:53:18 crc kubenswrapper[5050]: I1123 14:53:18.707667 5050 generic.go:334] "Generic (PLEG): container finished" podID="ca5beca1-ae2a-475c-bb17-062dce3850d6" containerID="2fe898b5c46261248a4fdec5a6e8d864131858c4183e3e78f540d0ae8be66f52" exitCode=0 Nov 23 14:53:18 crc kubenswrapper[5050]: I1123 
14:53:18.707766 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enf8kg" event={"ID":"ca5beca1-ae2a-475c-bb17-062dce3850d6","Type":"ContainerDied","Data":"2fe898b5c46261248a4fdec5a6e8d864131858c4183e3e78f540d0ae8be66f52"} Nov 23 14:53:20 crc kubenswrapper[5050]: I1123 14:53:20.044488 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enf8kg" Nov 23 14:53:20 crc kubenswrapper[5050]: I1123 14:53:20.220303 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/ca5beca1-ae2a-475c-bb17-062dce3850d6-util\") pod \"ca5beca1-ae2a-475c-bb17-062dce3850d6\" (UID: \"ca5beca1-ae2a-475c-bb17-062dce3850d6\") " Nov 23 14:53:20 crc kubenswrapper[5050]: I1123 14:53:20.220810 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-btc6q\" (UniqueName: \"kubernetes.io/projected/ca5beca1-ae2a-475c-bb17-062dce3850d6-kube-api-access-btc6q\") pod \"ca5beca1-ae2a-475c-bb17-062dce3850d6\" (UID: \"ca5beca1-ae2a-475c-bb17-062dce3850d6\") " Nov 23 14:53:20 crc kubenswrapper[5050]: I1123 14:53:20.221003 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/ca5beca1-ae2a-475c-bb17-062dce3850d6-bundle\") pod \"ca5beca1-ae2a-475c-bb17-062dce3850d6\" (UID: \"ca5beca1-ae2a-475c-bb17-062dce3850d6\") " Nov 23 14:53:20 crc kubenswrapper[5050]: I1123 14:53:20.221897 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ca5beca1-ae2a-475c-bb17-062dce3850d6-bundle" (OuterVolumeSpecName: "bundle") pod "ca5beca1-ae2a-475c-bb17-062dce3850d6" (UID: "ca5beca1-ae2a-475c-bb17-062dce3850d6"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 14:53:20 crc kubenswrapper[5050]: I1123 14:53:20.230232 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ca5beca1-ae2a-475c-bb17-062dce3850d6-kube-api-access-btc6q" (OuterVolumeSpecName: "kube-api-access-btc6q") pod "ca5beca1-ae2a-475c-bb17-062dce3850d6" (UID: "ca5beca1-ae2a-475c-bb17-062dce3850d6"). InnerVolumeSpecName "kube-api-access-btc6q". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 14:53:20 crc kubenswrapper[5050]: I1123 14:53:20.323132 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-btc6q\" (UniqueName: \"kubernetes.io/projected/ca5beca1-ae2a-475c-bb17-062dce3850d6-kube-api-access-btc6q\") on node \"crc\" DevicePath \"\"" Nov 23 14:53:20 crc kubenswrapper[5050]: I1123 14:53:20.323190 5050 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/ca5beca1-ae2a-475c-bb17-062dce3850d6-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 14:53:20 crc kubenswrapper[5050]: I1123 14:53:20.520733 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ca5beca1-ae2a-475c-bb17-062dce3850d6-util" (OuterVolumeSpecName: "util") pod "ca5beca1-ae2a-475c-bb17-062dce3850d6" (UID: "ca5beca1-ae2a-475c-bb17-062dce3850d6"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 14:53:20 crc kubenswrapper[5050]: I1123 14:53:20.527105 5050 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/ca5beca1-ae2a-475c-bb17-062dce3850d6-util\") on node \"crc\" DevicePath \"\"" Nov 23 14:53:20 crc kubenswrapper[5050]: I1123 14:53:20.726734 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enf8kg" event={"ID":"ca5beca1-ae2a-475c-bb17-062dce3850d6","Type":"ContainerDied","Data":"b98274bc04b403301dcc341c584e2713cf897921bb44d9af610d75f7ee7c865a"} Nov 23 14:53:20 crc kubenswrapper[5050]: I1123 14:53:20.726828 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b98274bc04b403301dcc341c584e2713cf897921bb44d9af610d75f7ee7c865a" Nov 23 14:53:20 crc kubenswrapper[5050]: I1123 14:53:20.726842 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enf8kg" Nov 23 14:53:25 crc kubenswrapper[5050]: I1123 14:53:25.908303 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-operator-557fdffb88-6lxqw"] Nov 23 14:53:25 crc kubenswrapper[5050]: E1123 14:53:25.911060 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ca5beca1-ae2a-475c-bb17-062dce3850d6" containerName="util" Nov 23 14:53:25 crc kubenswrapper[5050]: I1123 14:53:25.911185 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="ca5beca1-ae2a-475c-bb17-062dce3850d6" containerName="util" Nov 23 14:53:25 crc kubenswrapper[5050]: E1123 14:53:25.911259 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ca5beca1-ae2a-475c-bb17-062dce3850d6" containerName="extract" Nov 23 14:53:25 crc kubenswrapper[5050]: I1123 14:53:25.911366 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="ca5beca1-ae2a-475c-bb17-062dce3850d6" containerName="extract" Nov 23 14:53:25 crc kubenswrapper[5050]: E1123 14:53:25.911475 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ca5beca1-ae2a-475c-bb17-062dce3850d6" containerName="pull" Nov 23 14:53:25 crc kubenswrapper[5050]: I1123 14:53:25.911551 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="ca5beca1-ae2a-475c-bb17-062dce3850d6" containerName="pull" Nov 23 14:53:25 crc kubenswrapper[5050]: I1123 14:53:25.911826 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="ca5beca1-ae2a-475c-bb17-062dce3850d6" containerName="extract" Nov 23 14:53:25 crc kubenswrapper[5050]: I1123 14:53:25.912743 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-operator-557fdffb88-6lxqw" Nov 23 14:53:25 crc kubenswrapper[5050]: I1123 14:53:25.915418 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-operator-dockercfg-nnzht" Nov 23 14:53:25 crc kubenswrapper[5050]: I1123 14:53:25.915714 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"openshift-service-ca.crt" Nov 23 14:53:25 crc kubenswrapper[5050]: I1123 14:53:25.916506 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"kube-root-ca.crt" Nov 23 14:53:25 crc kubenswrapper[5050]: I1123 14:53:25.926283 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-557fdffb88-6lxqw"] Nov 23 14:53:26 crc kubenswrapper[5050]: I1123 14:53:26.006970 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ccwpz\" (UniqueName: \"kubernetes.io/projected/aea782b8-e7a4-4d41-b608-44bc576aeebc-kube-api-access-ccwpz\") pod \"nmstate-operator-557fdffb88-6lxqw\" (UID: \"aea782b8-e7a4-4d41-b608-44bc576aeebc\") " pod="openshift-nmstate/nmstate-operator-557fdffb88-6lxqw" Nov 23 14:53:26 crc kubenswrapper[5050]: I1123 14:53:26.110296 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ccwpz\" (UniqueName: \"kubernetes.io/projected/aea782b8-e7a4-4d41-b608-44bc576aeebc-kube-api-access-ccwpz\") pod \"nmstate-operator-557fdffb88-6lxqw\" (UID: \"aea782b8-e7a4-4d41-b608-44bc576aeebc\") " pod="openshift-nmstate/nmstate-operator-557fdffb88-6lxqw" Nov 23 14:53:26 crc kubenswrapper[5050]: I1123 14:53:26.132433 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ccwpz\" (UniqueName: \"kubernetes.io/projected/aea782b8-e7a4-4d41-b608-44bc576aeebc-kube-api-access-ccwpz\") pod \"nmstate-operator-557fdffb88-6lxqw\" (UID: \"aea782b8-e7a4-4d41-b608-44bc576aeebc\") " pod="openshift-nmstate/nmstate-operator-557fdffb88-6lxqw" Nov 23 14:53:26 crc kubenswrapper[5050]: I1123 14:53:26.242967 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-operator-557fdffb88-6lxqw" Nov 23 14:53:26 crc kubenswrapper[5050]: I1123 14:53:26.486296 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-557fdffb88-6lxqw"] Nov 23 14:53:26 crc kubenswrapper[5050]: I1123 14:53:26.768032 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-557fdffb88-6lxqw" event={"ID":"aea782b8-e7a4-4d41-b608-44bc576aeebc","Type":"ContainerStarted","Data":"fe511dad9d5f68a54ff9fb6c72c3ec72de72f8f62cc782050393f3295a1c9e33"} Nov 23 14:53:28 crc kubenswrapper[5050]: I1123 14:53:28.785921 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-557fdffb88-6lxqw" event={"ID":"aea782b8-e7a4-4d41-b608-44bc576aeebc","Type":"ContainerStarted","Data":"81633dd30c4235a441004fb50e13cd1d173ddb1624eef61e65c966ad83a0e6e0"} Nov 23 14:53:28 crc kubenswrapper[5050]: I1123 14:53:28.807821 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-operator-557fdffb88-6lxqw" podStartSLOduration=1.891448432 podStartE2EDuration="3.807797968s" podCreationTimestamp="2025-11-23 14:53:25 +0000 UTC" firstStartedPulling="2025-11-23 14:53:26.502858591 +0000 UTC m=+701.669855096" lastFinishedPulling="2025-11-23 14:53:28.419208147 +0000 UTC m=+703.586204632" observedRunningTime="2025-11-23 14:53:28.804146647 +0000 UTC m=+703.971143172" watchObservedRunningTime="2025-11-23 14:53:28.807797968 +0000 UTC m=+703.974794453" Nov 23 14:53:29 crc kubenswrapper[5050]: I1123 14:53:29.224538 5050 patch_prober.go:28] interesting pod/machine-config-daemon-hlrlq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 23 14:53:29 crc kubenswrapper[5050]: I1123 14:53:29.224660 5050 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 23 14:53:34 crc kubenswrapper[5050]: I1123 14:53:34.766463 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-metrics-5dcf9c57c5-7vq97"] Nov 23 14:53:34 crc kubenswrapper[5050]: I1123 14:53:34.767715 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-7vq97" Nov 23 14:53:34 crc kubenswrapper[5050]: I1123 14:53:34.776032 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-handler-dockercfg-8m46k" Nov 23 14:53:34 crc kubenswrapper[5050]: I1123 14:53:34.791565 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-webhook-6b89b748d8-nmb6b"] Nov 23 14:53:34 crc kubenswrapper[5050]: I1123 14:53:34.792351 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-nmb6b" Nov 23 14:53:34 crc kubenswrapper[5050]: I1123 14:53:34.794562 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"openshift-nmstate-webhook" Nov 23 14:53:34 crc kubenswrapper[5050]: I1123 14:53:34.802919 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-5dcf9c57c5-7vq97"] Nov 23 14:53:34 crc kubenswrapper[5050]: I1123 14:53:34.813553 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-handler-wkrpm"] Nov 23 14:53:34 crc kubenswrapper[5050]: I1123 14:53:34.814313 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-handler-wkrpm" Nov 23 14:53:34 crc kubenswrapper[5050]: I1123 14:53:34.839061 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-6b89b748d8-nmb6b"] Nov 23 14:53:34 crc kubenswrapper[5050]: I1123 14:53:34.869782 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/50d94b05-cf38-4f9e-a9fd-6b93aac7646b-tls-key-pair\") pod \"nmstate-webhook-6b89b748d8-nmb6b\" (UID: \"50d94b05-cf38-4f9e-a9fd-6b93aac7646b\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-nmb6b" Nov 23 14:53:34 crc kubenswrapper[5050]: I1123 14:53:34.869922 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jpnws\" (UniqueName: \"kubernetes.io/projected/c5b4cff3-6a04-4b06-a466-53b9c96b12bf-kube-api-access-jpnws\") pod \"nmstate-metrics-5dcf9c57c5-7vq97\" (UID: \"c5b4cff3-6a04-4b06-a466-53b9c96b12bf\") " pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-7vq97" Nov 23 14:53:34 crc kubenswrapper[5050]: I1123 14:53:34.869954 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6sgjq\" (UniqueName: \"kubernetes.io/projected/50d94b05-cf38-4f9e-a9fd-6b93aac7646b-kube-api-access-6sgjq\") pod \"nmstate-webhook-6b89b748d8-nmb6b\" (UID: \"50d94b05-cf38-4f9e-a9fd-6b93aac7646b\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-nmb6b" Nov 23 14:53:34 crc kubenswrapper[5050]: I1123 14:53:34.944423 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-console-plugin-5874bd7bc5-87552"] Nov 23 14:53:34 crc kubenswrapper[5050]: I1123 14:53:34.967127 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-87552" Nov 23 14:53:34 crc kubenswrapper[5050]: I1123 14:53:34.970677 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"default-dockercfg-fv66m" Nov 23 14:53:34 crc kubenswrapper[5050]: I1123 14:53:34.970911 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"nginx-conf" Nov 23 14:53:34 crc kubenswrapper[5050]: I1123 14:53:34.971097 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"plugin-serving-cert" Nov 23 14:53:34 crc kubenswrapper[5050]: I1123 14:53:34.973252 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-5874bd7bc5-87552"] Nov 23 14:53:34 crc kubenswrapper[5050]: I1123 14:53:34.974051 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-74xj4\" (UniqueName: \"kubernetes.io/projected/e1705bf0-65ab-45ea-a7f0-b89f69d5622d-kube-api-access-74xj4\") pod \"nmstate-handler-wkrpm\" (UID: \"e1705bf0-65ab-45ea-a7f0-b89f69d5622d\") " pod="openshift-nmstate/nmstate-handler-wkrpm" Nov 23 14:53:34 crc kubenswrapper[5050]: I1123 14:53:34.974184 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jpnws\" (UniqueName: \"kubernetes.io/projected/c5b4cff3-6a04-4b06-a466-53b9c96b12bf-kube-api-access-jpnws\") pod \"nmstate-metrics-5dcf9c57c5-7vq97\" (UID: \"c5b4cff3-6a04-4b06-a466-53b9c96b12bf\") " pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-7vq97" Nov 23 14:53:34 crc kubenswrapper[5050]: I1123 14:53:34.974249 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6sgjq\" (UniqueName: \"kubernetes.io/projected/50d94b05-cf38-4f9e-a9fd-6b93aac7646b-kube-api-access-6sgjq\") pod \"nmstate-webhook-6b89b748d8-nmb6b\" (UID: \"50d94b05-cf38-4f9e-a9fd-6b93aac7646b\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-nmb6b" Nov 23 14:53:34 crc kubenswrapper[5050]: I1123 14:53:34.974288 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/50d94b05-cf38-4f9e-a9fd-6b93aac7646b-tls-key-pair\") pod \"nmstate-webhook-6b89b748d8-nmb6b\" (UID: \"50d94b05-cf38-4f9e-a9fd-6b93aac7646b\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-nmb6b" Nov 23 14:53:34 crc kubenswrapper[5050]: I1123 14:53:34.974354 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/e1705bf0-65ab-45ea-a7f0-b89f69d5622d-ovs-socket\") pod \"nmstate-handler-wkrpm\" (UID: \"e1705bf0-65ab-45ea-a7f0-b89f69d5622d\") " pod="openshift-nmstate/nmstate-handler-wkrpm" Nov 23 14:53:34 crc kubenswrapper[5050]: I1123 14:53:34.974417 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/e1705bf0-65ab-45ea-a7f0-b89f69d5622d-dbus-socket\") pod \"nmstate-handler-wkrpm\" (UID: \"e1705bf0-65ab-45ea-a7f0-b89f69d5622d\") " pod="openshift-nmstate/nmstate-handler-wkrpm" Nov 23 14:53:34 crc kubenswrapper[5050]: I1123 14:53:34.974480 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/e1705bf0-65ab-45ea-a7f0-b89f69d5622d-nmstate-lock\") pod \"nmstate-handler-wkrpm\" (UID: 
\"e1705bf0-65ab-45ea-a7f0-b89f69d5622d\") " pod="openshift-nmstate/nmstate-handler-wkrpm" Nov 23 14:53:34 crc kubenswrapper[5050]: E1123 14:53:34.974546 5050 secret.go:188] Couldn't get secret openshift-nmstate/openshift-nmstate-webhook: secret "openshift-nmstate-webhook" not found Nov 23 14:53:34 crc kubenswrapper[5050]: E1123 14:53:34.974623 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/50d94b05-cf38-4f9e-a9fd-6b93aac7646b-tls-key-pair podName:50d94b05-cf38-4f9e-a9fd-6b93aac7646b nodeName:}" failed. No retries permitted until 2025-11-23 14:53:35.474604926 +0000 UTC m=+710.641601411 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "tls-key-pair" (UniqueName: "kubernetes.io/secret/50d94b05-cf38-4f9e-a9fd-6b93aac7646b-tls-key-pair") pod "nmstate-webhook-6b89b748d8-nmb6b" (UID: "50d94b05-cf38-4f9e-a9fd-6b93aac7646b") : secret "openshift-nmstate-webhook" not found Nov 23 14:53:35 crc kubenswrapper[5050]: I1123 14:53:35.016516 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jpnws\" (UniqueName: \"kubernetes.io/projected/c5b4cff3-6a04-4b06-a466-53b9c96b12bf-kube-api-access-jpnws\") pod \"nmstate-metrics-5dcf9c57c5-7vq97\" (UID: \"c5b4cff3-6a04-4b06-a466-53b9c96b12bf\") " pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-7vq97" Nov 23 14:53:35 crc kubenswrapper[5050]: I1123 14:53:35.017399 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6sgjq\" (UniqueName: \"kubernetes.io/projected/50d94b05-cf38-4f9e-a9fd-6b93aac7646b-kube-api-access-6sgjq\") pod \"nmstate-webhook-6b89b748d8-nmb6b\" (UID: \"50d94b05-cf38-4f9e-a9fd-6b93aac7646b\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-nmb6b" Nov 23 14:53:35 crc kubenswrapper[5050]: I1123 14:53:35.076025 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/e1705bf0-65ab-45ea-a7f0-b89f69d5622d-dbus-socket\") pod \"nmstate-handler-wkrpm\" (UID: \"e1705bf0-65ab-45ea-a7f0-b89f69d5622d\") " pod="openshift-nmstate/nmstate-handler-wkrpm" Nov 23 14:53:35 crc kubenswrapper[5050]: I1123 14:53:35.076096 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/e1705bf0-65ab-45ea-a7f0-b89f69d5622d-nmstate-lock\") pod \"nmstate-handler-wkrpm\" (UID: \"e1705bf0-65ab-45ea-a7f0-b89f69d5622d\") " pod="openshift-nmstate/nmstate-handler-wkrpm" Nov 23 14:53:35 crc kubenswrapper[5050]: I1123 14:53:35.076191 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/e1705bf0-65ab-45ea-a7f0-b89f69d5622d-nmstate-lock\") pod \"nmstate-handler-wkrpm\" (UID: \"e1705bf0-65ab-45ea-a7f0-b89f69d5622d\") " pod="openshift-nmstate/nmstate-handler-wkrpm" Nov 23 14:53:35 crc kubenswrapper[5050]: I1123 14:53:35.076855 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/e1705bf0-65ab-45ea-a7f0-b89f69d5622d-dbus-socket\") pod \"nmstate-handler-wkrpm\" (UID: \"e1705bf0-65ab-45ea-a7f0-b89f69d5622d\") " pod="openshift-nmstate/nmstate-handler-wkrpm" Nov 23 14:53:35 crc kubenswrapper[5050]: I1123 14:53:35.076885 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/0cd5928a-1a84-425a-a57d-e03c10630d1a-nginx-conf\") pod 
\"nmstate-console-plugin-5874bd7bc5-87552\" (UID: \"0cd5928a-1a84-425a-a57d-e03c10630d1a\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-87552" Nov 23 14:53:35 crc kubenswrapper[5050]: I1123 14:53:35.076939 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gkzj5\" (UniqueName: \"kubernetes.io/projected/0cd5928a-1a84-425a-a57d-e03c10630d1a-kube-api-access-gkzj5\") pod \"nmstate-console-plugin-5874bd7bc5-87552\" (UID: \"0cd5928a-1a84-425a-a57d-e03c10630d1a\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-87552" Nov 23 14:53:35 crc kubenswrapper[5050]: I1123 14:53:35.076974 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-74xj4\" (UniqueName: \"kubernetes.io/projected/e1705bf0-65ab-45ea-a7f0-b89f69d5622d-kube-api-access-74xj4\") pod \"nmstate-handler-wkrpm\" (UID: \"e1705bf0-65ab-45ea-a7f0-b89f69d5622d\") " pod="openshift-nmstate/nmstate-handler-wkrpm" Nov 23 14:53:35 crc kubenswrapper[5050]: I1123 14:53:35.077030 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/0cd5928a-1a84-425a-a57d-e03c10630d1a-plugin-serving-cert\") pod \"nmstate-console-plugin-5874bd7bc5-87552\" (UID: \"0cd5928a-1a84-425a-a57d-e03c10630d1a\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-87552" Nov 23 14:53:35 crc kubenswrapper[5050]: I1123 14:53:35.077101 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/e1705bf0-65ab-45ea-a7f0-b89f69d5622d-ovs-socket\") pod \"nmstate-handler-wkrpm\" (UID: \"e1705bf0-65ab-45ea-a7f0-b89f69d5622d\") " pod="openshift-nmstate/nmstate-handler-wkrpm" Nov 23 14:53:35 crc kubenswrapper[5050]: I1123 14:53:35.077189 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/e1705bf0-65ab-45ea-a7f0-b89f69d5622d-ovs-socket\") pod \"nmstate-handler-wkrpm\" (UID: \"e1705bf0-65ab-45ea-a7f0-b89f69d5622d\") " pod="openshift-nmstate/nmstate-handler-wkrpm" Nov 23 14:53:35 crc kubenswrapper[5050]: I1123 14:53:35.099848 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-7vq97" Nov 23 14:53:35 crc kubenswrapper[5050]: I1123 14:53:35.113151 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-74xj4\" (UniqueName: \"kubernetes.io/projected/e1705bf0-65ab-45ea-a7f0-b89f69d5622d-kube-api-access-74xj4\") pod \"nmstate-handler-wkrpm\" (UID: \"e1705bf0-65ab-45ea-a7f0-b89f69d5622d\") " pod="openshift-nmstate/nmstate-handler-wkrpm" Nov 23 14:53:35 crc kubenswrapper[5050]: I1123 14:53:35.133389 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-handler-wkrpm" Nov 23 14:53:35 crc kubenswrapper[5050]: I1123 14:53:35.149553 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-7d46cdf4c7-2ppjq"] Nov 23 14:53:35 crc kubenswrapper[5050]: I1123 14:53:35.150637 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-7d46cdf4c7-2ppjq" Nov 23 14:53:35 crc kubenswrapper[5050]: I1123 14:53:35.168185 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-7d46cdf4c7-2ppjq"] Nov 23 14:53:35 crc kubenswrapper[5050]: I1123 14:53:35.178810 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gkzj5\" (UniqueName: \"kubernetes.io/projected/0cd5928a-1a84-425a-a57d-e03c10630d1a-kube-api-access-gkzj5\") pod \"nmstate-console-plugin-5874bd7bc5-87552\" (UID: \"0cd5928a-1a84-425a-a57d-e03c10630d1a\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-87552" Nov 23 14:53:35 crc kubenswrapper[5050]: I1123 14:53:35.178898 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/0cd5928a-1a84-425a-a57d-e03c10630d1a-plugin-serving-cert\") pod \"nmstate-console-plugin-5874bd7bc5-87552\" (UID: \"0cd5928a-1a84-425a-a57d-e03c10630d1a\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-87552" Nov 23 14:53:35 crc kubenswrapper[5050]: I1123 14:53:35.178989 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/0cd5928a-1a84-425a-a57d-e03c10630d1a-nginx-conf\") pod \"nmstate-console-plugin-5874bd7bc5-87552\" (UID: \"0cd5928a-1a84-425a-a57d-e03c10630d1a\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-87552" Nov 23 14:53:35 crc kubenswrapper[5050]: I1123 14:53:35.181009 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/0cd5928a-1a84-425a-a57d-e03c10630d1a-nginx-conf\") pod \"nmstate-console-plugin-5874bd7bc5-87552\" (UID: \"0cd5928a-1a84-425a-a57d-e03c10630d1a\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-87552" Nov 23 14:53:35 crc kubenswrapper[5050]: I1123 14:53:35.187999 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/0cd5928a-1a84-425a-a57d-e03c10630d1a-plugin-serving-cert\") pod \"nmstate-console-plugin-5874bd7bc5-87552\" (UID: \"0cd5928a-1a84-425a-a57d-e03c10630d1a\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-87552" Nov 23 14:53:35 crc kubenswrapper[5050]: I1123 14:53:35.204546 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gkzj5\" (UniqueName: \"kubernetes.io/projected/0cd5928a-1a84-425a-a57d-e03c10630d1a-kube-api-access-gkzj5\") pod \"nmstate-console-plugin-5874bd7bc5-87552\" (UID: \"0cd5928a-1a84-425a-a57d-e03c10630d1a\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-87552" Nov 23 14:53:35 crc kubenswrapper[5050]: I1123 14:53:35.284382 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/c1b0761a-6d21-4806-b3c7-215ba8528ee3-oauth-serving-cert\") pod \"console-7d46cdf4c7-2ppjq\" (UID: \"c1b0761a-6d21-4806-b3c7-215ba8528ee3\") " pod="openshift-console/console-7d46cdf4c7-2ppjq" Nov 23 14:53:35 crc kubenswrapper[5050]: I1123 14:53:35.285111 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tcx7b\" (UniqueName: \"kubernetes.io/projected/c1b0761a-6d21-4806-b3c7-215ba8528ee3-kube-api-access-tcx7b\") pod \"console-7d46cdf4c7-2ppjq\" (UID: \"c1b0761a-6d21-4806-b3c7-215ba8528ee3\") " 
pod="openshift-console/console-7d46cdf4c7-2ppjq" Nov 23 14:53:35 crc kubenswrapper[5050]: I1123 14:53:35.285164 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/c1b0761a-6d21-4806-b3c7-215ba8528ee3-console-serving-cert\") pod \"console-7d46cdf4c7-2ppjq\" (UID: \"c1b0761a-6d21-4806-b3c7-215ba8528ee3\") " pod="openshift-console/console-7d46cdf4c7-2ppjq" Nov 23 14:53:35 crc kubenswrapper[5050]: I1123 14:53:35.285226 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/c1b0761a-6d21-4806-b3c7-215ba8528ee3-console-oauth-config\") pod \"console-7d46cdf4c7-2ppjq\" (UID: \"c1b0761a-6d21-4806-b3c7-215ba8528ee3\") " pod="openshift-console/console-7d46cdf4c7-2ppjq" Nov 23 14:53:35 crc kubenswrapper[5050]: I1123 14:53:35.285246 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c1b0761a-6d21-4806-b3c7-215ba8528ee3-trusted-ca-bundle\") pod \"console-7d46cdf4c7-2ppjq\" (UID: \"c1b0761a-6d21-4806-b3c7-215ba8528ee3\") " pod="openshift-console/console-7d46cdf4c7-2ppjq" Nov 23 14:53:35 crc kubenswrapper[5050]: I1123 14:53:35.285269 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/c1b0761a-6d21-4806-b3c7-215ba8528ee3-console-config\") pod \"console-7d46cdf4c7-2ppjq\" (UID: \"c1b0761a-6d21-4806-b3c7-215ba8528ee3\") " pod="openshift-console/console-7d46cdf4c7-2ppjq" Nov 23 14:53:35 crc kubenswrapper[5050]: I1123 14:53:35.285295 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/c1b0761a-6d21-4806-b3c7-215ba8528ee3-service-ca\") pod \"console-7d46cdf4c7-2ppjq\" (UID: \"c1b0761a-6d21-4806-b3c7-215ba8528ee3\") " pod="openshift-console/console-7d46cdf4c7-2ppjq" Nov 23 14:53:35 crc kubenswrapper[5050]: I1123 14:53:35.288208 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-87552" Nov 23 14:53:35 crc kubenswrapper[5050]: I1123 14:53:35.349999 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-5dcf9c57c5-7vq97"] Nov 23 14:53:35 crc kubenswrapper[5050]: I1123 14:53:35.387750 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/c1b0761a-6d21-4806-b3c7-215ba8528ee3-oauth-serving-cert\") pod \"console-7d46cdf4c7-2ppjq\" (UID: \"c1b0761a-6d21-4806-b3c7-215ba8528ee3\") " pod="openshift-console/console-7d46cdf4c7-2ppjq" Nov 23 14:53:35 crc kubenswrapper[5050]: I1123 14:53:35.387853 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tcx7b\" (UniqueName: \"kubernetes.io/projected/c1b0761a-6d21-4806-b3c7-215ba8528ee3-kube-api-access-tcx7b\") pod \"console-7d46cdf4c7-2ppjq\" (UID: \"c1b0761a-6d21-4806-b3c7-215ba8528ee3\") " pod="openshift-console/console-7d46cdf4c7-2ppjq" Nov 23 14:53:35 crc kubenswrapper[5050]: I1123 14:53:35.387923 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/c1b0761a-6d21-4806-b3c7-215ba8528ee3-console-serving-cert\") pod \"console-7d46cdf4c7-2ppjq\" (UID: \"c1b0761a-6d21-4806-b3c7-215ba8528ee3\") " pod="openshift-console/console-7d46cdf4c7-2ppjq" Nov 23 14:53:35 crc kubenswrapper[5050]: I1123 14:53:35.388011 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/c1b0761a-6d21-4806-b3c7-215ba8528ee3-console-oauth-config\") pod \"console-7d46cdf4c7-2ppjq\" (UID: \"c1b0761a-6d21-4806-b3c7-215ba8528ee3\") " pod="openshift-console/console-7d46cdf4c7-2ppjq" Nov 23 14:53:35 crc kubenswrapper[5050]: I1123 14:53:35.388045 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c1b0761a-6d21-4806-b3c7-215ba8528ee3-trusted-ca-bundle\") pod \"console-7d46cdf4c7-2ppjq\" (UID: \"c1b0761a-6d21-4806-b3c7-215ba8528ee3\") " pod="openshift-console/console-7d46cdf4c7-2ppjq" Nov 23 14:53:35 crc kubenswrapper[5050]: I1123 14:53:35.388070 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/c1b0761a-6d21-4806-b3c7-215ba8528ee3-console-config\") pod \"console-7d46cdf4c7-2ppjq\" (UID: \"c1b0761a-6d21-4806-b3c7-215ba8528ee3\") " pod="openshift-console/console-7d46cdf4c7-2ppjq" Nov 23 14:53:35 crc kubenswrapper[5050]: I1123 14:53:35.388099 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/c1b0761a-6d21-4806-b3c7-215ba8528ee3-service-ca\") pod \"console-7d46cdf4c7-2ppjq\" (UID: \"c1b0761a-6d21-4806-b3c7-215ba8528ee3\") " pod="openshift-console/console-7d46cdf4c7-2ppjq" Nov 23 14:53:35 crc kubenswrapper[5050]: I1123 14:53:35.388866 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/c1b0761a-6d21-4806-b3c7-215ba8528ee3-oauth-serving-cert\") pod \"console-7d46cdf4c7-2ppjq\" (UID: \"c1b0761a-6d21-4806-b3c7-215ba8528ee3\") " pod="openshift-console/console-7d46cdf4c7-2ppjq" Nov 23 14:53:35 crc kubenswrapper[5050]: I1123 14:53:35.390135 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"service-ca\" (UniqueName: \"kubernetes.io/configmap/c1b0761a-6d21-4806-b3c7-215ba8528ee3-service-ca\") pod \"console-7d46cdf4c7-2ppjq\" (UID: \"c1b0761a-6d21-4806-b3c7-215ba8528ee3\") " pod="openshift-console/console-7d46cdf4c7-2ppjq" Nov 23 14:53:35 crc kubenswrapper[5050]: I1123 14:53:35.390518 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c1b0761a-6d21-4806-b3c7-215ba8528ee3-trusted-ca-bundle\") pod \"console-7d46cdf4c7-2ppjq\" (UID: \"c1b0761a-6d21-4806-b3c7-215ba8528ee3\") " pod="openshift-console/console-7d46cdf4c7-2ppjq" Nov 23 14:53:35 crc kubenswrapper[5050]: I1123 14:53:35.390798 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/c1b0761a-6d21-4806-b3c7-215ba8528ee3-console-config\") pod \"console-7d46cdf4c7-2ppjq\" (UID: \"c1b0761a-6d21-4806-b3c7-215ba8528ee3\") " pod="openshift-console/console-7d46cdf4c7-2ppjq" Nov 23 14:53:35 crc kubenswrapper[5050]: I1123 14:53:35.394870 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/c1b0761a-6d21-4806-b3c7-215ba8528ee3-console-serving-cert\") pod \"console-7d46cdf4c7-2ppjq\" (UID: \"c1b0761a-6d21-4806-b3c7-215ba8528ee3\") " pod="openshift-console/console-7d46cdf4c7-2ppjq" Nov 23 14:53:35 crc kubenswrapper[5050]: I1123 14:53:35.395407 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/c1b0761a-6d21-4806-b3c7-215ba8528ee3-console-oauth-config\") pod \"console-7d46cdf4c7-2ppjq\" (UID: \"c1b0761a-6d21-4806-b3c7-215ba8528ee3\") " pod="openshift-console/console-7d46cdf4c7-2ppjq" Nov 23 14:53:35 crc kubenswrapper[5050]: I1123 14:53:35.405270 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tcx7b\" (UniqueName: \"kubernetes.io/projected/c1b0761a-6d21-4806-b3c7-215ba8528ee3-kube-api-access-tcx7b\") pod \"console-7d46cdf4c7-2ppjq\" (UID: \"c1b0761a-6d21-4806-b3c7-215ba8528ee3\") " pod="openshift-console/console-7d46cdf4c7-2ppjq" Nov 23 14:53:35 crc kubenswrapper[5050]: I1123 14:53:35.489431 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/50d94b05-cf38-4f9e-a9fd-6b93aac7646b-tls-key-pair\") pod \"nmstate-webhook-6b89b748d8-nmb6b\" (UID: \"50d94b05-cf38-4f9e-a9fd-6b93aac7646b\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-nmb6b" Nov 23 14:53:35 crc kubenswrapper[5050]: I1123 14:53:35.493159 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/50d94b05-cf38-4f9e-a9fd-6b93aac7646b-tls-key-pair\") pod \"nmstate-webhook-6b89b748d8-nmb6b\" (UID: \"50d94b05-cf38-4f9e-a9fd-6b93aac7646b\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-nmb6b" Nov 23 14:53:35 crc kubenswrapper[5050]: I1123 14:53:35.497203 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-7d46cdf4c7-2ppjq" Nov 23 14:53:35 crc kubenswrapper[5050]: I1123 14:53:35.523641 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-5874bd7bc5-87552"] Nov 23 14:53:35 crc kubenswrapper[5050]: W1123 14:53:35.535239 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0cd5928a_1a84_425a_a57d_e03c10630d1a.slice/crio-d52b93c86acbf629bb4869faf057371189650e70a4bc8ee69f033840ae2ba864 WatchSource:0}: Error finding container d52b93c86acbf629bb4869faf057371189650e70a4bc8ee69f033840ae2ba864: Status 404 returned error can't find the container with id d52b93c86acbf629bb4869faf057371189650e70a4bc8ee69f033840ae2ba864 Nov 23 14:53:35 crc kubenswrapper[5050]: I1123 14:53:35.713550 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-7d46cdf4c7-2ppjq"] Nov 23 14:53:35 crc kubenswrapper[5050]: I1123 14:53:35.717623 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-nmb6b" Nov 23 14:53:35 crc kubenswrapper[5050]: I1123 14:53:35.835051 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-87552" event={"ID":"0cd5928a-1a84-425a-a57d-e03c10630d1a","Type":"ContainerStarted","Data":"d52b93c86acbf629bb4869faf057371189650e70a4bc8ee69f033840ae2ba864"} Nov 23 14:53:35 crc kubenswrapper[5050]: I1123 14:53:35.836152 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-wkrpm" event={"ID":"e1705bf0-65ab-45ea-a7f0-b89f69d5622d","Type":"ContainerStarted","Data":"5b9f00671ecacfb134b117b41807ee7e316e57098ae0712fc6ef51cf16034bd1"} Nov 23 14:53:35 crc kubenswrapper[5050]: I1123 14:53:35.837088 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-7vq97" event={"ID":"c5b4cff3-6a04-4b06-a466-53b9c96b12bf","Type":"ContainerStarted","Data":"0c4c9100e81dbc9c492d1d660550e2b569f5909a156be6b367a047b3c323f04c"} Nov 23 14:53:35 crc kubenswrapper[5050]: I1123 14:53:35.837921 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-7d46cdf4c7-2ppjq" event={"ID":"c1b0761a-6d21-4806-b3c7-215ba8528ee3","Type":"ContainerStarted","Data":"efabe4b40b9272b6afd79efb811d35926a1b858340874401990471258bad4cac"} Nov 23 14:53:36 crc kubenswrapper[5050]: I1123 14:53:36.009989 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-6b89b748d8-nmb6b"] Nov 23 14:53:36 crc kubenswrapper[5050]: I1123 14:53:36.847598 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-7d46cdf4c7-2ppjq" event={"ID":"c1b0761a-6d21-4806-b3c7-215ba8528ee3","Type":"ContainerStarted","Data":"3bc85e04c1d1630631119822920447d6556c8c158c95bb6552f81f5a07569ccf"} Nov 23 14:53:36 crc kubenswrapper[5050]: I1123 14:53:36.850310 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-nmb6b" event={"ID":"50d94b05-cf38-4f9e-a9fd-6b93aac7646b","Type":"ContainerStarted","Data":"f7d0785796c3237bc2b5639748bae35ede9164098e32a2b00e1e75598debbaf5"} Nov 23 14:53:36 crc kubenswrapper[5050]: I1123 14:53:36.869740 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-7d46cdf4c7-2ppjq" podStartSLOduration=1.869720367 podStartE2EDuration="1.869720367s" 
podCreationTimestamp="2025-11-23 14:53:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 14:53:36.866779456 +0000 UTC m=+712.033775941" watchObservedRunningTime="2025-11-23 14:53:36.869720367 +0000 UTC m=+712.036716842" Nov 23 14:53:38 crc kubenswrapper[5050]: I1123 14:53:38.870713 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-wkrpm" event={"ID":"e1705bf0-65ab-45ea-a7f0-b89f69d5622d","Type":"ContainerStarted","Data":"7900abe8715eb67f6b5088f2876f48efe8a7d1e8dfefa6d7bb052c9bf3f9b89f"} Nov 23 14:53:38 crc kubenswrapper[5050]: I1123 14:53:38.871633 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-handler-wkrpm" Nov 23 14:53:38 crc kubenswrapper[5050]: I1123 14:53:38.874278 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-7vq97" event={"ID":"c5b4cff3-6a04-4b06-a466-53b9c96b12bf","Type":"ContainerStarted","Data":"f3bd6b85fbacd8a2bb1976dfe08863682c5112b8fc7f94d368cd45c9bf44590e"} Nov 23 14:53:38 crc kubenswrapper[5050]: I1123 14:53:38.876157 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-nmb6b" event={"ID":"50d94b05-cf38-4f9e-a9fd-6b93aac7646b","Type":"ContainerStarted","Data":"4eef5ad921050f42af5cac38ee6be7407482230c58c11a2772866a9919f9e95b"} Nov 23 14:53:38 crc kubenswrapper[5050]: I1123 14:53:38.876309 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-nmb6b" Nov 23 14:53:38 crc kubenswrapper[5050]: I1123 14:53:38.879062 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-87552" event={"ID":"0cd5928a-1a84-425a-a57d-e03c10630d1a","Type":"ContainerStarted","Data":"91ca080d1ce6e5521a8b56979f7f96eb2e8754291876b8d6eca0329f873f09df"} Nov 23 14:53:38 crc kubenswrapper[5050]: I1123 14:53:38.891849 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-handler-wkrpm" podStartSLOduration=1.6492385189999998 podStartE2EDuration="4.891816073s" podCreationTimestamp="2025-11-23 14:53:34 +0000 UTC" firstStartedPulling="2025-11-23 14:53:35.190784064 +0000 UTC m=+710.357780539" lastFinishedPulling="2025-11-23 14:53:38.433361608 +0000 UTC m=+713.600358093" observedRunningTime="2025-11-23 14:53:38.887660781 +0000 UTC m=+714.054657276" watchObservedRunningTime="2025-11-23 14:53:38.891816073 +0000 UTC m=+714.058812578" Nov 23 14:53:38 crc kubenswrapper[5050]: I1123 14:53:38.917054 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-nmb6b" podStartSLOduration=2.493230248 podStartE2EDuration="4.917030845s" podCreationTimestamp="2025-11-23 14:53:34 +0000 UTC" firstStartedPulling="2025-11-23 14:53:36.022879417 +0000 UTC m=+711.189875902" lastFinishedPulling="2025-11-23 14:53:38.446679984 +0000 UTC m=+713.613676499" observedRunningTime="2025-11-23 14:53:38.912460843 +0000 UTC m=+714.079457338" watchObservedRunningTime="2025-11-23 14:53:38.917030845 +0000 UTC m=+714.084027340" Nov 23 14:53:40 crc kubenswrapper[5050]: I1123 14:53:40.902226 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-7vq97" 
event={"ID":"c5b4cff3-6a04-4b06-a466-53b9c96b12bf","Type":"ContainerStarted","Data":"fcefecf524fc069028e926e1c01740668e947096d88b5463a03439869e4e4959"} Nov 23 14:53:40 crc kubenswrapper[5050]: I1123 14:53:40.931975 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-7vq97" podStartSLOduration=1.6269266020000002 podStartE2EDuration="6.931950153s" podCreationTimestamp="2025-11-23 14:53:34 +0000 UTC" firstStartedPulling="2025-11-23 14:53:35.356699049 +0000 UTC m=+710.523695534" lastFinishedPulling="2025-11-23 14:53:40.66172257 +0000 UTC m=+715.828719085" observedRunningTime="2025-11-23 14:53:40.926216655 +0000 UTC m=+716.093213150" watchObservedRunningTime="2025-11-23 14:53:40.931950153 +0000 UTC m=+716.098946658" Nov 23 14:53:40 crc kubenswrapper[5050]: I1123 14:53:40.934105 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-87552" podStartSLOduration=4.034740851 podStartE2EDuration="6.934094411s" podCreationTimestamp="2025-11-23 14:53:34 +0000 UTC" firstStartedPulling="2025-11-23 14:53:35.537879714 +0000 UTC m=+710.704876209" lastFinishedPulling="2025-11-23 14:53:38.437233254 +0000 UTC m=+713.604229769" observedRunningTime="2025-11-23 14:53:38.944737993 +0000 UTC m=+714.111734488" watchObservedRunningTime="2025-11-23 14:53:40.934094411 +0000 UTC m=+716.101090906" Nov 23 14:53:45 crc kubenswrapper[5050]: I1123 14:53:45.160400 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-handler-wkrpm" Nov 23 14:53:45 crc kubenswrapper[5050]: I1123 14:53:45.499027 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-7d46cdf4c7-2ppjq" Nov 23 14:53:45 crc kubenswrapper[5050]: I1123 14:53:45.499478 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-7d46cdf4c7-2ppjq" Nov 23 14:53:45 crc kubenswrapper[5050]: I1123 14:53:45.507901 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-7d46cdf4c7-2ppjq" Nov 23 14:53:45 crc kubenswrapper[5050]: I1123 14:53:45.952924 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-7d46cdf4c7-2ppjq" Nov 23 14:53:46 crc kubenswrapper[5050]: I1123 14:53:46.031181 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-b646l"] Nov 23 14:53:55 crc kubenswrapper[5050]: I1123 14:53:55.730220 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-nmb6b" Nov 23 14:53:59 crc kubenswrapper[5050]: I1123 14:53:59.225061 5050 patch_prober.go:28] interesting pod/machine-config-daemon-hlrlq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 23 14:53:59 crc kubenswrapper[5050]: I1123 14:53:59.225562 5050 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 23 14:54:11 crc kubenswrapper[5050]: I1123 14:54:11.097510 5050 
kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/console-f9d7485db-b646l" podUID="629f02dd-753b-46d2-b808-812468f6c9c5" containerName="console" containerID="cri-o://d6e5465c9bce3be906df974e1359ec5addf250e00288f9197a7a3b2fcde806bd" gracePeriod=15 Nov 23 14:54:11 crc kubenswrapper[5050]: I1123 14:54:11.401619 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6f6z6d"] Nov 23 14:54:11 crc kubenswrapper[5050]: I1123 14:54:11.404695 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6f6z6d" Nov 23 14:54:11 crc kubenswrapper[5050]: I1123 14:54:11.408673 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Nov 23 14:54:11 crc kubenswrapper[5050]: I1123 14:54:11.417867 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6f6z6d"] Nov 23 14:54:11 crc kubenswrapper[5050]: I1123 14:54:11.419034 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/13fb76cc-ba40-4b0d-85cf-73a8fcd56f36-util\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6f6z6d\" (UID: \"13fb76cc-ba40-4b0d-85cf-73a8fcd56f36\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6f6z6d" Nov 23 14:54:11 crc kubenswrapper[5050]: I1123 14:54:11.419118 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w9bjd\" (UniqueName: \"kubernetes.io/projected/13fb76cc-ba40-4b0d-85cf-73a8fcd56f36-kube-api-access-w9bjd\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6f6z6d\" (UID: \"13fb76cc-ba40-4b0d-85cf-73a8fcd56f36\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6f6z6d" Nov 23 14:54:11 crc kubenswrapper[5050]: I1123 14:54:11.419177 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/13fb76cc-ba40-4b0d-85cf-73a8fcd56f36-bundle\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6f6z6d\" (UID: \"13fb76cc-ba40-4b0d-85cf-73a8fcd56f36\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6f6z6d" Nov 23 14:54:11 crc kubenswrapper[5050]: I1123 14:54:11.479530 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-b646l_629f02dd-753b-46d2-b808-812468f6c9c5/console/0.log" Nov 23 14:54:11 crc kubenswrapper[5050]: I1123 14:54:11.479649 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-b646l" Nov 23 14:54:11 crc kubenswrapper[5050]: I1123 14:54:11.523049 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/13fb76cc-ba40-4b0d-85cf-73a8fcd56f36-util\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6f6z6d\" (UID: \"13fb76cc-ba40-4b0d-85cf-73a8fcd56f36\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6f6z6d" Nov 23 14:54:11 crc kubenswrapper[5050]: I1123 14:54:11.523205 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w9bjd\" (UniqueName: \"kubernetes.io/projected/13fb76cc-ba40-4b0d-85cf-73a8fcd56f36-kube-api-access-w9bjd\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6f6z6d\" (UID: \"13fb76cc-ba40-4b0d-85cf-73a8fcd56f36\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6f6z6d" Nov 23 14:54:11 crc kubenswrapper[5050]: I1123 14:54:11.523283 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/13fb76cc-ba40-4b0d-85cf-73a8fcd56f36-bundle\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6f6z6d\" (UID: \"13fb76cc-ba40-4b0d-85cf-73a8fcd56f36\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6f6z6d" Nov 23 14:54:11 crc kubenswrapper[5050]: I1123 14:54:11.523944 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/13fb76cc-ba40-4b0d-85cf-73a8fcd56f36-bundle\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6f6z6d\" (UID: \"13fb76cc-ba40-4b0d-85cf-73a8fcd56f36\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6f6z6d" Nov 23 14:54:11 crc kubenswrapper[5050]: I1123 14:54:11.523944 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/13fb76cc-ba40-4b0d-85cf-73a8fcd56f36-util\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6f6z6d\" (UID: \"13fb76cc-ba40-4b0d-85cf-73a8fcd56f36\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6f6z6d" Nov 23 14:54:11 crc kubenswrapper[5050]: I1123 14:54:11.548152 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w9bjd\" (UniqueName: \"kubernetes.io/projected/13fb76cc-ba40-4b0d-85cf-73a8fcd56f36-kube-api-access-w9bjd\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6f6z6d\" (UID: \"13fb76cc-ba40-4b0d-85cf-73a8fcd56f36\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6f6z6d" Nov 23 14:54:11 crc kubenswrapper[5050]: I1123 14:54:11.624210 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f7pm4\" (UniqueName: \"kubernetes.io/projected/629f02dd-753b-46d2-b808-812468f6c9c5-kube-api-access-f7pm4\") pod \"629f02dd-753b-46d2-b808-812468f6c9c5\" (UID: \"629f02dd-753b-46d2-b808-812468f6c9c5\") " Nov 23 14:54:11 crc kubenswrapper[5050]: I1123 14:54:11.624302 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/629f02dd-753b-46d2-b808-812468f6c9c5-console-serving-cert\") pod \"629f02dd-753b-46d2-b808-812468f6c9c5\" (UID: 
\"629f02dd-753b-46d2-b808-812468f6c9c5\") " Nov 23 14:54:11 crc kubenswrapper[5050]: I1123 14:54:11.624353 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/629f02dd-753b-46d2-b808-812468f6c9c5-console-config\") pod \"629f02dd-753b-46d2-b808-812468f6c9c5\" (UID: \"629f02dd-753b-46d2-b808-812468f6c9c5\") " Nov 23 14:54:11 crc kubenswrapper[5050]: I1123 14:54:11.624413 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/629f02dd-753b-46d2-b808-812468f6c9c5-oauth-serving-cert\") pod \"629f02dd-753b-46d2-b808-812468f6c9c5\" (UID: \"629f02dd-753b-46d2-b808-812468f6c9c5\") " Nov 23 14:54:11 crc kubenswrapper[5050]: I1123 14:54:11.624515 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/629f02dd-753b-46d2-b808-812468f6c9c5-trusted-ca-bundle\") pod \"629f02dd-753b-46d2-b808-812468f6c9c5\" (UID: \"629f02dd-753b-46d2-b808-812468f6c9c5\") " Nov 23 14:54:11 crc kubenswrapper[5050]: I1123 14:54:11.624549 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/629f02dd-753b-46d2-b808-812468f6c9c5-service-ca\") pod \"629f02dd-753b-46d2-b808-812468f6c9c5\" (UID: \"629f02dd-753b-46d2-b808-812468f6c9c5\") " Nov 23 14:54:11 crc kubenswrapper[5050]: I1123 14:54:11.624578 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/629f02dd-753b-46d2-b808-812468f6c9c5-console-oauth-config\") pod \"629f02dd-753b-46d2-b808-812468f6c9c5\" (UID: \"629f02dd-753b-46d2-b808-812468f6c9c5\") " Nov 23 14:54:11 crc kubenswrapper[5050]: I1123 14:54:11.625587 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/629f02dd-753b-46d2-b808-812468f6c9c5-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "629f02dd-753b-46d2-b808-812468f6c9c5" (UID: "629f02dd-753b-46d2-b808-812468f6c9c5"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 14:54:11 crc kubenswrapper[5050]: I1123 14:54:11.625596 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/629f02dd-753b-46d2-b808-812468f6c9c5-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "629f02dd-753b-46d2-b808-812468f6c9c5" (UID: "629f02dd-753b-46d2-b808-812468f6c9c5"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 14:54:11 crc kubenswrapper[5050]: I1123 14:54:11.625701 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/629f02dd-753b-46d2-b808-812468f6c9c5-service-ca" (OuterVolumeSpecName: "service-ca") pod "629f02dd-753b-46d2-b808-812468f6c9c5" (UID: "629f02dd-753b-46d2-b808-812468f6c9c5"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 14:54:11 crc kubenswrapper[5050]: I1123 14:54:11.625720 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/629f02dd-753b-46d2-b808-812468f6c9c5-console-config" (OuterVolumeSpecName: "console-config") pod "629f02dd-753b-46d2-b808-812468f6c9c5" (UID: "629f02dd-753b-46d2-b808-812468f6c9c5"). InnerVolumeSpecName "console-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 14:54:11 crc kubenswrapper[5050]: I1123 14:54:11.628394 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/629f02dd-753b-46d2-b808-812468f6c9c5-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "629f02dd-753b-46d2-b808-812468f6c9c5" (UID: "629f02dd-753b-46d2-b808-812468f6c9c5"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 14:54:11 crc kubenswrapper[5050]: I1123 14:54:11.628804 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/629f02dd-753b-46d2-b808-812468f6c9c5-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "629f02dd-753b-46d2-b808-812468f6c9c5" (UID: "629f02dd-753b-46d2-b808-812468f6c9c5"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 14:54:11 crc kubenswrapper[5050]: I1123 14:54:11.629812 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/629f02dd-753b-46d2-b808-812468f6c9c5-kube-api-access-f7pm4" (OuterVolumeSpecName: "kube-api-access-f7pm4") pod "629f02dd-753b-46d2-b808-812468f6c9c5" (UID: "629f02dd-753b-46d2-b808-812468f6c9c5"). InnerVolumeSpecName "kube-api-access-f7pm4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 14:54:11 crc kubenswrapper[5050]: I1123 14:54:11.724312 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6f6z6d" Nov 23 14:54:11 crc kubenswrapper[5050]: I1123 14:54:11.725998 5050 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/629f02dd-753b-46d2-b808-812468f6c9c5-console-config\") on node \"crc\" DevicePath \"\"" Nov 23 14:54:11 crc kubenswrapper[5050]: I1123 14:54:11.726035 5050 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/629f02dd-753b-46d2-b808-812468f6c9c5-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 23 14:54:11 crc kubenswrapper[5050]: I1123 14:54:11.726049 5050 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/629f02dd-753b-46d2-b808-812468f6c9c5-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 14:54:11 crc kubenswrapper[5050]: I1123 14:54:11.726063 5050 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/629f02dd-753b-46d2-b808-812468f6c9c5-service-ca\") on node \"crc\" DevicePath \"\"" Nov 23 14:54:11 crc kubenswrapper[5050]: I1123 14:54:11.726076 5050 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/629f02dd-753b-46d2-b808-812468f6c9c5-console-oauth-config\") on node \"crc\" DevicePath \"\"" Nov 23 14:54:11 crc kubenswrapper[5050]: I1123 14:54:11.726090 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f7pm4\" (UniqueName: \"kubernetes.io/projected/629f02dd-753b-46d2-b808-812468f6c9c5-kube-api-access-f7pm4\") on node \"crc\" DevicePath \"\"" Nov 23 14:54:11 crc kubenswrapper[5050]: I1123 14:54:11.726104 5050 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/629f02dd-753b-46d2-b808-812468f6c9c5-console-serving-cert\") on node \"crc\" 
DevicePath \"\"" Nov 23 14:54:11 crc kubenswrapper[5050]: I1123 14:54:11.983693 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6f6z6d"] Nov 23 14:54:12 crc kubenswrapper[5050]: I1123 14:54:12.146723 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-b646l_629f02dd-753b-46d2-b808-812468f6c9c5/console/0.log" Nov 23 14:54:12 crc kubenswrapper[5050]: I1123 14:54:12.147178 5050 generic.go:334] "Generic (PLEG): container finished" podID="629f02dd-753b-46d2-b808-812468f6c9c5" containerID="d6e5465c9bce3be906df974e1359ec5addf250e00288f9197a7a3b2fcde806bd" exitCode=2 Nov 23 14:54:12 crc kubenswrapper[5050]: I1123 14:54:12.147253 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-b646l" event={"ID":"629f02dd-753b-46d2-b808-812468f6c9c5","Type":"ContainerDied","Data":"d6e5465c9bce3be906df974e1359ec5addf250e00288f9197a7a3b2fcde806bd"} Nov 23 14:54:12 crc kubenswrapper[5050]: I1123 14:54:12.147296 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-b646l" event={"ID":"629f02dd-753b-46d2-b808-812468f6c9c5","Type":"ContainerDied","Data":"d7721e29652a6ded78d37b60fc87d9b4c0e4475020c9f0aea6a917080b70c986"} Nov 23 14:54:12 crc kubenswrapper[5050]: I1123 14:54:12.147320 5050 scope.go:117] "RemoveContainer" containerID="d6e5465c9bce3be906df974e1359ec5addf250e00288f9197a7a3b2fcde806bd" Nov 23 14:54:12 crc kubenswrapper[5050]: I1123 14:54:12.147359 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-b646l" Nov 23 14:54:12 crc kubenswrapper[5050]: I1123 14:54:12.154551 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6f6z6d" event={"ID":"13fb76cc-ba40-4b0d-85cf-73a8fcd56f36","Type":"ContainerStarted","Data":"9a7b018eea729c00a0cb80e58698cd1821942bbc04bde76ee9fcdb9c06a0e291"} Nov 23 14:54:12 crc kubenswrapper[5050]: I1123 14:54:12.170985 5050 scope.go:117] "RemoveContainer" containerID="d6e5465c9bce3be906df974e1359ec5addf250e00288f9197a7a3b2fcde806bd" Nov 23 14:54:12 crc kubenswrapper[5050]: E1123 14:54:12.171642 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d6e5465c9bce3be906df974e1359ec5addf250e00288f9197a7a3b2fcde806bd\": container with ID starting with d6e5465c9bce3be906df974e1359ec5addf250e00288f9197a7a3b2fcde806bd not found: ID does not exist" containerID="d6e5465c9bce3be906df974e1359ec5addf250e00288f9197a7a3b2fcde806bd" Nov 23 14:54:12 crc kubenswrapper[5050]: I1123 14:54:12.171719 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d6e5465c9bce3be906df974e1359ec5addf250e00288f9197a7a3b2fcde806bd"} err="failed to get container status \"d6e5465c9bce3be906df974e1359ec5addf250e00288f9197a7a3b2fcde806bd\": rpc error: code = NotFound desc = could not find container \"d6e5465c9bce3be906df974e1359ec5addf250e00288f9197a7a3b2fcde806bd\": container with ID starting with d6e5465c9bce3be906df974e1359ec5addf250e00288f9197a7a3b2fcde806bd not found: ID does not exist" Nov 23 14:54:12 crc kubenswrapper[5050]: I1123 14:54:12.215377 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-b646l"] Nov 23 14:54:12 crc kubenswrapper[5050]: I1123 14:54:12.225508 5050 
kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-f9d7485db-b646l"] Nov 23 14:54:13 crc kubenswrapper[5050]: I1123 14:54:13.162325 5050 generic.go:334] "Generic (PLEG): container finished" podID="13fb76cc-ba40-4b0d-85cf-73a8fcd56f36" containerID="4fa5095c8a43a605ba7c0a78af768f0504ff8f1de1561495dd0f17bf79c32fbe" exitCode=0 Nov 23 14:54:13 crc kubenswrapper[5050]: I1123 14:54:13.162377 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6f6z6d" event={"ID":"13fb76cc-ba40-4b0d-85cf-73a8fcd56f36","Type":"ContainerDied","Data":"4fa5095c8a43a605ba7c0a78af768f0504ff8f1de1561495dd0f17bf79c32fbe"} Nov 23 14:54:13 crc kubenswrapper[5050]: I1123 14:54:13.559237 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="629f02dd-753b-46d2-b808-812468f6c9c5" path="/var/lib/kubelet/pods/629f02dd-753b-46d2-b808-812468f6c9c5/volumes" Nov 23 14:54:13 crc kubenswrapper[5050]: I1123 14:54:13.830873 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-2bsvx"] Nov 23 14:54:13 crc kubenswrapper[5050]: I1123 14:54:13.831189 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-879f6c89f-2bsvx" podUID="72f33e56-9fe4-420b-aa31-8d49ed5c7584" containerName="controller-manager" containerID="cri-o://b1ea10a32fe287bc765ea5e2ebeddac902aee6e6f6c57e856ee5909dd2806cef" gracePeriod=30 Nov 23 14:54:13 crc kubenswrapper[5050]: I1123 14:54:13.944933 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-b9jzj"] Nov 23 14:54:13 crc kubenswrapper[5050]: I1123 14:54:13.945304 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-b9jzj" podUID="650d8b46-fa24-4b73-8f95-a0fbfe331fa6" containerName="route-controller-manager" containerID="cri-o://b239ecf0ee6b03c29ce08ce1a2494318f08e38a22392ec0cc3486552d4249b39" gracePeriod=30 Nov 23 14:54:14 crc kubenswrapper[5050]: I1123 14:54:14.169702 5050 generic.go:334] "Generic (PLEG): container finished" podID="650d8b46-fa24-4b73-8f95-a0fbfe331fa6" containerID="b239ecf0ee6b03c29ce08ce1a2494318f08e38a22392ec0cc3486552d4249b39" exitCode=0 Nov 23 14:54:14 crc kubenswrapper[5050]: I1123 14:54:14.169793 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-b9jzj" event={"ID":"650d8b46-fa24-4b73-8f95-a0fbfe331fa6","Type":"ContainerDied","Data":"b239ecf0ee6b03c29ce08ce1a2494318f08e38a22392ec0cc3486552d4249b39"} Nov 23 14:54:14 crc kubenswrapper[5050]: I1123 14:54:14.172736 5050 generic.go:334] "Generic (PLEG): container finished" podID="72f33e56-9fe4-420b-aa31-8d49ed5c7584" containerID="b1ea10a32fe287bc765ea5e2ebeddac902aee6e6f6c57e856ee5909dd2806cef" exitCode=0 Nov 23 14:54:14 crc kubenswrapper[5050]: I1123 14:54:14.172758 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-2bsvx" event={"ID":"72f33e56-9fe4-420b-aa31-8d49ed5c7584","Type":"ContainerDied","Data":"b1ea10a32fe287bc765ea5e2ebeddac902aee6e6f6c57e856ee5909dd2806cef"} Nov 23 14:54:14 crc kubenswrapper[5050]: I1123 14:54:14.236190 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-2bsvx" Nov 23 14:54:14 crc kubenswrapper[5050]: I1123 14:54:14.363588 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/72f33e56-9fe4-420b-aa31-8d49ed5c7584-config\") pod \"72f33e56-9fe4-420b-aa31-8d49ed5c7584\" (UID: \"72f33e56-9fe4-420b-aa31-8d49ed5c7584\") " Nov 23 14:54:14 crc kubenswrapper[5050]: I1123 14:54:14.364099 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/72f33e56-9fe4-420b-aa31-8d49ed5c7584-client-ca\") pod \"72f33e56-9fe4-420b-aa31-8d49ed5c7584\" (UID: \"72f33e56-9fe4-420b-aa31-8d49ed5c7584\") " Nov 23 14:54:14 crc kubenswrapper[5050]: I1123 14:54:14.364211 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/72f33e56-9fe4-420b-aa31-8d49ed5c7584-serving-cert\") pod \"72f33e56-9fe4-420b-aa31-8d49ed5c7584\" (UID: \"72f33e56-9fe4-420b-aa31-8d49ed5c7584\") " Nov 23 14:54:14 crc kubenswrapper[5050]: I1123 14:54:14.364234 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/72f33e56-9fe4-420b-aa31-8d49ed5c7584-proxy-ca-bundles\") pod \"72f33e56-9fe4-420b-aa31-8d49ed5c7584\" (UID: \"72f33e56-9fe4-420b-aa31-8d49ed5c7584\") " Nov 23 14:54:14 crc kubenswrapper[5050]: I1123 14:54:14.365097 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/72f33e56-9fe4-420b-aa31-8d49ed5c7584-client-ca" (OuterVolumeSpecName: "client-ca") pod "72f33e56-9fe4-420b-aa31-8d49ed5c7584" (UID: "72f33e56-9fe4-420b-aa31-8d49ed5c7584"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 14:54:14 crc kubenswrapper[5050]: I1123 14:54:14.365141 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/72f33e56-9fe4-420b-aa31-8d49ed5c7584-config" (OuterVolumeSpecName: "config") pod "72f33e56-9fe4-420b-aa31-8d49ed5c7584" (UID: "72f33e56-9fe4-420b-aa31-8d49ed5c7584"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 14:54:14 crc kubenswrapper[5050]: I1123 14:54:14.365531 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n2hll\" (UniqueName: \"kubernetes.io/projected/72f33e56-9fe4-420b-aa31-8d49ed5c7584-kube-api-access-n2hll\") pod \"72f33e56-9fe4-420b-aa31-8d49ed5c7584\" (UID: \"72f33e56-9fe4-420b-aa31-8d49ed5c7584\") " Nov 23 14:54:14 crc kubenswrapper[5050]: I1123 14:54:14.365698 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/72f33e56-9fe4-420b-aa31-8d49ed5c7584-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "72f33e56-9fe4-420b-aa31-8d49ed5c7584" (UID: "72f33e56-9fe4-420b-aa31-8d49ed5c7584"). InnerVolumeSpecName "proxy-ca-bundles". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 14:54:14 crc kubenswrapper[5050]: I1123 14:54:14.366760 5050 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/72f33e56-9fe4-420b-aa31-8d49ed5c7584-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Nov 23 14:54:14 crc kubenswrapper[5050]: I1123 14:54:14.367175 5050 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/72f33e56-9fe4-420b-aa31-8d49ed5c7584-config\") on node \"crc\" DevicePath \"\"" Nov 23 14:54:14 crc kubenswrapper[5050]: I1123 14:54:14.367211 5050 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/72f33e56-9fe4-420b-aa31-8d49ed5c7584-client-ca\") on node \"crc\" DevicePath \"\"" Nov 23 14:54:14 crc kubenswrapper[5050]: I1123 14:54:14.372929 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/72f33e56-9fe4-420b-aa31-8d49ed5c7584-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "72f33e56-9fe4-420b-aa31-8d49ed5c7584" (UID: "72f33e56-9fe4-420b-aa31-8d49ed5c7584"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 14:54:14 crc kubenswrapper[5050]: I1123 14:54:14.373365 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/72f33e56-9fe4-420b-aa31-8d49ed5c7584-kube-api-access-n2hll" (OuterVolumeSpecName: "kube-api-access-n2hll") pod "72f33e56-9fe4-420b-aa31-8d49ed5c7584" (UID: "72f33e56-9fe4-420b-aa31-8d49ed5c7584"). InnerVolumeSpecName "kube-api-access-n2hll". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 14:54:14 crc kubenswrapper[5050]: I1123 14:54:14.401326 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-b9jzj" Nov 23 14:54:14 crc kubenswrapper[5050]: I1123 14:54:14.470639 5050 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/72f33e56-9fe4-420b-aa31-8d49ed5c7584-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 23 14:54:14 crc kubenswrapper[5050]: I1123 14:54:14.470904 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n2hll\" (UniqueName: \"kubernetes.io/projected/72f33e56-9fe4-420b-aa31-8d49ed5c7584-kube-api-access-n2hll\") on node \"crc\" DevicePath \"\"" Nov 23 14:54:14 crc kubenswrapper[5050]: I1123 14:54:14.571419 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/650d8b46-fa24-4b73-8f95-a0fbfe331fa6-client-ca\") pod \"650d8b46-fa24-4b73-8f95-a0fbfe331fa6\" (UID: \"650d8b46-fa24-4b73-8f95-a0fbfe331fa6\") " Nov 23 14:54:14 crc kubenswrapper[5050]: I1123 14:54:14.571494 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/650d8b46-fa24-4b73-8f95-a0fbfe331fa6-config\") pod \"650d8b46-fa24-4b73-8f95-a0fbfe331fa6\" (UID: \"650d8b46-fa24-4b73-8f95-a0fbfe331fa6\") " Nov 23 14:54:14 crc kubenswrapper[5050]: I1123 14:54:14.571554 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/650d8b46-fa24-4b73-8f95-a0fbfe331fa6-serving-cert\") pod \"650d8b46-fa24-4b73-8f95-a0fbfe331fa6\" (UID: \"650d8b46-fa24-4b73-8f95-a0fbfe331fa6\") " Nov 23 14:54:14 crc kubenswrapper[5050]: I1123 14:54:14.571611 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z9m2p\" (UniqueName: \"kubernetes.io/projected/650d8b46-fa24-4b73-8f95-a0fbfe331fa6-kube-api-access-z9m2p\") pod \"650d8b46-fa24-4b73-8f95-a0fbfe331fa6\" (UID: \"650d8b46-fa24-4b73-8f95-a0fbfe331fa6\") " Nov 23 14:54:14 crc kubenswrapper[5050]: I1123 14:54:14.572335 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/650d8b46-fa24-4b73-8f95-a0fbfe331fa6-client-ca" (OuterVolumeSpecName: "client-ca") pod "650d8b46-fa24-4b73-8f95-a0fbfe331fa6" (UID: "650d8b46-fa24-4b73-8f95-a0fbfe331fa6"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 14:54:14 crc kubenswrapper[5050]: I1123 14:54:14.572390 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/650d8b46-fa24-4b73-8f95-a0fbfe331fa6-config" (OuterVolumeSpecName: "config") pod "650d8b46-fa24-4b73-8f95-a0fbfe331fa6" (UID: "650d8b46-fa24-4b73-8f95-a0fbfe331fa6"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 14:54:14 crc kubenswrapper[5050]: I1123 14:54:14.577179 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/650d8b46-fa24-4b73-8f95-a0fbfe331fa6-kube-api-access-z9m2p" (OuterVolumeSpecName: "kube-api-access-z9m2p") pod "650d8b46-fa24-4b73-8f95-a0fbfe331fa6" (UID: "650d8b46-fa24-4b73-8f95-a0fbfe331fa6"). InnerVolumeSpecName "kube-api-access-z9m2p". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 14:54:14 crc kubenswrapper[5050]: I1123 14:54:14.577225 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/650d8b46-fa24-4b73-8f95-a0fbfe331fa6-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "650d8b46-fa24-4b73-8f95-a0fbfe331fa6" (UID: "650d8b46-fa24-4b73-8f95-a0fbfe331fa6"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 14:54:14 crc kubenswrapper[5050]: I1123 14:54:14.673832 5050 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/650d8b46-fa24-4b73-8f95-a0fbfe331fa6-config\") on node \"crc\" DevicePath \"\"" Nov 23 14:54:14 crc kubenswrapper[5050]: I1123 14:54:14.673872 5050 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/650d8b46-fa24-4b73-8f95-a0fbfe331fa6-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 23 14:54:14 crc kubenswrapper[5050]: I1123 14:54:14.673885 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z9m2p\" (UniqueName: \"kubernetes.io/projected/650d8b46-fa24-4b73-8f95-a0fbfe331fa6-kube-api-access-z9m2p\") on node \"crc\" DevicePath \"\"" Nov 23 14:54:14 crc kubenswrapper[5050]: I1123 14:54:14.673895 5050 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/650d8b46-fa24-4b73-8f95-a0fbfe331fa6-client-ca\") on node \"crc\" DevicePath \"\"" Nov 23 14:54:15 crc kubenswrapper[5050]: I1123 14:54:15.182935 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-b9jzj" event={"ID":"650d8b46-fa24-4b73-8f95-a0fbfe331fa6","Type":"ContainerDied","Data":"71fffe767fce8cc5b95ed1cb1005b398b5efc8340a6674f2e1c7d8e046cf6dee"} Nov 23 14:54:15 crc kubenswrapper[5050]: I1123 14:54:15.183049 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-b9jzj" Nov 23 14:54:15 crc kubenswrapper[5050]: I1123 14:54:15.183527 5050 scope.go:117] "RemoveContainer" containerID="b239ecf0ee6b03c29ce08ce1a2494318f08e38a22392ec0cc3486552d4249b39" Nov 23 14:54:15 crc kubenswrapper[5050]: I1123 14:54:15.185699 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-2bsvx" event={"ID":"72f33e56-9fe4-420b-aa31-8d49ed5c7584","Type":"ContainerDied","Data":"431fa29464358052ab2819f58db34cc71c30bcc46442a378e0821673f50b5e4b"} Nov 23 14:54:15 crc kubenswrapper[5050]: I1123 14:54:15.185771 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-2bsvx" Nov 23 14:54:15 crc kubenswrapper[5050]: I1123 14:54:15.190640 5050 generic.go:334] "Generic (PLEG): container finished" podID="13fb76cc-ba40-4b0d-85cf-73a8fcd56f36" containerID="a6e3227956a50fcdb095a48d5797fbeab2cb2e299d6468e8330374df8ee8ac44" exitCode=0 Nov 23 14:54:15 crc kubenswrapper[5050]: I1123 14:54:15.190672 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6f6z6d" event={"ID":"13fb76cc-ba40-4b0d-85cf-73a8fcd56f36","Type":"ContainerDied","Data":"a6e3227956a50fcdb095a48d5797fbeab2cb2e299d6468e8330374df8ee8ac44"} Nov 23 14:54:15 crc kubenswrapper[5050]: I1123 14:54:15.230216 5050 scope.go:117] "RemoveContainer" containerID="b1ea10a32fe287bc765ea5e2ebeddac902aee6e6f6c57e856ee5909dd2806cef" Nov 23 14:54:15 crc kubenswrapper[5050]: I1123 14:54:15.243943 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-2bsvx"] Nov 23 14:54:15 crc kubenswrapper[5050]: I1123 14:54:15.248962 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-2bsvx"] Nov 23 14:54:15 crc kubenswrapper[5050]: I1123 14:54:15.261468 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-b9jzj"] Nov 23 14:54:15 crc kubenswrapper[5050]: I1123 14:54:15.266241 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-b9jzj"] Nov 23 14:54:15 crc kubenswrapper[5050]: I1123 14:54:15.561812 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="650d8b46-fa24-4b73-8f95-a0fbfe331fa6" path="/var/lib/kubelet/pods/650d8b46-fa24-4b73-8f95-a0fbfe331fa6/volumes" Nov 23 14:54:15 crc kubenswrapper[5050]: I1123 14:54:15.563072 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="72f33e56-9fe4-420b-aa31-8d49ed5c7584" path="/var/lib/kubelet/pods/72f33e56-9fe4-420b-aa31-8d49ed5c7584/volumes" Nov 23 14:54:15 crc kubenswrapper[5050]: E1123 14:54:15.583651 5050 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod13fb76cc_ba40_4b0d_85cf_73a8fcd56f36.slice/crio-3864db8eac1581fa2bd68a2744134f5a4333c9d5834142ad84e49617feacbc21.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod13fb76cc_ba40_4b0d_85cf_73a8fcd56f36.slice/crio-conmon-3864db8eac1581fa2bd68a2744134f5a4333c9d5834142ad84e49617feacbc21.scope\": RecentStats: unable to find data in memory cache]" Nov 23 14:54:15 crc kubenswrapper[5050]: I1123 14:54:15.743136 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-78f76f8596-frpz6"] Nov 23 14:54:15 crc kubenswrapper[5050]: E1123 14:54:15.743806 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="72f33e56-9fe4-420b-aa31-8d49ed5c7584" containerName="controller-manager" Nov 23 14:54:15 crc kubenswrapper[5050]: I1123 14:54:15.743862 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="72f33e56-9fe4-420b-aa31-8d49ed5c7584" containerName="controller-manager" Nov 23 14:54:15 crc kubenswrapper[5050]: E1123 14:54:15.743890 5050 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="629f02dd-753b-46d2-b808-812468f6c9c5" containerName="console" Nov 23 14:54:15 crc kubenswrapper[5050]: I1123 14:54:15.743912 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="629f02dd-753b-46d2-b808-812468f6c9c5" containerName="console" Nov 23 14:54:15 crc kubenswrapper[5050]: E1123 14:54:15.743945 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="650d8b46-fa24-4b73-8f95-a0fbfe331fa6" containerName="route-controller-manager" Nov 23 14:54:15 crc kubenswrapper[5050]: I1123 14:54:15.743964 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="650d8b46-fa24-4b73-8f95-a0fbfe331fa6" containerName="route-controller-manager" Nov 23 14:54:15 crc kubenswrapper[5050]: I1123 14:54:15.744282 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="72f33e56-9fe4-420b-aa31-8d49ed5c7584" containerName="controller-manager" Nov 23 14:54:15 crc kubenswrapper[5050]: I1123 14:54:15.744318 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="629f02dd-753b-46d2-b808-812468f6c9c5" containerName="console" Nov 23 14:54:15 crc kubenswrapper[5050]: I1123 14:54:15.744347 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="650d8b46-fa24-4b73-8f95-a0fbfe331fa6" containerName="route-controller-manager" Nov 23 14:54:15 crc kubenswrapper[5050]: I1123 14:54:15.745523 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-78f76f8596-frpz6" Nov 23 14:54:15 crc kubenswrapper[5050]: I1123 14:54:15.748795 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Nov 23 14:54:15 crc kubenswrapper[5050]: I1123 14:54:15.749647 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Nov 23 14:54:15 crc kubenswrapper[5050]: I1123 14:54:15.750005 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Nov 23 14:54:15 crc kubenswrapper[5050]: I1123 14:54:15.749996 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Nov 23 14:54:15 crc kubenswrapper[5050]: I1123 14:54:15.750269 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Nov 23 14:54:15 crc kubenswrapper[5050]: I1123 14:54:15.753286 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Nov 23 14:54:15 crc kubenswrapper[5050]: I1123 14:54:15.753596 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-8579f458bf-spd5n"] Nov 23 14:54:15 crc kubenswrapper[5050]: I1123 14:54:15.754960 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-8579f458bf-spd5n" Nov 23 14:54:15 crc kubenswrapper[5050]: I1123 14:54:15.757599 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-78f76f8596-frpz6"] Nov 23 14:54:15 crc kubenswrapper[5050]: I1123 14:54:15.758945 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Nov 23 14:54:15 crc kubenswrapper[5050]: I1123 14:54:15.759198 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Nov 23 14:54:15 crc kubenswrapper[5050]: I1123 14:54:15.761621 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Nov 23 14:54:15 crc kubenswrapper[5050]: I1123 14:54:15.761949 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Nov 23 14:54:15 crc kubenswrapper[5050]: I1123 14:54:15.762106 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Nov 23 14:54:15 crc kubenswrapper[5050]: I1123 14:54:15.762642 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Nov 23 14:54:15 crc kubenswrapper[5050]: I1123 14:54:15.764731 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-8579f458bf-spd5n"] Nov 23 14:54:15 crc kubenswrapper[5050]: I1123 14:54:15.768749 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Nov 23 14:54:15 crc kubenswrapper[5050]: I1123 14:54:15.889736 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/45d410a9-2978-4560-9218-590193dc9177-client-ca\") pod \"controller-manager-78f76f8596-frpz6\" (UID: \"45d410a9-2978-4560-9218-590193dc9177\") " pod="openshift-controller-manager/controller-manager-78f76f8596-frpz6" Nov 23 14:54:15 crc kubenswrapper[5050]: I1123 14:54:15.889814 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/45d410a9-2978-4560-9218-590193dc9177-proxy-ca-bundles\") pod \"controller-manager-78f76f8596-frpz6\" (UID: \"45d410a9-2978-4560-9218-590193dc9177\") " pod="openshift-controller-manager/controller-manager-78f76f8596-frpz6" Nov 23 14:54:15 crc kubenswrapper[5050]: I1123 14:54:15.889852 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/45d410a9-2978-4560-9218-590193dc9177-config\") pod \"controller-manager-78f76f8596-frpz6\" (UID: \"45d410a9-2978-4560-9218-590193dc9177\") " pod="openshift-controller-manager/controller-manager-78f76f8596-frpz6" Nov 23 14:54:15 crc kubenswrapper[5050]: I1123 14:54:15.889901 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/45d410a9-2978-4560-9218-590193dc9177-serving-cert\") pod \"controller-manager-78f76f8596-frpz6\" (UID: \"45d410a9-2978-4560-9218-590193dc9177\") " 
pod="openshift-controller-manager/controller-manager-78f76f8596-frpz6" Nov 23 14:54:15 crc kubenswrapper[5050]: I1123 14:54:15.889929 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/4e3592b6-bb29-4266-be9d-661160354156-client-ca\") pod \"route-controller-manager-8579f458bf-spd5n\" (UID: \"4e3592b6-bb29-4266-be9d-661160354156\") " pod="openshift-route-controller-manager/route-controller-manager-8579f458bf-spd5n" Nov 23 14:54:15 crc kubenswrapper[5050]: I1123 14:54:15.889993 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4e3592b6-bb29-4266-be9d-661160354156-config\") pod \"route-controller-manager-8579f458bf-spd5n\" (UID: \"4e3592b6-bb29-4266-be9d-661160354156\") " pod="openshift-route-controller-manager/route-controller-manager-8579f458bf-spd5n" Nov 23 14:54:15 crc kubenswrapper[5050]: I1123 14:54:15.890047 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4e3592b6-bb29-4266-be9d-661160354156-serving-cert\") pod \"route-controller-manager-8579f458bf-spd5n\" (UID: \"4e3592b6-bb29-4266-be9d-661160354156\") " pod="openshift-route-controller-manager/route-controller-manager-8579f458bf-spd5n" Nov 23 14:54:15 crc kubenswrapper[5050]: I1123 14:54:15.890083 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hlgjj\" (UniqueName: \"kubernetes.io/projected/45d410a9-2978-4560-9218-590193dc9177-kube-api-access-hlgjj\") pod \"controller-manager-78f76f8596-frpz6\" (UID: \"45d410a9-2978-4560-9218-590193dc9177\") " pod="openshift-controller-manager/controller-manager-78f76f8596-frpz6" Nov 23 14:54:15 crc kubenswrapper[5050]: I1123 14:54:15.890112 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nd8hp\" (UniqueName: \"kubernetes.io/projected/4e3592b6-bb29-4266-be9d-661160354156-kube-api-access-nd8hp\") pod \"route-controller-manager-8579f458bf-spd5n\" (UID: \"4e3592b6-bb29-4266-be9d-661160354156\") " pod="openshift-route-controller-manager/route-controller-manager-8579f458bf-spd5n" Nov 23 14:54:15 crc kubenswrapper[5050]: I1123 14:54:15.991205 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4e3592b6-bb29-4266-be9d-661160354156-config\") pod \"route-controller-manager-8579f458bf-spd5n\" (UID: \"4e3592b6-bb29-4266-be9d-661160354156\") " pod="openshift-route-controller-manager/route-controller-manager-8579f458bf-spd5n" Nov 23 14:54:15 crc kubenswrapper[5050]: I1123 14:54:15.991269 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4e3592b6-bb29-4266-be9d-661160354156-serving-cert\") pod \"route-controller-manager-8579f458bf-spd5n\" (UID: \"4e3592b6-bb29-4266-be9d-661160354156\") " pod="openshift-route-controller-manager/route-controller-manager-8579f458bf-spd5n" Nov 23 14:54:15 crc kubenswrapper[5050]: I1123 14:54:15.991299 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hlgjj\" (UniqueName: \"kubernetes.io/projected/45d410a9-2978-4560-9218-590193dc9177-kube-api-access-hlgjj\") pod \"controller-manager-78f76f8596-frpz6\" (UID: 
\"45d410a9-2978-4560-9218-590193dc9177\") " pod="openshift-controller-manager/controller-manager-78f76f8596-frpz6" Nov 23 14:54:15 crc kubenswrapper[5050]: I1123 14:54:15.991320 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nd8hp\" (UniqueName: \"kubernetes.io/projected/4e3592b6-bb29-4266-be9d-661160354156-kube-api-access-nd8hp\") pod \"route-controller-manager-8579f458bf-spd5n\" (UID: \"4e3592b6-bb29-4266-be9d-661160354156\") " pod="openshift-route-controller-manager/route-controller-manager-8579f458bf-spd5n" Nov 23 14:54:15 crc kubenswrapper[5050]: I1123 14:54:15.991359 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/45d410a9-2978-4560-9218-590193dc9177-client-ca\") pod \"controller-manager-78f76f8596-frpz6\" (UID: \"45d410a9-2978-4560-9218-590193dc9177\") " pod="openshift-controller-manager/controller-manager-78f76f8596-frpz6" Nov 23 14:54:15 crc kubenswrapper[5050]: I1123 14:54:15.991380 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/45d410a9-2978-4560-9218-590193dc9177-proxy-ca-bundles\") pod \"controller-manager-78f76f8596-frpz6\" (UID: \"45d410a9-2978-4560-9218-590193dc9177\") " pod="openshift-controller-manager/controller-manager-78f76f8596-frpz6" Nov 23 14:54:15 crc kubenswrapper[5050]: I1123 14:54:15.991403 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/45d410a9-2978-4560-9218-590193dc9177-config\") pod \"controller-manager-78f76f8596-frpz6\" (UID: \"45d410a9-2978-4560-9218-590193dc9177\") " pod="openshift-controller-manager/controller-manager-78f76f8596-frpz6" Nov 23 14:54:15 crc kubenswrapper[5050]: I1123 14:54:15.991428 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/45d410a9-2978-4560-9218-590193dc9177-serving-cert\") pod \"controller-manager-78f76f8596-frpz6\" (UID: \"45d410a9-2978-4560-9218-590193dc9177\") " pod="openshift-controller-manager/controller-manager-78f76f8596-frpz6" Nov 23 14:54:15 crc kubenswrapper[5050]: I1123 14:54:15.991585 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/4e3592b6-bb29-4266-be9d-661160354156-client-ca\") pod \"route-controller-manager-8579f458bf-spd5n\" (UID: \"4e3592b6-bb29-4266-be9d-661160354156\") " pod="openshift-route-controller-manager/route-controller-manager-8579f458bf-spd5n" Nov 23 14:54:15 crc kubenswrapper[5050]: I1123 14:54:15.993943 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/45d410a9-2978-4560-9218-590193dc9177-proxy-ca-bundles\") pod \"controller-manager-78f76f8596-frpz6\" (UID: \"45d410a9-2978-4560-9218-590193dc9177\") " pod="openshift-controller-manager/controller-manager-78f76f8596-frpz6" Nov 23 14:54:15 crc kubenswrapper[5050]: I1123 14:54:15.994019 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/45d410a9-2978-4560-9218-590193dc9177-config\") pod \"controller-manager-78f76f8596-frpz6\" (UID: \"45d410a9-2978-4560-9218-590193dc9177\") " pod="openshift-controller-manager/controller-manager-78f76f8596-frpz6" Nov 23 14:54:15 crc kubenswrapper[5050]: I1123 14:54:15.995052 5050 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/45d410a9-2978-4560-9218-590193dc9177-client-ca\") pod \"controller-manager-78f76f8596-frpz6\" (UID: \"45d410a9-2978-4560-9218-590193dc9177\") " pod="openshift-controller-manager/controller-manager-78f76f8596-frpz6" Nov 23 14:54:15 crc kubenswrapper[5050]: I1123 14:54:15.996938 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4e3592b6-bb29-4266-be9d-661160354156-config\") pod \"route-controller-manager-8579f458bf-spd5n\" (UID: \"4e3592b6-bb29-4266-be9d-661160354156\") " pod="openshift-route-controller-manager/route-controller-manager-8579f458bf-spd5n" Nov 23 14:54:16 crc kubenswrapper[5050]: I1123 14:54:16.014265 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4e3592b6-bb29-4266-be9d-661160354156-serving-cert\") pod \"route-controller-manager-8579f458bf-spd5n\" (UID: \"4e3592b6-bb29-4266-be9d-661160354156\") " pod="openshift-route-controller-manager/route-controller-manager-8579f458bf-spd5n" Nov 23 14:54:16 crc kubenswrapper[5050]: I1123 14:54:16.014530 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nd8hp\" (UniqueName: \"kubernetes.io/projected/4e3592b6-bb29-4266-be9d-661160354156-kube-api-access-nd8hp\") pod \"route-controller-manager-8579f458bf-spd5n\" (UID: \"4e3592b6-bb29-4266-be9d-661160354156\") " pod="openshift-route-controller-manager/route-controller-manager-8579f458bf-spd5n" Nov 23 14:54:16 crc kubenswrapper[5050]: I1123 14:54:16.014571 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/4e3592b6-bb29-4266-be9d-661160354156-client-ca\") pod \"route-controller-manager-8579f458bf-spd5n\" (UID: \"4e3592b6-bb29-4266-be9d-661160354156\") " pod="openshift-route-controller-manager/route-controller-manager-8579f458bf-spd5n" Nov 23 14:54:16 crc kubenswrapper[5050]: I1123 14:54:16.015566 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/45d410a9-2978-4560-9218-590193dc9177-serving-cert\") pod \"controller-manager-78f76f8596-frpz6\" (UID: \"45d410a9-2978-4560-9218-590193dc9177\") " pod="openshift-controller-manager/controller-manager-78f76f8596-frpz6" Nov 23 14:54:16 crc kubenswrapper[5050]: I1123 14:54:16.026854 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hlgjj\" (UniqueName: \"kubernetes.io/projected/45d410a9-2978-4560-9218-590193dc9177-kube-api-access-hlgjj\") pod \"controller-manager-78f76f8596-frpz6\" (UID: \"45d410a9-2978-4560-9218-590193dc9177\") " pod="openshift-controller-manager/controller-manager-78f76f8596-frpz6" Nov 23 14:54:16 crc kubenswrapper[5050]: I1123 14:54:16.074767 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-78f76f8596-frpz6" Nov 23 14:54:16 crc kubenswrapper[5050]: I1123 14:54:16.093487 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-8579f458bf-spd5n" Nov 23 14:54:16 crc kubenswrapper[5050]: I1123 14:54:16.202671 5050 generic.go:334] "Generic (PLEG): container finished" podID="13fb76cc-ba40-4b0d-85cf-73a8fcd56f36" containerID="3864db8eac1581fa2bd68a2744134f5a4333c9d5834142ad84e49617feacbc21" exitCode=0 Nov 23 14:54:16 crc kubenswrapper[5050]: I1123 14:54:16.202738 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6f6z6d" event={"ID":"13fb76cc-ba40-4b0d-85cf-73a8fcd56f36","Type":"ContainerDied","Data":"3864db8eac1581fa2bd68a2744134f5a4333c9d5834142ad84e49617feacbc21"} Nov 23 14:54:16 crc kubenswrapper[5050]: I1123 14:54:16.433098 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-78f76f8596-frpz6"] Nov 23 14:54:16 crc kubenswrapper[5050]: W1123 14:54:16.438174 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod45d410a9_2978_4560_9218_590193dc9177.slice/crio-eb359f8cc209753d537c1f11501bc79ffb9d978aa55ebe300ada0b94f19e822a WatchSource:0}: Error finding container eb359f8cc209753d537c1f11501bc79ffb9d978aa55ebe300ada0b94f19e822a: Status 404 returned error can't find the container with id eb359f8cc209753d537c1f11501bc79ffb9d978aa55ebe300ada0b94f19e822a Nov 23 14:54:16 crc kubenswrapper[5050]: I1123 14:54:16.583984 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-8579f458bf-spd5n"] Nov 23 14:54:16 crc kubenswrapper[5050]: W1123 14:54:16.599065 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4e3592b6_bb29_4266_be9d_661160354156.slice/crio-a25dfa37cf98cc2ca5625ae7dbcc3028c8962a7a825c9114f368a83b2ff90770 WatchSource:0}: Error finding container a25dfa37cf98cc2ca5625ae7dbcc3028c8962a7a825c9114f368a83b2ff90770: Status 404 returned error can't find the container with id a25dfa37cf98cc2ca5625ae7dbcc3028c8962a7a825c9114f368a83b2ff90770 Nov 23 14:54:17 crc kubenswrapper[5050]: I1123 14:54:17.222699 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-8579f458bf-spd5n" event={"ID":"4e3592b6-bb29-4266-be9d-661160354156","Type":"ContainerStarted","Data":"37a01a314c7b5dd66e5d262f3f629d19c5a22c614542aa81696577334b44a9b9"} Nov 23 14:54:17 crc kubenswrapper[5050]: I1123 14:54:17.223244 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-8579f458bf-spd5n" event={"ID":"4e3592b6-bb29-4266-be9d-661160354156","Type":"ContainerStarted","Data":"a25dfa37cf98cc2ca5625ae7dbcc3028c8962a7a825c9114f368a83b2ff90770"} Nov 23 14:54:17 crc kubenswrapper[5050]: I1123 14:54:17.223269 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-8579f458bf-spd5n" Nov 23 14:54:17 crc kubenswrapper[5050]: I1123 14:54:17.225605 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-78f76f8596-frpz6" event={"ID":"45d410a9-2978-4560-9218-590193dc9177","Type":"ContainerStarted","Data":"07b7b0aa84b18c4a3d492181877f61bed76e20498ff5a5cec5401a8e76d3207b"} Nov 23 14:54:17 crc kubenswrapper[5050]: I1123 14:54:17.225644 5050 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openshift-controller-manager/controller-manager-78f76f8596-frpz6" event={"ID":"45d410a9-2978-4560-9218-590193dc9177","Type":"ContainerStarted","Data":"eb359f8cc209753d537c1f11501bc79ffb9d978aa55ebe300ada0b94f19e822a"} Nov 23 14:54:17 crc kubenswrapper[5050]: I1123 14:54:17.226653 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-78f76f8596-frpz6" Nov 23 14:54:17 crc kubenswrapper[5050]: I1123 14:54:17.231234 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-78f76f8596-frpz6" Nov 23 14:54:17 crc kubenswrapper[5050]: I1123 14:54:17.248942 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-8579f458bf-spd5n" podStartSLOduration=3.248916757 podStartE2EDuration="3.248916757s" podCreationTimestamp="2025-11-23 14:54:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 14:54:17.247992196 +0000 UTC m=+752.414988681" watchObservedRunningTime="2025-11-23 14:54:17.248916757 +0000 UTC m=+752.415913252" Nov 23 14:54:17 crc kubenswrapper[5050]: I1123 14:54:17.269405 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-78f76f8596-frpz6" podStartSLOduration=4.269372312 podStartE2EDuration="4.269372312s" podCreationTimestamp="2025-11-23 14:54:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 14:54:17.267098462 +0000 UTC m=+752.434094947" watchObservedRunningTime="2025-11-23 14:54:17.269372312 +0000 UTC m=+752.436368847" Nov 23 14:54:17 crc kubenswrapper[5050]: I1123 14:54:17.523286 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-8579f458bf-spd5n" Nov 23 14:54:17 crc kubenswrapper[5050]: I1123 14:54:17.556557 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6f6z6d" Nov 23 14:54:17 crc kubenswrapper[5050]: I1123 14:54:17.721275 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w9bjd\" (UniqueName: \"kubernetes.io/projected/13fb76cc-ba40-4b0d-85cf-73a8fcd56f36-kube-api-access-w9bjd\") pod \"13fb76cc-ba40-4b0d-85cf-73a8fcd56f36\" (UID: \"13fb76cc-ba40-4b0d-85cf-73a8fcd56f36\") " Nov 23 14:54:17 crc kubenswrapper[5050]: I1123 14:54:17.721415 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/13fb76cc-ba40-4b0d-85cf-73a8fcd56f36-util\") pod \"13fb76cc-ba40-4b0d-85cf-73a8fcd56f36\" (UID: \"13fb76cc-ba40-4b0d-85cf-73a8fcd56f36\") " Nov 23 14:54:17 crc kubenswrapper[5050]: I1123 14:54:17.721525 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/13fb76cc-ba40-4b0d-85cf-73a8fcd56f36-bundle\") pod \"13fb76cc-ba40-4b0d-85cf-73a8fcd56f36\" (UID: \"13fb76cc-ba40-4b0d-85cf-73a8fcd56f36\") " Nov 23 14:54:17 crc kubenswrapper[5050]: I1123 14:54:17.722667 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/13fb76cc-ba40-4b0d-85cf-73a8fcd56f36-bundle" (OuterVolumeSpecName: "bundle") pod "13fb76cc-ba40-4b0d-85cf-73a8fcd56f36" (UID: "13fb76cc-ba40-4b0d-85cf-73a8fcd56f36"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 14:54:17 crc kubenswrapper[5050]: I1123 14:54:17.729347 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/13fb76cc-ba40-4b0d-85cf-73a8fcd56f36-kube-api-access-w9bjd" (OuterVolumeSpecName: "kube-api-access-w9bjd") pod "13fb76cc-ba40-4b0d-85cf-73a8fcd56f36" (UID: "13fb76cc-ba40-4b0d-85cf-73a8fcd56f36"). InnerVolumeSpecName "kube-api-access-w9bjd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 14:54:17 crc kubenswrapper[5050]: I1123 14:54:17.823357 5050 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/13fb76cc-ba40-4b0d-85cf-73a8fcd56f36-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 14:54:17 crc kubenswrapper[5050]: I1123 14:54:17.823408 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w9bjd\" (UniqueName: \"kubernetes.io/projected/13fb76cc-ba40-4b0d-85cf-73a8fcd56f36-kube-api-access-w9bjd\") on node \"crc\" DevicePath \"\"" Nov 23 14:54:17 crc kubenswrapper[5050]: I1123 14:54:17.892073 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/13fb76cc-ba40-4b0d-85cf-73a8fcd56f36-util" (OuterVolumeSpecName: "util") pod "13fb76cc-ba40-4b0d-85cf-73a8fcd56f36" (UID: "13fb76cc-ba40-4b0d-85cf-73a8fcd56f36"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 14:54:17 crc kubenswrapper[5050]: I1123 14:54:17.925955 5050 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/13fb76cc-ba40-4b0d-85cf-73a8fcd56f36-util\") on node \"crc\" DevicePath \"\"" Nov 23 14:54:18 crc kubenswrapper[5050]: I1123 14:54:18.234863 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6f6z6d" event={"ID":"13fb76cc-ba40-4b0d-85cf-73a8fcd56f36","Type":"ContainerDied","Data":"9a7b018eea729c00a0cb80e58698cd1821942bbc04bde76ee9fcdb9c06a0e291"} Nov 23 14:54:18 crc kubenswrapper[5050]: I1123 14:54:18.235342 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9a7b018eea729c00a0cb80e58698cd1821942bbc04bde76ee9fcdb9c06a0e291" Nov 23 14:54:18 crc kubenswrapper[5050]: I1123 14:54:18.235119 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6f6z6d" Nov 23 14:54:22 crc kubenswrapper[5050]: I1123 14:54:22.713376 5050 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Nov 23 14:54:26 crc kubenswrapper[5050]: I1123 14:54:26.464591 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-controller-manager-575dfc8b8d-ztv7m"] Nov 23 14:54:26 crc kubenswrapper[5050]: E1123 14:54:26.465325 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="13fb76cc-ba40-4b0d-85cf-73a8fcd56f36" containerName="util" Nov 23 14:54:26 crc kubenswrapper[5050]: I1123 14:54:26.465342 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="13fb76cc-ba40-4b0d-85cf-73a8fcd56f36" containerName="util" Nov 23 14:54:26 crc kubenswrapper[5050]: E1123 14:54:26.465362 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="13fb76cc-ba40-4b0d-85cf-73a8fcd56f36" containerName="extract" Nov 23 14:54:26 crc kubenswrapper[5050]: I1123 14:54:26.465370 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="13fb76cc-ba40-4b0d-85cf-73a8fcd56f36" containerName="extract" Nov 23 14:54:26 crc kubenswrapper[5050]: E1123 14:54:26.465381 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="13fb76cc-ba40-4b0d-85cf-73a8fcd56f36" containerName="pull" Nov 23 14:54:26 crc kubenswrapper[5050]: I1123 14:54:26.465388 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="13fb76cc-ba40-4b0d-85cf-73a8fcd56f36" containerName="pull" Nov 23 14:54:26 crc kubenswrapper[5050]: I1123 14:54:26.465539 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="13fb76cc-ba40-4b0d-85cf-73a8fcd56f36" containerName="extract" Nov 23 14:54:26 crc kubenswrapper[5050]: I1123 14:54:26.466055 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-575dfc8b8d-ztv7m" Nov 23 14:54:26 crc kubenswrapper[5050]: I1123 14:54:26.474946 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"kube-root-ca.crt" Nov 23 14:54:26 crc kubenswrapper[5050]: I1123 14:54:26.475268 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"openshift-service-ca.crt" Nov 23 14:54:26 crc kubenswrapper[5050]: I1123 14:54:26.475427 5050 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-controller-manager-service-cert" Nov 23 14:54:26 crc kubenswrapper[5050]: I1123 14:54:26.478484 5050 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"manager-account-dockercfg-5jhmb" Nov 23 14:54:26 crc kubenswrapper[5050]: I1123 14:54:26.482104 5050 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-cert" Nov 23 14:54:26 crc kubenswrapper[5050]: I1123 14:54:26.489073 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-575dfc8b8d-ztv7m"] Nov 23 14:54:26 crc kubenswrapper[5050]: I1123 14:54:26.660558 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6khgt\" (UniqueName: \"kubernetes.io/projected/7492d5b8-beb8-4678-bb7b-80dab041dcfc-kube-api-access-6khgt\") pod \"metallb-operator-controller-manager-575dfc8b8d-ztv7m\" (UID: \"7492d5b8-beb8-4678-bb7b-80dab041dcfc\") " pod="metallb-system/metallb-operator-controller-manager-575dfc8b8d-ztv7m" Nov 23 14:54:26 crc kubenswrapper[5050]: I1123 14:54:26.660937 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/7492d5b8-beb8-4678-bb7b-80dab041dcfc-apiservice-cert\") pod \"metallb-operator-controller-manager-575dfc8b8d-ztv7m\" (UID: \"7492d5b8-beb8-4678-bb7b-80dab041dcfc\") " pod="metallb-system/metallb-operator-controller-manager-575dfc8b8d-ztv7m" Nov 23 14:54:26 crc kubenswrapper[5050]: I1123 14:54:26.661013 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/7492d5b8-beb8-4678-bb7b-80dab041dcfc-webhook-cert\") pod \"metallb-operator-controller-manager-575dfc8b8d-ztv7m\" (UID: \"7492d5b8-beb8-4678-bb7b-80dab041dcfc\") " pod="metallb-system/metallb-operator-controller-manager-575dfc8b8d-ztv7m" Nov 23 14:54:26 crc kubenswrapper[5050]: I1123 14:54:26.761936 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/7492d5b8-beb8-4678-bb7b-80dab041dcfc-apiservice-cert\") pod \"metallb-operator-controller-manager-575dfc8b8d-ztv7m\" (UID: \"7492d5b8-beb8-4678-bb7b-80dab041dcfc\") " pod="metallb-system/metallb-operator-controller-manager-575dfc8b8d-ztv7m" Nov 23 14:54:26 crc kubenswrapper[5050]: I1123 14:54:26.762023 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/7492d5b8-beb8-4678-bb7b-80dab041dcfc-webhook-cert\") pod \"metallb-operator-controller-manager-575dfc8b8d-ztv7m\" (UID: \"7492d5b8-beb8-4678-bb7b-80dab041dcfc\") " pod="metallb-system/metallb-operator-controller-manager-575dfc8b8d-ztv7m" Nov 23 14:54:26 crc kubenswrapper[5050]: I1123 14:54:26.762060 5050 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6khgt\" (UniqueName: \"kubernetes.io/projected/7492d5b8-beb8-4678-bb7b-80dab041dcfc-kube-api-access-6khgt\") pod \"metallb-operator-controller-manager-575dfc8b8d-ztv7m\" (UID: \"7492d5b8-beb8-4678-bb7b-80dab041dcfc\") " pod="metallb-system/metallb-operator-controller-manager-575dfc8b8d-ztv7m" Nov 23 14:54:26 crc kubenswrapper[5050]: I1123 14:54:26.762385 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-webhook-server-777d788784-h2285"] Nov 23 14:54:26 crc kubenswrapper[5050]: I1123 14:54:26.763363 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-777d788784-h2285" Nov 23 14:54:26 crc kubenswrapper[5050]: I1123 14:54:26.766059 5050 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Nov 23 14:54:26 crc kubenswrapper[5050]: I1123 14:54:26.766339 5050 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-service-cert" Nov 23 14:54:26 crc kubenswrapper[5050]: I1123 14:54:26.766592 5050 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-dockercfg-mp7vk" Nov 23 14:54:26 crc kubenswrapper[5050]: I1123 14:54:26.772323 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/7492d5b8-beb8-4678-bb7b-80dab041dcfc-webhook-cert\") pod \"metallb-operator-controller-manager-575dfc8b8d-ztv7m\" (UID: \"7492d5b8-beb8-4678-bb7b-80dab041dcfc\") " pod="metallb-system/metallb-operator-controller-manager-575dfc8b8d-ztv7m" Nov 23 14:54:26 crc kubenswrapper[5050]: I1123 14:54:26.772843 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/7492d5b8-beb8-4678-bb7b-80dab041dcfc-apiservice-cert\") pod \"metallb-operator-controller-manager-575dfc8b8d-ztv7m\" (UID: \"7492d5b8-beb8-4678-bb7b-80dab041dcfc\") " pod="metallb-system/metallb-operator-controller-manager-575dfc8b8d-ztv7m" Nov 23 14:54:26 crc kubenswrapper[5050]: I1123 14:54:26.784257 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-777d788784-h2285"] Nov 23 14:54:26 crc kubenswrapper[5050]: I1123 14:54:26.793124 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6khgt\" (UniqueName: \"kubernetes.io/projected/7492d5b8-beb8-4678-bb7b-80dab041dcfc-kube-api-access-6khgt\") pod \"metallb-operator-controller-manager-575dfc8b8d-ztv7m\" (UID: \"7492d5b8-beb8-4678-bb7b-80dab041dcfc\") " pod="metallb-system/metallb-operator-controller-manager-575dfc8b8d-ztv7m" Nov 23 14:54:26 crc kubenswrapper[5050]: I1123 14:54:26.863661 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/c5e8fb3c-c4fd-43e6-9def-6903243d2daa-apiservice-cert\") pod \"metallb-operator-webhook-server-777d788784-h2285\" (UID: \"c5e8fb3c-c4fd-43e6-9def-6903243d2daa\") " pod="metallb-system/metallb-operator-webhook-server-777d788784-h2285" Nov 23 14:54:26 crc kubenswrapper[5050]: I1123 14:54:26.863712 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t8v7q\" (UniqueName: 
\"kubernetes.io/projected/c5e8fb3c-c4fd-43e6-9def-6903243d2daa-kube-api-access-t8v7q\") pod \"metallb-operator-webhook-server-777d788784-h2285\" (UID: \"c5e8fb3c-c4fd-43e6-9def-6903243d2daa\") " pod="metallb-system/metallb-operator-webhook-server-777d788784-h2285" Nov 23 14:54:26 crc kubenswrapper[5050]: I1123 14:54:26.863760 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/c5e8fb3c-c4fd-43e6-9def-6903243d2daa-webhook-cert\") pod \"metallb-operator-webhook-server-777d788784-h2285\" (UID: \"c5e8fb3c-c4fd-43e6-9def-6903243d2daa\") " pod="metallb-system/metallb-operator-webhook-server-777d788784-h2285" Nov 23 14:54:26 crc kubenswrapper[5050]: I1123 14:54:26.965966 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/c5e8fb3c-c4fd-43e6-9def-6903243d2daa-webhook-cert\") pod \"metallb-operator-webhook-server-777d788784-h2285\" (UID: \"c5e8fb3c-c4fd-43e6-9def-6903243d2daa\") " pod="metallb-system/metallb-operator-webhook-server-777d788784-h2285" Nov 23 14:54:26 crc kubenswrapper[5050]: I1123 14:54:26.966350 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/c5e8fb3c-c4fd-43e6-9def-6903243d2daa-apiservice-cert\") pod \"metallb-operator-webhook-server-777d788784-h2285\" (UID: \"c5e8fb3c-c4fd-43e6-9def-6903243d2daa\") " pod="metallb-system/metallb-operator-webhook-server-777d788784-h2285" Nov 23 14:54:26 crc kubenswrapper[5050]: I1123 14:54:26.966400 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t8v7q\" (UniqueName: \"kubernetes.io/projected/c5e8fb3c-c4fd-43e6-9def-6903243d2daa-kube-api-access-t8v7q\") pod \"metallb-operator-webhook-server-777d788784-h2285\" (UID: \"c5e8fb3c-c4fd-43e6-9def-6903243d2daa\") " pod="metallb-system/metallb-operator-webhook-server-777d788784-h2285" Nov 23 14:54:26 crc kubenswrapper[5050]: I1123 14:54:26.970232 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/c5e8fb3c-c4fd-43e6-9def-6903243d2daa-webhook-cert\") pod \"metallb-operator-webhook-server-777d788784-h2285\" (UID: \"c5e8fb3c-c4fd-43e6-9def-6903243d2daa\") " pod="metallb-system/metallb-operator-webhook-server-777d788784-h2285" Nov 23 14:54:26 crc kubenswrapper[5050]: I1123 14:54:26.985966 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/c5e8fb3c-c4fd-43e6-9def-6903243d2daa-apiservice-cert\") pod \"metallb-operator-webhook-server-777d788784-h2285\" (UID: \"c5e8fb3c-c4fd-43e6-9def-6903243d2daa\") " pod="metallb-system/metallb-operator-webhook-server-777d788784-h2285" Nov 23 14:54:26 crc kubenswrapper[5050]: I1123 14:54:26.986861 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t8v7q\" (UniqueName: \"kubernetes.io/projected/c5e8fb3c-c4fd-43e6-9def-6903243d2daa-kube-api-access-t8v7q\") pod \"metallb-operator-webhook-server-777d788784-h2285\" (UID: \"c5e8fb3c-c4fd-43e6-9def-6903243d2daa\") " pod="metallb-system/metallb-operator-webhook-server-777d788784-h2285" Nov 23 14:54:27 crc kubenswrapper[5050]: I1123 14:54:27.085388 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-575dfc8b8d-ztv7m" Nov 23 14:54:27 crc kubenswrapper[5050]: I1123 14:54:27.120481 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-777d788784-h2285" Nov 23 14:54:27 crc kubenswrapper[5050]: I1123 14:54:27.623250 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-575dfc8b8d-ztv7m"] Nov 23 14:54:27 crc kubenswrapper[5050]: W1123 14:54:27.645465 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7492d5b8_beb8_4678_bb7b_80dab041dcfc.slice/crio-722d6401da6b9e2e2fdd85cfe471038b81d25c03aa9b0d972763a589969c7117 WatchSource:0}: Error finding container 722d6401da6b9e2e2fdd85cfe471038b81d25c03aa9b0d972763a589969c7117: Status 404 returned error can't find the container with id 722d6401da6b9e2e2fdd85cfe471038b81d25c03aa9b0d972763a589969c7117 Nov 23 14:54:27 crc kubenswrapper[5050]: I1123 14:54:27.698987 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-777d788784-h2285"] Nov 23 14:54:27 crc kubenswrapper[5050]: W1123 14:54:27.710308 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc5e8fb3c_c4fd_43e6_9def_6903243d2daa.slice/crio-78671e2dc5f7a49cd2b752aa0f6aee0faa5e8a629c6088326446c65ce710330e WatchSource:0}: Error finding container 78671e2dc5f7a49cd2b752aa0f6aee0faa5e8a629c6088326446c65ce710330e: Status 404 returned error can't find the container with id 78671e2dc5f7a49cd2b752aa0f6aee0faa5e8a629c6088326446c65ce710330e Nov 23 14:54:28 crc kubenswrapper[5050]: I1123 14:54:28.314474 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-777d788784-h2285" event={"ID":"c5e8fb3c-c4fd-43e6-9def-6903243d2daa","Type":"ContainerStarted","Data":"78671e2dc5f7a49cd2b752aa0f6aee0faa5e8a629c6088326446c65ce710330e"} Nov 23 14:54:28 crc kubenswrapper[5050]: I1123 14:54:28.315765 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-575dfc8b8d-ztv7m" event={"ID":"7492d5b8-beb8-4678-bb7b-80dab041dcfc","Type":"ContainerStarted","Data":"722d6401da6b9e2e2fdd85cfe471038b81d25c03aa9b0d972763a589969c7117"} Nov 23 14:54:29 crc kubenswrapper[5050]: I1123 14:54:29.224262 5050 patch_prober.go:28] interesting pod/machine-config-daemon-hlrlq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 23 14:54:29 crc kubenswrapper[5050]: I1123 14:54:29.224334 5050 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 23 14:54:29 crc kubenswrapper[5050]: I1123 14:54:29.224387 5050 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" Nov 23 14:54:29 crc kubenswrapper[5050]: I1123 14:54:29.225027 5050 kuberuntime_manager.go:1027] "Message for Container of pod" 
containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"f192bfe46cd44b6e123a66e929ac2df13733eb956bf55d1780f3aab83a1b4eec"} pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 23 14:54:29 crc kubenswrapper[5050]: I1123 14:54:29.225117 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" containerID="cri-o://f192bfe46cd44b6e123a66e929ac2df13733eb956bf55d1780f3aab83a1b4eec" gracePeriod=600 Nov 23 14:54:30 crc kubenswrapper[5050]: I1123 14:54:30.338871 5050 generic.go:334] "Generic (PLEG): container finished" podID="1d998909-9470-47ef-87e8-b34f0473682f" containerID="f192bfe46cd44b6e123a66e929ac2df13733eb956bf55d1780f3aab83a1b4eec" exitCode=0 Nov 23 14:54:30 crc kubenswrapper[5050]: I1123 14:54:30.339348 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" event={"ID":"1d998909-9470-47ef-87e8-b34f0473682f","Type":"ContainerDied","Data":"f192bfe46cd44b6e123a66e929ac2df13733eb956bf55d1780f3aab83a1b4eec"} Nov 23 14:54:30 crc kubenswrapper[5050]: I1123 14:54:30.339414 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" event={"ID":"1d998909-9470-47ef-87e8-b34f0473682f","Type":"ContainerStarted","Data":"1b270f8e19ea537b9c90473c9519c04a4a641a83e215048ece7d9c5619e16b60"} Nov 23 14:54:30 crc kubenswrapper[5050]: I1123 14:54:30.339435 5050 scope.go:117] "RemoveContainer" containerID="496733c812e2a0aa8f64801990fbfc3b73c223752a44cbf12486cae5f9ea9838" Nov 23 14:54:31 crc kubenswrapper[5050]: I1123 14:54:31.347362 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-575dfc8b8d-ztv7m" event={"ID":"7492d5b8-beb8-4678-bb7b-80dab041dcfc","Type":"ContainerStarted","Data":"b2b8e7509f72491abd063b16ccb32b2643c632bb40f0bae32d4f467dfa076bfc"} Nov 23 14:54:31 crc kubenswrapper[5050]: I1123 14:54:31.349689 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-controller-manager-575dfc8b8d-ztv7m" Nov 23 14:54:31 crc kubenswrapper[5050]: I1123 14:54:31.384541 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-controller-manager-575dfc8b8d-ztv7m" podStartSLOduration=2.089701609 podStartE2EDuration="5.384515998s" podCreationTimestamp="2025-11-23 14:54:26 +0000 UTC" firstStartedPulling="2025-11-23 14:54:27.649303018 +0000 UTC m=+762.816299513" lastFinishedPulling="2025-11-23 14:54:30.944117417 +0000 UTC m=+766.111113902" observedRunningTime="2025-11-23 14:54:31.383857309 +0000 UTC m=+766.550853794" watchObservedRunningTime="2025-11-23 14:54:31.384515998 +0000 UTC m=+766.551512483" Nov 23 14:54:33 crc kubenswrapper[5050]: I1123 14:54:33.367276 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-777d788784-h2285" event={"ID":"c5e8fb3c-c4fd-43e6-9def-6903243d2daa","Type":"ContainerStarted","Data":"f177586ba89375de2274c970344b6294d3e1852cf06418b96c7842000d1f9b0f"} Nov 23 14:54:33 crc kubenswrapper[5050]: I1123 14:54:33.367817 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-webhook-server-777d788784-h2285" Nov 
23 14:54:33 crc kubenswrapper[5050]: I1123 14:54:33.391352 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-webhook-server-777d788784-h2285" podStartSLOduration=2.214888707 podStartE2EDuration="7.391320515s" podCreationTimestamp="2025-11-23 14:54:26 +0000 UTC" firstStartedPulling="2025-11-23 14:54:27.715921323 +0000 UTC m=+762.882917828" lastFinishedPulling="2025-11-23 14:54:32.892353111 +0000 UTC m=+768.059349636" observedRunningTime="2025-11-23 14:54:33.387604096 +0000 UTC m=+768.554600591" watchObservedRunningTime="2025-11-23 14:54:33.391320515 +0000 UTC m=+768.558317020" Nov 23 14:54:47 crc kubenswrapper[5050]: I1123 14:54:47.126900 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-webhook-server-777d788784-h2285" Nov 23 14:54:52 crc kubenswrapper[5050]: I1123 14:54:52.781925 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-vqf5v"] Nov 23 14:54:52 crc kubenswrapper[5050]: I1123 14:54:52.802617 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-vqf5v"] Nov 23 14:54:52 crc kubenswrapper[5050]: I1123 14:54:52.802903 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-vqf5v" Nov 23 14:54:52 crc kubenswrapper[5050]: I1123 14:54:52.996029 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7783e2e2-3449-499f-8ee3-a1e4c2a467ba-catalog-content\") pod \"community-operators-vqf5v\" (UID: \"7783e2e2-3449-499f-8ee3-a1e4c2a467ba\") " pod="openshift-marketplace/community-operators-vqf5v" Nov 23 14:54:52 crc kubenswrapper[5050]: I1123 14:54:52.996100 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7783e2e2-3449-499f-8ee3-a1e4c2a467ba-utilities\") pod \"community-operators-vqf5v\" (UID: \"7783e2e2-3449-499f-8ee3-a1e4c2a467ba\") " pod="openshift-marketplace/community-operators-vqf5v" Nov 23 14:54:52 crc kubenswrapper[5050]: I1123 14:54:52.996188 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x7cvz\" (UniqueName: \"kubernetes.io/projected/7783e2e2-3449-499f-8ee3-a1e4c2a467ba-kube-api-access-x7cvz\") pod \"community-operators-vqf5v\" (UID: \"7783e2e2-3449-499f-8ee3-a1e4c2a467ba\") " pod="openshift-marketplace/community-operators-vqf5v" Nov 23 14:54:53 crc kubenswrapper[5050]: I1123 14:54:53.097742 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x7cvz\" (UniqueName: \"kubernetes.io/projected/7783e2e2-3449-499f-8ee3-a1e4c2a467ba-kube-api-access-x7cvz\") pod \"community-operators-vqf5v\" (UID: \"7783e2e2-3449-499f-8ee3-a1e4c2a467ba\") " pod="openshift-marketplace/community-operators-vqf5v" Nov 23 14:54:53 crc kubenswrapper[5050]: I1123 14:54:53.097869 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7783e2e2-3449-499f-8ee3-a1e4c2a467ba-catalog-content\") pod \"community-operators-vqf5v\" (UID: \"7783e2e2-3449-499f-8ee3-a1e4c2a467ba\") " pod="openshift-marketplace/community-operators-vqf5v" Nov 23 14:54:53 crc kubenswrapper[5050]: I1123 14:54:53.097910 5050 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7783e2e2-3449-499f-8ee3-a1e4c2a467ba-utilities\") pod \"community-operators-vqf5v\" (UID: \"7783e2e2-3449-499f-8ee3-a1e4c2a467ba\") " pod="openshift-marketplace/community-operators-vqf5v" Nov 23 14:54:53 crc kubenswrapper[5050]: I1123 14:54:53.098772 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7783e2e2-3449-499f-8ee3-a1e4c2a467ba-utilities\") pod \"community-operators-vqf5v\" (UID: \"7783e2e2-3449-499f-8ee3-a1e4c2a467ba\") " pod="openshift-marketplace/community-operators-vqf5v" Nov 23 14:54:53 crc kubenswrapper[5050]: I1123 14:54:53.099644 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7783e2e2-3449-499f-8ee3-a1e4c2a467ba-catalog-content\") pod \"community-operators-vqf5v\" (UID: \"7783e2e2-3449-499f-8ee3-a1e4c2a467ba\") " pod="openshift-marketplace/community-operators-vqf5v" Nov 23 14:54:53 crc kubenswrapper[5050]: I1123 14:54:53.126233 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x7cvz\" (UniqueName: \"kubernetes.io/projected/7783e2e2-3449-499f-8ee3-a1e4c2a467ba-kube-api-access-x7cvz\") pod \"community-operators-vqf5v\" (UID: \"7783e2e2-3449-499f-8ee3-a1e4c2a467ba\") " pod="openshift-marketplace/community-operators-vqf5v" Nov 23 14:54:53 crc kubenswrapper[5050]: I1123 14:54:53.140113 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-vqf5v" Nov 23 14:54:53 crc kubenswrapper[5050]: I1123 14:54:53.618154 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-vqf5v"] Nov 23 14:54:53 crc kubenswrapper[5050]: W1123 14:54:53.633829 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7783e2e2_3449_499f_8ee3_a1e4c2a467ba.slice/crio-4eefe9c789cdade75b689f547ec5ee55948b48e47cdaa86e066a46bd3eb0c090 WatchSource:0}: Error finding container 4eefe9c789cdade75b689f547ec5ee55948b48e47cdaa86e066a46bd3eb0c090: Status 404 returned error can't find the container with id 4eefe9c789cdade75b689f547ec5ee55948b48e47cdaa86e066a46bd3eb0c090 Nov 23 14:54:54 crc kubenswrapper[5050]: I1123 14:54:54.528245 5050 generic.go:334] "Generic (PLEG): container finished" podID="7783e2e2-3449-499f-8ee3-a1e4c2a467ba" containerID="314ef357731afb5e6b372ad219c646e48a9d3bc1d51fac8db7ba26f1af6b1f36" exitCode=0 Nov 23 14:54:54 crc kubenswrapper[5050]: I1123 14:54:54.528348 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vqf5v" event={"ID":"7783e2e2-3449-499f-8ee3-a1e4c2a467ba","Type":"ContainerDied","Data":"314ef357731afb5e6b372ad219c646e48a9d3bc1d51fac8db7ba26f1af6b1f36"} Nov 23 14:54:54 crc kubenswrapper[5050]: I1123 14:54:54.529333 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vqf5v" event={"ID":"7783e2e2-3449-499f-8ee3-a1e4c2a467ba","Type":"ContainerStarted","Data":"4eefe9c789cdade75b689f547ec5ee55948b48e47cdaa86e066a46bd3eb0c090"} Nov 23 14:54:55 crc kubenswrapper[5050]: I1123 14:54:55.538551 5050 generic.go:334] "Generic (PLEG): container finished" podID="7783e2e2-3449-499f-8ee3-a1e4c2a467ba" containerID="708cdf9f4a9d8f87493fcedf8defb54d06e0d18f727fe27b8d52213beee3a1c6" exitCode=0 Nov 23 14:54:55 crc 
kubenswrapper[5050]: I1123 14:54:55.538676 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vqf5v" event={"ID":"7783e2e2-3449-499f-8ee3-a1e4c2a467ba","Type":"ContainerDied","Data":"708cdf9f4a9d8f87493fcedf8defb54d06e0d18f727fe27b8d52213beee3a1c6"} Nov 23 14:54:56 crc kubenswrapper[5050]: I1123 14:54:56.547283 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vqf5v" event={"ID":"7783e2e2-3449-499f-8ee3-a1e4c2a467ba","Type":"ContainerStarted","Data":"e57da42cd5f115b66f2a85750b133311b0cf91f59802b000237ee9ad324803d3"} Nov 23 14:54:56 crc kubenswrapper[5050]: I1123 14:54:56.572615 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-vqf5v" podStartSLOduration=3.16838151 podStartE2EDuration="4.572591253s" podCreationTimestamp="2025-11-23 14:54:52 +0000 UTC" firstStartedPulling="2025-11-23 14:54:54.530704943 +0000 UTC m=+789.697701468" lastFinishedPulling="2025-11-23 14:54:55.934914696 +0000 UTC m=+791.101911211" observedRunningTime="2025-11-23 14:54:56.570369378 +0000 UTC m=+791.737365863" watchObservedRunningTime="2025-11-23 14:54:56.572591253 +0000 UTC m=+791.739587738" Nov 23 14:55:03 crc kubenswrapper[5050]: I1123 14:55:03.141030 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-vqf5v" Nov 23 14:55:03 crc kubenswrapper[5050]: I1123 14:55:03.143628 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-vqf5v" Nov 23 14:55:03 crc kubenswrapper[5050]: I1123 14:55:03.222273 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-vqf5v" Nov 23 14:55:03 crc kubenswrapper[5050]: I1123 14:55:03.670212 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-vqf5v" Nov 23 14:55:03 crc kubenswrapper[5050]: I1123 14:55:03.740700 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-vqf5v"] Nov 23 14:55:05 crc kubenswrapper[5050]: I1123 14:55:05.610407 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-vqf5v" podUID="7783e2e2-3449-499f-8ee3-a1e4c2a467ba" containerName="registry-server" containerID="cri-o://e57da42cd5f115b66f2a85750b133311b0cf91f59802b000237ee9ad324803d3" gracePeriod=2 Nov 23 14:55:06 crc kubenswrapper[5050]: I1123 14:55:06.090907 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-vqf5v" Nov 23 14:55:06 crc kubenswrapper[5050]: I1123 14:55:06.103812 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7783e2e2-3449-499f-8ee3-a1e4c2a467ba-catalog-content\") pod \"7783e2e2-3449-499f-8ee3-a1e4c2a467ba\" (UID: \"7783e2e2-3449-499f-8ee3-a1e4c2a467ba\") " Nov 23 14:55:06 crc kubenswrapper[5050]: I1123 14:55:06.103933 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x7cvz\" (UniqueName: \"kubernetes.io/projected/7783e2e2-3449-499f-8ee3-a1e4c2a467ba-kube-api-access-x7cvz\") pod \"7783e2e2-3449-499f-8ee3-a1e4c2a467ba\" (UID: \"7783e2e2-3449-499f-8ee3-a1e4c2a467ba\") " Nov 23 14:55:06 crc kubenswrapper[5050]: I1123 14:55:06.103984 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7783e2e2-3449-499f-8ee3-a1e4c2a467ba-utilities\") pod \"7783e2e2-3449-499f-8ee3-a1e4c2a467ba\" (UID: \"7783e2e2-3449-499f-8ee3-a1e4c2a467ba\") " Nov 23 14:55:06 crc kubenswrapper[5050]: I1123 14:55:06.106895 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7783e2e2-3449-499f-8ee3-a1e4c2a467ba-utilities" (OuterVolumeSpecName: "utilities") pod "7783e2e2-3449-499f-8ee3-a1e4c2a467ba" (UID: "7783e2e2-3449-499f-8ee3-a1e4c2a467ba"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 14:55:06 crc kubenswrapper[5050]: I1123 14:55:06.126809 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7783e2e2-3449-499f-8ee3-a1e4c2a467ba-kube-api-access-x7cvz" (OuterVolumeSpecName: "kube-api-access-x7cvz") pod "7783e2e2-3449-499f-8ee3-a1e4c2a467ba" (UID: "7783e2e2-3449-499f-8ee3-a1e4c2a467ba"). InnerVolumeSpecName "kube-api-access-x7cvz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 14:55:06 crc kubenswrapper[5050]: I1123 14:55:06.205182 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x7cvz\" (UniqueName: \"kubernetes.io/projected/7783e2e2-3449-499f-8ee3-a1e4c2a467ba-kube-api-access-x7cvz\") on node \"crc\" DevicePath \"\"" Nov 23 14:55:06 crc kubenswrapper[5050]: I1123 14:55:06.205219 5050 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7783e2e2-3449-499f-8ee3-a1e4c2a467ba-utilities\") on node \"crc\" DevicePath \"\"" Nov 23 14:55:06 crc kubenswrapper[5050]: I1123 14:55:06.498823 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7783e2e2-3449-499f-8ee3-a1e4c2a467ba-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "7783e2e2-3449-499f-8ee3-a1e4c2a467ba" (UID: "7783e2e2-3449-499f-8ee3-a1e4c2a467ba"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 14:55:06 crc kubenswrapper[5050]: I1123 14:55:06.508953 5050 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7783e2e2-3449-499f-8ee3-a1e4c2a467ba-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 23 14:55:06 crc kubenswrapper[5050]: I1123 14:55:06.618559 5050 generic.go:334] "Generic (PLEG): container finished" podID="7783e2e2-3449-499f-8ee3-a1e4c2a467ba" containerID="e57da42cd5f115b66f2a85750b133311b0cf91f59802b000237ee9ad324803d3" exitCode=0 Nov 23 14:55:06 crc kubenswrapper[5050]: I1123 14:55:06.618629 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vqf5v" event={"ID":"7783e2e2-3449-499f-8ee3-a1e4c2a467ba","Type":"ContainerDied","Data":"e57da42cd5f115b66f2a85750b133311b0cf91f59802b000237ee9ad324803d3"} Nov 23 14:55:06 crc kubenswrapper[5050]: I1123 14:55:06.618657 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-vqf5v" Nov 23 14:55:06 crc kubenswrapper[5050]: I1123 14:55:06.618673 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vqf5v" event={"ID":"7783e2e2-3449-499f-8ee3-a1e4c2a467ba","Type":"ContainerDied","Data":"4eefe9c789cdade75b689f547ec5ee55948b48e47cdaa86e066a46bd3eb0c090"} Nov 23 14:55:06 crc kubenswrapper[5050]: I1123 14:55:06.618714 5050 scope.go:117] "RemoveContainer" containerID="e57da42cd5f115b66f2a85750b133311b0cf91f59802b000237ee9ad324803d3" Nov 23 14:55:06 crc kubenswrapper[5050]: I1123 14:55:06.648052 5050 scope.go:117] "RemoveContainer" containerID="708cdf9f4a9d8f87493fcedf8defb54d06e0d18f727fe27b8d52213beee3a1c6" Nov 23 14:55:06 crc kubenswrapper[5050]: I1123 14:55:06.662150 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-vqf5v"] Nov 23 14:55:06 crc kubenswrapper[5050]: I1123 14:55:06.664951 5050 scope.go:117] "RemoveContainer" containerID="314ef357731afb5e6b372ad219c646e48a9d3bc1d51fac8db7ba26f1af6b1f36" Nov 23 14:55:06 crc kubenswrapper[5050]: I1123 14:55:06.665282 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-vqf5v"] Nov 23 14:55:06 crc kubenswrapper[5050]: I1123 14:55:06.683077 5050 scope.go:117] "RemoveContainer" containerID="e57da42cd5f115b66f2a85750b133311b0cf91f59802b000237ee9ad324803d3" Nov 23 14:55:06 crc kubenswrapper[5050]: E1123 14:55:06.683810 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e57da42cd5f115b66f2a85750b133311b0cf91f59802b000237ee9ad324803d3\": container with ID starting with e57da42cd5f115b66f2a85750b133311b0cf91f59802b000237ee9ad324803d3 not found: ID does not exist" containerID="e57da42cd5f115b66f2a85750b133311b0cf91f59802b000237ee9ad324803d3" Nov 23 14:55:06 crc kubenswrapper[5050]: I1123 14:55:06.683845 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e57da42cd5f115b66f2a85750b133311b0cf91f59802b000237ee9ad324803d3"} err="failed to get container status \"e57da42cd5f115b66f2a85750b133311b0cf91f59802b000237ee9ad324803d3\": rpc error: code = NotFound desc = could not find container \"e57da42cd5f115b66f2a85750b133311b0cf91f59802b000237ee9ad324803d3\": container with ID starting with e57da42cd5f115b66f2a85750b133311b0cf91f59802b000237ee9ad324803d3 not found: ID does not exist" Nov 23 
14:55:06 crc kubenswrapper[5050]: I1123 14:55:06.683877 5050 scope.go:117] "RemoveContainer" containerID="708cdf9f4a9d8f87493fcedf8defb54d06e0d18f727fe27b8d52213beee3a1c6" Nov 23 14:55:06 crc kubenswrapper[5050]: E1123 14:55:06.684296 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"708cdf9f4a9d8f87493fcedf8defb54d06e0d18f727fe27b8d52213beee3a1c6\": container with ID starting with 708cdf9f4a9d8f87493fcedf8defb54d06e0d18f727fe27b8d52213beee3a1c6 not found: ID does not exist" containerID="708cdf9f4a9d8f87493fcedf8defb54d06e0d18f727fe27b8d52213beee3a1c6" Nov 23 14:55:06 crc kubenswrapper[5050]: I1123 14:55:06.684360 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"708cdf9f4a9d8f87493fcedf8defb54d06e0d18f727fe27b8d52213beee3a1c6"} err="failed to get container status \"708cdf9f4a9d8f87493fcedf8defb54d06e0d18f727fe27b8d52213beee3a1c6\": rpc error: code = NotFound desc = could not find container \"708cdf9f4a9d8f87493fcedf8defb54d06e0d18f727fe27b8d52213beee3a1c6\": container with ID starting with 708cdf9f4a9d8f87493fcedf8defb54d06e0d18f727fe27b8d52213beee3a1c6 not found: ID does not exist" Nov 23 14:55:06 crc kubenswrapper[5050]: I1123 14:55:06.684412 5050 scope.go:117] "RemoveContainer" containerID="314ef357731afb5e6b372ad219c646e48a9d3bc1d51fac8db7ba26f1af6b1f36" Nov 23 14:55:06 crc kubenswrapper[5050]: E1123 14:55:06.684851 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"314ef357731afb5e6b372ad219c646e48a9d3bc1d51fac8db7ba26f1af6b1f36\": container with ID starting with 314ef357731afb5e6b372ad219c646e48a9d3bc1d51fac8db7ba26f1af6b1f36 not found: ID does not exist" containerID="314ef357731afb5e6b372ad219c646e48a9d3bc1d51fac8db7ba26f1af6b1f36" Nov 23 14:55:06 crc kubenswrapper[5050]: I1123 14:55:06.684881 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"314ef357731afb5e6b372ad219c646e48a9d3bc1d51fac8db7ba26f1af6b1f36"} err="failed to get container status \"314ef357731afb5e6b372ad219c646e48a9d3bc1d51fac8db7ba26f1af6b1f36\": rpc error: code = NotFound desc = could not find container \"314ef357731afb5e6b372ad219c646e48a9d3bc1d51fac8db7ba26f1af6b1f36\": container with ID starting with 314ef357731afb5e6b372ad219c646e48a9d3bc1d51fac8db7ba26f1af6b1f36 not found: ID does not exist" Nov 23 14:55:07 crc kubenswrapper[5050]: I1123 14:55:07.091792 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-controller-manager-575dfc8b8d-ztv7m" Nov 23 14:55:07 crc kubenswrapper[5050]: I1123 14:55:07.559141 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7783e2e2-3449-499f-8ee3-a1e4c2a467ba" path="/var/lib/kubelet/pods/7783e2e2-3449-499f-8ee3-a1e4c2a467ba/volumes" Nov 23 14:55:07 crc kubenswrapper[5050]: I1123 14:55:07.968904 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-sn9hl"] Nov 23 14:55:07 crc kubenswrapper[5050]: E1123 14:55:07.969628 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7783e2e2-3449-499f-8ee3-a1e4c2a467ba" containerName="extract-utilities" Nov 23 14:55:07 crc kubenswrapper[5050]: I1123 14:55:07.969646 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="7783e2e2-3449-499f-8ee3-a1e4c2a467ba" containerName="extract-utilities" Nov 23 14:55:07 crc kubenswrapper[5050]: E1123 14:55:07.969656 5050 
Nov 23 14:55:07 crc kubenswrapper[5050]: I1123 14:55:07.969668 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="7783e2e2-3449-499f-8ee3-a1e4c2a467ba" containerName="registry-server"
Nov 23 14:55:07 crc kubenswrapper[5050]: E1123 14:55:07.969687 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7783e2e2-3449-499f-8ee3-a1e4c2a467ba" containerName="extract-content"
Nov 23 14:55:07 crc kubenswrapper[5050]: I1123 14:55:07.969696 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="7783e2e2-3449-499f-8ee3-a1e4c2a467ba" containerName="extract-content"
Nov 23 14:55:07 crc kubenswrapper[5050]: I1123 14:55:07.969833 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="7783e2e2-3449-499f-8ee3-a1e4c2a467ba" containerName="registry-server"
Nov 23 14:55:07 crc kubenswrapper[5050]: I1123 14:55:07.972291 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-sn9hl"
Nov 23 14:55:07 crc kubenswrapper[5050]: I1123 14:55:07.974413 5050 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-certs-secret"
Nov 23 14:55:07 crc kubenswrapper[5050]: I1123 14:55:07.974544 5050 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-daemon-dockercfg-4jksq"
Nov 23 14:55:07 crc kubenswrapper[5050]: I1123 14:55:07.980544 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-webhook-server-6998585d5-c2rcn"]
Nov 23 14:55:07 crc kubenswrapper[5050]: I1123 14:55:07.981495 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-6998585d5-c2rcn"
Nov 23 14:55:07 crc kubenswrapper[5050]: I1123 14:55:07.983293 5050 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-webhook-server-cert"
Nov 23 14:55:07 crc kubenswrapper[5050]: I1123 14:55:07.989117 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"frr-startup"
Nov 23 14:55:07 crc kubenswrapper[5050]: I1123 14:55:07.999259 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-6998585d5-c2rcn"]
Nov 23 14:55:08 crc kubenswrapper[5050]: I1123 14:55:08.035499 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/4e8f95ac-d360-4b81-9bce-28577dce8e4c-metrics-certs\") pod \"frr-k8s-sn9hl\" (UID: \"4e8f95ac-d360-4b81-9bce-28577dce8e4c\") " pod="metallb-system/frr-k8s-sn9hl"
Nov 23 14:55:08 crc kubenswrapper[5050]: I1123 14:55:08.035549 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8k4x2\" (UniqueName: \"kubernetes.io/projected/4e8f95ac-d360-4b81-9bce-28577dce8e4c-kube-api-access-8k4x2\") pod \"frr-k8s-sn9hl\" (UID: \"4e8f95ac-d360-4b81-9bce-28577dce8e4c\") " pod="metallb-system/frr-k8s-sn9hl"
Nov 23 14:55:08 crc kubenswrapper[5050]: I1123 14:55:08.035589 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s2xbz\" (UniqueName: \"kubernetes.io/projected/4c8fffca-2f1a-4b47-beb9-66bf0c19f8e0-kube-api-access-s2xbz\") pod \"frr-k8s-webhook-server-6998585d5-c2rcn\" (UID: \"4c8fffca-2f1a-4b47-beb9-66bf0c19f8e0\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-c2rcn"
Nov 23 14:55:08 crc kubenswrapper[5050]: I1123 14:55:08.035622 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/4c8fffca-2f1a-4b47-beb9-66bf0c19f8e0-cert\") pod \"frr-k8s-webhook-server-6998585d5-c2rcn\" (UID: \"4c8fffca-2f1a-4b47-beb9-66bf0c19f8e0\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-c2rcn"
Nov 23 14:55:08 crc kubenswrapper[5050]: I1123 14:55:08.035660 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/4e8f95ac-d360-4b81-9bce-28577dce8e4c-reloader\") pod \"frr-k8s-sn9hl\" (UID: \"4e8f95ac-d360-4b81-9bce-28577dce8e4c\") " pod="metallb-system/frr-k8s-sn9hl"
Nov 23 14:55:08 crc kubenswrapper[5050]: I1123 14:55:08.035692 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/4e8f95ac-d360-4b81-9bce-28577dce8e4c-frr-sockets\") pod \"frr-k8s-sn9hl\" (UID: \"4e8f95ac-d360-4b81-9bce-28577dce8e4c\") " pod="metallb-system/frr-k8s-sn9hl"
Nov 23 14:55:08 crc kubenswrapper[5050]: I1123 14:55:08.035710 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/4e8f95ac-d360-4b81-9bce-28577dce8e4c-frr-startup\") pod \"frr-k8s-sn9hl\" (UID: \"4e8f95ac-d360-4b81-9bce-28577dce8e4c\") " pod="metallb-system/frr-k8s-sn9hl"
Nov 23 14:55:08 crc kubenswrapper[5050]: I1123 14:55:08.035730 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/4e8f95ac-d360-4b81-9bce-28577dce8e4c-metrics\") pod \"frr-k8s-sn9hl\" (UID: \"4e8f95ac-d360-4b81-9bce-28577dce8e4c\") " pod="metallb-system/frr-k8s-sn9hl"
Nov 23 14:55:08 crc kubenswrapper[5050]: I1123 14:55:08.035761 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/4e8f95ac-d360-4b81-9bce-28577dce8e4c-frr-conf\") pod \"frr-k8s-sn9hl\" (UID: \"4e8f95ac-d360-4b81-9bce-28577dce8e4c\") " pod="metallb-system/frr-k8s-sn9hl"
Nov 23 14:55:08 crc kubenswrapper[5050]: I1123 14:55:08.079836 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/speaker-dglng"]
Nov 23 14:55:08 crc kubenswrapper[5050]: I1123 14:55:08.081063 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/speaker-dglng"
Nov 23 14:55:08 crc kubenswrapper[5050]: I1123 14:55:08.085421 5050 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-dockercfg-6bjm7"
Nov 23 14:55:08 crc kubenswrapper[5050]: I1123 14:55:08.085720 5050 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-memberlist"
Nov 23 14:55:08 crc kubenswrapper[5050]: I1123 14:55:08.085883 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"metallb-excludel2"
Nov 23 14:55:08 crc kubenswrapper[5050]: I1123 14:55:08.086753 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/controller-6c7b4b5f48-xvbr9"]
Nov 23 14:55:08 crc kubenswrapper[5050]: I1123 14:55:08.088088 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/controller-6c7b4b5f48-xvbr9"
Nov 23 14:55:08 crc kubenswrapper[5050]: I1123 14:55:08.091056 5050 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-certs-secret"
Nov 23 14:55:08 crc kubenswrapper[5050]: I1123 14:55:08.097426 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-6c7b4b5f48-xvbr9"]
Nov 23 14:55:08 crc kubenswrapper[5050]: I1123 14:55:08.097592 5050 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-certs-secret"
Nov 23 14:55:08 crc kubenswrapper[5050]: I1123 14:55:08.137723 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/4e8f95ac-d360-4b81-9bce-28577dce8e4c-frr-sockets\") pod \"frr-k8s-sn9hl\" (UID: \"4e8f95ac-d360-4b81-9bce-28577dce8e4c\") " pod="metallb-system/frr-k8s-sn9hl"
Nov 23 14:55:08 crc kubenswrapper[5050]: I1123 14:55:08.137776 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/4e8f95ac-d360-4b81-9bce-28577dce8e4c-frr-startup\") pod \"frr-k8s-sn9hl\" (UID: \"4e8f95ac-d360-4b81-9bce-28577dce8e4c\") " pod="metallb-system/frr-k8s-sn9hl"
Nov 23 14:55:08 crc kubenswrapper[5050]: I1123 14:55:08.137801 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/4e8f95ac-d360-4b81-9bce-28577dce8e4c-metrics\") pod \"frr-k8s-sn9hl\" (UID: \"4e8f95ac-d360-4b81-9bce-28577dce8e4c\") " pod="metallb-system/frr-k8s-sn9hl"
Nov 23 14:55:08 crc kubenswrapper[5050]: I1123 14:55:08.137841 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6vdp9\" (UniqueName: \"kubernetes.io/projected/de22d979-ac94-4839-b266-2dfa8ae79de2-kube-api-access-6vdp9\") pod \"controller-6c7b4b5f48-xvbr9\" (UID: \"de22d979-ac94-4839-b266-2dfa8ae79de2\") " pod="metallb-system/controller-6c7b4b5f48-xvbr9"
Nov 23 14:55:08 crc kubenswrapper[5050]: I1123 14:55:08.137861 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/4e8f95ac-d360-4b81-9bce-28577dce8e4c-frr-conf\") pod \"frr-k8s-sn9hl\" (UID: \"4e8f95ac-d360-4b81-9bce-28577dce8e4c\") " pod="metallb-system/frr-k8s-sn9hl"
Nov 23 14:55:08 crc kubenswrapper[5050]: I1123 14:55:08.137886 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a29c3193-350c-4e8d-9c6f-bfb1e96a4527-metrics-certs\") pod \"speaker-dglng\" (UID: \"a29c3193-350c-4e8d-9c6f-bfb1e96a4527\") " pod="metallb-system/speaker-dglng"
Nov 23 14:55:08 crc kubenswrapper[5050]: I1123 14:55:08.137913 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/a29c3193-350c-4e8d-9c6f-bfb1e96a4527-metallb-excludel2\") pod \"speaker-dglng\" (UID: \"a29c3193-350c-4e8d-9c6f-bfb1e96a4527\") " pod="metallb-system/speaker-dglng"
Nov 23 14:55:08 crc kubenswrapper[5050]: I1123 14:55:08.137944 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/4e8f95ac-d360-4b81-9bce-28577dce8e4c-metrics-certs\") pod \"frr-k8s-sn9hl\" (UID: \"4e8f95ac-d360-4b81-9bce-28577dce8e4c\") " pod="metallb-system/frr-k8s-sn9hl"
Nov 23 14:55:08 crc kubenswrapper[5050]: I1123 14:55:08.137962 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8k4x2\" (UniqueName: \"kubernetes.io/projected/4e8f95ac-d360-4b81-9bce-28577dce8e4c-kube-api-access-8k4x2\") pod \"frr-k8s-sn9hl\" (UID: \"4e8f95ac-d360-4b81-9bce-28577dce8e4c\") " pod="metallb-system/frr-k8s-sn9hl"
Nov 23 14:55:08 crc kubenswrapper[5050]: I1123 14:55:08.137984 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/de22d979-ac94-4839-b266-2dfa8ae79de2-metrics-certs\") pod \"controller-6c7b4b5f48-xvbr9\" (UID: \"de22d979-ac94-4839-b266-2dfa8ae79de2\") " pod="metallb-system/controller-6c7b4b5f48-xvbr9"
Nov 23 14:55:08 crc kubenswrapper[5050]: I1123 14:55:08.138007 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/de22d979-ac94-4839-b266-2dfa8ae79de2-cert\") pod \"controller-6c7b4b5f48-xvbr9\" (UID: \"de22d979-ac94-4839-b266-2dfa8ae79de2\") " pod="metallb-system/controller-6c7b4b5f48-xvbr9"
Nov 23 14:55:08 crc kubenswrapper[5050]: I1123 14:55:08.138031 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2xbz\" (UniqueName: \"kubernetes.io/projected/4c8fffca-2f1a-4b47-beb9-66bf0c19f8e0-kube-api-access-s2xbz\") pod \"frr-k8s-webhook-server-6998585d5-c2rcn\" (UID: \"4c8fffca-2f1a-4b47-beb9-66bf0c19f8e0\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-c2rcn"
Nov 23 14:55:08 crc kubenswrapper[5050]: I1123 14:55:08.138053 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-29vrj\" (UniqueName: \"kubernetes.io/projected/a29c3193-350c-4e8d-9c6f-bfb1e96a4527-kube-api-access-29vrj\") pod \"speaker-dglng\" (UID: \"a29c3193-350c-4e8d-9c6f-bfb1e96a4527\") " pod="metallb-system/speaker-dglng"
Nov 23 14:55:08 crc kubenswrapper[5050]: I1123 14:55:08.138081 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/4c8fffca-2f1a-4b47-beb9-66bf0c19f8e0-cert\") pod \"frr-k8s-webhook-server-6998585d5-c2rcn\" (UID: \"4c8fffca-2f1a-4b47-beb9-66bf0c19f8e0\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-c2rcn"
Nov 23 14:55:08 crc kubenswrapper[5050]: I1123 14:55:08.138113 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/4e8f95ac-d360-4b81-9bce-28577dce8e4c-reloader\") pod \"frr-k8s-sn9hl\" (UID: \"4e8f95ac-d360-4b81-9bce-28577dce8e4c\") " pod="metallb-system/frr-k8s-sn9hl"
Nov 23 14:55:08 crc kubenswrapper[5050]: I1123 14:55:08.138142 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/a29c3193-350c-4e8d-9c6f-bfb1e96a4527-memberlist\") pod \"speaker-dglng\" (UID: \"a29c3193-350c-4e8d-9c6f-bfb1e96a4527\") " pod="metallb-system/speaker-dglng"
Nov 23 14:55:08 crc kubenswrapper[5050]: E1123 14:55:08.138329 5050 secret.go:188] Couldn't get secret metallb-system/frr-k8s-certs-secret: secret "frr-k8s-certs-secret" not found
Nov 23 14:55:08 crc kubenswrapper[5050]: E1123 14:55:08.138406 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/4e8f95ac-d360-4b81-9bce-28577dce8e4c-metrics-certs podName:4e8f95ac-d360-4b81-9bce-28577dce8e4c nodeName:}" failed. No retries permitted until 2025-11-23 14:55:08.638378961 +0000 UTC m=+803.805375446 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/4e8f95ac-d360-4b81-9bce-28577dce8e4c-metrics-certs") pod "frr-k8s-sn9hl" (UID: "4e8f95ac-d360-4b81-9bce-28577dce8e4c") : secret "frr-k8s-certs-secret" not found
Nov 23 14:55:08 crc kubenswrapper[5050]: I1123 14:55:08.138407 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/4e8f95ac-d360-4b81-9bce-28577dce8e4c-frr-sockets\") pod \"frr-k8s-sn9hl\" (UID: \"4e8f95ac-d360-4b81-9bce-28577dce8e4c\") " pod="metallb-system/frr-k8s-sn9hl"
Nov 23 14:55:08 crc kubenswrapper[5050]: I1123 14:55:08.138470 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/4e8f95ac-d360-4b81-9bce-28577dce8e4c-metrics\") pod \"frr-k8s-sn9hl\" (UID: \"4e8f95ac-d360-4b81-9bce-28577dce8e4c\") " pod="metallb-system/frr-k8s-sn9hl"
Nov 23 14:55:08 crc kubenswrapper[5050]: I1123 14:55:08.138583 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/4e8f95ac-d360-4b81-9bce-28577dce8e4c-frr-conf\") pod \"frr-k8s-sn9hl\" (UID: \"4e8f95ac-d360-4b81-9bce-28577dce8e4c\") " pod="metallb-system/frr-k8s-sn9hl"
Nov 23 14:55:08 crc kubenswrapper[5050]: I1123 14:55:08.139696 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/4e8f95ac-d360-4b81-9bce-28577dce8e4c-frr-startup\") pod \"frr-k8s-sn9hl\" (UID: \"4e8f95ac-d360-4b81-9bce-28577dce8e4c\") " pod="metallb-system/frr-k8s-sn9hl"
Nov 23 14:55:08 crc kubenswrapper[5050]: I1123 14:55:08.144448 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/4c8fffca-2f1a-4b47-beb9-66bf0c19f8e0-cert\") pod \"frr-k8s-webhook-server-6998585d5-c2rcn\" (UID: \"4c8fffca-2f1a-4b47-beb9-66bf0c19f8e0\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-c2rcn"
Nov 23 14:55:08 crc kubenswrapper[5050]: I1123 14:55:08.158130 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2xbz\" (UniqueName: \"kubernetes.io/projected/4c8fffca-2f1a-4b47-beb9-66bf0c19f8e0-kube-api-access-s2xbz\") pod \"frr-k8s-webhook-server-6998585d5-c2rcn\" (UID: \"4c8fffca-2f1a-4b47-beb9-66bf0c19f8e0\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-c2rcn"
Nov 23 14:55:08 crc kubenswrapper[5050]: I1123 14:55:08.161756 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/4e8f95ac-d360-4b81-9bce-28577dce8e4c-reloader\") pod \"frr-k8s-sn9hl\" (UID: \"4e8f95ac-d360-4b81-9bce-28577dce8e4c\") " pod="metallb-system/frr-k8s-sn9hl"
Nov 23 14:55:08 crc kubenswrapper[5050]: I1123 14:55:08.163489 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8k4x2\" (UniqueName: \"kubernetes.io/projected/4e8f95ac-d360-4b81-9bce-28577dce8e4c-kube-api-access-8k4x2\") pod \"frr-k8s-sn9hl\" (UID: \"4e8f95ac-d360-4b81-9bce-28577dce8e4c\") " pod="metallb-system/frr-k8s-sn9hl"
Nov 23 14:55:08 crc kubenswrapper[5050]: I1123 14:55:08.239046 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6vdp9\" (UniqueName: \"kubernetes.io/projected/de22d979-ac94-4839-b266-2dfa8ae79de2-kube-api-access-6vdp9\") pod \"controller-6c7b4b5f48-xvbr9\" (UID: \"de22d979-ac94-4839-b266-2dfa8ae79de2\") " pod="metallb-system/controller-6c7b4b5f48-xvbr9"
Nov 23 14:55:08 crc kubenswrapper[5050]: I1123 14:55:08.239103 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a29c3193-350c-4e8d-9c6f-bfb1e96a4527-metrics-certs\") pod \"speaker-dglng\" (UID: \"a29c3193-350c-4e8d-9c6f-bfb1e96a4527\") " pod="metallb-system/speaker-dglng"
Nov 23 14:55:08 crc kubenswrapper[5050]: I1123 14:55:08.239127 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/a29c3193-350c-4e8d-9c6f-bfb1e96a4527-metallb-excludel2\") pod \"speaker-dglng\" (UID: \"a29c3193-350c-4e8d-9c6f-bfb1e96a4527\") " pod="metallb-system/speaker-dglng"
Nov 23 14:55:08 crc kubenswrapper[5050]: I1123 14:55:08.239169 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/de22d979-ac94-4839-b266-2dfa8ae79de2-metrics-certs\") pod \"controller-6c7b4b5f48-xvbr9\" (UID: \"de22d979-ac94-4839-b266-2dfa8ae79de2\") " pod="metallb-system/controller-6c7b4b5f48-xvbr9"
Nov 23 14:55:08 crc kubenswrapper[5050]: I1123 14:55:08.239187 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/de22d979-ac94-4839-b266-2dfa8ae79de2-cert\") pod \"controller-6c7b4b5f48-xvbr9\" (UID: \"de22d979-ac94-4839-b266-2dfa8ae79de2\") " pod="metallb-system/controller-6c7b4b5f48-xvbr9"
Nov 23 14:55:08 crc kubenswrapper[5050]: I1123 14:55:08.239207 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-29vrj\" (UniqueName: \"kubernetes.io/projected/a29c3193-350c-4e8d-9c6f-bfb1e96a4527-kube-api-access-29vrj\") pod \"speaker-dglng\" (UID: \"a29c3193-350c-4e8d-9c6f-bfb1e96a4527\") " pod="metallb-system/speaker-dglng"
Nov 23 14:55:08 crc kubenswrapper[5050]: I1123 14:55:08.239241 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/a29c3193-350c-4e8d-9c6f-bfb1e96a4527-memberlist\") pod \"speaker-dglng\" (UID: \"a29c3193-350c-4e8d-9c6f-bfb1e96a4527\") " pod="metallb-system/speaker-dglng"
Nov 23 14:55:08 crc kubenswrapper[5050]: E1123 14:55:08.239392 5050 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found
Nov 23 14:55:08 crc kubenswrapper[5050]: E1123 14:55:08.239446 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/a29c3193-350c-4e8d-9c6f-bfb1e96a4527-memberlist podName:a29c3193-350c-4e8d-9c6f-bfb1e96a4527 nodeName:}" failed. No retries permitted until 2025-11-23 14:55:08.739428813 +0000 UTC m=+803.906425298 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/a29c3193-350c-4e8d-9c6f-bfb1e96a4527-memberlist") pod "speaker-dglng" (UID: "a29c3193-350c-4e8d-9c6f-bfb1e96a4527") : secret "metallb-memberlist" not found
Nov 23 14:55:08 crc kubenswrapper[5050]: E1123 14:55:08.240693 5050 secret.go:188] Couldn't get secret metallb-system/controller-certs-secret: secret "controller-certs-secret" not found
Nov 23 14:55:08 crc kubenswrapper[5050]: E1123 14:55:08.240903 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/de22d979-ac94-4839-b266-2dfa8ae79de2-metrics-certs podName:de22d979-ac94-4839-b266-2dfa8ae79de2 nodeName:}" failed. No retries permitted until 2025-11-23 14:55:08.740871945 +0000 UTC m=+803.907868630 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/de22d979-ac94-4839-b266-2dfa8ae79de2-metrics-certs") pod "controller-6c7b4b5f48-xvbr9" (UID: "de22d979-ac94-4839-b266-2dfa8ae79de2") : secret "controller-certs-secret" not found
Nov 23 14:55:08 crc kubenswrapper[5050]: I1123 14:55:08.242117 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/a29c3193-350c-4e8d-9c6f-bfb1e96a4527-metallb-excludel2\") pod \"speaker-dglng\" (UID: \"a29c3193-350c-4e8d-9c6f-bfb1e96a4527\") " pod="metallb-system/speaker-dglng"
Nov 23 14:55:08 crc kubenswrapper[5050]: I1123 14:55:08.246032 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a29c3193-350c-4e8d-9c6f-bfb1e96a4527-metrics-certs\") pod \"speaker-dglng\" (UID: \"a29c3193-350c-4e8d-9c6f-bfb1e96a4527\") " pod="metallb-system/speaker-dglng"
Nov 23 14:55:08 crc kubenswrapper[5050]: I1123 14:55:08.247181 5050 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert"
Nov 23 14:55:08 crc kubenswrapper[5050]: I1123 14:55:08.254308 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/de22d979-ac94-4839-b266-2dfa8ae79de2-cert\") pod \"controller-6c7b4b5f48-xvbr9\" (UID: \"de22d979-ac94-4839-b266-2dfa8ae79de2\") " pod="metallb-system/controller-6c7b4b5f48-xvbr9"
Nov 23 14:55:08 crc kubenswrapper[5050]: I1123 14:55:08.260222 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6vdp9\" (UniqueName: \"kubernetes.io/projected/de22d979-ac94-4839-b266-2dfa8ae79de2-kube-api-access-6vdp9\") pod \"controller-6c7b4b5f48-xvbr9\" (UID: \"de22d979-ac94-4839-b266-2dfa8ae79de2\") " pod="metallb-system/controller-6c7b4b5f48-xvbr9"
Nov 23 14:55:08 crc kubenswrapper[5050]: I1123 14:55:08.288141 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-29vrj\" (UniqueName: \"kubernetes.io/projected/a29c3193-350c-4e8d-9c6f-bfb1e96a4527-kube-api-access-29vrj\") pod \"speaker-dglng\" (UID: \"a29c3193-350c-4e8d-9c6f-bfb1e96a4527\") " pod="metallb-system/speaker-dglng"
Nov 23 14:55:08 crc kubenswrapper[5050]: I1123 14:55:08.306879 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-6998585d5-c2rcn"
Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-6998585d5-c2rcn" Nov 23 14:55:08 crc kubenswrapper[5050]: I1123 14:55:08.516942 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-6998585d5-c2rcn"] Nov 23 14:55:08 crc kubenswrapper[5050]: I1123 14:55:08.636492 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-6998585d5-c2rcn" event={"ID":"4c8fffca-2f1a-4b47-beb9-66bf0c19f8e0","Type":"ContainerStarted","Data":"c4a3c3e2da9231812d5e9644f17f1965bef1942545f60cb17cc89695884d93bc"} Nov 23 14:55:08 crc kubenswrapper[5050]: I1123 14:55:08.649204 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/4e8f95ac-d360-4b81-9bce-28577dce8e4c-metrics-certs\") pod \"frr-k8s-sn9hl\" (UID: \"4e8f95ac-d360-4b81-9bce-28577dce8e4c\") " pod="metallb-system/frr-k8s-sn9hl" Nov 23 14:55:08 crc kubenswrapper[5050]: I1123 14:55:08.656354 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/4e8f95ac-d360-4b81-9bce-28577dce8e4c-metrics-certs\") pod \"frr-k8s-sn9hl\" (UID: \"4e8f95ac-d360-4b81-9bce-28577dce8e4c\") " pod="metallb-system/frr-k8s-sn9hl" Nov 23 14:55:08 crc kubenswrapper[5050]: I1123 14:55:08.751350 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/de22d979-ac94-4839-b266-2dfa8ae79de2-metrics-certs\") pod \"controller-6c7b4b5f48-xvbr9\" (UID: \"de22d979-ac94-4839-b266-2dfa8ae79de2\") " pod="metallb-system/controller-6c7b4b5f48-xvbr9" Nov 23 14:55:08 crc kubenswrapper[5050]: I1123 14:55:08.751550 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/a29c3193-350c-4e8d-9c6f-bfb1e96a4527-memberlist\") pod \"speaker-dglng\" (UID: \"a29c3193-350c-4e8d-9c6f-bfb1e96a4527\") " pod="metallb-system/speaker-dglng" Nov 23 14:55:08 crc kubenswrapper[5050]: E1123 14:55:08.751803 5050 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Nov 23 14:55:08 crc kubenswrapper[5050]: E1123 14:55:08.751952 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/a29c3193-350c-4e8d-9c6f-bfb1e96a4527-memberlist podName:a29c3193-350c-4e8d-9c6f-bfb1e96a4527 nodeName:}" failed. No retries permitted until 2025-11-23 14:55:09.751919795 +0000 UTC m=+804.918916320 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/a29c3193-350c-4e8d-9c6f-bfb1e96a4527-memberlist") pod "speaker-dglng" (UID: "a29c3193-350c-4e8d-9c6f-bfb1e96a4527") : secret "metallb-memberlist" not found Nov 23 14:55:08 crc kubenswrapper[5050]: I1123 14:55:08.759831 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/de22d979-ac94-4839-b266-2dfa8ae79de2-metrics-certs\") pod \"controller-6c7b4b5f48-xvbr9\" (UID: \"de22d979-ac94-4839-b266-2dfa8ae79de2\") " pod="metallb-system/controller-6c7b4b5f48-xvbr9" Nov 23 14:55:08 crc kubenswrapper[5050]: I1123 14:55:08.895881 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-sn9hl" Nov 23 14:55:09 crc kubenswrapper[5050]: I1123 14:55:09.018063 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/controller-6c7b4b5f48-xvbr9" Nov 23 14:55:09 crc kubenswrapper[5050]: I1123 14:55:09.564131 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-6c7b4b5f48-xvbr9"] Nov 23 14:55:09 crc kubenswrapper[5050]: I1123 14:55:09.652626 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6c7b4b5f48-xvbr9" event={"ID":"de22d979-ac94-4839-b266-2dfa8ae79de2","Type":"ContainerStarted","Data":"d2ef181ed4cb2c46ed537b3863130f60103913e8e4428cc37ad59113642cd7b8"} Nov 23 14:55:09 crc kubenswrapper[5050]: I1123 14:55:09.655736 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-sn9hl" event={"ID":"4e8f95ac-d360-4b81-9bce-28577dce8e4c","Type":"ContainerStarted","Data":"afceb4170141009db712a73528c0c430534d5b338676030d276b69208bdbba5b"} Nov 23 14:55:09 crc kubenswrapper[5050]: I1123 14:55:09.778697 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/a29c3193-350c-4e8d-9c6f-bfb1e96a4527-memberlist\") pod \"speaker-dglng\" (UID: \"a29c3193-350c-4e8d-9c6f-bfb1e96a4527\") " pod="metallb-system/speaker-dglng" Nov 23 14:55:09 crc kubenswrapper[5050]: E1123 14:55:09.778978 5050 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Nov 23 14:55:09 crc kubenswrapper[5050]: E1123 14:55:09.779107 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/a29c3193-350c-4e8d-9c6f-bfb1e96a4527-memberlist podName:a29c3193-350c-4e8d-9c6f-bfb1e96a4527 nodeName:}" failed. No retries permitted until 2025-11-23 14:55:11.77907523 +0000 UTC m=+806.946071715 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/a29c3193-350c-4e8d-9c6f-bfb1e96a4527-memberlist") pod "speaker-dglng" (UID: "a29c3193-350c-4e8d-9c6f-bfb1e96a4527") : secret "metallb-memberlist" not found Nov 23 14:55:10 crc kubenswrapper[5050]: I1123 14:55:10.672593 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6c7b4b5f48-xvbr9" event={"ID":"de22d979-ac94-4839-b266-2dfa8ae79de2","Type":"ContainerStarted","Data":"ac7e8f44ef933dc106c2fea000adb91eacb2c26ef3aec22585a7129a9d89ff62"} Nov 23 14:55:10 crc kubenswrapper[5050]: I1123 14:55:10.672647 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6c7b4b5f48-xvbr9" event={"ID":"de22d979-ac94-4839-b266-2dfa8ae79de2","Type":"ContainerStarted","Data":"1036a94c1f0132517eb1e8c5931d2b2cf4bd2128a5ec687d40f424d141017b57"} Nov 23 14:55:10 crc kubenswrapper[5050]: I1123 14:55:10.672749 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/controller-6c7b4b5f48-xvbr9" Nov 23 14:55:10 crc kubenswrapper[5050]: I1123 14:55:10.704550 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/controller-6c7b4b5f48-xvbr9" podStartSLOduration=2.704529306 podStartE2EDuration="2.704529306s" podCreationTimestamp="2025-11-23 14:55:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 14:55:10.703577628 +0000 UTC m=+805.870574123" watchObservedRunningTime="2025-11-23 14:55:10.704529306 +0000 UTC m=+805.871525791" Nov 23 14:55:11 crc kubenswrapper[5050]: I1123 14:55:11.821580 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"memberlist\" (UniqueName: \"kubernetes.io/secret/a29c3193-350c-4e8d-9c6f-bfb1e96a4527-memberlist\") pod \"speaker-dglng\" (UID: \"a29c3193-350c-4e8d-9c6f-bfb1e96a4527\") " pod="metallb-system/speaker-dglng" Nov 23 14:55:11 crc kubenswrapper[5050]: I1123 14:55:11.834146 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/a29c3193-350c-4e8d-9c6f-bfb1e96a4527-memberlist\") pod \"speaker-dglng\" (UID: \"a29c3193-350c-4e8d-9c6f-bfb1e96a4527\") " pod="metallb-system/speaker-dglng" Nov 23 14:55:11 crc kubenswrapper[5050]: I1123 14:55:11.999680 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/speaker-dglng" Nov 23 14:55:12 crc kubenswrapper[5050]: I1123 14:55:12.699814 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-dglng" event={"ID":"a29c3193-350c-4e8d-9c6f-bfb1e96a4527","Type":"ContainerStarted","Data":"6dcdd3c5c02c894457b9f4aa685f01a6411d922f8cf7751310bbb9c2fa14fc78"} Nov 23 14:55:12 crc kubenswrapper[5050]: I1123 14:55:12.700394 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-dglng" event={"ID":"a29c3193-350c-4e8d-9c6f-bfb1e96a4527","Type":"ContainerStarted","Data":"9aa31b7397a1084b49b32ae2c97cd24620f0accc8d14436bb93113eb3d5b810f"} Nov 23 14:55:13 crc kubenswrapper[5050]: I1123 14:55:13.709660 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-dglng" event={"ID":"a29c3193-350c-4e8d-9c6f-bfb1e96a4527","Type":"ContainerStarted","Data":"0ddd41dfe9a55989666656b8ae81d7d2ad80ab98662f79d9ad727546fa374d99"} Nov 23 14:55:13 crc kubenswrapper[5050]: I1123 14:55:13.711792 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/speaker-dglng" Nov 23 14:55:13 crc kubenswrapper[5050]: I1123 14:55:13.731764 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/speaker-dglng" podStartSLOduration=5.731741724 podStartE2EDuration="5.731741724s" podCreationTimestamp="2025-11-23 14:55:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 14:55:13.725602646 +0000 UTC m=+808.892599131" watchObservedRunningTime="2025-11-23 14:55:13.731741724 +0000 UTC m=+808.898738209" Nov 23 14:55:17 crc kubenswrapper[5050]: I1123 14:55:17.743820 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-6998585d5-c2rcn" event={"ID":"4c8fffca-2f1a-4b47-beb9-66bf0c19f8e0","Type":"ContainerStarted","Data":"9b3afd752a02700510bd529e95a3f69dbdb7ec36853e541be2839622d9801c25"} Nov 23 14:55:17 crc kubenswrapper[5050]: I1123 14:55:17.744849 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-webhook-server-6998585d5-c2rcn" Nov 23 14:55:17 crc kubenswrapper[5050]: I1123 14:55:17.745787 5050 generic.go:334] "Generic (PLEG): container finished" podID="4e8f95ac-d360-4b81-9bce-28577dce8e4c" containerID="d1684cfa3aaed34a2e953903e0c498aa01cc26e4caf70305323beebbe5961937" exitCode=0 Nov 23 14:55:17 crc kubenswrapper[5050]: I1123 14:55:17.745829 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-sn9hl" event={"ID":"4e8f95ac-d360-4b81-9bce-28577dce8e4c","Type":"ContainerDied","Data":"d1684cfa3aaed34a2e953903e0c498aa01cc26e4caf70305323beebbe5961937"} Nov 23 14:55:17 crc kubenswrapper[5050]: I1123 14:55:17.770219 5050 pod_startup_latency_tracker.go:104] 
"Observed pod startup duration" pod="metallb-system/frr-k8s-webhook-server-6998585d5-c2rcn" podStartSLOduration=2.724565365 podStartE2EDuration="10.770197737s" podCreationTimestamp="2025-11-23 14:55:07 +0000 UTC" firstStartedPulling="2025-11-23 14:55:08.527980655 +0000 UTC m=+803.694977140" lastFinishedPulling="2025-11-23 14:55:16.573613027 +0000 UTC m=+811.740609512" observedRunningTime="2025-11-23 14:55:17.768114416 +0000 UTC m=+812.935110891" watchObservedRunningTime="2025-11-23 14:55:17.770197737 +0000 UTC m=+812.937194222" Nov 23 14:55:18 crc kubenswrapper[5050]: I1123 14:55:18.756562 5050 generic.go:334] "Generic (PLEG): container finished" podID="4e8f95ac-d360-4b81-9bce-28577dce8e4c" containerID="18d53316656b8f4b0cdb6a2b4b6cc79314327d6219acf86c2bd863a9825b33f3" exitCode=0 Nov 23 14:55:18 crc kubenswrapper[5050]: I1123 14:55:18.756666 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-sn9hl" event={"ID":"4e8f95ac-d360-4b81-9bce-28577dce8e4c","Type":"ContainerDied","Data":"18d53316656b8f4b0cdb6a2b4b6cc79314327d6219acf86c2bd863a9825b33f3"} Nov 23 14:55:19 crc kubenswrapper[5050]: I1123 14:55:19.027142 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/controller-6c7b4b5f48-xvbr9" Nov 23 14:55:19 crc kubenswrapper[5050]: I1123 14:55:19.733614 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-wcqwc"] Nov 23 14:55:19 crc kubenswrapper[5050]: I1123 14:55:19.734807 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-wcqwc" Nov 23 14:55:19 crc kubenswrapper[5050]: I1123 14:55:19.762102 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-wcqwc"] Nov 23 14:55:19 crc kubenswrapper[5050]: I1123 14:55:19.770717 5050 generic.go:334] "Generic (PLEG): container finished" podID="4e8f95ac-d360-4b81-9bce-28577dce8e4c" containerID="55d40674bc7c92e7ababf4a3a5b9c727883befccf51c1f09fc08982635d73520" exitCode=0 Nov 23 14:55:19 crc kubenswrapper[5050]: I1123 14:55:19.770778 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-sn9hl" event={"ID":"4e8f95ac-d360-4b81-9bce-28577dce8e4c","Type":"ContainerDied","Data":"55d40674bc7c92e7ababf4a3a5b9c727883befccf51c1f09fc08982635d73520"} Nov 23 14:55:19 crc kubenswrapper[5050]: I1123 14:55:19.857606 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vrrtp\" (UniqueName: \"kubernetes.io/projected/d3cf6c79-b0c8-4b13-b81d-c3c05bf903d0-kube-api-access-vrrtp\") pod \"certified-operators-wcqwc\" (UID: \"d3cf6c79-b0c8-4b13-b81d-c3c05bf903d0\") " pod="openshift-marketplace/certified-operators-wcqwc" Nov 23 14:55:19 crc kubenswrapper[5050]: I1123 14:55:19.858226 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d3cf6c79-b0c8-4b13-b81d-c3c05bf903d0-catalog-content\") pod \"certified-operators-wcqwc\" (UID: \"d3cf6c79-b0c8-4b13-b81d-c3c05bf903d0\") " pod="openshift-marketplace/certified-operators-wcqwc" Nov 23 14:55:19 crc kubenswrapper[5050]: I1123 14:55:19.858276 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d3cf6c79-b0c8-4b13-b81d-c3c05bf903d0-utilities\") pod \"certified-operators-wcqwc\" (UID: 
\"d3cf6c79-b0c8-4b13-b81d-c3c05bf903d0\") " pod="openshift-marketplace/certified-operators-wcqwc" Nov 23 14:55:19 crc kubenswrapper[5050]: I1123 14:55:19.959474 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d3cf6c79-b0c8-4b13-b81d-c3c05bf903d0-catalog-content\") pod \"certified-operators-wcqwc\" (UID: \"d3cf6c79-b0c8-4b13-b81d-c3c05bf903d0\") " pod="openshift-marketplace/certified-operators-wcqwc" Nov 23 14:55:19 crc kubenswrapper[5050]: I1123 14:55:19.959553 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d3cf6c79-b0c8-4b13-b81d-c3c05bf903d0-utilities\") pod \"certified-operators-wcqwc\" (UID: \"d3cf6c79-b0c8-4b13-b81d-c3c05bf903d0\") " pod="openshift-marketplace/certified-operators-wcqwc" Nov 23 14:55:19 crc kubenswrapper[5050]: I1123 14:55:19.959613 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vrrtp\" (UniqueName: \"kubernetes.io/projected/d3cf6c79-b0c8-4b13-b81d-c3c05bf903d0-kube-api-access-vrrtp\") pod \"certified-operators-wcqwc\" (UID: \"d3cf6c79-b0c8-4b13-b81d-c3c05bf903d0\") " pod="openshift-marketplace/certified-operators-wcqwc" Nov 23 14:55:19 crc kubenswrapper[5050]: I1123 14:55:19.960074 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d3cf6c79-b0c8-4b13-b81d-c3c05bf903d0-catalog-content\") pod \"certified-operators-wcqwc\" (UID: \"d3cf6c79-b0c8-4b13-b81d-c3c05bf903d0\") " pod="openshift-marketplace/certified-operators-wcqwc" Nov 23 14:55:19 crc kubenswrapper[5050]: I1123 14:55:19.961281 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d3cf6c79-b0c8-4b13-b81d-c3c05bf903d0-utilities\") pod \"certified-operators-wcqwc\" (UID: \"d3cf6c79-b0c8-4b13-b81d-c3c05bf903d0\") " pod="openshift-marketplace/certified-operators-wcqwc" Nov 23 14:55:19 crc kubenswrapper[5050]: I1123 14:55:19.988381 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vrrtp\" (UniqueName: \"kubernetes.io/projected/d3cf6c79-b0c8-4b13-b81d-c3c05bf903d0-kube-api-access-vrrtp\") pod \"certified-operators-wcqwc\" (UID: \"d3cf6c79-b0c8-4b13-b81d-c3c05bf903d0\") " pod="openshift-marketplace/certified-operators-wcqwc" Nov 23 14:55:20 crc kubenswrapper[5050]: I1123 14:55:20.067456 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-wcqwc" Nov 23 14:55:20 crc kubenswrapper[5050]: I1123 14:55:20.335628 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-wcqwc"] Nov 23 14:55:20 crc kubenswrapper[5050]: I1123 14:55:20.799771 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-sn9hl" event={"ID":"4e8f95ac-d360-4b81-9bce-28577dce8e4c","Type":"ContainerStarted","Data":"aad5a1d7680076f5baa3a9220a46107b021a3dff9607c09bdc991272dc8a0228"} Nov 23 14:55:20 crc kubenswrapper[5050]: I1123 14:55:20.799891 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-sn9hl" event={"ID":"4e8f95ac-d360-4b81-9bce-28577dce8e4c","Type":"ContainerStarted","Data":"1d1218f4100c74c95d6d6e95f23098285bfd111563f382e06350f7d02d63eb83"} Nov 23 14:55:20 crc kubenswrapper[5050]: I1123 14:55:20.799907 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-sn9hl" event={"ID":"4e8f95ac-d360-4b81-9bce-28577dce8e4c","Type":"ContainerStarted","Data":"2c7269a90bfd8502d5bfec378ac7f8b756c621c5587eae5bc278935b7adf4df2"} Nov 23 14:55:20 crc kubenswrapper[5050]: I1123 14:55:20.802236 5050 generic.go:334] "Generic (PLEG): container finished" podID="d3cf6c79-b0c8-4b13-b81d-c3c05bf903d0" containerID="6c14b4182913783aee491e703fe8a7642781bf7efcfb7fe7fca76988704c5dd7" exitCode=0 Nov 23 14:55:20 crc kubenswrapper[5050]: I1123 14:55:20.802387 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wcqwc" event={"ID":"d3cf6c79-b0c8-4b13-b81d-c3c05bf903d0","Type":"ContainerDied","Data":"6c14b4182913783aee491e703fe8a7642781bf7efcfb7fe7fca76988704c5dd7"} Nov 23 14:55:20 crc kubenswrapper[5050]: I1123 14:55:20.802495 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wcqwc" event={"ID":"d3cf6c79-b0c8-4b13-b81d-c3c05bf903d0","Type":"ContainerStarted","Data":"b9d4870857b459dd1711bccdf4e58df8eed96fdfed002df7d07a426f96deb661"} Nov 23 14:55:21 crc kubenswrapper[5050]: I1123 14:55:21.727893 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-88sjx"] Nov 23 14:55:21 crc kubenswrapper[5050]: I1123 14:55:21.731297 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-88sjx" Nov 23 14:55:21 crc kubenswrapper[5050]: I1123 14:55:21.745149 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-88sjx"] Nov 23 14:55:21 crc kubenswrapper[5050]: I1123 14:55:21.788414 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3ecf8d41-cf6a-406d-b0d4-f7abe889bf18-catalog-content\") pod \"redhat-marketplace-88sjx\" (UID: \"3ecf8d41-cf6a-406d-b0d4-f7abe889bf18\") " pod="openshift-marketplace/redhat-marketplace-88sjx" Nov 23 14:55:21 crc kubenswrapper[5050]: I1123 14:55:21.788794 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kq4rl\" (UniqueName: \"kubernetes.io/projected/3ecf8d41-cf6a-406d-b0d4-f7abe889bf18-kube-api-access-kq4rl\") pod \"redhat-marketplace-88sjx\" (UID: \"3ecf8d41-cf6a-406d-b0d4-f7abe889bf18\") " pod="openshift-marketplace/redhat-marketplace-88sjx" Nov 23 14:55:21 crc kubenswrapper[5050]: I1123 14:55:21.788887 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3ecf8d41-cf6a-406d-b0d4-f7abe889bf18-utilities\") pod \"redhat-marketplace-88sjx\" (UID: \"3ecf8d41-cf6a-406d-b0d4-f7abe889bf18\") " pod="openshift-marketplace/redhat-marketplace-88sjx" Nov 23 14:55:21 crc kubenswrapper[5050]: I1123 14:55:21.835340 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-sn9hl" event={"ID":"4e8f95ac-d360-4b81-9bce-28577dce8e4c","Type":"ContainerStarted","Data":"56513410153b8178a09ee769b0794ed7f07ed68368e04f999bdc4cf0b674a295"} Nov 23 14:55:21 crc kubenswrapper[5050]: I1123 14:55:21.835398 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-sn9hl" event={"ID":"4e8f95ac-d360-4b81-9bce-28577dce8e4c","Type":"ContainerStarted","Data":"0f0d063b8ac1570cb9e5a2739080ba66c5c49e380927be700f8f3fc75b9960b1"} Nov 23 14:55:21 crc kubenswrapper[5050]: I1123 14:55:21.835412 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-sn9hl" event={"ID":"4e8f95ac-d360-4b81-9bce-28577dce8e4c","Type":"ContainerStarted","Data":"86e1d4308a8ee5f8ad6c5fe392e561e6c8efc0ffd1828754ba5887276c1fcdf4"} Nov 23 14:55:21 crc kubenswrapper[5050]: I1123 14:55:21.835625 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-sn9hl" Nov 23 14:55:21 crc kubenswrapper[5050]: I1123 14:55:21.868298 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-sn9hl" podStartSLOduration=7.322633981 podStartE2EDuration="14.868271565s" podCreationTimestamp="2025-11-23 14:55:07 +0000 UTC" firstStartedPulling="2025-11-23 14:55:09.070328296 +0000 UTC m=+804.237324791" lastFinishedPulling="2025-11-23 14:55:16.61596588 +0000 UTC m=+811.782962375" observedRunningTime="2025-11-23 14:55:21.867384819 +0000 UTC m=+817.034381314" watchObservedRunningTime="2025-11-23 14:55:21.868271565 +0000 UTC m=+817.035268060" Nov 23 14:55:21 crc kubenswrapper[5050]: I1123 14:55:21.891675 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kq4rl\" (UniqueName: \"kubernetes.io/projected/3ecf8d41-cf6a-406d-b0d4-f7abe889bf18-kube-api-access-kq4rl\") pod \"redhat-marketplace-88sjx\" (UID: \"3ecf8d41-cf6a-406d-b0d4-f7abe889bf18\") " 
pod="openshift-marketplace/redhat-marketplace-88sjx" Nov 23 14:55:21 crc kubenswrapper[5050]: I1123 14:55:21.891891 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3ecf8d41-cf6a-406d-b0d4-f7abe889bf18-utilities\") pod \"redhat-marketplace-88sjx\" (UID: \"3ecf8d41-cf6a-406d-b0d4-f7abe889bf18\") " pod="openshift-marketplace/redhat-marketplace-88sjx" Nov 23 14:55:21 crc kubenswrapper[5050]: I1123 14:55:21.892060 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3ecf8d41-cf6a-406d-b0d4-f7abe889bf18-catalog-content\") pod \"redhat-marketplace-88sjx\" (UID: \"3ecf8d41-cf6a-406d-b0d4-f7abe889bf18\") " pod="openshift-marketplace/redhat-marketplace-88sjx" Nov 23 14:55:21 crc kubenswrapper[5050]: I1123 14:55:21.893273 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3ecf8d41-cf6a-406d-b0d4-f7abe889bf18-utilities\") pod \"redhat-marketplace-88sjx\" (UID: \"3ecf8d41-cf6a-406d-b0d4-f7abe889bf18\") " pod="openshift-marketplace/redhat-marketplace-88sjx" Nov 23 14:55:21 crc kubenswrapper[5050]: I1123 14:55:21.894214 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3ecf8d41-cf6a-406d-b0d4-f7abe889bf18-catalog-content\") pod \"redhat-marketplace-88sjx\" (UID: \"3ecf8d41-cf6a-406d-b0d4-f7abe889bf18\") " pod="openshift-marketplace/redhat-marketplace-88sjx" Nov 23 14:55:21 crc kubenswrapper[5050]: I1123 14:55:21.922896 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kq4rl\" (UniqueName: \"kubernetes.io/projected/3ecf8d41-cf6a-406d-b0d4-f7abe889bf18-kube-api-access-kq4rl\") pod \"redhat-marketplace-88sjx\" (UID: \"3ecf8d41-cf6a-406d-b0d4-f7abe889bf18\") " pod="openshift-marketplace/redhat-marketplace-88sjx" Nov 23 14:55:22 crc kubenswrapper[5050]: I1123 14:55:22.005473 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/speaker-dglng" Nov 23 14:55:22 crc kubenswrapper[5050]: I1123 14:55:22.063657 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-88sjx" Nov 23 14:55:22 crc kubenswrapper[5050]: I1123 14:55:22.338069 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-88sjx"] Nov 23 14:55:22 crc kubenswrapper[5050]: W1123 14:55:22.346605 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3ecf8d41_cf6a_406d_b0d4_f7abe889bf18.slice/crio-d4967ac50b770706958983d9624c3a8e5ccf6be27bd7d050df2819f640ec17ac WatchSource:0}: Error finding container d4967ac50b770706958983d9624c3a8e5ccf6be27bd7d050df2819f640ec17ac: Status 404 returned error can't find the container with id d4967ac50b770706958983d9624c3a8e5ccf6be27bd7d050df2819f640ec17ac Nov 23 14:55:22 crc kubenswrapper[5050]: I1123 14:55:22.845635 5050 generic.go:334] "Generic (PLEG): container finished" podID="d3cf6c79-b0c8-4b13-b81d-c3c05bf903d0" containerID="6d4043d1490882052d2b86d14ff2c9f4ab73a8f82038c1b5427fb30b5e15901e" exitCode=0 Nov 23 14:55:22 crc kubenswrapper[5050]: I1123 14:55:22.845734 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wcqwc" event={"ID":"d3cf6c79-b0c8-4b13-b81d-c3c05bf903d0","Type":"ContainerDied","Data":"6d4043d1490882052d2b86d14ff2c9f4ab73a8f82038c1b5427fb30b5e15901e"} Nov 23 14:55:22 crc kubenswrapper[5050]: I1123 14:55:22.848229 5050 generic.go:334] "Generic (PLEG): container finished" podID="3ecf8d41-cf6a-406d-b0d4-f7abe889bf18" containerID="c9de6e9575016e533b084a8c60da95881cfbe691a31c648a2025a2ad043a2563" exitCode=0 Nov 23 14:55:22 crc kubenswrapper[5050]: I1123 14:55:22.848259 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-88sjx" event={"ID":"3ecf8d41-cf6a-406d-b0d4-f7abe889bf18","Type":"ContainerDied","Data":"c9de6e9575016e533b084a8c60da95881cfbe691a31c648a2025a2ad043a2563"} Nov 23 14:55:22 crc kubenswrapper[5050]: I1123 14:55:22.848287 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-88sjx" event={"ID":"3ecf8d41-cf6a-406d-b0d4-f7abe889bf18","Type":"ContainerStarted","Data":"d4967ac50b770706958983d9624c3a8e5ccf6be27bd7d050df2819f640ec17ac"} Nov 23 14:55:23 crc kubenswrapper[5050]: I1123 14:55:23.856343 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wcqwc" event={"ID":"d3cf6c79-b0c8-4b13-b81d-c3c05bf903d0","Type":"ContainerStarted","Data":"956a814f27bc8eec0c7cfe288fd2dbf53eb2bec10a73030b55dda207b78927b4"} Nov 23 14:55:23 crc kubenswrapper[5050]: I1123 14:55:23.882476 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-wcqwc" podStartSLOduration=2.424289227 podStartE2EDuration="4.882434748s" podCreationTimestamp="2025-11-23 14:55:19 +0000 UTC" firstStartedPulling="2025-11-23 14:55:20.806076998 +0000 UTC m=+815.973073493" lastFinishedPulling="2025-11-23 14:55:23.264222519 +0000 UTC m=+818.431219014" observedRunningTime="2025-11-23 14:55:23.875759484 +0000 UTC m=+819.042755969" watchObservedRunningTime="2025-11-23 14:55:23.882434748 +0000 UTC m=+819.049431233" Nov 23 14:55:23 crc kubenswrapper[5050]: I1123 14:55:23.896558 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="metallb-system/frr-k8s-sn9hl" Nov 23 14:55:23 crc kubenswrapper[5050]: I1123 14:55:23.938864 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" 
pod="metallb-system/frr-k8s-sn9hl" Nov 23 14:55:23 crc kubenswrapper[5050]: I1123 14:55:23.979692 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931afsmxx"] Nov 23 14:55:23 crc kubenswrapper[5050]: I1123 14:55:23.981122 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931afsmxx" Nov 23 14:55:24 crc kubenswrapper[5050]: I1123 14:55:24.005360 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Nov 23 14:55:24 crc kubenswrapper[5050]: I1123 14:55:24.040734 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931afsmxx"] Nov 23 14:55:24 crc kubenswrapper[5050]: I1123 14:55:24.041607 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/15d146e6-8be3-42c8-a6b4-45dc085b53a4-bundle\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931afsmxx\" (UID: \"15d146e6-8be3-42c8-a6b4-45dc085b53a4\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931afsmxx" Nov 23 14:55:24 crc kubenswrapper[5050]: I1123 14:55:24.041645 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8jplc\" (UniqueName: \"kubernetes.io/projected/15d146e6-8be3-42c8-a6b4-45dc085b53a4-kube-api-access-8jplc\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931afsmxx\" (UID: \"15d146e6-8be3-42c8-a6b4-45dc085b53a4\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931afsmxx" Nov 23 14:55:24 crc kubenswrapper[5050]: I1123 14:55:24.041682 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/15d146e6-8be3-42c8-a6b4-45dc085b53a4-util\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931afsmxx\" (UID: \"15d146e6-8be3-42c8-a6b4-45dc085b53a4\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931afsmxx" Nov 23 14:55:24 crc kubenswrapper[5050]: I1123 14:55:24.143466 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/15d146e6-8be3-42c8-a6b4-45dc085b53a4-bundle\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931afsmxx\" (UID: \"15d146e6-8be3-42c8-a6b4-45dc085b53a4\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931afsmxx" Nov 23 14:55:24 crc kubenswrapper[5050]: I1123 14:55:24.143543 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8jplc\" (UniqueName: \"kubernetes.io/projected/15d146e6-8be3-42c8-a6b4-45dc085b53a4-kube-api-access-8jplc\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931afsmxx\" (UID: \"15d146e6-8be3-42c8-a6b4-45dc085b53a4\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931afsmxx" Nov 23 14:55:24 crc kubenswrapper[5050]: I1123 14:55:24.143580 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/15d146e6-8be3-42c8-a6b4-45dc085b53a4-util\") pod 
\"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931afsmxx\" (UID: \"15d146e6-8be3-42c8-a6b4-45dc085b53a4\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931afsmxx" Nov 23 14:55:24 crc kubenswrapper[5050]: I1123 14:55:24.144294 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/15d146e6-8be3-42c8-a6b4-45dc085b53a4-util\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931afsmxx\" (UID: \"15d146e6-8be3-42c8-a6b4-45dc085b53a4\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931afsmxx" Nov 23 14:55:24 crc kubenswrapper[5050]: I1123 14:55:24.144439 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/15d146e6-8be3-42c8-a6b4-45dc085b53a4-bundle\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931afsmxx\" (UID: \"15d146e6-8be3-42c8-a6b4-45dc085b53a4\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931afsmxx" Nov 23 14:55:24 crc kubenswrapper[5050]: I1123 14:55:24.173187 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8jplc\" (UniqueName: \"kubernetes.io/projected/15d146e6-8be3-42c8-a6b4-45dc085b53a4-kube-api-access-8jplc\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931afsmxx\" (UID: \"15d146e6-8be3-42c8-a6b4-45dc085b53a4\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931afsmxx" Nov 23 14:55:24 crc kubenswrapper[5050]: I1123 14:55:24.321209 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931afsmxx" Nov 23 14:55:24 crc kubenswrapper[5050]: I1123 14:55:24.779661 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931afsmxx"] Nov 23 14:55:24 crc kubenswrapper[5050]: W1123 14:55:24.795765 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod15d146e6_8be3_42c8_a6b4_45dc085b53a4.slice/crio-3a67d0267887c54b42bacfc5452d1a6cfe5aad587c32beb8fa96f0414d0e85b2 WatchSource:0}: Error finding container 3a67d0267887c54b42bacfc5452d1a6cfe5aad587c32beb8fa96f0414d0e85b2: Status 404 returned error can't find the container with id 3a67d0267887c54b42bacfc5452d1a6cfe5aad587c32beb8fa96f0414d0e85b2 Nov 23 14:55:24 crc kubenswrapper[5050]: I1123 14:55:24.868066 5050 generic.go:334] "Generic (PLEG): container finished" podID="3ecf8d41-cf6a-406d-b0d4-f7abe889bf18" containerID="36589b51034d2d4af05e4fa6b3ff486ea33966d55f69a1f698b85f102452466b" exitCode=0 Nov 23 14:55:24 crc kubenswrapper[5050]: I1123 14:55:24.868169 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-88sjx" event={"ID":"3ecf8d41-cf6a-406d-b0d4-f7abe889bf18","Type":"ContainerDied","Data":"36589b51034d2d4af05e4fa6b3ff486ea33966d55f69a1f698b85f102452466b"} Nov 23 14:55:24 crc kubenswrapper[5050]: I1123 14:55:24.870741 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931afsmxx" event={"ID":"15d146e6-8be3-42c8-a6b4-45dc085b53a4","Type":"ContainerStarted","Data":"3a67d0267887c54b42bacfc5452d1a6cfe5aad587c32beb8fa96f0414d0e85b2"} Nov 23 14:55:25 crc kubenswrapper[5050]: I1123 
14:55:25.881178 5050 generic.go:334] "Generic (PLEG): container finished" podID="15d146e6-8be3-42c8-a6b4-45dc085b53a4" containerID="2867d726f7f304e8ba7d78a843c6739ee8845844ab329017c7920fe769415359" exitCode=0 Nov 23 14:55:25 crc kubenswrapper[5050]: I1123 14:55:25.881307 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931afsmxx" event={"ID":"15d146e6-8be3-42c8-a6b4-45dc085b53a4","Type":"ContainerDied","Data":"2867d726f7f304e8ba7d78a843c6739ee8845844ab329017c7920fe769415359"} Nov 23 14:55:25 crc kubenswrapper[5050]: I1123 14:55:25.884616 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-88sjx" event={"ID":"3ecf8d41-cf6a-406d-b0d4-f7abe889bf18","Type":"ContainerStarted","Data":"a3643e59d6d73f4a98673804b420789444fb90b622631b0624e27577a3e13546"} Nov 23 14:55:25 crc kubenswrapper[5050]: I1123 14:55:25.952362 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-88sjx" podStartSLOduration=2.499091297 podStartE2EDuration="4.952336385s" podCreationTimestamp="2025-11-23 14:55:21 +0000 UTC" firstStartedPulling="2025-11-23 14:55:22.850260796 +0000 UTC m=+818.017257281" lastFinishedPulling="2025-11-23 14:55:25.303505854 +0000 UTC m=+820.470502369" observedRunningTime="2025-11-23 14:55:25.950749599 +0000 UTC m=+821.117746094" watchObservedRunningTime="2025-11-23 14:55:25.952336385 +0000 UTC m=+821.119332870" Nov 23 14:55:28 crc kubenswrapper[5050]: I1123 14:55:28.314685 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-webhook-server-6998585d5-c2rcn" Nov 23 14:55:28 crc kubenswrapper[5050]: I1123 14:55:28.720808 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-qrfx4"] Nov 23 14:55:28 crc kubenswrapper[5050]: I1123 14:55:28.722566 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-qrfx4" Nov 23 14:55:28 crc kubenswrapper[5050]: I1123 14:55:28.727778 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-qrfx4"] Nov 23 14:55:28 crc kubenswrapper[5050]: I1123 14:55:28.824076 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-62fgk\" (UniqueName: \"kubernetes.io/projected/a45cf38d-235b-42ab-be26-ef62365f52da-kube-api-access-62fgk\") pod \"redhat-operators-qrfx4\" (UID: \"a45cf38d-235b-42ab-be26-ef62365f52da\") " pod="openshift-marketplace/redhat-operators-qrfx4" Nov 23 14:55:28 crc kubenswrapper[5050]: I1123 14:55:28.824221 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a45cf38d-235b-42ab-be26-ef62365f52da-catalog-content\") pod \"redhat-operators-qrfx4\" (UID: \"a45cf38d-235b-42ab-be26-ef62365f52da\") " pod="openshift-marketplace/redhat-operators-qrfx4" Nov 23 14:55:28 crc kubenswrapper[5050]: I1123 14:55:28.824374 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a45cf38d-235b-42ab-be26-ef62365f52da-utilities\") pod \"redhat-operators-qrfx4\" (UID: \"a45cf38d-235b-42ab-be26-ef62365f52da\") " pod="openshift-marketplace/redhat-operators-qrfx4" Nov 23 14:55:28 crc kubenswrapper[5050]: I1123 14:55:28.926101 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a45cf38d-235b-42ab-be26-ef62365f52da-utilities\") pod \"redhat-operators-qrfx4\" (UID: \"a45cf38d-235b-42ab-be26-ef62365f52da\") " pod="openshift-marketplace/redhat-operators-qrfx4" Nov 23 14:55:28 crc kubenswrapper[5050]: I1123 14:55:28.926205 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-62fgk\" (UniqueName: \"kubernetes.io/projected/a45cf38d-235b-42ab-be26-ef62365f52da-kube-api-access-62fgk\") pod \"redhat-operators-qrfx4\" (UID: \"a45cf38d-235b-42ab-be26-ef62365f52da\") " pod="openshift-marketplace/redhat-operators-qrfx4" Nov 23 14:55:28 crc kubenswrapper[5050]: I1123 14:55:28.926250 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a45cf38d-235b-42ab-be26-ef62365f52da-catalog-content\") pod \"redhat-operators-qrfx4\" (UID: \"a45cf38d-235b-42ab-be26-ef62365f52da\") " pod="openshift-marketplace/redhat-operators-qrfx4" Nov 23 14:55:28 crc kubenswrapper[5050]: I1123 14:55:28.927116 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a45cf38d-235b-42ab-be26-ef62365f52da-catalog-content\") pod \"redhat-operators-qrfx4\" (UID: \"a45cf38d-235b-42ab-be26-ef62365f52da\") " pod="openshift-marketplace/redhat-operators-qrfx4" Nov 23 14:55:28 crc kubenswrapper[5050]: I1123 14:55:28.927397 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a45cf38d-235b-42ab-be26-ef62365f52da-utilities\") pod \"redhat-operators-qrfx4\" (UID: \"a45cf38d-235b-42ab-be26-ef62365f52da\") " pod="openshift-marketplace/redhat-operators-qrfx4" Nov 23 14:55:28 crc kubenswrapper[5050]: I1123 14:55:28.948702 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-62fgk\" (UniqueName: \"kubernetes.io/projected/a45cf38d-235b-42ab-be26-ef62365f52da-kube-api-access-62fgk\") pod \"redhat-operators-qrfx4\" (UID: \"a45cf38d-235b-42ab-be26-ef62365f52da\") " pod="openshift-marketplace/redhat-operators-qrfx4" Nov 23 14:55:29 crc kubenswrapper[5050]: I1123 14:55:29.046366 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-qrfx4" Nov 23 14:55:29 crc kubenswrapper[5050]: I1123 14:55:29.542995 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-qrfx4"] Nov 23 14:55:29 crc kubenswrapper[5050]: W1123 14:55:29.562010 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda45cf38d_235b_42ab_be26_ef62365f52da.slice/crio-73f0bd6aabb57d246ae5749b9b9d2441049bc2d0871d41eb9efee6de30bc476b WatchSource:0}: Error finding container 73f0bd6aabb57d246ae5749b9b9d2441049bc2d0871d41eb9efee6de30bc476b: Status 404 returned error can't find the container with id 73f0bd6aabb57d246ae5749b9b9d2441049bc2d0871d41eb9efee6de30bc476b Nov 23 14:55:29 crc kubenswrapper[5050]: I1123 14:55:29.926533 5050 generic.go:334] "Generic (PLEG): container finished" podID="a45cf38d-235b-42ab-be26-ef62365f52da" containerID="6a757d28b363766563b8ab6e6d9ce9022ce2083b8542942cc64e05e43f305401" exitCode=0 Nov 23 14:55:29 crc kubenswrapper[5050]: I1123 14:55:29.926619 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qrfx4" event={"ID":"a45cf38d-235b-42ab-be26-ef62365f52da","Type":"ContainerDied","Data":"6a757d28b363766563b8ab6e6d9ce9022ce2083b8542942cc64e05e43f305401"} Nov 23 14:55:29 crc kubenswrapper[5050]: I1123 14:55:29.926966 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qrfx4" event={"ID":"a45cf38d-235b-42ab-be26-ef62365f52da","Type":"ContainerStarted","Data":"73f0bd6aabb57d246ae5749b9b9d2441049bc2d0871d41eb9efee6de30bc476b"} Nov 23 14:55:29 crc kubenswrapper[5050]: I1123 14:55:29.930004 5050 generic.go:334] "Generic (PLEG): container finished" podID="15d146e6-8be3-42c8-a6b4-45dc085b53a4" containerID="abc08eaf362b61be088a777f312cda8f8372e28db3d91f8f4672e9f1fa14e31f" exitCode=0 Nov 23 14:55:29 crc kubenswrapper[5050]: I1123 14:55:29.930040 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931afsmxx" event={"ID":"15d146e6-8be3-42c8-a6b4-45dc085b53a4","Type":"ContainerDied","Data":"abc08eaf362b61be088a777f312cda8f8372e28db3d91f8f4672e9f1fa14e31f"} Nov 23 14:55:30 crc kubenswrapper[5050]: I1123 14:55:30.067966 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-wcqwc" Nov 23 14:55:30 crc kubenswrapper[5050]: I1123 14:55:30.068043 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-wcqwc" Nov 23 14:55:30 crc kubenswrapper[5050]: I1123 14:55:30.128512 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-wcqwc" Nov 23 14:55:30 crc kubenswrapper[5050]: I1123 14:55:30.947694 5050 generic.go:334] "Generic (PLEG): container finished" podID="15d146e6-8be3-42c8-a6b4-45dc085b53a4" containerID="38426d0c7b072064d17468359ba918b14cd20d111a87c1bae2dc044ec256ca28" exitCode=0 Nov 23 14:55:30 crc kubenswrapper[5050]: I1123 
Nov 23 14:55:30 crc kubenswrapper[5050]: I1123 14:55:30.947811 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931afsmxx" event={"ID":"15d146e6-8be3-42c8-a6b4-45dc085b53a4","Type":"ContainerDied","Data":"38426d0c7b072064d17468359ba918b14cd20d111a87c1bae2dc044ec256ca28"}
Nov 23 14:55:31 crc kubenswrapper[5050]: I1123 14:55:31.000703 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-wcqwc"
Nov 23 14:55:31 crc kubenswrapper[5050]: I1123 14:55:31.959320 5050 generic.go:334] "Generic (PLEG): container finished" podID="a45cf38d-235b-42ab-be26-ef62365f52da" containerID="db596b58f53fe07d9c6d4fde34715757450a8f3607f4800b183132042a7a94ff" exitCode=0
Nov 23 14:55:31 crc kubenswrapper[5050]: I1123 14:55:31.959414 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qrfx4" event={"ID":"a45cf38d-235b-42ab-be26-ef62365f52da","Type":"ContainerDied","Data":"db596b58f53fe07d9c6d4fde34715757450a8f3607f4800b183132042a7a94ff"}
Nov 23 14:55:32 crc kubenswrapper[5050]: I1123 14:55:32.066302 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-88sjx"
Nov 23 14:55:32 crc kubenswrapper[5050]: I1123 14:55:32.066513 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-88sjx"
Nov 23 14:55:32 crc kubenswrapper[5050]: I1123 14:55:32.149287 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-88sjx"
Nov 23 14:55:32 crc kubenswrapper[5050]: I1123 14:55:32.306277 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-wcqwc"]
Nov 23 14:55:32 crc kubenswrapper[5050]: I1123 14:55:32.351951 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931afsmxx"
Nov 23 14:55:32 crc kubenswrapper[5050]: I1123 14:55:32.388077 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8jplc\" (UniqueName: \"kubernetes.io/projected/15d146e6-8be3-42c8-a6b4-45dc085b53a4-kube-api-access-8jplc\") pod \"15d146e6-8be3-42c8-a6b4-45dc085b53a4\" (UID: \"15d146e6-8be3-42c8-a6b4-45dc085b53a4\") "
Nov 23 14:55:32 crc kubenswrapper[5050]: I1123 14:55:32.388147 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/15d146e6-8be3-42c8-a6b4-45dc085b53a4-bundle\") pod \"15d146e6-8be3-42c8-a6b4-45dc085b53a4\" (UID: \"15d146e6-8be3-42c8-a6b4-45dc085b53a4\") "
Nov 23 14:55:32 crc kubenswrapper[5050]: I1123 14:55:32.388232 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/15d146e6-8be3-42c8-a6b4-45dc085b53a4-util\") pod \"15d146e6-8be3-42c8-a6b4-45dc085b53a4\" (UID: \"15d146e6-8be3-42c8-a6b4-45dc085b53a4\") "
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 14:55:32 crc kubenswrapper[5050]: I1123 14:55:32.399565 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/15d146e6-8be3-42c8-a6b4-45dc085b53a4-kube-api-access-8jplc" (OuterVolumeSpecName: "kube-api-access-8jplc") pod "15d146e6-8be3-42c8-a6b4-45dc085b53a4" (UID: "15d146e6-8be3-42c8-a6b4-45dc085b53a4"). InnerVolumeSpecName "kube-api-access-8jplc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 14:55:32 crc kubenswrapper[5050]: I1123 14:55:32.400389 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/15d146e6-8be3-42c8-a6b4-45dc085b53a4-util" (OuterVolumeSpecName: "util") pod "15d146e6-8be3-42c8-a6b4-45dc085b53a4" (UID: "15d146e6-8be3-42c8-a6b4-45dc085b53a4"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 14:55:32 crc kubenswrapper[5050]: I1123 14:55:32.489567 5050 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/15d146e6-8be3-42c8-a6b4-45dc085b53a4-util\") on node \"crc\" DevicePath \"\"" Nov 23 14:55:32 crc kubenswrapper[5050]: I1123 14:55:32.489602 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8jplc\" (UniqueName: \"kubernetes.io/projected/15d146e6-8be3-42c8-a6b4-45dc085b53a4-kube-api-access-8jplc\") on node \"crc\" DevicePath \"\"" Nov 23 14:55:32 crc kubenswrapper[5050]: I1123 14:55:32.489617 5050 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/15d146e6-8be3-42c8-a6b4-45dc085b53a4-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 14:55:32 crc kubenswrapper[5050]: I1123 14:55:32.969180 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qrfx4" event={"ID":"a45cf38d-235b-42ab-be26-ef62365f52da","Type":"ContainerStarted","Data":"b1295cf1e9e52150d4a577db714d853c3c1493d3f1d525d67e4bf9910071b205"} Nov 23 14:55:32 crc kubenswrapper[5050]: I1123 14:55:32.972882 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931afsmxx" event={"ID":"15d146e6-8be3-42c8-a6b4-45dc085b53a4","Type":"ContainerDied","Data":"3a67d0267887c54b42bacfc5452d1a6cfe5aad587c32beb8fa96f0414d0e85b2"} Nov 23 14:55:32 crc kubenswrapper[5050]: I1123 14:55:32.973052 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3a67d0267887c54b42bacfc5452d1a6cfe5aad587c32beb8fa96f0414d0e85b2" Nov 23 14:55:32 crc kubenswrapper[5050]: I1123 14:55:32.973002 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931afsmxx" Nov 23 14:55:32 crc kubenswrapper[5050]: I1123 14:55:32.973228 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-wcqwc" podUID="d3cf6c79-b0c8-4b13-b81d-c3c05bf903d0" containerName="registry-server" containerID="cri-o://956a814f27bc8eec0c7cfe288fd2dbf53eb2bec10a73030b55dda207b78927b4" gracePeriod=2 Nov 23 14:55:32 crc kubenswrapper[5050]: I1123 14:55:32.992056 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-qrfx4" podStartSLOduration=2.498558301 podStartE2EDuration="4.992013679s" podCreationTimestamp="2025-11-23 14:55:28 +0000 UTC" firstStartedPulling="2025-11-23 14:55:29.928156013 +0000 UTC m=+825.095152498" lastFinishedPulling="2025-11-23 14:55:32.421611391 +0000 UTC m=+827.588607876" observedRunningTime="2025-11-23 14:55:32.98861609 +0000 UTC m=+828.155612635" watchObservedRunningTime="2025-11-23 14:55:32.992013679 +0000 UTC m=+828.159010164" Nov 23 14:55:33 crc kubenswrapper[5050]: I1123 14:55:33.028147 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-88sjx" Nov 23 14:55:33 crc kubenswrapper[5050]: I1123 14:55:33.402152 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-wcqwc" Nov 23 14:55:33 crc kubenswrapper[5050]: I1123 14:55:33.504045 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vrrtp\" (UniqueName: \"kubernetes.io/projected/d3cf6c79-b0c8-4b13-b81d-c3c05bf903d0-kube-api-access-vrrtp\") pod \"d3cf6c79-b0c8-4b13-b81d-c3c05bf903d0\" (UID: \"d3cf6c79-b0c8-4b13-b81d-c3c05bf903d0\") " Nov 23 14:55:33 crc kubenswrapper[5050]: I1123 14:55:33.504196 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d3cf6c79-b0c8-4b13-b81d-c3c05bf903d0-catalog-content\") pod \"d3cf6c79-b0c8-4b13-b81d-c3c05bf903d0\" (UID: \"d3cf6c79-b0c8-4b13-b81d-c3c05bf903d0\") " Nov 23 14:55:33 crc kubenswrapper[5050]: I1123 14:55:33.504241 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d3cf6c79-b0c8-4b13-b81d-c3c05bf903d0-utilities\") pod \"d3cf6c79-b0c8-4b13-b81d-c3c05bf903d0\" (UID: \"d3cf6c79-b0c8-4b13-b81d-c3c05bf903d0\") " Nov 23 14:55:33 crc kubenswrapper[5050]: I1123 14:55:33.505673 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d3cf6c79-b0c8-4b13-b81d-c3c05bf903d0-utilities" (OuterVolumeSpecName: "utilities") pod "d3cf6c79-b0c8-4b13-b81d-c3c05bf903d0" (UID: "d3cf6c79-b0c8-4b13-b81d-c3c05bf903d0"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 14:55:33 crc kubenswrapper[5050]: I1123 14:55:33.517774 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d3cf6c79-b0c8-4b13-b81d-c3c05bf903d0-kube-api-access-vrrtp" (OuterVolumeSpecName: "kube-api-access-vrrtp") pod "d3cf6c79-b0c8-4b13-b81d-c3c05bf903d0" (UID: "d3cf6c79-b0c8-4b13-b81d-c3c05bf903d0"). InnerVolumeSpecName "kube-api-access-vrrtp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 14:55:33 crc kubenswrapper[5050]: I1123 14:55:33.591216 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d3cf6c79-b0c8-4b13-b81d-c3c05bf903d0-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d3cf6c79-b0c8-4b13-b81d-c3c05bf903d0" (UID: "d3cf6c79-b0c8-4b13-b81d-c3c05bf903d0"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 14:55:33 crc kubenswrapper[5050]: I1123 14:55:33.606303 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vrrtp\" (UniqueName: \"kubernetes.io/projected/d3cf6c79-b0c8-4b13-b81d-c3c05bf903d0-kube-api-access-vrrtp\") on node \"crc\" DevicePath \"\"" Nov 23 14:55:33 crc kubenswrapper[5050]: I1123 14:55:33.606340 5050 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d3cf6c79-b0c8-4b13-b81d-c3c05bf903d0-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 23 14:55:33 crc kubenswrapper[5050]: I1123 14:55:33.606359 5050 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d3cf6c79-b0c8-4b13-b81d-c3c05bf903d0-utilities\") on node \"crc\" DevicePath \"\"" Nov 23 14:55:33 crc kubenswrapper[5050]: I1123 14:55:33.983177 5050 generic.go:334] "Generic (PLEG): container finished" podID="d3cf6c79-b0c8-4b13-b81d-c3c05bf903d0" containerID="956a814f27bc8eec0c7cfe288fd2dbf53eb2bec10a73030b55dda207b78927b4" exitCode=0 Nov 23 14:55:33 crc kubenswrapper[5050]: I1123 14:55:33.983328 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-wcqwc" Nov 23 14:55:33 crc kubenswrapper[5050]: I1123 14:55:33.983313 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wcqwc" event={"ID":"d3cf6c79-b0c8-4b13-b81d-c3c05bf903d0","Type":"ContainerDied","Data":"956a814f27bc8eec0c7cfe288fd2dbf53eb2bec10a73030b55dda207b78927b4"} Nov 23 14:55:33 crc kubenswrapper[5050]: I1123 14:55:33.984711 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wcqwc" event={"ID":"d3cf6c79-b0c8-4b13-b81d-c3c05bf903d0","Type":"ContainerDied","Data":"b9d4870857b459dd1711bccdf4e58df8eed96fdfed002df7d07a426f96deb661"} Nov 23 14:55:33 crc kubenswrapper[5050]: I1123 14:55:33.984772 5050 scope.go:117] "RemoveContainer" containerID="956a814f27bc8eec0c7cfe288fd2dbf53eb2bec10a73030b55dda207b78927b4" Nov 23 14:55:34 crc kubenswrapper[5050]: I1123 14:55:34.003972 5050 scope.go:117] "RemoveContainer" containerID="6d4043d1490882052d2b86d14ff2c9f4ab73a8f82038c1b5427fb30b5e15901e" Nov 23 14:55:34 crc kubenswrapper[5050]: I1123 14:55:34.019701 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-wcqwc"] Nov 23 14:55:34 crc kubenswrapper[5050]: I1123 14:55:34.023295 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-wcqwc"] Nov 23 14:55:34 crc kubenswrapper[5050]: I1123 14:55:34.042422 5050 scope.go:117] "RemoveContainer" containerID="6c14b4182913783aee491e703fe8a7642781bf7efcfb7fe7fca76988704c5dd7" Nov 23 14:55:34 crc kubenswrapper[5050]: I1123 14:55:34.061684 5050 scope.go:117] "RemoveContainer" containerID="956a814f27bc8eec0c7cfe288fd2dbf53eb2bec10a73030b55dda207b78927b4" Nov 23 14:55:34 crc kubenswrapper[5050]: E1123 14:55:34.062802 5050 log.go:32] 
"ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"956a814f27bc8eec0c7cfe288fd2dbf53eb2bec10a73030b55dda207b78927b4\": container with ID starting with 956a814f27bc8eec0c7cfe288fd2dbf53eb2bec10a73030b55dda207b78927b4 not found: ID does not exist" containerID="956a814f27bc8eec0c7cfe288fd2dbf53eb2bec10a73030b55dda207b78927b4" Nov 23 14:55:34 crc kubenswrapper[5050]: I1123 14:55:34.062927 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"956a814f27bc8eec0c7cfe288fd2dbf53eb2bec10a73030b55dda207b78927b4"} err="failed to get container status \"956a814f27bc8eec0c7cfe288fd2dbf53eb2bec10a73030b55dda207b78927b4\": rpc error: code = NotFound desc = could not find container \"956a814f27bc8eec0c7cfe288fd2dbf53eb2bec10a73030b55dda207b78927b4\": container with ID starting with 956a814f27bc8eec0c7cfe288fd2dbf53eb2bec10a73030b55dda207b78927b4 not found: ID does not exist" Nov 23 14:55:34 crc kubenswrapper[5050]: I1123 14:55:34.063076 5050 scope.go:117] "RemoveContainer" containerID="6d4043d1490882052d2b86d14ff2c9f4ab73a8f82038c1b5427fb30b5e15901e" Nov 23 14:55:34 crc kubenswrapper[5050]: E1123 14:55:34.064189 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6d4043d1490882052d2b86d14ff2c9f4ab73a8f82038c1b5427fb30b5e15901e\": container with ID starting with 6d4043d1490882052d2b86d14ff2c9f4ab73a8f82038c1b5427fb30b5e15901e not found: ID does not exist" containerID="6d4043d1490882052d2b86d14ff2c9f4ab73a8f82038c1b5427fb30b5e15901e" Nov 23 14:55:34 crc kubenswrapper[5050]: I1123 14:55:34.064299 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6d4043d1490882052d2b86d14ff2c9f4ab73a8f82038c1b5427fb30b5e15901e"} err="failed to get container status \"6d4043d1490882052d2b86d14ff2c9f4ab73a8f82038c1b5427fb30b5e15901e\": rpc error: code = NotFound desc = could not find container \"6d4043d1490882052d2b86d14ff2c9f4ab73a8f82038c1b5427fb30b5e15901e\": container with ID starting with 6d4043d1490882052d2b86d14ff2c9f4ab73a8f82038c1b5427fb30b5e15901e not found: ID does not exist" Nov 23 14:55:34 crc kubenswrapper[5050]: I1123 14:55:34.064379 5050 scope.go:117] "RemoveContainer" containerID="6c14b4182913783aee491e703fe8a7642781bf7efcfb7fe7fca76988704c5dd7" Nov 23 14:55:34 crc kubenswrapper[5050]: E1123 14:55:34.065522 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6c14b4182913783aee491e703fe8a7642781bf7efcfb7fe7fca76988704c5dd7\": container with ID starting with 6c14b4182913783aee491e703fe8a7642781bf7efcfb7fe7fca76988704c5dd7 not found: ID does not exist" containerID="6c14b4182913783aee491e703fe8a7642781bf7efcfb7fe7fca76988704c5dd7" Nov 23 14:55:34 crc kubenswrapper[5050]: I1123 14:55:34.065590 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6c14b4182913783aee491e703fe8a7642781bf7efcfb7fe7fca76988704c5dd7"} err="failed to get container status \"6c14b4182913783aee491e703fe8a7642781bf7efcfb7fe7fca76988704c5dd7\": rpc error: code = NotFound desc = could not find container \"6c14b4182913783aee491e703fe8a7642781bf7efcfb7fe7fca76988704c5dd7\": container with ID starting with 6c14b4182913783aee491e703fe8a7642781bf7efcfb7fe7fca76988704c5dd7 not found: ID does not exist" Nov 23 14:55:35 crc kubenswrapper[5050]: I1123 14:55:35.569655 5050 kubelet_volumes.go:163] "Cleaned 
Nov 23 14:55:35 crc kubenswrapper[5050]: I1123 14:55:35.569655 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d3cf6c79-b0c8-4b13-b81d-c3c05bf903d0" path="/var/lib/kubelet/pods/d3cf6c79-b0c8-4b13-b81d-c3c05bf903d0/volumes"
Nov 23 14:55:35 crc kubenswrapper[5050]: I1123 14:55:35.904622 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-88sjx"]
Nov 23 14:55:36 crc kubenswrapper[5050]: I1123 14:55:36.030273 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-88sjx" podUID="3ecf8d41-cf6a-406d-b0d4-f7abe889bf18" containerName="registry-server" containerID="cri-o://a3643e59d6d73f4a98673804b420789444fb90b622631b0624e27577a3e13546" gracePeriod=2
Nov 23 14:55:36 crc kubenswrapper[5050]: I1123 14:55:36.939360 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-88sjx"
Nov 23 14:55:37 crc kubenswrapper[5050]: I1123 14:55:37.038175 5050 generic.go:334] "Generic (PLEG): container finished" podID="3ecf8d41-cf6a-406d-b0d4-f7abe889bf18" containerID="a3643e59d6d73f4a98673804b420789444fb90b622631b0624e27577a3e13546" exitCode=0
Nov 23 14:55:37 crc kubenswrapper[5050]: I1123 14:55:37.038224 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-88sjx"
Nov 23 14:55:37 crc kubenswrapper[5050]: I1123 14:55:37.038236 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-88sjx" event={"ID":"3ecf8d41-cf6a-406d-b0d4-f7abe889bf18","Type":"ContainerDied","Data":"a3643e59d6d73f4a98673804b420789444fb90b622631b0624e27577a3e13546"}
Nov 23 14:55:37 crc kubenswrapper[5050]: I1123 14:55:37.038289 5050 scope.go:117] "RemoveContainer" containerID="a3643e59d6d73f4a98673804b420789444fb90b622631b0624e27577a3e13546"
Nov 23 14:55:37 crc kubenswrapper[5050]: I1123 14:55:37.038459 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-88sjx" event={"ID":"3ecf8d41-cf6a-406d-b0d4-f7abe889bf18","Type":"ContainerDied","Data":"d4967ac50b770706958983d9624c3a8e5ccf6be27bd7d050df2819f640ec17ac"}
Nov 23 14:55:37 crc kubenswrapper[5050]: I1123 14:55:37.054403 5050 scope.go:117] "RemoveContainer" containerID="36589b51034d2d4af05e4fa6b3ff486ea33966d55f69a1f698b85f102452466b"
Nov 23 14:55:37 crc kubenswrapper[5050]: I1123 14:55:37.064676 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3ecf8d41-cf6a-406d-b0d4-f7abe889bf18-catalog-content\") pod \"3ecf8d41-cf6a-406d-b0d4-f7abe889bf18\" (UID: \"3ecf8d41-cf6a-406d-b0d4-f7abe889bf18\") "
Nov 23 14:55:37 crc kubenswrapper[5050]: I1123 14:55:37.064793 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3ecf8d41-cf6a-406d-b0d4-f7abe889bf18-utilities\") pod \"3ecf8d41-cf6a-406d-b0d4-f7abe889bf18\" (UID: \"3ecf8d41-cf6a-406d-b0d4-f7abe889bf18\") "
Nov 23 14:55:37 crc kubenswrapper[5050]: I1123 14:55:37.064823 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kq4rl\" (UniqueName: \"kubernetes.io/projected/3ecf8d41-cf6a-406d-b0d4-f7abe889bf18-kube-api-access-kq4rl\") pod \"3ecf8d41-cf6a-406d-b0d4-f7abe889bf18\" (UID: \"3ecf8d41-cf6a-406d-b0d4-f7abe889bf18\") "
"kubernetes.io/empty-dir/3ecf8d41-cf6a-406d-b0d4-f7abe889bf18-utilities" (OuterVolumeSpecName: "utilities") pod "3ecf8d41-cf6a-406d-b0d4-f7abe889bf18" (UID: "3ecf8d41-cf6a-406d-b0d4-f7abe889bf18"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 14:55:37 crc kubenswrapper[5050]: I1123 14:55:37.078838 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ecf8d41-cf6a-406d-b0d4-f7abe889bf18-kube-api-access-kq4rl" (OuterVolumeSpecName: "kube-api-access-kq4rl") pod "3ecf8d41-cf6a-406d-b0d4-f7abe889bf18" (UID: "3ecf8d41-cf6a-406d-b0d4-f7abe889bf18"). InnerVolumeSpecName "kube-api-access-kq4rl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 14:55:37 crc kubenswrapper[5050]: I1123 14:55:37.079808 5050 scope.go:117] "RemoveContainer" containerID="c9de6e9575016e533b084a8c60da95881cfbe691a31c648a2025a2ad043a2563" Nov 23 14:55:37 crc kubenswrapper[5050]: I1123 14:55:37.095055 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3ecf8d41-cf6a-406d-b0d4-f7abe889bf18-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "3ecf8d41-cf6a-406d-b0d4-f7abe889bf18" (UID: "3ecf8d41-cf6a-406d-b0d4-f7abe889bf18"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 14:55:37 crc kubenswrapper[5050]: I1123 14:55:37.113880 5050 scope.go:117] "RemoveContainer" containerID="a3643e59d6d73f4a98673804b420789444fb90b622631b0624e27577a3e13546" Nov 23 14:55:37 crc kubenswrapper[5050]: E1123 14:55:37.114501 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a3643e59d6d73f4a98673804b420789444fb90b622631b0624e27577a3e13546\": container with ID starting with a3643e59d6d73f4a98673804b420789444fb90b622631b0624e27577a3e13546 not found: ID does not exist" containerID="a3643e59d6d73f4a98673804b420789444fb90b622631b0624e27577a3e13546" Nov 23 14:55:37 crc kubenswrapper[5050]: I1123 14:55:37.114584 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a3643e59d6d73f4a98673804b420789444fb90b622631b0624e27577a3e13546"} err="failed to get container status \"a3643e59d6d73f4a98673804b420789444fb90b622631b0624e27577a3e13546\": rpc error: code = NotFound desc = could not find container \"a3643e59d6d73f4a98673804b420789444fb90b622631b0624e27577a3e13546\": container with ID starting with a3643e59d6d73f4a98673804b420789444fb90b622631b0624e27577a3e13546 not found: ID does not exist" Nov 23 14:55:37 crc kubenswrapper[5050]: I1123 14:55:37.114627 5050 scope.go:117] "RemoveContainer" containerID="36589b51034d2d4af05e4fa6b3ff486ea33966d55f69a1f698b85f102452466b" Nov 23 14:55:37 crc kubenswrapper[5050]: E1123 14:55:37.115696 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"36589b51034d2d4af05e4fa6b3ff486ea33966d55f69a1f698b85f102452466b\": container with ID starting with 36589b51034d2d4af05e4fa6b3ff486ea33966d55f69a1f698b85f102452466b not found: ID does not exist" containerID="36589b51034d2d4af05e4fa6b3ff486ea33966d55f69a1f698b85f102452466b" Nov 23 14:55:37 crc kubenswrapper[5050]: I1123 14:55:37.115763 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"36589b51034d2d4af05e4fa6b3ff486ea33966d55f69a1f698b85f102452466b"} err="failed to get container status 
\"36589b51034d2d4af05e4fa6b3ff486ea33966d55f69a1f698b85f102452466b\": rpc error: code = NotFound desc = could not find container \"36589b51034d2d4af05e4fa6b3ff486ea33966d55f69a1f698b85f102452466b\": container with ID starting with 36589b51034d2d4af05e4fa6b3ff486ea33966d55f69a1f698b85f102452466b not found: ID does not exist" Nov 23 14:55:37 crc kubenswrapper[5050]: I1123 14:55:37.115797 5050 scope.go:117] "RemoveContainer" containerID="c9de6e9575016e533b084a8c60da95881cfbe691a31c648a2025a2ad043a2563" Nov 23 14:55:37 crc kubenswrapper[5050]: E1123 14:55:37.116139 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c9de6e9575016e533b084a8c60da95881cfbe691a31c648a2025a2ad043a2563\": container with ID starting with c9de6e9575016e533b084a8c60da95881cfbe691a31c648a2025a2ad043a2563 not found: ID does not exist" containerID="c9de6e9575016e533b084a8c60da95881cfbe691a31c648a2025a2ad043a2563" Nov 23 14:55:37 crc kubenswrapper[5050]: I1123 14:55:37.116198 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c9de6e9575016e533b084a8c60da95881cfbe691a31c648a2025a2ad043a2563"} err="failed to get container status \"c9de6e9575016e533b084a8c60da95881cfbe691a31c648a2025a2ad043a2563\": rpc error: code = NotFound desc = could not find container \"c9de6e9575016e533b084a8c60da95881cfbe691a31c648a2025a2ad043a2563\": container with ID starting with c9de6e9575016e533b084a8c60da95881cfbe691a31c648a2025a2ad043a2563 not found: ID does not exist" Nov 23 14:55:37 crc kubenswrapper[5050]: I1123 14:55:37.166848 5050 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3ecf8d41-cf6a-406d-b0d4-f7abe889bf18-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 23 14:55:37 crc kubenswrapper[5050]: I1123 14:55:37.166887 5050 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3ecf8d41-cf6a-406d-b0d4-f7abe889bf18-utilities\") on node \"crc\" DevicePath \"\"" Nov 23 14:55:37 crc kubenswrapper[5050]: I1123 14:55:37.166903 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kq4rl\" (UniqueName: \"kubernetes.io/projected/3ecf8d41-cf6a-406d-b0d4-f7abe889bf18-kube-api-access-kq4rl\") on node \"crc\" DevicePath \"\"" Nov 23 14:55:37 crc kubenswrapper[5050]: I1123 14:55:37.383886 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-88sjx"] Nov 23 14:55:37 crc kubenswrapper[5050]: I1123 14:55:37.387381 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-bb78f"] Nov 23 14:55:37 crc kubenswrapper[5050]: E1123 14:55:37.387696 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="15d146e6-8be3-42c8-a6b4-45dc085b53a4" containerName="extract" Nov 23 14:55:37 crc kubenswrapper[5050]: I1123 14:55:37.387715 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="15d146e6-8be3-42c8-a6b4-45dc085b53a4" containerName="extract" Nov 23 14:55:37 crc kubenswrapper[5050]: E1123 14:55:37.387724 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3ecf8d41-cf6a-406d-b0d4-f7abe889bf18" containerName="extract-utilities" Nov 23 14:55:37 crc kubenswrapper[5050]: I1123 14:55:37.387731 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="3ecf8d41-cf6a-406d-b0d4-f7abe889bf18" containerName="extract-utilities" Nov 23 14:55:37 crc 
Nov 23 14:55:37 crc kubenswrapper[5050]: E1123 14:55:37.387744 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3ecf8d41-cf6a-406d-b0d4-f7abe889bf18" containerName="extract-content"
Nov 23 14:55:37 crc kubenswrapper[5050]: I1123 14:55:37.387750 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="3ecf8d41-cf6a-406d-b0d4-f7abe889bf18" containerName="extract-content"
Nov 23 14:55:37 crc kubenswrapper[5050]: E1123 14:55:37.387764 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3ecf8d41-cf6a-406d-b0d4-f7abe889bf18" containerName="registry-server"
Nov 23 14:55:37 crc kubenswrapper[5050]: I1123 14:55:37.387770 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="3ecf8d41-cf6a-406d-b0d4-f7abe889bf18" containerName="registry-server"
Nov 23 14:55:37 crc kubenswrapper[5050]: E1123 14:55:37.387781 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d3cf6c79-b0c8-4b13-b81d-c3c05bf903d0" containerName="registry-server"
Nov 23 14:55:37 crc kubenswrapper[5050]: I1123 14:55:37.387788 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="d3cf6c79-b0c8-4b13-b81d-c3c05bf903d0" containerName="registry-server"
Nov 23 14:55:37 crc kubenswrapper[5050]: E1123 14:55:37.387801 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="15d146e6-8be3-42c8-a6b4-45dc085b53a4" containerName="util"
Nov 23 14:55:37 crc kubenswrapper[5050]: I1123 14:55:37.387807 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="15d146e6-8be3-42c8-a6b4-45dc085b53a4" containerName="util"
Nov 23 14:55:37 crc kubenswrapper[5050]: E1123 14:55:37.387815 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="15d146e6-8be3-42c8-a6b4-45dc085b53a4" containerName="pull"
Nov 23 14:55:37 crc kubenswrapper[5050]: I1123 14:55:37.387821 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="15d146e6-8be3-42c8-a6b4-45dc085b53a4" containerName="pull"
Nov 23 14:55:37 crc kubenswrapper[5050]: E1123 14:55:37.387830 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d3cf6c79-b0c8-4b13-b81d-c3c05bf903d0" containerName="extract-content"
Nov 23 14:55:37 crc kubenswrapper[5050]: I1123 14:55:37.387836 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="d3cf6c79-b0c8-4b13-b81d-c3c05bf903d0" containerName="extract-content"
Nov 23 14:55:37 crc kubenswrapper[5050]: E1123 14:55:37.387848 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d3cf6c79-b0c8-4b13-b81d-c3c05bf903d0" containerName="extract-utilities"
Nov 23 14:55:37 crc kubenswrapper[5050]: I1123 14:55:37.387854 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="d3cf6c79-b0c8-4b13-b81d-c3c05bf903d0" containerName="extract-utilities"
Nov 23 14:55:37 crc kubenswrapper[5050]: I1123 14:55:37.387966 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="15d146e6-8be3-42c8-a6b4-45dc085b53a4" containerName="extract"
Nov 23 14:55:37 crc kubenswrapper[5050]: I1123 14:55:37.387977 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="3ecf8d41-cf6a-406d-b0d4-f7abe889bf18" containerName="registry-server"
Nov 23 14:55:37 crc kubenswrapper[5050]: I1123 14:55:37.387987 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="d3cf6c79-b0c8-4b13-b81d-c3c05bf903d0" containerName="registry-server"
Need to start a new one" pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-bb78f" Nov 23 14:55:37 crc kubenswrapper[5050]: I1123 14:55:37.390693 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-88sjx"] Nov 23 14:55:37 crc kubenswrapper[5050]: I1123 14:55:37.392308 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager-operator"/"kube-root-ca.crt" Nov 23 14:55:37 crc kubenswrapper[5050]: I1123 14:55:37.392795 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager-operator"/"openshift-service-ca.crt" Nov 23 14:55:37 crc kubenswrapper[5050]: I1123 14:55:37.393751 5050 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager-operator"/"cert-manager-operator-controller-manager-dockercfg-75wcx" Nov 23 14:55:37 crc kubenswrapper[5050]: I1123 14:55:37.409313 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-bb78f"] Nov 23 14:55:37 crc kubenswrapper[5050]: I1123 14:55:37.472025 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x567q\" (UniqueName: \"kubernetes.io/projected/2169ccf2-e154-4ad5-a83c-9f48808a0797-kube-api-access-x567q\") pod \"cert-manager-operator-controller-manager-64cf6dff88-bb78f\" (UID: \"2169ccf2-e154-4ad5-a83c-9f48808a0797\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-bb78f" Nov 23 14:55:37 crc kubenswrapper[5050]: I1123 14:55:37.472213 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/2169ccf2-e154-4ad5-a83c-9f48808a0797-tmp\") pod \"cert-manager-operator-controller-manager-64cf6dff88-bb78f\" (UID: \"2169ccf2-e154-4ad5-a83c-9f48808a0797\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-bb78f" Nov 23 14:55:37 crc kubenswrapper[5050]: I1123 14:55:37.556687 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ecf8d41-cf6a-406d-b0d4-f7abe889bf18" path="/var/lib/kubelet/pods/3ecf8d41-cf6a-406d-b0d4-f7abe889bf18/volumes" Nov 23 14:55:37 crc kubenswrapper[5050]: I1123 14:55:37.573912 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/2169ccf2-e154-4ad5-a83c-9f48808a0797-tmp\") pod \"cert-manager-operator-controller-manager-64cf6dff88-bb78f\" (UID: \"2169ccf2-e154-4ad5-a83c-9f48808a0797\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-bb78f" Nov 23 14:55:37 crc kubenswrapper[5050]: I1123 14:55:37.574290 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x567q\" (UniqueName: \"kubernetes.io/projected/2169ccf2-e154-4ad5-a83c-9f48808a0797-kube-api-access-x567q\") pod \"cert-manager-operator-controller-manager-64cf6dff88-bb78f\" (UID: \"2169ccf2-e154-4ad5-a83c-9f48808a0797\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-bb78f" Nov 23 14:55:37 crc kubenswrapper[5050]: I1123 14:55:37.574637 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/2169ccf2-e154-4ad5-a83c-9f48808a0797-tmp\") pod \"cert-manager-operator-controller-manager-64cf6dff88-bb78f\" (UID: \"2169ccf2-e154-4ad5-a83c-9f48808a0797\") " 
pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-bb78f" Nov 23 14:55:37 crc kubenswrapper[5050]: I1123 14:55:37.613857 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x567q\" (UniqueName: \"kubernetes.io/projected/2169ccf2-e154-4ad5-a83c-9f48808a0797-kube-api-access-x567q\") pod \"cert-manager-operator-controller-manager-64cf6dff88-bb78f\" (UID: \"2169ccf2-e154-4ad5-a83c-9f48808a0797\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-bb78f" Nov 23 14:55:37 crc kubenswrapper[5050]: I1123 14:55:37.706602 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-bb78f" Nov 23 14:55:38 crc kubenswrapper[5050]: I1123 14:55:38.180706 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-bb78f"] Nov 23 14:55:38 crc kubenswrapper[5050]: I1123 14:55:38.899364 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-sn9hl" Nov 23 14:55:39 crc kubenswrapper[5050]: I1123 14:55:39.047263 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-qrfx4" Nov 23 14:55:39 crc kubenswrapper[5050]: I1123 14:55:39.047346 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-qrfx4" Nov 23 14:55:39 crc kubenswrapper[5050]: I1123 14:55:39.061262 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-bb78f" event={"ID":"2169ccf2-e154-4ad5-a83c-9f48808a0797","Type":"ContainerStarted","Data":"91ec440dcfdafd15fa7b0566164965a8c53dec010611982ecec124672cae5fcf"} Nov 23 14:55:40 crc kubenswrapper[5050]: I1123 14:55:40.087886 5050 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-qrfx4" podUID="a45cf38d-235b-42ab-be26-ef62365f52da" containerName="registry-server" probeResult="failure" output=< Nov 23 14:55:40 crc kubenswrapper[5050]: timeout: failed to connect service ":50051" within 1s Nov 23 14:55:40 crc kubenswrapper[5050]: > Nov 23 14:55:46 crc kubenswrapper[5050]: I1123 14:55:46.134408 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-bb78f" event={"ID":"2169ccf2-e154-4ad5-a83c-9f48808a0797","Type":"ContainerStarted","Data":"d1c5727b488d4c363c3b89fcb4f7b1b612ba983ea0ae2ff370d4a4ab64b2585d"} Nov 23 14:55:46 crc kubenswrapper[5050]: I1123 14:55:46.166695 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-bb78f" podStartSLOduration=2.214326814 podStartE2EDuration="9.166665606s" podCreationTimestamp="2025-11-23 14:55:37 +0000 UTC" firstStartedPulling="2025-11-23 14:55:38.195544132 +0000 UTC m=+833.362540627" lastFinishedPulling="2025-11-23 14:55:45.147882934 +0000 UTC m=+840.314879419" observedRunningTime="2025-11-23 14:55:46.163283818 +0000 UTC m=+841.330280353" watchObservedRunningTime="2025-11-23 14:55:46.166665606 +0000 UTC m=+841.333662101" Nov 23 14:55:48 crc kubenswrapper[5050]: I1123 14:55:48.880373 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-webhook-f4fb5df64-cj4jv"] Nov 23 14:55:48 crc kubenswrapper[5050]: I1123 14:55:48.882190 
5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-webhook-f4fb5df64-cj4jv" Nov 23 14:55:48 crc kubenswrapper[5050]: I1123 14:55:48.884996 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"kube-root-ca.crt" Nov 23 14:55:48 crc kubenswrapper[5050]: I1123 14:55:48.885101 5050 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-webhook-dockercfg-2p4z9" Nov 23 14:55:48 crc kubenswrapper[5050]: I1123 14:55:48.885226 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"openshift-service-ca.crt" Nov 23 14:55:48 crc kubenswrapper[5050]: I1123 14:55:48.885821 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-f4fb5df64-cj4jv"] Nov 23 14:55:48 crc kubenswrapper[5050]: I1123 14:55:48.958700 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/4c3f2d51-526a-409b-ada4-4ea14e622593-bound-sa-token\") pod \"cert-manager-webhook-f4fb5df64-cj4jv\" (UID: \"4c3f2d51-526a-409b-ada4-4ea14e622593\") " pod="cert-manager/cert-manager-webhook-f4fb5df64-cj4jv" Nov 23 14:55:48 crc kubenswrapper[5050]: I1123 14:55:48.958887 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qmhsj\" (UniqueName: \"kubernetes.io/projected/4c3f2d51-526a-409b-ada4-4ea14e622593-kube-api-access-qmhsj\") pod \"cert-manager-webhook-f4fb5df64-cj4jv\" (UID: \"4c3f2d51-526a-409b-ada4-4ea14e622593\") " pod="cert-manager/cert-manager-webhook-f4fb5df64-cj4jv" Nov 23 14:55:49 crc kubenswrapper[5050]: I1123 14:55:49.060371 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/4c3f2d51-526a-409b-ada4-4ea14e622593-bound-sa-token\") pod \"cert-manager-webhook-f4fb5df64-cj4jv\" (UID: \"4c3f2d51-526a-409b-ada4-4ea14e622593\") " pod="cert-manager/cert-manager-webhook-f4fb5df64-cj4jv" Nov 23 14:55:49 crc kubenswrapper[5050]: I1123 14:55:49.060579 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qmhsj\" (UniqueName: \"kubernetes.io/projected/4c3f2d51-526a-409b-ada4-4ea14e622593-kube-api-access-qmhsj\") pod \"cert-manager-webhook-f4fb5df64-cj4jv\" (UID: \"4c3f2d51-526a-409b-ada4-4ea14e622593\") " pod="cert-manager/cert-manager-webhook-f4fb5df64-cj4jv" Nov 23 14:55:49 crc kubenswrapper[5050]: I1123 14:55:49.092463 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/4c3f2d51-526a-409b-ada4-4ea14e622593-bound-sa-token\") pod \"cert-manager-webhook-f4fb5df64-cj4jv\" (UID: \"4c3f2d51-526a-409b-ada4-4ea14e622593\") " pod="cert-manager/cert-manager-webhook-f4fb5df64-cj4jv" Nov 23 14:55:49 crc kubenswrapper[5050]: I1123 14:55:49.092542 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qmhsj\" (UniqueName: \"kubernetes.io/projected/4c3f2d51-526a-409b-ada4-4ea14e622593-kube-api-access-qmhsj\") pod \"cert-manager-webhook-f4fb5df64-cj4jv\" (UID: \"4c3f2d51-526a-409b-ada4-4ea14e622593\") " pod="cert-manager/cert-manager-webhook-f4fb5df64-cj4jv" Nov 23 14:55:49 crc kubenswrapper[5050]: I1123 14:55:49.128111 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-qrfx4" Nov 23 14:55:49 crc 
Nov 23 14:55:49 crc kubenswrapper[5050]: I1123 14:55:49.179342 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-qrfx4"
Nov 23 14:55:49 crc kubenswrapper[5050]: I1123 14:55:49.214930 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-webhook-f4fb5df64-cj4jv"
Nov 23 14:55:49 crc kubenswrapper[5050]: I1123 14:55:49.470202 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-f4fb5df64-cj4jv"]
Nov 23 14:55:50 crc kubenswrapper[5050]: I1123 14:55:50.167615 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-f4fb5df64-cj4jv" event={"ID":"4c3f2d51-526a-409b-ada4-4ea14e622593","Type":"ContainerStarted","Data":"a7b4f1ad71b5bd0d8804e9d6104f55daf4d303a459c1011f20f268c42fb3f147"}
Nov 23 14:55:51 crc kubenswrapper[5050]: I1123 14:55:51.222062 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-cainjector-855d9ccff4-2lqmr"]
Nov 23 14:55:51 crc kubenswrapper[5050]: I1123 14:55:51.223387 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-855d9ccff4-2lqmr"
Nov 23 14:55:51 crc kubenswrapper[5050]: I1123 14:55:51.226186 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-855d9ccff4-2lqmr"]
Nov 23 14:55:51 crc kubenswrapper[5050]: I1123 14:55:51.226612 5050 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-cainjector-dockercfg-pbrr8"
Nov 23 14:55:51 crc kubenswrapper[5050]: I1123 14:55:51.304653 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9dclp\" (UniqueName: \"kubernetes.io/projected/d2eb929a-7d9e-4f85-845c-731255ed042d-kube-api-access-9dclp\") pod \"cert-manager-cainjector-855d9ccff4-2lqmr\" (UID: \"d2eb929a-7d9e-4f85-845c-731255ed042d\") " pod="cert-manager/cert-manager-cainjector-855d9ccff4-2lqmr"
Nov 23 14:55:51 crc kubenswrapper[5050]: I1123 14:55:51.304903 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/d2eb929a-7d9e-4f85-845c-731255ed042d-bound-sa-token\") pod \"cert-manager-cainjector-855d9ccff4-2lqmr\" (UID: \"d2eb929a-7d9e-4f85-845c-731255ed042d\") " pod="cert-manager/cert-manager-cainjector-855d9ccff4-2lqmr"
Nov 23 14:55:51 crc kubenswrapper[5050]: I1123 14:55:51.406728 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9dclp\" (UniqueName: \"kubernetes.io/projected/d2eb929a-7d9e-4f85-845c-731255ed042d-kube-api-access-9dclp\") pod \"cert-manager-cainjector-855d9ccff4-2lqmr\" (UID: \"d2eb929a-7d9e-4f85-845c-731255ed042d\") " pod="cert-manager/cert-manager-cainjector-855d9ccff4-2lqmr"
Nov 23 14:55:51 crc kubenswrapper[5050]: I1123 14:55:51.406789 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/d2eb929a-7d9e-4f85-845c-731255ed042d-bound-sa-token\") pod \"cert-manager-cainjector-855d9ccff4-2lqmr\" (UID: \"d2eb929a-7d9e-4f85-845c-731255ed042d\") " pod="cert-manager/cert-manager-cainjector-855d9ccff4-2lqmr"
\"kubernetes.io/projected/d2eb929a-7d9e-4f85-845c-731255ed042d-kube-api-access-9dclp\") pod \"cert-manager-cainjector-855d9ccff4-2lqmr\" (UID: \"d2eb929a-7d9e-4f85-845c-731255ed042d\") " pod="cert-manager/cert-manager-cainjector-855d9ccff4-2lqmr" Nov 23 14:55:51 crc kubenswrapper[5050]: I1123 14:55:51.451182 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/d2eb929a-7d9e-4f85-845c-731255ed042d-bound-sa-token\") pod \"cert-manager-cainjector-855d9ccff4-2lqmr\" (UID: \"d2eb929a-7d9e-4f85-845c-731255ed042d\") " pod="cert-manager/cert-manager-cainjector-855d9ccff4-2lqmr" Nov 23 14:55:51 crc kubenswrapper[5050]: I1123 14:55:51.505016 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-qrfx4"] Nov 23 14:55:51 crc kubenswrapper[5050]: I1123 14:55:51.505302 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-qrfx4" podUID="a45cf38d-235b-42ab-be26-ef62365f52da" containerName="registry-server" containerID="cri-o://b1295cf1e9e52150d4a577db714d853c3c1493d3f1d525d67e4bf9910071b205" gracePeriod=2 Nov 23 14:55:51 crc kubenswrapper[5050]: I1123 14:55:51.545974 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-855d9ccff4-2lqmr" Nov 23 14:55:52 crc kubenswrapper[5050]: I1123 14:55:52.065302 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-qrfx4" Nov 23 14:55:52 crc kubenswrapper[5050]: I1123 14:55:52.095980 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-855d9ccff4-2lqmr"] Nov 23 14:55:52 crc kubenswrapper[5050]: I1123 14:55:52.122179 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a45cf38d-235b-42ab-be26-ef62365f52da-utilities\") pod \"a45cf38d-235b-42ab-be26-ef62365f52da\" (UID: \"a45cf38d-235b-42ab-be26-ef62365f52da\") " Nov 23 14:55:52 crc kubenswrapper[5050]: I1123 14:55:52.122368 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a45cf38d-235b-42ab-be26-ef62365f52da-catalog-content\") pod \"a45cf38d-235b-42ab-be26-ef62365f52da\" (UID: \"a45cf38d-235b-42ab-be26-ef62365f52da\") " Nov 23 14:55:52 crc kubenswrapper[5050]: I1123 14:55:52.122486 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-62fgk\" (UniqueName: \"kubernetes.io/projected/a45cf38d-235b-42ab-be26-ef62365f52da-kube-api-access-62fgk\") pod \"a45cf38d-235b-42ab-be26-ef62365f52da\" (UID: \"a45cf38d-235b-42ab-be26-ef62365f52da\") " Nov 23 14:55:52 crc kubenswrapper[5050]: I1123 14:55:52.123366 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a45cf38d-235b-42ab-be26-ef62365f52da-utilities" (OuterVolumeSpecName: "utilities") pod "a45cf38d-235b-42ab-be26-ef62365f52da" (UID: "a45cf38d-235b-42ab-be26-ef62365f52da"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 14:55:52 crc kubenswrapper[5050]: I1123 14:55:52.132108 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a45cf38d-235b-42ab-be26-ef62365f52da-kube-api-access-62fgk" (OuterVolumeSpecName: "kube-api-access-62fgk") pod "a45cf38d-235b-42ab-be26-ef62365f52da" (UID: "a45cf38d-235b-42ab-be26-ef62365f52da"). InnerVolumeSpecName "kube-api-access-62fgk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 14:55:52 crc kubenswrapper[5050]: I1123 14:55:52.191607 5050 generic.go:334] "Generic (PLEG): container finished" podID="a45cf38d-235b-42ab-be26-ef62365f52da" containerID="b1295cf1e9e52150d4a577db714d853c3c1493d3f1d525d67e4bf9910071b205" exitCode=0 Nov 23 14:55:52 crc kubenswrapper[5050]: I1123 14:55:52.191685 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-qrfx4" Nov 23 14:55:52 crc kubenswrapper[5050]: I1123 14:55:52.191691 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qrfx4" event={"ID":"a45cf38d-235b-42ab-be26-ef62365f52da","Type":"ContainerDied","Data":"b1295cf1e9e52150d4a577db714d853c3c1493d3f1d525d67e4bf9910071b205"} Nov 23 14:55:52 crc kubenswrapper[5050]: I1123 14:55:52.191832 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qrfx4" event={"ID":"a45cf38d-235b-42ab-be26-ef62365f52da","Type":"ContainerDied","Data":"73f0bd6aabb57d246ae5749b9b9d2441049bc2d0871d41eb9efee6de30bc476b"} Nov 23 14:55:52 crc kubenswrapper[5050]: I1123 14:55:52.191879 5050 scope.go:117] "RemoveContainer" containerID="b1295cf1e9e52150d4a577db714d853c3c1493d3f1d525d67e4bf9910071b205" Nov 23 14:55:52 crc kubenswrapper[5050]: I1123 14:55:52.194086 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-855d9ccff4-2lqmr" event={"ID":"d2eb929a-7d9e-4f85-845c-731255ed042d","Type":"ContainerStarted","Data":"99b9f690b1026ade28e1826aa694cb3390ae8c79afc4980e55c97e7f65612212"} Nov 23 14:55:52 crc kubenswrapper[5050]: I1123 14:55:52.211600 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a45cf38d-235b-42ab-be26-ef62365f52da-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a45cf38d-235b-42ab-be26-ef62365f52da" (UID: "a45cf38d-235b-42ab-be26-ef62365f52da"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 14:55:52 crc kubenswrapper[5050]: I1123 14:55:52.212729 5050 scope.go:117] "RemoveContainer" containerID="db596b58f53fe07d9c6d4fde34715757450a8f3607f4800b183132042a7a94ff" Nov 23 14:55:52 crc kubenswrapper[5050]: I1123 14:55:52.224299 5050 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a45cf38d-235b-42ab-be26-ef62365f52da-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 23 14:55:52 crc kubenswrapper[5050]: I1123 14:55:52.224328 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-62fgk\" (UniqueName: \"kubernetes.io/projected/a45cf38d-235b-42ab-be26-ef62365f52da-kube-api-access-62fgk\") on node \"crc\" DevicePath \"\"" Nov 23 14:55:52 crc kubenswrapper[5050]: I1123 14:55:52.224340 5050 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a45cf38d-235b-42ab-be26-ef62365f52da-utilities\") on node \"crc\" DevicePath \"\"" Nov 23 14:55:52 crc kubenswrapper[5050]: I1123 14:55:52.236128 5050 scope.go:117] "RemoveContainer" containerID="6a757d28b363766563b8ab6e6d9ce9022ce2083b8542942cc64e05e43f305401" Nov 23 14:55:52 crc kubenswrapper[5050]: I1123 14:55:52.269161 5050 scope.go:117] "RemoveContainer" containerID="b1295cf1e9e52150d4a577db714d853c3c1493d3f1d525d67e4bf9910071b205" Nov 23 14:55:52 crc kubenswrapper[5050]: E1123 14:55:52.272389 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b1295cf1e9e52150d4a577db714d853c3c1493d3f1d525d67e4bf9910071b205\": container with ID starting with b1295cf1e9e52150d4a577db714d853c3c1493d3f1d525d67e4bf9910071b205 not found: ID does not exist" containerID="b1295cf1e9e52150d4a577db714d853c3c1493d3f1d525d67e4bf9910071b205" Nov 23 14:55:52 crc kubenswrapper[5050]: I1123 14:55:52.272477 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b1295cf1e9e52150d4a577db714d853c3c1493d3f1d525d67e4bf9910071b205"} err="failed to get container status \"b1295cf1e9e52150d4a577db714d853c3c1493d3f1d525d67e4bf9910071b205\": rpc error: code = NotFound desc = could not find container \"b1295cf1e9e52150d4a577db714d853c3c1493d3f1d525d67e4bf9910071b205\": container with ID starting with b1295cf1e9e52150d4a577db714d853c3c1493d3f1d525d67e4bf9910071b205 not found: ID does not exist" Nov 23 14:55:52 crc kubenswrapper[5050]: I1123 14:55:52.272516 5050 scope.go:117] "RemoveContainer" containerID="db596b58f53fe07d9c6d4fde34715757450a8f3607f4800b183132042a7a94ff" Nov 23 14:55:52 crc kubenswrapper[5050]: E1123 14:55:52.273115 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"db596b58f53fe07d9c6d4fde34715757450a8f3607f4800b183132042a7a94ff\": container with ID starting with db596b58f53fe07d9c6d4fde34715757450a8f3607f4800b183132042a7a94ff not found: ID does not exist" containerID="db596b58f53fe07d9c6d4fde34715757450a8f3607f4800b183132042a7a94ff" Nov 23 14:55:52 crc kubenswrapper[5050]: I1123 14:55:52.273137 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"db596b58f53fe07d9c6d4fde34715757450a8f3607f4800b183132042a7a94ff"} err="failed to get container status \"db596b58f53fe07d9c6d4fde34715757450a8f3607f4800b183132042a7a94ff\": rpc error: code = NotFound desc = could not find container 
\"db596b58f53fe07d9c6d4fde34715757450a8f3607f4800b183132042a7a94ff\": container with ID starting with db596b58f53fe07d9c6d4fde34715757450a8f3607f4800b183132042a7a94ff not found: ID does not exist" Nov 23 14:55:52 crc kubenswrapper[5050]: I1123 14:55:52.273151 5050 scope.go:117] "RemoveContainer" containerID="6a757d28b363766563b8ab6e6d9ce9022ce2083b8542942cc64e05e43f305401" Nov 23 14:55:52 crc kubenswrapper[5050]: E1123 14:55:52.273622 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6a757d28b363766563b8ab6e6d9ce9022ce2083b8542942cc64e05e43f305401\": container with ID starting with 6a757d28b363766563b8ab6e6d9ce9022ce2083b8542942cc64e05e43f305401 not found: ID does not exist" containerID="6a757d28b363766563b8ab6e6d9ce9022ce2083b8542942cc64e05e43f305401" Nov 23 14:55:52 crc kubenswrapper[5050]: I1123 14:55:52.273682 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6a757d28b363766563b8ab6e6d9ce9022ce2083b8542942cc64e05e43f305401"} err="failed to get container status \"6a757d28b363766563b8ab6e6d9ce9022ce2083b8542942cc64e05e43f305401\": rpc error: code = NotFound desc = could not find container \"6a757d28b363766563b8ab6e6d9ce9022ce2083b8542942cc64e05e43f305401\": container with ID starting with 6a757d28b363766563b8ab6e6d9ce9022ce2083b8542942cc64e05e43f305401 not found: ID does not exist" Nov 23 14:55:52 crc kubenswrapper[5050]: I1123 14:55:52.535075 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-qrfx4"] Nov 23 14:55:52 crc kubenswrapper[5050]: I1123 14:55:52.538060 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-qrfx4"] Nov 23 14:55:53 crc kubenswrapper[5050]: I1123 14:55:53.559291 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a45cf38d-235b-42ab-be26-ef62365f52da" path="/var/lib/kubelet/pods/a45cf38d-235b-42ab-be26-ef62365f52da/volumes" Nov 23 14:55:59 crc kubenswrapper[5050]: I1123 14:55:59.282087 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-f4fb5df64-cj4jv" event={"ID":"4c3f2d51-526a-409b-ada4-4ea14e622593","Type":"ContainerStarted","Data":"e8eb5f0b0bccf5f14312c5e41ef47e17b8c9b45dbcaedf45f1d140972d734bb9"} Nov 23 14:55:59 crc kubenswrapper[5050]: I1123 14:55:59.283253 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="cert-manager/cert-manager-webhook-f4fb5df64-cj4jv" Nov 23 14:55:59 crc kubenswrapper[5050]: I1123 14:55:59.288684 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-855d9ccff4-2lqmr" event={"ID":"d2eb929a-7d9e-4f85-845c-731255ed042d","Type":"ContainerStarted","Data":"1792142363e1e24d399ff07f7a6738fc0baf0e77e7440b868b6595b8be75c8c4"} Nov 23 14:55:59 crc kubenswrapper[5050]: I1123 14:55:59.333676 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-cainjector-855d9ccff4-2lqmr" podStartSLOduration=2.074942013 podStartE2EDuration="8.333649619s" podCreationTimestamp="2025-11-23 14:55:51 +0000 UTC" firstStartedPulling="2025-11-23 14:55:52.10821672 +0000 UTC m=+847.275213205" lastFinishedPulling="2025-11-23 14:55:58.366924326 +0000 UTC m=+853.533920811" observedRunningTime="2025-11-23 14:55:59.330984212 +0000 UTC m=+854.497980707" watchObservedRunningTime="2025-11-23 14:55:59.333649619 +0000 UTC m=+854.500646104" Nov 23 14:55:59 crc kubenswrapper[5050]: I1123 
14:55:59.335858 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-webhook-f4fb5df64-cj4jv" podStartSLOduration=2.458913308 podStartE2EDuration="11.335849833s" podCreationTimestamp="2025-11-23 14:55:48 +0000 UTC" firstStartedPulling="2025-11-23 14:55:49.495804999 +0000 UTC m=+844.662801484" lastFinishedPulling="2025-11-23 14:55:58.372741524 +0000 UTC m=+853.539738009" observedRunningTime="2025-11-23 14:55:59.308522304 +0000 UTC m=+854.475518809" watchObservedRunningTime="2025-11-23 14:55:59.335849833 +0000 UTC m=+854.502846318" Nov 23 14:56:02 crc kubenswrapper[5050]: I1123 14:56:02.126070 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-86cb77c54b-htrhf"] Nov 23 14:56:02 crc kubenswrapper[5050]: E1123 14:56:02.126932 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a45cf38d-235b-42ab-be26-ef62365f52da" containerName="extract-utilities" Nov 23 14:56:02 crc kubenswrapper[5050]: I1123 14:56:02.126956 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="a45cf38d-235b-42ab-be26-ef62365f52da" containerName="extract-utilities" Nov 23 14:56:02 crc kubenswrapper[5050]: E1123 14:56:02.126984 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a45cf38d-235b-42ab-be26-ef62365f52da" containerName="registry-server" Nov 23 14:56:02 crc kubenswrapper[5050]: I1123 14:56:02.126996 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="a45cf38d-235b-42ab-be26-ef62365f52da" containerName="registry-server" Nov 23 14:56:02 crc kubenswrapper[5050]: E1123 14:56:02.127038 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a45cf38d-235b-42ab-be26-ef62365f52da" containerName="extract-content" Nov 23 14:56:02 crc kubenswrapper[5050]: I1123 14:56:02.127053 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="a45cf38d-235b-42ab-be26-ef62365f52da" containerName="extract-content" Nov 23 14:56:02 crc kubenswrapper[5050]: I1123 14:56:02.127234 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="a45cf38d-235b-42ab-be26-ef62365f52da" containerName="registry-server" Nov 23 14:56:02 crc kubenswrapper[5050]: I1123 14:56:02.128008 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-86cb77c54b-htrhf" Nov 23 14:56:02 crc kubenswrapper[5050]: I1123 14:56:02.130203 5050 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-dockercfg-6tgj9" Nov 23 14:56:02 crc kubenswrapper[5050]: I1123 14:56:02.133914 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-86cb77c54b-htrhf"] Nov 23 14:56:02 crc kubenswrapper[5050]: I1123 14:56:02.179159 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/7870f636-5b79-41e3-a118-aa1d664760d9-bound-sa-token\") pod \"cert-manager-86cb77c54b-htrhf\" (UID: \"7870f636-5b79-41e3-a118-aa1d664760d9\") " pod="cert-manager/cert-manager-86cb77c54b-htrhf" Nov 23 14:56:02 crc kubenswrapper[5050]: I1123 14:56:02.179290 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pcckg\" (UniqueName: \"kubernetes.io/projected/7870f636-5b79-41e3-a118-aa1d664760d9-kube-api-access-pcckg\") pod \"cert-manager-86cb77c54b-htrhf\" (UID: \"7870f636-5b79-41e3-a118-aa1d664760d9\") " pod="cert-manager/cert-manager-86cb77c54b-htrhf" Nov 23 14:56:02 crc kubenswrapper[5050]: I1123 14:56:02.280666 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/7870f636-5b79-41e3-a118-aa1d664760d9-bound-sa-token\") pod \"cert-manager-86cb77c54b-htrhf\" (UID: \"7870f636-5b79-41e3-a118-aa1d664760d9\") " pod="cert-manager/cert-manager-86cb77c54b-htrhf" Nov 23 14:56:02 crc kubenswrapper[5050]: I1123 14:56:02.281080 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pcckg\" (UniqueName: \"kubernetes.io/projected/7870f636-5b79-41e3-a118-aa1d664760d9-kube-api-access-pcckg\") pod \"cert-manager-86cb77c54b-htrhf\" (UID: \"7870f636-5b79-41e3-a118-aa1d664760d9\") " pod="cert-manager/cert-manager-86cb77c54b-htrhf" Nov 23 14:56:02 crc kubenswrapper[5050]: I1123 14:56:02.309465 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/7870f636-5b79-41e3-a118-aa1d664760d9-bound-sa-token\") pod \"cert-manager-86cb77c54b-htrhf\" (UID: \"7870f636-5b79-41e3-a118-aa1d664760d9\") " pod="cert-manager/cert-manager-86cb77c54b-htrhf" Nov 23 14:56:02 crc kubenswrapper[5050]: I1123 14:56:02.311965 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pcckg\" (UniqueName: \"kubernetes.io/projected/7870f636-5b79-41e3-a118-aa1d664760d9-kube-api-access-pcckg\") pod \"cert-manager-86cb77c54b-htrhf\" (UID: \"7870f636-5b79-41e3-a118-aa1d664760d9\") " pod="cert-manager/cert-manager-86cb77c54b-htrhf" Nov 23 14:56:02 crc kubenswrapper[5050]: I1123 14:56:02.517200 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-86cb77c54b-htrhf" Nov 23 14:56:03 crc kubenswrapper[5050]: I1123 14:56:03.047402 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-86cb77c54b-htrhf"] Nov 23 14:56:03 crc kubenswrapper[5050]: I1123 14:56:03.321170 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-86cb77c54b-htrhf" event={"ID":"7870f636-5b79-41e3-a118-aa1d664760d9","Type":"ContainerStarted","Data":"93e42d5adf3aef1a414470afed075186f21fa360ed1cca039f4cd8cb1c3f3e86"} Nov 23 14:56:04 crc kubenswrapper[5050]: I1123 14:56:04.219947 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="cert-manager/cert-manager-webhook-f4fb5df64-cj4jv" Nov 23 14:56:04 crc kubenswrapper[5050]: I1123 14:56:04.331738 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-86cb77c54b-htrhf" event={"ID":"7870f636-5b79-41e3-a118-aa1d664760d9","Type":"ContainerStarted","Data":"fb46a64ea8dc48744740d2b0a1780a360e3f1fa7c8e5ca73fd929290bc10a06a"} Nov 23 14:56:04 crc kubenswrapper[5050]: I1123 14:56:04.353897 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-86cb77c54b-htrhf" podStartSLOduration=2.353876065 podStartE2EDuration="2.353876065s" podCreationTimestamp="2025-11-23 14:56:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 14:56:04.350728424 +0000 UTC m=+859.517724909" watchObservedRunningTime="2025-11-23 14:56:04.353876065 +0000 UTC m=+859.520872550" Nov 23 14:56:11 crc kubenswrapper[5050]: I1123 14:56:11.531246 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-xv2z8"] Nov 23 14:56:11 crc kubenswrapper[5050]: I1123 14:56:11.535187 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-xv2z8" Nov 23 14:56:11 crc kubenswrapper[5050]: I1123 14:56:11.537604 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-xv2z8"] Nov 23 14:56:11 crc kubenswrapper[5050]: I1123 14:56:11.540679 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"openshift-service-ca.crt" Nov 23 14:56:11 crc kubenswrapper[5050]: I1123 14:56:11.541046 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"kube-root-ca.crt" Nov 23 14:56:11 crc kubenswrapper[5050]: I1123 14:56:11.541213 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-index-dockercfg-9n8gv" Nov 23 14:56:11 crc kubenswrapper[5050]: I1123 14:56:11.730972 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6vj5d\" (UniqueName: \"kubernetes.io/projected/dd9d725e-b12d-4112-bf7b-bfc832976c8c-kube-api-access-6vj5d\") pod \"openstack-operator-index-xv2z8\" (UID: \"dd9d725e-b12d-4112-bf7b-bfc832976c8c\") " pod="openstack-operators/openstack-operator-index-xv2z8" Nov 23 14:56:11 crc kubenswrapper[5050]: I1123 14:56:11.833528 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6vj5d\" (UniqueName: \"kubernetes.io/projected/dd9d725e-b12d-4112-bf7b-bfc832976c8c-kube-api-access-6vj5d\") pod \"openstack-operator-index-xv2z8\" (UID: \"dd9d725e-b12d-4112-bf7b-bfc832976c8c\") " pod="openstack-operators/openstack-operator-index-xv2z8" Nov 23 14:56:11 crc kubenswrapper[5050]: I1123 14:56:11.861337 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6vj5d\" (UniqueName: \"kubernetes.io/projected/dd9d725e-b12d-4112-bf7b-bfc832976c8c-kube-api-access-6vj5d\") pod \"openstack-operator-index-xv2z8\" (UID: \"dd9d725e-b12d-4112-bf7b-bfc832976c8c\") " pod="openstack-operators/openstack-operator-index-xv2z8" Nov 23 14:56:11 crc kubenswrapper[5050]: I1123 14:56:11.888178 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-xv2z8" Nov 23 14:56:12 crc kubenswrapper[5050]: I1123 14:56:12.347728 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-xv2z8"] Nov 23 14:56:12 crc kubenswrapper[5050]: I1123 14:56:12.402739 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-xv2z8" event={"ID":"dd9d725e-b12d-4112-bf7b-bfc832976c8c","Type":"ContainerStarted","Data":"a916bbb90a9aae8111d4a0196796daffa381396f2f68a824ab8f3bc00b263f74"} Nov 23 14:56:15 crc kubenswrapper[5050]: I1123 14:56:15.430112 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-xv2z8" event={"ID":"dd9d725e-b12d-4112-bf7b-bfc832976c8c","Type":"ContainerStarted","Data":"43994c092f7c2911d4dade27c750fe34978652c6babbeb4178addcaea0a74684"} Nov 23 14:56:15 crc kubenswrapper[5050]: I1123 14:56:15.461830 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-xv2z8" podStartSLOduration=2.299565324 podStartE2EDuration="4.461799087s" podCreationTimestamp="2025-11-23 14:56:11 +0000 UTC" firstStartedPulling="2025-11-23 14:56:12.35841938 +0000 UTC m=+867.525415865" lastFinishedPulling="2025-11-23 14:56:14.520653143 +0000 UTC m=+869.687649628" observedRunningTime="2025-11-23 14:56:15.453159178 +0000 UTC m=+870.620155663" watchObservedRunningTime="2025-11-23 14:56:15.461799087 +0000 UTC m=+870.628795602" Nov 23 14:56:17 crc kubenswrapper[5050]: I1123 14:56:17.310214 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-xv2z8"] Nov 23 14:56:17 crc kubenswrapper[5050]: I1123 14:56:17.448298 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/openstack-operator-index-xv2z8" podUID="dd9d725e-b12d-4112-bf7b-bfc832976c8c" containerName="registry-server" containerID="cri-o://43994c092f7c2911d4dade27c750fe34978652c6babbeb4178addcaea0a74684" gracePeriod=2 Nov 23 14:56:17 crc kubenswrapper[5050]: I1123 14:56:17.919744 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-gp5f8"] Nov 23 14:56:17 crc kubenswrapper[5050]: I1123 14:56:17.921300 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-gp5f8" Nov 23 14:56:17 crc kubenswrapper[5050]: I1123 14:56:17.925061 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-gp5f8"] Nov 23 14:56:17 crc kubenswrapper[5050]: I1123 14:56:17.944269 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dd7d2\" (UniqueName: \"kubernetes.io/projected/5c4bbe08-f458-46ae-bcbd-7457e59ffc18-kube-api-access-dd7d2\") pod \"openstack-operator-index-gp5f8\" (UID: \"5c4bbe08-f458-46ae-bcbd-7457e59ffc18\") " pod="openstack-operators/openstack-operator-index-gp5f8" Nov 23 14:56:17 crc kubenswrapper[5050]: I1123 14:56:17.987010 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-xv2z8" Nov 23 14:56:18 crc kubenswrapper[5050]: I1123 14:56:18.045723 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dd7d2\" (UniqueName: \"kubernetes.io/projected/5c4bbe08-f458-46ae-bcbd-7457e59ffc18-kube-api-access-dd7d2\") pod \"openstack-operator-index-gp5f8\" (UID: \"5c4bbe08-f458-46ae-bcbd-7457e59ffc18\") " pod="openstack-operators/openstack-operator-index-gp5f8" Nov 23 14:56:18 crc kubenswrapper[5050]: I1123 14:56:18.068703 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dd7d2\" (UniqueName: \"kubernetes.io/projected/5c4bbe08-f458-46ae-bcbd-7457e59ffc18-kube-api-access-dd7d2\") pod \"openstack-operator-index-gp5f8\" (UID: \"5c4bbe08-f458-46ae-bcbd-7457e59ffc18\") " pod="openstack-operators/openstack-operator-index-gp5f8" Nov 23 14:56:18 crc kubenswrapper[5050]: I1123 14:56:18.147777 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6vj5d\" (UniqueName: \"kubernetes.io/projected/dd9d725e-b12d-4112-bf7b-bfc832976c8c-kube-api-access-6vj5d\") pod \"dd9d725e-b12d-4112-bf7b-bfc832976c8c\" (UID: \"dd9d725e-b12d-4112-bf7b-bfc832976c8c\") " Nov 23 14:56:18 crc kubenswrapper[5050]: I1123 14:56:18.153909 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dd9d725e-b12d-4112-bf7b-bfc832976c8c-kube-api-access-6vj5d" (OuterVolumeSpecName: "kube-api-access-6vj5d") pod "dd9d725e-b12d-4112-bf7b-bfc832976c8c" (UID: "dd9d725e-b12d-4112-bf7b-bfc832976c8c"). InnerVolumeSpecName "kube-api-access-6vj5d". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 14:56:18 crc kubenswrapper[5050]: I1123 14:56:18.249880 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6vj5d\" (UniqueName: \"kubernetes.io/projected/dd9d725e-b12d-4112-bf7b-bfc832976c8c-kube-api-access-6vj5d\") on node \"crc\" DevicePath \"\"" Nov 23 14:56:18 crc kubenswrapper[5050]: I1123 14:56:18.285293 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-gp5f8" Nov 23 14:56:18 crc kubenswrapper[5050]: I1123 14:56:18.456845 5050 generic.go:334] "Generic (PLEG): container finished" podID="dd9d725e-b12d-4112-bf7b-bfc832976c8c" containerID="43994c092f7c2911d4dade27c750fe34978652c6babbeb4178addcaea0a74684" exitCode=0 Nov 23 14:56:18 crc kubenswrapper[5050]: I1123 14:56:18.456951 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-xv2z8" Nov 23 14:56:18 crc kubenswrapper[5050]: I1123 14:56:18.456991 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-xv2z8" event={"ID":"dd9d725e-b12d-4112-bf7b-bfc832976c8c","Type":"ContainerDied","Data":"43994c092f7c2911d4dade27c750fe34978652c6babbeb4178addcaea0a74684"} Nov 23 14:56:18 crc kubenswrapper[5050]: I1123 14:56:18.457382 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-xv2z8" event={"ID":"dd9d725e-b12d-4112-bf7b-bfc832976c8c","Type":"ContainerDied","Data":"a916bbb90a9aae8111d4a0196796daffa381396f2f68a824ab8f3bc00b263f74"} Nov 23 14:56:18 crc kubenswrapper[5050]: I1123 14:56:18.457409 5050 scope.go:117] "RemoveContainer" containerID="43994c092f7c2911d4dade27c750fe34978652c6babbeb4178addcaea0a74684" Nov 23 14:56:18 crc kubenswrapper[5050]: I1123 14:56:18.496886 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-xv2z8"] Nov 23 14:56:18 crc kubenswrapper[5050]: I1123 14:56:18.500726 5050 scope.go:117] "RemoveContainer" containerID="43994c092f7c2911d4dade27c750fe34978652c6babbeb4178addcaea0a74684" Nov 23 14:56:18 crc kubenswrapper[5050]: E1123 14:56:18.503100 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"43994c092f7c2911d4dade27c750fe34978652c6babbeb4178addcaea0a74684\": container with ID starting with 43994c092f7c2911d4dade27c750fe34978652c6babbeb4178addcaea0a74684 not found: ID does not exist" containerID="43994c092f7c2911d4dade27c750fe34978652c6babbeb4178addcaea0a74684" Nov 23 14:56:18 crc kubenswrapper[5050]: I1123 14:56:18.503170 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"43994c092f7c2911d4dade27c750fe34978652c6babbeb4178addcaea0a74684"} err="failed to get container status \"43994c092f7c2911d4dade27c750fe34978652c6babbeb4178addcaea0a74684\": rpc error: code = NotFound desc = could not find container \"43994c092f7c2911d4dade27c750fe34978652c6babbeb4178addcaea0a74684\": container with ID starting with 43994c092f7c2911d4dade27c750fe34978652c6babbeb4178addcaea0a74684 not found: ID does not exist" Nov 23 14:56:18 crc kubenswrapper[5050]: I1123 14:56:18.513572 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/openstack-operator-index-xv2z8"] Nov 23 14:56:18 crc kubenswrapper[5050]: I1123 14:56:18.816756 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-gp5f8"] Nov 23 14:56:18 crc kubenswrapper[5050]: W1123 14:56:18.824719 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5c4bbe08_f458_46ae_bcbd_7457e59ffc18.slice/crio-c7717b1374f7629e2062c2d0efab6f298a5ab285a51acbd7b1f25c5a48b7a57b WatchSource:0}: Error finding container c7717b1374f7629e2062c2d0efab6f298a5ab285a51acbd7b1f25c5a48b7a57b: Status 404 returned error can't find the container with id c7717b1374f7629e2062c2d0efab6f298a5ab285a51acbd7b1f25c5a48b7a57b Nov 23 14:56:19 crc kubenswrapper[5050]: I1123 14:56:19.471234 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-gp5f8" event={"ID":"5c4bbe08-f458-46ae-bcbd-7457e59ffc18","Type":"ContainerStarted","Data":"23eb7306fd300068538dce546036adbf83b6c351d85b2b7e730d4a0d5b4463ce"} Nov 23 
14:56:19 crc kubenswrapper[5050]: I1123 14:56:19.472762 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-gp5f8" event={"ID":"5c4bbe08-f458-46ae-bcbd-7457e59ffc18","Type":"ContainerStarted","Data":"c7717b1374f7629e2062c2d0efab6f298a5ab285a51acbd7b1f25c5a48b7a57b"} Nov 23 14:56:19 crc kubenswrapper[5050]: I1123 14:56:19.501109 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-gp5f8" podStartSLOduration=2.4438903180000002 podStartE2EDuration="2.501072019s" podCreationTimestamp="2025-11-23 14:56:17 +0000 UTC" firstStartedPulling="2025-11-23 14:56:18.832092892 +0000 UTC m=+873.999089377" lastFinishedPulling="2025-11-23 14:56:18.889274583 +0000 UTC m=+874.056271078" observedRunningTime="2025-11-23 14:56:19.496429325 +0000 UTC m=+874.663425880" watchObservedRunningTime="2025-11-23 14:56:19.501072019 +0000 UTC m=+874.668068544" Nov 23 14:56:19 crc kubenswrapper[5050]: I1123 14:56:19.561114 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dd9d725e-b12d-4112-bf7b-bfc832976c8c" path="/var/lib/kubelet/pods/dd9d725e-b12d-4112-bf7b-bfc832976c8c/volumes" Nov 23 14:56:28 crc kubenswrapper[5050]: I1123 14:56:28.285900 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-index-gp5f8" Nov 23 14:56:28 crc kubenswrapper[5050]: I1123 14:56:28.286587 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack-operators/openstack-operator-index-gp5f8" Nov 23 14:56:28 crc kubenswrapper[5050]: I1123 14:56:28.322380 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/openstack-operator-index-gp5f8" Nov 23 14:56:28 crc kubenswrapper[5050]: I1123 14:56:28.582873 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-index-gp5f8" Nov 23 14:56:29 crc kubenswrapper[5050]: I1123 14:56:29.224939 5050 patch_prober.go:28] interesting pod/machine-config-daemon-hlrlq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 23 14:56:29 crc kubenswrapper[5050]: I1123 14:56:29.225186 5050 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 23 14:56:42 crc kubenswrapper[5050]: I1123 14:56:42.791168 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/e27345db6007af157f9c9720a7d718ea21605eb25c2fa6eb354135e7a5tshbh"] Nov 23 14:56:42 crc kubenswrapper[5050]: E1123 14:56:42.792107 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dd9d725e-b12d-4112-bf7b-bfc832976c8c" containerName="registry-server" Nov 23 14:56:42 crc kubenswrapper[5050]: I1123 14:56:42.792122 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="dd9d725e-b12d-4112-bf7b-bfc832976c8c" containerName="registry-server" Nov 23 14:56:42 crc kubenswrapper[5050]: I1123 14:56:42.792244 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="dd9d725e-b12d-4112-bf7b-bfc832976c8c" containerName="registry-server" Nov 23 14:56:42 
crc kubenswrapper[5050]: I1123 14:56:42.793207 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/e27345db6007af157f9c9720a7d718ea21605eb25c2fa6eb354135e7a5tshbh" Nov 23 14:56:42 crc kubenswrapper[5050]: I1123 14:56:42.799876 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"default-dockercfg-ngk97" Nov 23 14:56:42 crc kubenswrapper[5050]: I1123 14:56:42.820967 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/e27345db6007af157f9c9720a7d718ea21605eb25c2fa6eb354135e7a5tshbh"] Nov 23 14:56:42 crc kubenswrapper[5050]: I1123 14:56:42.960423 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/ad085e2d-80ec-4ee9-a7af-6eb49955b26f-util\") pod \"e27345db6007af157f9c9720a7d718ea21605eb25c2fa6eb354135e7a5tshbh\" (UID: \"ad085e2d-80ec-4ee9-a7af-6eb49955b26f\") " pod="openstack-operators/e27345db6007af157f9c9720a7d718ea21605eb25c2fa6eb354135e7a5tshbh" Nov 23 14:56:42 crc kubenswrapper[5050]: I1123 14:56:42.960776 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/ad085e2d-80ec-4ee9-a7af-6eb49955b26f-bundle\") pod \"e27345db6007af157f9c9720a7d718ea21605eb25c2fa6eb354135e7a5tshbh\" (UID: \"ad085e2d-80ec-4ee9-a7af-6eb49955b26f\") " pod="openstack-operators/e27345db6007af157f9c9720a7d718ea21605eb25c2fa6eb354135e7a5tshbh" Nov 23 14:56:42 crc kubenswrapper[5050]: I1123 14:56:42.960933 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wwzrl\" (UniqueName: \"kubernetes.io/projected/ad085e2d-80ec-4ee9-a7af-6eb49955b26f-kube-api-access-wwzrl\") pod \"e27345db6007af157f9c9720a7d718ea21605eb25c2fa6eb354135e7a5tshbh\" (UID: \"ad085e2d-80ec-4ee9-a7af-6eb49955b26f\") " pod="openstack-operators/e27345db6007af157f9c9720a7d718ea21605eb25c2fa6eb354135e7a5tshbh" Nov 23 14:56:43 crc kubenswrapper[5050]: I1123 14:56:43.062681 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/ad085e2d-80ec-4ee9-a7af-6eb49955b26f-bundle\") pod \"e27345db6007af157f9c9720a7d718ea21605eb25c2fa6eb354135e7a5tshbh\" (UID: \"ad085e2d-80ec-4ee9-a7af-6eb49955b26f\") " pod="openstack-operators/e27345db6007af157f9c9720a7d718ea21605eb25c2fa6eb354135e7a5tshbh" Nov 23 14:56:43 crc kubenswrapper[5050]: I1123 14:56:43.062826 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wwzrl\" (UniqueName: \"kubernetes.io/projected/ad085e2d-80ec-4ee9-a7af-6eb49955b26f-kube-api-access-wwzrl\") pod \"e27345db6007af157f9c9720a7d718ea21605eb25c2fa6eb354135e7a5tshbh\" (UID: \"ad085e2d-80ec-4ee9-a7af-6eb49955b26f\") " pod="openstack-operators/e27345db6007af157f9c9720a7d718ea21605eb25c2fa6eb354135e7a5tshbh" Nov 23 14:56:43 crc kubenswrapper[5050]: I1123 14:56:43.062930 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/ad085e2d-80ec-4ee9-a7af-6eb49955b26f-util\") pod \"e27345db6007af157f9c9720a7d718ea21605eb25c2fa6eb354135e7a5tshbh\" (UID: \"ad085e2d-80ec-4ee9-a7af-6eb49955b26f\") " pod="openstack-operators/e27345db6007af157f9c9720a7d718ea21605eb25c2fa6eb354135e7a5tshbh" Nov 23 14:56:43 crc kubenswrapper[5050]: I1123 14:56:43.064177 5050 operation_generator.go:637] "MountVolume.SetUp succeeded 
for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/ad085e2d-80ec-4ee9-a7af-6eb49955b26f-util\") pod \"e27345db6007af157f9c9720a7d718ea21605eb25c2fa6eb354135e7a5tshbh\" (UID: \"ad085e2d-80ec-4ee9-a7af-6eb49955b26f\") " pod="openstack-operators/e27345db6007af157f9c9720a7d718ea21605eb25c2fa6eb354135e7a5tshbh" Nov 23 14:56:43 crc kubenswrapper[5050]: I1123 14:56:43.064329 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/ad085e2d-80ec-4ee9-a7af-6eb49955b26f-bundle\") pod \"e27345db6007af157f9c9720a7d718ea21605eb25c2fa6eb354135e7a5tshbh\" (UID: \"ad085e2d-80ec-4ee9-a7af-6eb49955b26f\") " pod="openstack-operators/e27345db6007af157f9c9720a7d718ea21605eb25c2fa6eb354135e7a5tshbh" Nov 23 14:56:43 crc kubenswrapper[5050]: I1123 14:56:43.097101 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wwzrl\" (UniqueName: \"kubernetes.io/projected/ad085e2d-80ec-4ee9-a7af-6eb49955b26f-kube-api-access-wwzrl\") pod \"e27345db6007af157f9c9720a7d718ea21605eb25c2fa6eb354135e7a5tshbh\" (UID: \"ad085e2d-80ec-4ee9-a7af-6eb49955b26f\") " pod="openstack-operators/e27345db6007af157f9c9720a7d718ea21605eb25c2fa6eb354135e7a5tshbh" Nov 23 14:56:43 crc kubenswrapper[5050]: I1123 14:56:43.125420 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/e27345db6007af157f9c9720a7d718ea21605eb25c2fa6eb354135e7a5tshbh" Nov 23 14:56:43 crc kubenswrapper[5050]: I1123 14:56:43.602599 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/e27345db6007af157f9c9720a7d718ea21605eb25c2fa6eb354135e7a5tshbh"] Nov 23 14:56:43 crc kubenswrapper[5050]: I1123 14:56:43.695108 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/e27345db6007af157f9c9720a7d718ea21605eb25c2fa6eb354135e7a5tshbh" event={"ID":"ad085e2d-80ec-4ee9-a7af-6eb49955b26f","Type":"ContainerStarted","Data":"0826a87199a960bb2d07e8686b37f547b9bb1b53338da776bb55d0368ee0a9ee"} Nov 23 14:56:44 crc kubenswrapper[5050]: I1123 14:56:44.708534 5050 generic.go:334] "Generic (PLEG): container finished" podID="ad085e2d-80ec-4ee9-a7af-6eb49955b26f" containerID="094ffba1931da0d63d1d23d450dd30312ce7cbea694b00c89f95d550efb771c1" exitCode=0 Nov 23 14:56:44 crc kubenswrapper[5050]: I1123 14:56:44.708667 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/e27345db6007af157f9c9720a7d718ea21605eb25c2fa6eb354135e7a5tshbh" event={"ID":"ad085e2d-80ec-4ee9-a7af-6eb49955b26f","Type":"ContainerDied","Data":"094ffba1931da0d63d1d23d450dd30312ce7cbea694b00c89f95d550efb771c1"} Nov 23 14:56:45 crc kubenswrapper[5050]: I1123 14:56:45.728754 5050 generic.go:334] "Generic (PLEG): container finished" podID="ad085e2d-80ec-4ee9-a7af-6eb49955b26f" containerID="8a0306cae1245aba15887766f42c7685b0f703cb6a8428f78509b193b0991a39" exitCode=0 Nov 23 14:56:45 crc kubenswrapper[5050]: I1123 14:56:45.729572 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/e27345db6007af157f9c9720a7d718ea21605eb25c2fa6eb354135e7a5tshbh" event={"ID":"ad085e2d-80ec-4ee9-a7af-6eb49955b26f","Type":"ContainerDied","Data":"8a0306cae1245aba15887766f42c7685b0f703cb6a8428f78509b193b0991a39"} Nov 23 14:56:46 crc kubenswrapper[5050]: I1123 14:56:46.745213 5050 generic.go:334] "Generic (PLEG): container finished" podID="ad085e2d-80ec-4ee9-a7af-6eb49955b26f" containerID="c18d72c9fc380c3d2a4b1bd0d8f7b8d8ec11070fe511d41c975330c1c71955f7" exitCode=0 Nov 23 14:56:46 crc 
kubenswrapper[5050]: I1123 14:56:46.745254 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/e27345db6007af157f9c9720a7d718ea21605eb25c2fa6eb354135e7a5tshbh" event={"ID":"ad085e2d-80ec-4ee9-a7af-6eb49955b26f","Type":"ContainerDied","Data":"c18d72c9fc380c3d2a4b1bd0d8f7b8d8ec11070fe511d41c975330c1c71955f7"} Nov 23 14:56:48 crc kubenswrapper[5050]: I1123 14:56:48.076331 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/e27345db6007af157f9c9720a7d718ea21605eb25c2fa6eb354135e7a5tshbh" Nov 23 14:56:48 crc kubenswrapper[5050]: I1123 14:56:48.246881 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wwzrl\" (UniqueName: \"kubernetes.io/projected/ad085e2d-80ec-4ee9-a7af-6eb49955b26f-kube-api-access-wwzrl\") pod \"ad085e2d-80ec-4ee9-a7af-6eb49955b26f\" (UID: \"ad085e2d-80ec-4ee9-a7af-6eb49955b26f\") " Nov 23 14:56:48 crc kubenswrapper[5050]: I1123 14:56:48.246977 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/ad085e2d-80ec-4ee9-a7af-6eb49955b26f-bundle\") pod \"ad085e2d-80ec-4ee9-a7af-6eb49955b26f\" (UID: \"ad085e2d-80ec-4ee9-a7af-6eb49955b26f\") " Nov 23 14:56:48 crc kubenswrapper[5050]: I1123 14:56:48.247168 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/ad085e2d-80ec-4ee9-a7af-6eb49955b26f-util\") pod \"ad085e2d-80ec-4ee9-a7af-6eb49955b26f\" (UID: \"ad085e2d-80ec-4ee9-a7af-6eb49955b26f\") " Nov 23 14:56:48 crc kubenswrapper[5050]: I1123 14:56:48.248191 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ad085e2d-80ec-4ee9-a7af-6eb49955b26f-bundle" (OuterVolumeSpecName: "bundle") pod "ad085e2d-80ec-4ee9-a7af-6eb49955b26f" (UID: "ad085e2d-80ec-4ee9-a7af-6eb49955b26f"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 14:56:48 crc kubenswrapper[5050]: I1123 14:56:48.258047 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ad085e2d-80ec-4ee9-a7af-6eb49955b26f-kube-api-access-wwzrl" (OuterVolumeSpecName: "kube-api-access-wwzrl") pod "ad085e2d-80ec-4ee9-a7af-6eb49955b26f" (UID: "ad085e2d-80ec-4ee9-a7af-6eb49955b26f"). InnerVolumeSpecName "kube-api-access-wwzrl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 14:56:48 crc kubenswrapper[5050]: I1123 14:56:48.260697 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ad085e2d-80ec-4ee9-a7af-6eb49955b26f-util" (OuterVolumeSpecName: "util") pod "ad085e2d-80ec-4ee9-a7af-6eb49955b26f" (UID: "ad085e2d-80ec-4ee9-a7af-6eb49955b26f"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 14:56:48 crc kubenswrapper[5050]: I1123 14:56:48.349210 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wwzrl\" (UniqueName: \"kubernetes.io/projected/ad085e2d-80ec-4ee9-a7af-6eb49955b26f-kube-api-access-wwzrl\") on node \"crc\" DevicePath \"\"" Nov 23 14:56:48 crc kubenswrapper[5050]: I1123 14:56:48.349252 5050 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/ad085e2d-80ec-4ee9-a7af-6eb49955b26f-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 14:56:48 crc kubenswrapper[5050]: I1123 14:56:48.349264 5050 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/ad085e2d-80ec-4ee9-a7af-6eb49955b26f-util\") on node \"crc\" DevicePath \"\"" Nov 23 14:56:48 crc kubenswrapper[5050]: I1123 14:56:48.762154 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/e27345db6007af157f9c9720a7d718ea21605eb25c2fa6eb354135e7a5tshbh" event={"ID":"ad085e2d-80ec-4ee9-a7af-6eb49955b26f","Type":"ContainerDied","Data":"0826a87199a960bb2d07e8686b37f547b9bb1b53338da776bb55d0368ee0a9ee"} Nov 23 14:56:48 crc kubenswrapper[5050]: I1123 14:56:48.762592 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0826a87199a960bb2d07e8686b37f547b9bb1b53338da776bb55d0368ee0a9ee" Nov 23 14:56:48 crc kubenswrapper[5050]: I1123 14:56:48.762285 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/e27345db6007af157f9c9720a7d718ea21605eb25c2fa6eb354135e7a5tshbh" Nov 23 14:56:55 crc kubenswrapper[5050]: I1123 14:56:55.144218 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-operator-597d69585c-7b9fh"] Nov 23 14:56:55 crc kubenswrapper[5050]: E1123 14:56:55.144822 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ad085e2d-80ec-4ee9-a7af-6eb49955b26f" containerName="util" Nov 23 14:56:55 crc kubenswrapper[5050]: I1123 14:56:55.144837 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="ad085e2d-80ec-4ee9-a7af-6eb49955b26f" containerName="util" Nov 23 14:56:55 crc kubenswrapper[5050]: E1123 14:56:55.144857 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ad085e2d-80ec-4ee9-a7af-6eb49955b26f" containerName="pull" Nov 23 14:56:55 crc kubenswrapper[5050]: I1123 14:56:55.144865 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="ad085e2d-80ec-4ee9-a7af-6eb49955b26f" containerName="pull" Nov 23 14:56:55 crc kubenswrapper[5050]: E1123 14:56:55.144890 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ad085e2d-80ec-4ee9-a7af-6eb49955b26f" containerName="extract" Nov 23 14:56:55 crc kubenswrapper[5050]: I1123 14:56:55.144899 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="ad085e2d-80ec-4ee9-a7af-6eb49955b26f" containerName="extract" Nov 23 14:56:55 crc kubenswrapper[5050]: I1123 14:56:55.145030 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="ad085e2d-80ec-4ee9-a7af-6eb49955b26f" containerName="extract" Nov 23 14:56:55 crc kubenswrapper[5050]: I1123 14:56:55.145703 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-operator-597d69585c-7b9fh" Nov 23 14:56:55 crc kubenswrapper[5050]: I1123 14:56:55.149708 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-operator-dockercfg-77kpl" Nov 23 14:56:55 crc kubenswrapper[5050]: I1123 14:56:55.166503 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-597d69585c-7b9fh"] Nov 23 14:56:55 crc kubenswrapper[5050]: I1123 14:56:55.307019 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-28fn6\" (UniqueName: \"kubernetes.io/projected/0db89f87-ca55-4247-977c-f40ffe5ade4c-kube-api-access-28fn6\") pod \"openstack-operator-controller-operator-597d69585c-7b9fh\" (UID: \"0db89f87-ca55-4247-977c-f40ffe5ade4c\") " pod="openstack-operators/openstack-operator-controller-operator-597d69585c-7b9fh" Nov 23 14:56:55 crc kubenswrapper[5050]: I1123 14:56:55.409071 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-28fn6\" (UniqueName: \"kubernetes.io/projected/0db89f87-ca55-4247-977c-f40ffe5ade4c-kube-api-access-28fn6\") pod \"openstack-operator-controller-operator-597d69585c-7b9fh\" (UID: \"0db89f87-ca55-4247-977c-f40ffe5ade4c\") " pod="openstack-operators/openstack-operator-controller-operator-597d69585c-7b9fh" Nov 23 14:56:55 crc kubenswrapper[5050]: I1123 14:56:55.433559 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-28fn6\" (UniqueName: \"kubernetes.io/projected/0db89f87-ca55-4247-977c-f40ffe5ade4c-kube-api-access-28fn6\") pod \"openstack-operator-controller-operator-597d69585c-7b9fh\" (UID: \"0db89f87-ca55-4247-977c-f40ffe5ade4c\") " pod="openstack-operators/openstack-operator-controller-operator-597d69585c-7b9fh" Nov 23 14:56:55 crc kubenswrapper[5050]: I1123 14:56:55.475501 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-operator-597d69585c-7b9fh" Nov 23 14:56:55 crc kubenswrapper[5050]: I1123 14:56:55.940245 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-597d69585c-7b9fh"] Nov 23 14:56:55 crc kubenswrapper[5050]: W1123 14:56:55.954291 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0db89f87_ca55_4247_977c_f40ffe5ade4c.slice/crio-3b18cf16b88e8a2f9b6263d7cb5e0a2175b15012012bf12242100225bf3c827a WatchSource:0}: Error finding container 3b18cf16b88e8a2f9b6263d7cb5e0a2175b15012012bf12242100225bf3c827a: Status 404 returned error can't find the container with id 3b18cf16b88e8a2f9b6263d7cb5e0a2175b15012012bf12242100225bf3c827a Nov 23 14:56:56 crc kubenswrapper[5050]: I1123 14:56:56.830569 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-597d69585c-7b9fh" event={"ID":"0db89f87-ca55-4247-977c-f40ffe5ade4c","Type":"ContainerStarted","Data":"3b18cf16b88e8a2f9b6263d7cb5e0a2175b15012012bf12242100225bf3c827a"} Nov 23 14:56:59 crc kubenswrapper[5050]: I1123 14:56:59.225090 5050 patch_prober.go:28] interesting pod/machine-config-daemon-hlrlq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 23 14:56:59 crc kubenswrapper[5050]: I1123 14:56:59.225822 5050 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 23 14:57:03 crc kubenswrapper[5050]: I1123 14:57:03.888344 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-597d69585c-7b9fh" event={"ID":"0db89f87-ca55-4247-977c-f40ffe5ade4c","Type":"ContainerStarted","Data":"f03ea3fcb13f1dc3730b0064578fdfde55bf66459dea10e4062263a9a414b35d"} Nov 23 14:57:07 crc kubenswrapper[5050]: I1123 14:57:07.920066 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-597d69585c-7b9fh" event={"ID":"0db89f87-ca55-4247-977c-f40ffe5ade4c","Type":"ContainerStarted","Data":"52919a28eb7d86e32358c69895cc04debdd29b0100db74c7a03fdc5cddac8e87"} Nov 23 14:57:07 crc kubenswrapper[5050]: I1123 14:57:07.920952 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-operator-597d69585c-7b9fh" Nov 23 14:57:07 crc kubenswrapper[5050]: I1123 14:57:07.924903 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-operator-597d69585c-7b9fh" Nov 23 14:57:07 crc kubenswrapper[5050]: I1123 14:57:07.956433 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-operator-597d69585c-7b9fh" podStartSLOduration=1.272718498 podStartE2EDuration="12.956401487s" podCreationTimestamp="2025-11-23 14:56:55 +0000 UTC" firstStartedPulling="2025-11-23 14:56:55.960079062 +0000 UTC m=+911.127075547" lastFinishedPulling="2025-11-23 14:57:07.643762021 
+0000 UTC m=+922.810758536" observedRunningTime="2025-11-23 14:57:07.950673572 +0000 UTC m=+923.117670067" watchObservedRunningTime="2025-11-23 14:57:07.956401487 +0000 UTC m=+923.123397992" Nov 23 14:57:27 crc kubenswrapper[5050]: I1123 14:57:27.884436 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/barbican-operator-controller-manager-75fb479bcc-2qdnf"] Nov 23 14:57:27 crc kubenswrapper[5050]: I1123 14:57:27.886021 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-75fb479bcc-2qdnf" Nov 23 14:57:27 crc kubenswrapper[5050]: I1123 14:57:27.902381 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/cinder-operator-controller-manager-6498cbf48f-h5gcg"] Nov 23 14:57:27 crc kubenswrapper[5050]: I1123 14:57:27.903542 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-6498cbf48f-h5gcg" Nov 23 14:57:27 crc kubenswrapper[5050]: I1123 14:57:27.904201 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-75fb479bcc-2qdnf"] Nov 23 14:57:27 crc kubenswrapper[5050]: I1123 14:57:27.906766 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"cinder-operator-controller-manager-dockercfg-5hjkq" Nov 23 14:57:27 crc kubenswrapper[5050]: I1123 14:57:27.917280 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"barbican-operator-controller-manager-dockercfg-fn29v" Nov 23 14:57:27 crc kubenswrapper[5050]: I1123 14:57:27.918519 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-6498cbf48f-h5gcg"] Nov 23 14:57:27 crc kubenswrapper[5050]: I1123 14:57:27.941463 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/glance-operator-controller-manager-7969689c84-r5lpg"] Nov 23 14:57:27 crc kubenswrapper[5050]: I1123 14:57:27.942583 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-7969689c84-r5lpg" Nov 23 14:57:27 crc kubenswrapper[5050]: I1123 14:57:27.945472 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"glance-operator-controller-manager-dockercfg-vgc74" Nov 23 14:57:27 crc kubenswrapper[5050]: I1123 14:57:27.946891 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/designate-operator-controller-manager-767ccfd65f-cp2sk"] Nov 23 14:57:27 crc kubenswrapper[5050]: I1123 14:57:27.949480 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m5frt\" (UniqueName: \"kubernetes.io/projected/b5fbd22d-ccfc-4a42-8b4b-f6566d36a8c1-kube-api-access-m5frt\") pod \"cinder-operator-controller-manager-6498cbf48f-h5gcg\" (UID: \"b5fbd22d-ccfc-4a42-8b4b-f6566d36a8c1\") " pod="openstack-operators/cinder-operator-controller-manager-6498cbf48f-h5gcg" Nov 23 14:57:27 crc kubenswrapper[5050]: I1123 14:57:27.949545 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-48x95\" (UniqueName: \"kubernetes.io/projected/8f0ad77e-339a-4a2e-a700-c52790877f4c-kube-api-access-48x95\") pod \"glance-operator-controller-manager-7969689c84-r5lpg\" (UID: \"8f0ad77e-339a-4a2e-a700-c52790877f4c\") " pod="openstack-operators/glance-operator-controller-manager-7969689c84-r5lpg" Nov 23 14:57:27 crc kubenswrapper[5050]: I1123 14:57:27.949578 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2cntm\" (UniqueName: \"kubernetes.io/projected/e4ff3d33-3a5f-4ba3-ae25-308b7ad6d389-kube-api-access-2cntm\") pod \"barbican-operator-controller-manager-75fb479bcc-2qdnf\" (UID: \"e4ff3d33-3a5f-4ba3-ae25-308b7ad6d389\") " pod="openstack-operators/barbican-operator-controller-manager-75fb479bcc-2qdnf" Nov 23 14:57:27 crc kubenswrapper[5050]: I1123 14:57:27.955977 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-767ccfd65f-cp2sk" Nov 23 14:57:27 crc kubenswrapper[5050]: I1123 14:57:27.961961 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"designate-operator-controller-manager-dockercfg-vdtks" Nov 23 14:57:27 crc kubenswrapper[5050]: I1123 14:57:27.970706 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-7969689c84-r5lpg"] Nov 23 14:57:27 crc kubenswrapper[5050]: I1123 14:57:27.991548 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-767ccfd65f-cp2sk"] Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.003528 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/heat-operator-controller-manager-56f54d6746-g2g7z"] Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.004843 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-56f54d6746-g2g7z" Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.011913 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"heat-operator-controller-manager-dockercfg-kqhxh" Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.028524 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/horizon-operator-controller-manager-598f69df5d-t4m5g"] Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.055016 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-48x95\" (UniqueName: \"kubernetes.io/projected/8f0ad77e-339a-4a2e-a700-c52790877f4c-kube-api-access-48x95\") pod \"glance-operator-controller-manager-7969689c84-r5lpg\" (UID: \"8f0ad77e-339a-4a2e-a700-c52790877f4c\") " pod="openstack-operators/glance-operator-controller-manager-7969689c84-r5lpg" Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.055267 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2cntm\" (UniqueName: \"kubernetes.io/projected/e4ff3d33-3a5f-4ba3-ae25-308b7ad6d389-kube-api-access-2cntm\") pod \"barbican-operator-controller-manager-75fb479bcc-2qdnf\" (UID: \"e4ff3d33-3a5f-4ba3-ae25-308b7ad6d389\") " pod="openstack-operators/barbican-operator-controller-manager-75fb479bcc-2qdnf" Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.055595 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m5frt\" (UniqueName: \"kubernetes.io/projected/b5fbd22d-ccfc-4a42-8b4b-f6566d36a8c1-kube-api-access-m5frt\") pod \"cinder-operator-controller-manager-6498cbf48f-h5gcg\" (UID: \"b5fbd22d-ccfc-4a42-8b4b-f6566d36a8c1\") " pod="openstack-operators/cinder-operator-controller-manager-6498cbf48f-h5gcg" Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.055824 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-whgml\" (UniqueName: \"kubernetes.io/projected/96580238-85b0-481c-91f2-be0888975fb7-kube-api-access-whgml\") pod \"heat-operator-controller-manager-56f54d6746-g2g7z\" (UID: \"96580238-85b0-481c-91f2-be0888975fb7\") " pod="openstack-operators/heat-operator-controller-manager-56f54d6746-g2g7z" Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.080173 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-598f69df5d-t4m5g" Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.087999 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"horizon-operator-controller-manager-dockercfg-5w4fp" Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.094061 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-598f69df5d-t4m5g"] Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.106165 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-56f54d6746-g2g7z"] Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.125245 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m5frt\" (UniqueName: \"kubernetes.io/projected/b5fbd22d-ccfc-4a42-8b4b-f6566d36a8c1-kube-api-access-m5frt\") pod \"cinder-operator-controller-manager-6498cbf48f-h5gcg\" (UID: \"b5fbd22d-ccfc-4a42-8b4b-f6566d36a8c1\") " pod="openstack-operators/cinder-operator-controller-manager-6498cbf48f-h5gcg" Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.127685 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ironic-operator-controller-manager-99b499f4-4p57r"] Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.130962 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-48x95\" (UniqueName: \"kubernetes.io/projected/8f0ad77e-339a-4a2e-a700-c52790877f4c-kube-api-access-48x95\") pod \"glance-operator-controller-manager-7969689c84-r5lpg\" (UID: \"8f0ad77e-339a-4a2e-a700-c52790877f4c\") " pod="openstack-operators/glance-operator-controller-manager-7969689c84-r5lpg" Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.131936 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-99b499f4-4p57r" Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.141463 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2cntm\" (UniqueName: \"kubernetes.io/projected/e4ff3d33-3a5f-4ba3-ae25-308b7ad6d389-kube-api-access-2cntm\") pod \"barbican-operator-controller-manager-75fb479bcc-2qdnf\" (UID: \"e4ff3d33-3a5f-4ba3-ae25-308b7ad6d389\") " pod="openstack-operators/barbican-operator-controller-manager-75fb479bcc-2qdnf" Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.141808 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ironic-operator-controller-manager-dockercfg-q46h8" Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.142555 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/infra-operator-controller-manager-6dd8864d7c-pc65p"] Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.143981 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-6dd8864d7c-pc65p" Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.149138 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-dockercfg-pvfmb" Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.149266 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-webhook-server-cert" Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.161918 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-smpp2\" (UniqueName: \"kubernetes.io/projected/97c24c4c-3ca8-4c5b-8b81-a64edf49b0bb-kube-api-access-smpp2\") pod \"ironic-operator-controller-manager-99b499f4-4p57r\" (UID: \"97c24c4c-3ca8-4c5b-8b81-a64edf49b0bb\") " pod="openstack-operators/ironic-operator-controller-manager-99b499f4-4p57r" Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.162043 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/09386696-43fd-4bd4-9afd-bba22b85c546-cert\") pod \"infra-operator-controller-manager-6dd8864d7c-pc65p\" (UID: \"09386696-43fd-4bd4-9afd-bba22b85c546\") " pod="openstack-operators/infra-operator-controller-manager-6dd8864d7c-pc65p" Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.162078 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rlxrb\" (UniqueName: \"kubernetes.io/projected/17b553bf-a9b6-4807-be93-845263e46bda-kube-api-access-rlxrb\") pod \"horizon-operator-controller-manager-598f69df5d-t4m5g\" (UID: \"17b553bf-a9b6-4807-be93-845263e46bda\") " pod="openstack-operators/horizon-operator-controller-manager-598f69df5d-t4m5g" Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.162121 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-llcq8\" (UniqueName: \"kubernetes.io/projected/09386696-43fd-4bd4-9afd-bba22b85c546-kube-api-access-llcq8\") pod \"infra-operator-controller-manager-6dd8864d7c-pc65p\" (UID: \"09386696-43fd-4bd4-9afd-bba22b85c546\") " pod="openstack-operators/infra-operator-controller-manager-6dd8864d7c-pc65p" Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.162156 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sm49b\" (UniqueName: \"kubernetes.io/projected/0f64e767-a4c5-4f8c-9dc9-0c09328b5d83-kube-api-access-sm49b\") pod \"designate-operator-controller-manager-767ccfd65f-cp2sk\" (UID: \"0f64e767-a4c5-4f8c-9dc9-0c09328b5d83\") " pod="openstack-operators/designate-operator-controller-manager-767ccfd65f-cp2sk" Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.162201 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-whgml\" (UniqueName: \"kubernetes.io/projected/96580238-85b0-481c-91f2-be0888975fb7-kube-api-access-whgml\") pod \"heat-operator-controller-manager-56f54d6746-g2g7z\" (UID: \"96580238-85b0-481c-91f2-be0888975fb7\") " pod="openstack-operators/heat-operator-controller-manager-56f54d6746-g2g7z" Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.178774 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-99b499f4-4p57r"] Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 
14:57:28.191739 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-whgml\" (UniqueName: \"kubernetes.io/projected/96580238-85b0-481c-91f2-be0888975fb7-kube-api-access-whgml\") pod \"heat-operator-controller-manager-56f54d6746-g2g7z\" (UID: \"96580238-85b0-481c-91f2-be0888975fb7\") " pod="openstack-operators/heat-operator-controller-manager-56f54d6746-g2g7z" Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.198286 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-6dd8864d7c-pc65p"] Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.204103 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/keystone-operator-controller-manager-7454b96578-spv2t"] Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.213512 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-75fb479bcc-2qdnf" Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.216081 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-7454b96578-spv2t" Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.217417 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-6498cbf48f-h5gcg" Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.229936 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-7454b96578-spv2t"] Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.230789 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"keystone-operator-controller-manager-dockercfg-rpk95" Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.235421 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/manila-operator-controller-manager-58f887965d-4856d"] Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.236900 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-58f887965d-4856d" Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.239551 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"manila-operator-controller-manager-dockercfg-wzmf8" Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.247297 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/neutron-operator-controller-manager-78bd47f458-nqlmd"] Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.251737 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-78bd47f458-nqlmd" Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.255211 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"neutron-operator-controller-manager-dockercfg-rrrrz" Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.256305 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-54b5986bb8-8tnhz"] Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.258650 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-54b5986bb8-8tnhz" Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.258686 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-7969689c84-r5lpg" Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.263123 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/09386696-43fd-4bd4-9afd-bba22b85c546-cert\") pod \"infra-operator-controller-manager-6dd8864d7c-pc65p\" (UID: \"09386696-43fd-4bd4-9afd-bba22b85c546\") " pod="openstack-operators/infra-operator-controller-manager-6dd8864d7c-pc65p" Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.263167 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rlxrb\" (UniqueName: \"kubernetes.io/projected/17b553bf-a9b6-4807-be93-845263e46bda-kube-api-access-rlxrb\") pod \"horizon-operator-controller-manager-598f69df5d-t4m5g\" (UID: \"17b553bf-a9b6-4807-be93-845263e46bda\") " pod="openstack-operators/horizon-operator-controller-manager-598f69df5d-t4m5g" Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.263203 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fwwsr\" (UniqueName: \"kubernetes.io/projected/ef8b813f-cf8f-46a0-ad63-259d5aa2dbb2-kube-api-access-fwwsr\") pod \"manila-operator-controller-manager-58f887965d-4856d\" (UID: \"ef8b813f-cf8f-46a0-ad63-259d5aa2dbb2\") " pod="openstack-operators/manila-operator-controller-manager-58f887965d-4856d" Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.263227 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-llcq8\" (UniqueName: \"kubernetes.io/projected/09386696-43fd-4bd4-9afd-bba22b85c546-kube-api-access-llcq8\") pod \"infra-operator-controller-manager-6dd8864d7c-pc65p\" (UID: \"09386696-43fd-4bd4-9afd-bba22b85c546\") " pod="openstack-operators/infra-operator-controller-manager-6dd8864d7c-pc65p" Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.263257 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sm49b\" (UniqueName: \"kubernetes.io/projected/0f64e767-a4c5-4f8c-9dc9-0c09328b5d83-kube-api-access-sm49b\") pod \"designate-operator-controller-manager-767ccfd65f-cp2sk\" (UID: \"0f64e767-a4c5-4f8c-9dc9-0c09328b5d83\") " pod="openstack-operators/designate-operator-controller-manager-767ccfd65f-cp2sk" Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.263304 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-smpp2\" (UniqueName: \"kubernetes.io/projected/97c24c4c-3ca8-4c5b-8b81-a64edf49b0bb-kube-api-access-smpp2\") pod \"ironic-operator-controller-manager-99b499f4-4p57r\" (UID: \"97c24c4c-3ca8-4c5b-8b81-a64edf49b0bb\") " pod="openstack-operators/ironic-operator-controller-manager-99b499f4-4p57r" Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.263330 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r9hk9\" (UniqueName: \"kubernetes.io/projected/e9c42c8d-d147-4b90-bcb4-5b2771121504-kube-api-access-r9hk9\") pod \"keystone-operator-controller-manager-7454b96578-spv2t\" (UID: \"e9c42c8d-d147-4b90-bcb4-5b2771121504\") " pod="openstack-operators/keystone-operator-controller-manager-7454b96578-spv2t" Nov 23 14:57:28 
crc kubenswrapper[5050]: I1123 14:57:28.263360 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b9sdh\" (UniqueName: \"kubernetes.io/projected/8c522f94-22c6-4569-a62b-18f9ed2f3b3f-kube-api-access-b9sdh\") pod \"neutron-operator-controller-manager-78bd47f458-nqlmd\" (UID: \"8c522f94-22c6-4569-a62b-18f9ed2f3b3f\") " pod="openstack-operators/neutron-operator-controller-manager-78bd47f458-nqlmd" Nov 23 14:57:28 crc kubenswrapper[5050]: E1123 14:57:28.263538 5050 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Nov 23 14:57:28 crc kubenswrapper[5050]: E1123 14:57:28.263584 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/09386696-43fd-4bd4-9afd-bba22b85c546-cert podName:09386696-43fd-4bd4-9afd-bba22b85c546 nodeName:}" failed. No retries permitted until 2025-11-23 14:57:28.763564879 +0000 UTC m=+943.930561364 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/09386696-43fd-4bd4-9afd-bba22b85c546-cert") pod "infra-operator-controller-manager-6dd8864d7c-pc65p" (UID: "09386696-43fd-4bd4-9afd-bba22b85c546") : secret "infra-operator-webhook-server-cert" not found Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.271682 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"mariadb-operator-controller-manager-dockercfg-dpzd7" Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.271994 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-78bd47f458-nqlmd"] Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.294378 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-54b5986bb8-8tnhz"] Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.307108 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-llcq8\" (UniqueName: \"kubernetes.io/projected/09386696-43fd-4bd4-9afd-bba22b85c546-kube-api-access-llcq8\") pod \"infra-operator-controller-manager-6dd8864d7c-pc65p\" (UID: \"09386696-43fd-4bd4-9afd-bba22b85c546\") " pod="openstack-operators/infra-operator-controller-manager-6dd8864d7c-pc65p" Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.324519 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-smpp2\" (UniqueName: \"kubernetes.io/projected/97c24c4c-3ca8-4c5b-8b81-a64edf49b0bb-kube-api-access-smpp2\") pod \"ironic-operator-controller-manager-99b499f4-4p57r\" (UID: \"97c24c4c-3ca8-4c5b-8b81-a64edf49b0bb\") " pod="openstack-operators/ironic-operator-controller-manager-99b499f4-4p57r" Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.335553 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-58f887965d-4856d"] Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.341111 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-56f54d6746-g2g7z" Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.349318 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rlxrb\" (UniqueName: \"kubernetes.io/projected/17b553bf-a9b6-4807-be93-845263e46bda-kube-api-access-rlxrb\") pod \"horizon-operator-controller-manager-598f69df5d-t4m5g\" (UID: \"17b553bf-a9b6-4807-be93-845263e46bda\") " pod="openstack-operators/horizon-operator-controller-manager-598f69df5d-t4m5g" Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.349919 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sm49b\" (UniqueName: \"kubernetes.io/projected/0f64e767-a4c5-4f8c-9dc9-0c09328b5d83-kube-api-access-sm49b\") pod \"designate-operator-controller-manager-767ccfd65f-cp2sk\" (UID: \"0f64e767-a4c5-4f8c-9dc9-0c09328b5d83\") " pod="openstack-operators/designate-operator-controller-manager-767ccfd65f-cp2sk" Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.366256 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fwwsr\" (UniqueName: \"kubernetes.io/projected/ef8b813f-cf8f-46a0-ad63-259d5aa2dbb2-kube-api-access-fwwsr\") pod \"manila-operator-controller-manager-58f887965d-4856d\" (UID: \"ef8b813f-cf8f-46a0-ad63-259d5aa2dbb2\") " pod="openstack-operators/manila-operator-controller-manager-58f887965d-4856d" Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.366378 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dsqw6\" (UniqueName: \"kubernetes.io/projected/1b42304a-8ccd-4015-9e74-295ea7abd0fd-kube-api-access-dsqw6\") pod \"mariadb-operator-controller-manager-54b5986bb8-8tnhz\" (UID: \"1b42304a-8ccd-4015-9e74-295ea7abd0fd\") " pod="openstack-operators/mariadb-operator-controller-manager-54b5986bb8-8tnhz" Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.366412 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r9hk9\" (UniqueName: \"kubernetes.io/projected/e9c42c8d-d147-4b90-bcb4-5b2771121504-kube-api-access-r9hk9\") pod \"keystone-operator-controller-manager-7454b96578-spv2t\" (UID: \"e9c42c8d-d147-4b90-bcb4-5b2771121504\") " pod="openstack-operators/keystone-operator-controller-manager-7454b96578-spv2t" Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.366455 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b9sdh\" (UniqueName: \"kubernetes.io/projected/8c522f94-22c6-4569-a62b-18f9ed2f3b3f-kube-api-access-b9sdh\") pod \"neutron-operator-controller-manager-78bd47f458-nqlmd\" (UID: \"8c522f94-22c6-4569-a62b-18f9ed2f3b3f\") " pod="openstack-operators/neutron-operator-controller-manager-78bd47f458-nqlmd" Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.374115 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/nova-operator-controller-manager-cfbb9c588-d9m8q"] Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.375946 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-cfbb9c588-d9m8q" Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.379766 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"nova-operator-controller-manager-dockercfg-fc5qq" Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.390562 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/octavia-operator-controller-manager-54cfbf4c7d-mzmmd"] Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.392083 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-54cfbf4c7d-mzmmd" Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.401281 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-cfbb9c588-d9m8q"] Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.408227 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r9hk9\" (UniqueName: \"kubernetes.io/projected/e9c42c8d-d147-4b90-bcb4-5b2771121504-kube-api-access-r9hk9\") pod \"keystone-operator-controller-manager-7454b96578-spv2t\" (UID: \"e9c42c8d-d147-4b90-bcb4-5b2771121504\") " pod="openstack-operators/keystone-operator-controller-manager-7454b96578-spv2t" Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.410971 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"octavia-operator-controller-manager-dockercfg-bnr67" Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.417528 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-598f69df5d-t4m5g" Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.419127 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fwwsr\" (UniqueName: \"kubernetes.io/projected/ef8b813f-cf8f-46a0-ad63-259d5aa2dbb2-kube-api-access-fwwsr\") pod \"manila-operator-controller-manager-58f887965d-4856d\" (UID: \"ef8b813f-cf8f-46a0-ad63-259d5aa2dbb2\") " pod="openstack-operators/manila-operator-controller-manager-58f887965d-4856d" Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.419880 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-54cfbf4c7d-mzmmd"] Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.425937 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b9sdh\" (UniqueName: \"kubernetes.io/projected/8c522f94-22c6-4569-a62b-18f9ed2f3b3f-kube-api-access-b9sdh\") pod \"neutron-operator-controller-manager-78bd47f458-nqlmd\" (UID: \"8c522f94-22c6-4569-a62b-18f9ed2f3b3f\") " pod="openstack-operators/neutron-operator-controller-manager-78bd47f458-nqlmd" Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.456823 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ovn-operator-controller-manager-54fc5f65b7-k9c9p"] Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.458139 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-54fc5f65b7-k9c9p" Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.461316 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ovn-operator-controller-manager-dockercfg-5xds2" Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.480822 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-54fc5f65b7-k9c9p"] Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.481051 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dsqw6\" (UniqueName: \"kubernetes.io/projected/1b42304a-8ccd-4015-9e74-295ea7abd0fd-kube-api-access-dsqw6\") pod \"mariadb-operator-controller-manager-54b5986bb8-8tnhz\" (UID: \"1b42304a-8ccd-4015-9e74-295ea7abd0fd\") " pod="openstack-operators/mariadb-operator-controller-manager-54b5986bb8-8tnhz" Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.481128 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qwp8g\" (UniqueName: \"kubernetes.io/projected/b886cbd7-ef95-46b5-b817-d041b5b96cec-kube-api-access-qwp8g\") pod \"octavia-operator-controller-manager-54cfbf4c7d-mzmmd\" (UID: \"b886cbd7-ef95-46b5-b817-d041b5b96cec\") " pod="openstack-operators/octavia-operator-controller-manager-54cfbf4c7d-mzmmd" Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.492698 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-8c7444f48-zksdq"] Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.494045 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-8c7444f48-zksdq" Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.499479 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-controller-manager-dockercfg-zlxs6" Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.499483 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-webhook-server-cert" Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.531075 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-99b499f4-4p57r" Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.536730 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-8c7444f48-zksdq"] Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.547199 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dsqw6\" (UniqueName: \"kubernetes.io/projected/1b42304a-8ccd-4015-9e74-295ea7abd0fd-kube-api-access-dsqw6\") pod \"mariadb-operator-controller-manager-54b5986bb8-8tnhz\" (UID: \"1b42304a-8ccd-4015-9e74-295ea7abd0fd\") " pod="openstack-operators/mariadb-operator-controller-manager-54b5986bb8-8tnhz" Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.550658 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/placement-operator-controller-manager-5b797b8dff-xc9s4"] Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.552144 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-5b797b8dff-xc9s4" Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.563468 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"placement-operator-controller-manager-dockercfg-z979x" Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.607782 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/swift-operator-controller-manager-d656998f4-fjsvj"] Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.610567 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-767ccfd65f-cp2sk" Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.617034 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qwp8g\" (UniqueName: \"kubernetes.io/projected/b886cbd7-ef95-46b5-b817-d041b5b96cec-kube-api-access-qwp8g\") pod \"octavia-operator-controller-manager-54cfbf4c7d-mzmmd\" (UID: \"b886cbd7-ef95-46b5-b817-d041b5b96cec\") " pod="openstack-operators/octavia-operator-controller-manager-54cfbf4c7d-mzmmd" Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.617172 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7kxtt\" (UniqueName: \"kubernetes.io/projected/52c3e5f3-3fee-4095-97f2-e7260eb08d3d-kube-api-access-7kxtt\") pod \"ovn-operator-controller-manager-54fc5f65b7-k9c9p\" (UID: \"52c3e5f3-3fee-4095-97f2-e7260eb08d3d\") " pod="openstack-operators/ovn-operator-controller-manager-54fc5f65b7-k9c9p" Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.617225 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dssqx\" (UniqueName: \"kubernetes.io/projected/d54a2a99-ffe4-422e-bbae-54371be5a57e-kube-api-access-dssqx\") pod \"nova-operator-controller-manager-cfbb9c588-d9m8q\" (UID: \"d54a2a99-ffe4-422e-bbae-54371be5a57e\") " pod="openstack-operators/nova-operator-controller-manager-cfbb9c588-d9m8q" Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.619954 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-7454b96578-spv2t" Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.650089 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-58f887965d-4856d" Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.672621 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-78bd47f458-nqlmd" Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.678225 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-54b5986bb8-8tnhz" Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.731113 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dssqx\" (UniqueName: \"kubernetes.io/projected/d54a2a99-ffe4-422e-bbae-54371be5a57e-kube-api-access-dssqx\") pod \"nova-operator-controller-manager-cfbb9c588-d9m8q\" (UID: \"d54a2a99-ffe4-422e-bbae-54371be5a57e\") " pod="openstack-operators/nova-operator-controller-manager-cfbb9c588-d9m8q" Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.731234 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/9483bc3e-409c-426c-a0ea-34b75543ac3c-cert\") pod \"openstack-baremetal-operator-controller-manager-8c7444f48-zksdq\" (UID: \"9483bc3e-409c-426c-a0ea-34b75543ac3c\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-8c7444f48-zksdq" Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.731281 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9klj6\" (UniqueName: \"kubernetes.io/projected/5de89bc7-53c3-4bf9-a791-b9b0b6a30603-kube-api-access-9klj6\") pod \"placement-operator-controller-manager-5b797b8dff-xc9s4\" (UID: \"5de89bc7-53c3-4bf9-a791-b9b0b6a30603\") " pod="openstack-operators/placement-operator-controller-manager-5b797b8dff-xc9s4" Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.731420 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dpc9f\" (UniqueName: \"kubernetes.io/projected/9483bc3e-409c-426c-a0ea-34b75543ac3c-kube-api-access-dpc9f\") pod \"openstack-baremetal-operator-controller-manager-8c7444f48-zksdq\" (UID: \"9483bc3e-409c-426c-a0ea-34b75543ac3c\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-8c7444f48-zksdq" Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.731542 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7kxtt\" (UniqueName: \"kubernetes.io/projected/52c3e5f3-3fee-4095-97f2-e7260eb08d3d-kube-api-access-7kxtt\") pod \"ovn-operator-controller-manager-54fc5f65b7-k9c9p\" (UID: \"52c3e5f3-3fee-4095-97f2-e7260eb08d3d\") " pod="openstack-operators/ovn-operator-controller-manager-54fc5f65b7-k9c9p" Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.731571 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qwp8g\" (UniqueName: \"kubernetes.io/projected/b886cbd7-ef95-46b5-b817-d041b5b96cec-kube-api-access-qwp8g\") pod \"octavia-operator-controller-manager-54cfbf4c7d-mzmmd\" (UID: \"b886cbd7-ef95-46b5-b817-d041b5b96cec\") " pod="openstack-operators/octavia-operator-controller-manager-54cfbf4c7d-mzmmd" Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.765556 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-54cfbf4c7d-mzmmd" Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.827817 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-5b797b8dff-xc9s4"] Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.827959 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-d656998f4-fjsvj" Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.831257 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-d656998f4-fjsvj"] Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.837696 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dssqx\" (UniqueName: \"kubernetes.io/projected/d54a2a99-ffe4-422e-bbae-54371be5a57e-kube-api-access-dssqx\") pod \"nova-operator-controller-manager-cfbb9c588-d9m8q\" (UID: \"d54a2a99-ffe4-422e-bbae-54371be5a57e\") " pod="openstack-operators/nova-operator-controller-manager-cfbb9c588-d9m8q" Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.837994 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"swift-operator-controller-manager-dockercfg-jtj5d" Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.838540 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7kxtt\" (UniqueName: \"kubernetes.io/projected/52c3e5f3-3fee-4095-97f2-e7260eb08d3d-kube-api-access-7kxtt\") pod \"ovn-operator-controller-manager-54fc5f65b7-k9c9p\" (UID: \"52c3e5f3-3fee-4095-97f2-e7260eb08d3d\") " pod="openstack-operators/ovn-operator-controller-manager-54fc5f65b7-k9c9p" Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.839353 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/9483bc3e-409c-426c-a0ea-34b75543ac3c-cert\") pod \"openstack-baremetal-operator-controller-manager-8c7444f48-zksdq\" (UID: \"9483bc3e-409c-426c-a0ea-34b75543ac3c\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-8c7444f48-zksdq" Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.839385 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9klj6\" (UniqueName: \"kubernetes.io/projected/5de89bc7-53c3-4bf9-a791-b9b0b6a30603-kube-api-access-9klj6\") pod \"placement-operator-controller-manager-5b797b8dff-xc9s4\" (UID: \"5de89bc7-53c3-4bf9-a791-b9b0b6a30603\") " pod="openstack-operators/placement-operator-controller-manager-5b797b8dff-xc9s4" Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.839465 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/09386696-43fd-4bd4-9afd-bba22b85c546-cert\") pod \"infra-operator-controller-manager-6dd8864d7c-pc65p\" (UID: \"09386696-43fd-4bd4-9afd-bba22b85c546\") " pod="openstack-operators/infra-operator-controller-manager-6dd8864d7c-pc65p" Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.839488 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dpc9f\" (UniqueName: \"kubernetes.io/projected/9483bc3e-409c-426c-a0ea-34b75543ac3c-kube-api-access-dpc9f\") pod \"openstack-baremetal-operator-controller-manager-8c7444f48-zksdq\" (UID: \"9483bc3e-409c-426c-a0ea-34b75543ac3c\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-8c7444f48-zksdq" Nov 23 14:57:28 crc kubenswrapper[5050]: E1123 14:57:28.839497 5050 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 23 14:57:28 crc kubenswrapper[5050]: E1123 14:57:28.839546 5050 nestedpendingoperations.go:348] Operation 
for "{volumeName:kubernetes.io/secret/9483bc3e-409c-426c-a0ea-34b75543ac3c-cert podName:9483bc3e-409c-426c-a0ea-34b75543ac3c nodeName:}" failed. No retries permitted until 2025-11-23 14:57:29.33952862 +0000 UTC m=+944.506525105 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/9483bc3e-409c-426c-a0ea-34b75543ac3c-cert") pod "openstack-baremetal-operator-controller-manager-8c7444f48-zksdq" (UID: "9483bc3e-409c-426c-a0ea-34b75543ac3c") : secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.856919 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/09386696-43fd-4bd4-9afd-bba22b85c546-cert\") pod \"infra-operator-controller-manager-6dd8864d7c-pc65p\" (UID: \"09386696-43fd-4bd4-9afd-bba22b85c546\") " pod="openstack-operators/infra-operator-controller-manager-6dd8864d7c-pc65p" Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.857311 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-6d4bf84b58-8wkkq"] Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.859359 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-6d4bf84b58-8wkkq" Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.866264 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"telemetry-operator-controller-manager-dockercfg-pqbgx" Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.867293 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-6dd8864d7c-pc65p" Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.873896 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/test-operator-controller-manager-b4c496f69-zdcb4"] Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.875607 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/test-operator-controller-manager-b4c496f69-zdcb4" Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.882358 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-b4c496f69-zdcb4"] Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.885385 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"test-operator-controller-manager-dockercfg-jbd8k" Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.896425 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dpc9f\" (UniqueName: \"kubernetes.io/projected/9483bc3e-409c-426c-a0ea-34b75543ac3c-kube-api-access-dpc9f\") pod \"openstack-baremetal-operator-controller-manager-8c7444f48-zksdq\" (UID: \"9483bc3e-409c-426c-a0ea-34b75543ac3c\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-8c7444f48-zksdq" Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.920505 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-6d4bf84b58-8wkkq"] Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.921188 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9klj6\" (UniqueName: \"kubernetes.io/projected/5de89bc7-53c3-4bf9-a791-b9b0b6a30603-kube-api-access-9klj6\") pod \"placement-operator-controller-manager-5b797b8dff-xc9s4\" (UID: \"5de89bc7-53c3-4bf9-a791-b9b0b6a30603\") " pod="openstack-operators/placement-operator-controller-manager-5b797b8dff-xc9s4" Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.937725 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/watcher-operator-controller-manager-8c6448b9f-m7f26"] Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.944623 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6npjj\" (UniqueName: \"kubernetes.io/projected/78533aec-d2a4-4691-8960-a5ca3abd34a8-kube-api-access-6npjj\") pod \"telemetry-operator-controller-manager-6d4bf84b58-8wkkq\" (UID: \"78533aec-d2a4-4691-8960-a5ca3abd34a8\") " pod="openstack-operators/telemetry-operator-controller-manager-6d4bf84b58-8wkkq" Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.944704 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w6pr7\" (UniqueName: \"kubernetes.io/projected/c937d003-eeeb-49db-b2fe-8bc1ffde5f94-kube-api-access-w6pr7\") pod \"swift-operator-controller-manager-d656998f4-fjsvj\" (UID: \"c937d003-eeeb-49db-b2fe-8bc1ffde5f94\") " pod="openstack-operators/swift-operator-controller-manager-d656998f4-fjsvj" Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.951378 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-8c6448b9f-m7f26" Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.955123 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"watcher-operator-controller-manager-dockercfg-npvkq" Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.977089 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-8c6448b9f-m7f26"] Nov 23 14:57:28 crc kubenswrapper[5050]: I1123 14:57:28.979204 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-5b797b8dff-xc9s4" Nov 23 14:57:29 crc kubenswrapper[5050]: I1123 14:57:29.024954 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-manager-669b8498dc-87h5k"] Nov 23 14:57:29 crc kubenswrapper[5050]: I1123 14:57:29.026257 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-669b8498dc-87h5k" Nov 23 14:57:29 crc kubenswrapper[5050]: I1123 14:57:29.031235 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-manager-dockercfg-75mkf" Nov 23 14:57:29 crc kubenswrapper[5050]: I1123 14:57:29.031902 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"webhook-server-cert" Nov 23 14:57:29 crc kubenswrapper[5050]: I1123 14:57:29.047070 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q8mpf\" (UniqueName: \"kubernetes.io/projected/4b036715-70f2-493c-9e9c-27228473af65-kube-api-access-q8mpf\") pod \"test-operator-controller-manager-b4c496f69-zdcb4\" (UID: \"4b036715-70f2-493c-9e9c-27228473af65\") " pod="openstack-operators/test-operator-controller-manager-b4c496f69-zdcb4" Nov 23 14:57:29 crc kubenswrapper[5050]: I1123 14:57:29.048030 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6npjj\" (UniqueName: \"kubernetes.io/projected/78533aec-d2a4-4691-8960-a5ca3abd34a8-kube-api-access-6npjj\") pod \"telemetry-operator-controller-manager-6d4bf84b58-8wkkq\" (UID: \"78533aec-d2a4-4691-8960-a5ca3abd34a8\") " pod="openstack-operators/telemetry-operator-controller-manager-6d4bf84b58-8wkkq" Nov 23 14:57:29 crc kubenswrapper[5050]: I1123 14:57:29.048199 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-65v2d\" (UniqueName: \"kubernetes.io/projected/9e9d6f21-c5ab-438c-99ee-433f1eaa9093-kube-api-access-65v2d\") pod \"watcher-operator-controller-manager-8c6448b9f-m7f26\" (UID: \"9e9d6f21-c5ab-438c-99ee-433f1eaa9093\") " pod="openstack-operators/watcher-operator-controller-manager-8c6448b9f-m7f26" Nov 23 14:57:29 crc kubenswrapper[5050]: I1123 14:57:29.048377 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w6pr7\" (UniqueName: \"kubernetes.io/projected/c937d003-eeeb-49db-b2fe-8bc1ffde5f94-kube-api-access-w6pr7\") pod \"swift-operator-controller-manager-d656998f4-fjsvj\" (UID: \"c937d003-eeeb-49db-b2fe-8bc1ffde5f94\") " pod="openstack-operators/swift-operator-controller-manager-d656998f4-fjsvj" Nov 23 14:57:29 crc kubenswrapper[5050]: I1123 14:57:29.047420 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-cfbb9c588-d9m8q" Nov 23 14:57:29 crc kubenswrapper[5050]: I1123 14:57:29.071971 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-669b8498dc-87h5k"] Nov 23 14:57:29 crc kubenswrapper[5050]: I1123 14:57:29.083083 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6npjj\" (UniqueName: \"kubernetes.io/projected/78533aec-d2a4-4691-8960-a5ca3abd34a8-kube-api-access-6npjj\") pod \"telemetry-operator-controller-manager-6d4bf84b58-8wkkq\" (UID: \"78533aec-d2a4-4691-8960-a5ca3abd34a8\") " pod="openstack-operators/telemetry-operator-controller-manager-6d4bf84b58-8wkkq" Nov 23 14:57:29 crc kubenswrapper[5050]: I1123 14:57:29.087892 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w6pr7\" (UniqueName: \"kubernetes.io/projected/c937d003-eeeb-49db-b2fe-8bc1ffde5f94-kube-api-access-w6pr7\") pod \"swift-operator-controller-manager-d656998f4-fjsvj\" (UID: \"c937d003-eeeb-49db-b2fe-8bc1ffde5f94\") " pod="openstack-operators/swift-operator-controller-manager-d656998f4-fjsvj" Nov 23 14:57:29 crc kubenswrapper[5050]: I1123 14:57:29.129566 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-54fc5f65b7-k9c9p" Nov 23 14:57:29 crc kubenswrapper[5050]: I1123 14:57:29.130277 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-k4khg"] Nov 23 14:57:29 crc kubenswrapper[5050]: I1123 14:57:29.131614 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-k4khg" Nov 23 14:57:29 crc kubenswrapper[5050]: I1123 14:57:29.136249 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"rabbitmq-cluster-operator-controller-manager-dockercfg-qvxgr" Nov 23 14:57:29 crc kubenswrapper[5050]: I1123 14:57:29.142729 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-k4khg"] Nov 23 14:57:29 crc kubenswrapper[5050]: I1123 14:57:29.150159 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q8mpf\" (UniqueName: \"kubernetes.io/projected/4b036715-70f2-493c-9e9c-27228473af65-kube-api-access-q8mpf\") pod \"test-operator-controller-manager-b4c496f69-zdcb4\" (UID: \"4b036715-70f2-493c-9e9c-27228473af65\") " pod="openstack-operators/test-operator-controller-manager-b4c496f69-zdcb4" Nov 23 14:57:29 crc kubenswrapper[5050]: I1123 14:57:29.150229 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/68864336-7cb2-4deb-9ef9-159d78b04fc1-cert\") pod \"openstack-operator-controller-manager-669b8498dc-87h5k\" (UID: \"68864336-7cb2-4deb-9ef9-159d78b04fc1\") " pod="openstack-operators/openstack-operator-controller-manager-669b8498dc-87h5k" Nov 23 14:57:29 crc kubenswrapper[5050]: I1123 14:57:29.150303 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r8bdt\" (UniqueName: \"kubernetes.io/projected/68864336-7cb2-4deb-9ef9-159d78b04fc1-kube-api-access-r8bdt\") pod \"openstack-operator-controller-manager-669b8498dc-87h5k\" (UID: \"68864336-7cb2-4deb-9ef9-159d78b04fc1\") " 
pod="openstack-operators/openstack-operator-controller-manager-669b8498dc-87h5k" Nov 23 14:57:29 crc kubenswrapper[5050]: I1123 14:57:29.150326 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-65v2d\" (UniqueName: \"kubernetes.io/projected/9e9d6f21-c5ab-438c-99ee-433f1eaa9093-kube-api-access-65v2d\") pod \"watcher-operator-controller-manager-8c6448b9f-m7f26\" (UID: \"9e9d6f21-c5ab-438c-99ee-433f1eaa9093\") " pod="openstack-operators/watcher-operator-controller-manager-8c6448b9f-m7f26" Nov 23 14:57:29 crc kubenswrapper[5050]: I1123 14:57:29.176099 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-65v2d\" (UniqueName: \"kubernetes.io/projected/9e9d6f21-c5ab-438c-99ee-433f1eaa9093-kube-api-access-65v2d\") pod \"watcher-operator-controller-manager-8c6448b9f-m7f26\" (UID: \"9e9d6f21-c5ab-438c-99ee-433f1eaa9093\") " pod="openstack-operators/watcher-operator-controller-manager-8c6448b9f-m7f26" Nov 23 14:57:29 crc kubenswrapper[5050]: I1123 14:57:29.179476 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q8mpf\" (UniqueName: \"kubernetes.io/projected/4b036715-70f2-493c-9e9c-27228473af65-kube-api-access-q8mpf\") pod \"test-operator-controller-manager-b4c496f69-zdcb4\" (UID: \"4b036715-70f2-493c-9e9c-27228473af65\") " pod="openstack-operators/test-operator-controller-manager-b4c496f69-zdcb4" Nov 23 14:57:29 crc kubenswrapper[5050]: I1123 14:57:29.196246 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-d656998f4-fjsvj" Nov 23 14:57:29 crc kubenswrapper[5050]: I1123 14:57:29.211398 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-75fb479bcc-2qdnf"] Nov 23 14:57:29 crc kubenswrapper[5050]: I1123 14:57:29.220021 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-6d4bf84b58-8wkkq" Nov 23 14:57:29 crc kubenswrapper[5050]: I1123 14:57:29.226365 5050 patch_prober.go:28] interesting pod/machine-config-daemon-hlrlq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 23 14:57:29 crc kubenswrapper[5050]: I1123 14:57:29.226436 5050 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 23 14:57:29 crc kubenswrapper[5050]: I1123 14:57:29.226527 5050 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" Nov 23 14:57:29 crc kubenswrapper[5050]: I1123 14:57:29.227898 5050 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"1b270f8e19ea537b9c90473c9519c04a4a641a83e215048ece7d9c5619e16b60"} pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 23 14:57:29 crc kubenswrapper[5050]: I1123 14:57:29.227981 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" containerID="cri-o://1b270f8e19ea537b9c90473c9519c04a4a641a83e215048ece7d9c5619e16b60" gracePeriod=600 Nov 23 14:57:29 crc kubenswrapper[5050]: I1123 14:57:29.252543 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/test-operator-controller-manager-b4c496f69-zdcb4" Nov 23 14:57:29 crc kubenswrapper[5050]: I1123 14:57:29.254956 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b96cz\" (UniqueName: \"kubernetes.io/projected/14e92b7f-abf0-4e72-b7b0-ff9b8d1e199b-kube-api-access-b96cz\") pod \"rabbitmq-cluster-operator-manager-5f97d8c699-k4khg\" (UID: \"14e92b7f-abf0-4e72-b7b0-ff9b8d1e199b\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-k4khg" Nov 23 14:57:29 crc kubenswrapper[5050]: I1123 14:57:29.260005 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/68864336-7cb2-4deb-9ef9-159d78b04fc1-cert\") pod \"openstack-operator-controller-manager-669b8498dc-87h5k\" (UID: \"68864336-7cb2-4deb-9ef9-159d78b04fc1\") " pod="openstack-operators/openstack-operator-controller-manager-669b8498dc-87h5k" Nov 23 14:57:29 crc kubenswrapper[5050]: I1123 14:57:29.260133 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r8bdt\" (UniqueName: \"kubernetes.io/projected/68864336-7cb2-4deb-9ef9-159d78b04fc1-kube-api-access-r8bdt\") pod \"openstack-operator-controller-manager-669b8498dc-87h5k\" (UID: \"68864336-7cb2-4deb-9ef9-159d78b04fc1\") " pod="openstack-operators/openstack-operator-controller-manager-669b8498dc-87h5k" Nov 23 14:57:29 crc kubenswrapper[5050]: E1123 14:57:29.262297 5050 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Nov 23 14:57:29 crc kubenswrapper[5050]: E1123 14:57:29.262393 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/68864336-7cb2-4deb-9ef9-159d78b04fc1-cert podName:68864336-7cb2-4deb-9ef9-159d78b04fc1 nodeName:}" failed. No retries permitted until 2025-11-23 14:57:29.762365748 +0000 UTC m=+944.929362233 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/68864336-7cb2-4deb-9ef9-159d78b04fc1-cert") pod "openstack-operator-controller-manager-669b8498dc-87h5k" (UID: "68864336-7cb2-4deb-9ef9-159d78b04fc1") : secret "webhook-server-cert" not found Nov 23 14:57:29 crc kubenswrapper[5050]: I1123 14:57:29.284216 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-8c6448b9f-m7f26" Nov 23 14:57:29 crc kubenswrapper[5050]: I1123 14:57:29.289300 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r8bdt\" (UniqueName: \"kubernetes.io/projected/68864336-7cb2-4deb-9ef9-159d78b04fc1-kube-api-access-r8bdt\") pod \"openstack-operator-controller-manager-669b8498dc-87h5k\" (UID: \"68864336-7cb2-4deb-9ef9-159d78b04fc1\") " pod="openstack-operators/openstack-operator-controller-manager-669b8498dc-87h5k" Nov 23 14:57:29 crc kubenswrapper[5050]: I1123 14:57:29.348705 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-56f54d6746-g2g7z"] Nov 23 14:57:29 crc kubenswrapper[5050]: I1123 14:57:29.364373 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/9483bc3e-409c-426c-a0ea-34b75543ac3c-cert\") pod \"openstack-baremetal-operator-controller-manager-8c7444f48-zksdq\" (UID: \"9483bc3e-409c-426c-a0ea-34b75543ac3c\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-8c7444f48-zksdq" Nov 23 14:57:29 crc kubenswrapper[5050]: I1123 14:57:29.367077 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b96cz\" (UniqueName: \"kubernetes.io/projected/14e92b7f-abf0-4e72-b7b0-ff9b8d1e199b-kube-api-access-b96cz\") pod \"rabbitmq-cluster-operator-manager-5f97d8c699-k4khg\" (UID: \"14e92b7f-abf0-4e72-b7b0-ff9b8d1e199b\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-k4khg" Nov 23 14:57:29 crc kubenswrapper[5050]: I1123 14:57:29.380790 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/9483bc3e-409c-426c-a0ea-34b75543ac3c-cert\") pod \"openstack-baremetal-operator-controller-manager-8c7444f48-zksdq\" (UID: \"9483bc3e-409c-426c-a0ea-34b75543ac3c\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-8c7444f48-zksdq" Nov 23 14:57:29 crc kubenswrapper[5050]: I1123 14:57:29.395485 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b96cz\" (UniqueName: \"kubernetes.io/projected/14e92b7f-abf0-4e72-b7b0-ff9b8d1e199b-kube-api-access-b96cz\") pod \"rabbitmq-cluster-operator-manager-5f97d8c699-k4khg\" (UID: \"14e92b7f-abf0-4e72-b7b0-ff9b8d1e199b\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-k4khg" Nov 23 14:57:29 crc kubenswrapper[5050]: I1123 14:57:29.450163 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-8c7444f48-zksdq" Nov 23 14:57:29 crc kubenswrapper[5050]: I1123 14:57:29.587774 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-6498cbf48f-h5gcg"] Nov 23 14:57:29 crc kubenswrapper[5050]: I1123 14:57:29.588209 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-7969689c84-r5lpg"] Nov 23 14:57:29 crc kubenswrapper[5050]: W1123 14:57:29.588890 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8f0ad77e_339a_4a2e_a700_c52790877f4c.slice/crio-c3a6097978a4fff089ba8b56bc01b75e84850a7fa6bba1bf38d9f66be986b8e3 WatchSource:0}: Error finding container c3a6097978a4fff089ba8b56bc01b75e84850a7fa6bba1bf38d9f66be986b8e3: Status 404 returned error can't find the container with id c3a6097978a4fff089ba8b56bc01b75e84850a7fa6bba1bf38d9f66be986b8e3 Nov 23 14:57:29 crc kubenswrapper[5050]: I1123 14:57:29.591985 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-598f69df5d-t4m5g"] Nov 23 14:57:29 crc kubenswrapper[5050]: I1123 14:57:29.626674 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-k4khg" Nov 23 14:57:29 crc kubenswrapper[5050]: W1123 14:57:29.629369 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb5fbd22d_ccfc_4a42_8b4b_f6566d36a8c1.slice/crio-aea649b89bde54780b7d21050d49ad6052192a8ae07b1797ec86e744a83ce9a7 WatchSource:0}: Error finding container aea649b89bde54780b7d21050d49ad6052192a8ae07b1797ec86e744a83ce9a7: Status 404 returned error can't find the container with id aea649b89bde54780b7d21050d49ad6052192a8ae07b1797ec86e744a83ce9a7 Nov 23 14:57:29 crc kubenswrapper[5050]: I1123 14:57:29.776540 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/68864336-7cb2-4deb-9ef9-159d78b04fc1-cert\") pod \"openstack-operator-controller-manager-669b8498dc-87h5k\" (UID: \"68864336-7cb2-4deb-9ef9-159d78b04fc1\") " pod="openstack-operators/openstack-operator-controller-manager-669b8498dc-87h5k" Nov 23 14:57:29 crc kubenswrapper[5050]: I1123 14:57:29.786030 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/68864336-7cb2-4deb-9ef9-159d78b04fc1-cert\") pod \"openstack-operator-controller-manager-669b8498dc-87h5k\" (UID: \"68864336-7cb2-4deb-9ef9-159d78b04fc1\") " pod="openstack-operators/openstack-operator-controller-manager-669b8498dc-87h5k" Nov 23 14:57:29 crc kubenswrapper[5050]: I1123 14:57:29.955625 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-99b499f4-4p57r"] Nov 23 14:57:29 crc kubenswrapper[5050]: I1123 14:57:29.962545 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-78bd47f458-nqlmd"] Nov 23 14:57:29 crc kubenswrapper[5050]: I1123 14:57:29.969959 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-669b8498dc-87h5k" Nov 23 14:57:30 crc kubenswrapper[5050]: I1123 14:57:30.135310 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-99b499f4-4p57r" event={"ID":"97c24c4c-3ca8-4c5b-8b81-a64edf49b0bb","Type":"ContainerStarted","Data":"de79ae1174b00b203fb4c945ed573ad5962b89130c5ff2ee68a73d99d2981b86"} Nov 23 14:57:30 crc kubenswrapper[5050]: I1123 14:57:30.140662 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-6498cbf48f-h5gcg" event={"ID":"b5fbd22d-ccfc-4a42-8b4b-f6566d36a8c1","Type":"ContainerStarted","Data":"aea649b89bde54780b7d21050d49ad6052192a8ae07b1797ec86e744a83ce9a7"} Nov 23 14:57:30 crc kubenswrapper[5050]: I1123 14:57:30.140819 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-598f69df5d-t4m5g" event={"ID":"17b553bf-a9b6-4807-be93-845263e46bda","Type":"ContainerStarted","Data":"708d773d0446ea51c47cb5bbb5a1e7cb0c55424441a11ebf0746a4a234ba98d3"} Nov 23 14:57:30 crc kubenswrapper[5050]: I1123 14:57:30.145059 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-56f54d6746-g2g7z" event={"ID":"96580238-85b0-481c-91f2-be0888975fb7","Type":"ContainerStarted","Data":"cc50eab00bab7a974df8df7ebba498aab8f808bf5630ce29656f18913aad670e"} Nov 23 14:57:30 crc kubenswrapper[5050]: I1123 14:57:30.147657 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-7969689c84-r5lpg" event={"ID":"8f0ad77e-339a-4a2e-a700-c52790877f4c","Type":"ContainerStarted","Data":"c3a6097978a4fff089ba8b56bc01b75e84850a7fa6bba1bf38d9f66be986b8e3"} Nov 23 14:57:30 crc kubenswrapper[5050]: I1123 14:57:30.152296 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-78bd47f458-nqlmd" event={"ID":"8c522f94-22c6-4569-a62b-18f9ed2f3b3f","Type":"ContainerStarted","Data":"e5dc07be16f6a331937918f4586604989e28dd7a090b6798c559d5c0e6bfbe56"} Nov 23 14:57:30 crc kubenswrapper[5050]: I1123 14:57:30.153886 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-75fb479bcc-2qdnf" event={"ID":"e4ff3d33-3a5f-4ba3-ae25-308b7ad6d389","Type":"ContainerStarted","Data":"58c6eb93e5944b6f52fd4c605d5947653f56d1958369880c68288f48e4a84e01"} Nov 23 14:57:30 crc kubenswrapper[5050]: I1123 14:57:30.157094 5050 generic.go:334] "Generic (PLEG): container finished" podID="1d998909-9470-47ef-87e8-b34f0473682f" containerID="1b270f8e19ea537b9c90473c9519c04a4a641a83e215048ece7d9c5619e16b60" exitCode=0 Nov 23 14:57:30 crc kubenswrapper[5050]: I1123 14:57:30.157140 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" event={"ID":"1d998909-9470-47ef-87e8-b34f0473682f","Type":"ContainerDied","Data":"1b270f8e19ea537b9c90473c9519c04a4a641a83e215048ece7d9c5619e16b60"} Nov 23 14:57:30 crc kubenswrapper[5050]: I1123 14:57:30.157166 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" event={"ID":"1d998909-9470-47ef-87e8-b34f0473682f","Type":"ContainerStarted","Data":"2f1968a148f9134c159c0b58dbe311e0c835edf7fb66133145fa860c5ae063e0"} Nov 23 14:57:30 crc kubenswrapper[5050]: I1123 14:57:30.157185 5050 scope.go:117] 
"RemoveContainer" containerID="f192bfe46cd44b6e123a66e929ac2df13733eb956bf55d1780f3aab83a1b4eec" Nov 23 14:57:30 crc kubenswrapper[5050]: I1123 14:57:30.341254 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-5b797b8dff-xc9s4"] Nov 23 14:57:30 crc kubenswrapper[5050]: I1123 14:57:30.350675 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-54cfbf4c7d-mzmmd"] Nov 23 14:57:30 crc kubenswrapper[5050]: W1123 14:57:30.361312 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5de89bc7_53c3_4bf9_a791_b9b0b6a30603.slice/crio-dff1a406386580e58c4958d38801473eff5abc10ab4a381296190aac1c9cee39 WatchSource:0}: Error finding container dff1a406386580e58c4958d38801473eff5abc10ab4a381296190aac1c9cee39: Status 404 returned error can't find the container with id dff1a406386580e58c4958d38801473eff5abc10ab4a381296190aac1c9cee39 Nov 23 14:57:30 crc kubenswrapper[5050]: W1123 14:57:30.369776 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podef8b813f_cf8f_46a0_ad63_259d5aa2dbb2.slice/crio-c182acfb06fb05369a139a65738d2c4d6c74f8fa324de5fca5ca399c02f2ac51 WatchSource:0}: Error finding container c182acfb06fb05369a139a65738d2c4d6c74f8fa324de5fca5ca399c02f2ac51: Status 404 returned error can't find the container with id c182acfb06fb05369a139a65738d2c4d6c74f8fa324de5fca5ca399c02f2ac51 Nov 23 14:57:30 crc kubenswrapper[5050]: W1123 14:57:30.374398 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod52c3e5f3_3fee_4095_97f2_e7260eb08d3d.slice/crio-3926dc2330662d3338d567473d3dadb96dd1c1c077ef50acc85a04daf3eb0f0f WatchSource:0}: Error finding container 3926dc2330662d3338d567473d3dadb96dd1c1c077ef50acc85a04daf3eb0f0f: Status 404 returned error can't find the container with id 3926dc2330662d3338d567473d3dadb96dd1c1c077ef50acc85a04daf3eb0f0f Nov 23 14:57:30 crc kubenswrapper[5050]: I1123 14:57:30.383207 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-58f887965d-4856d"] Nov 23 14:57:30 crc kubenswrapper[5050]: W1123 14:57:30.394868 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod09386696_43fd_4bd4_9afd_bba22b85c546.slice/crio-21baa8927b82b1b5a26f818649b405beb174b0d0eed6f989444ba567d8276854 WatchSource:0}: Error finding container 21baa8927b82b1b5a26f818649b405beb174b0d0eed6f989444ba567d8276854: Status 404 returned error can't find the container with id 21baa8927b82b1b5a26f818649b405beb174b0d0eed6f989444ba567d8276854 Nov 23 14:57:30 crc kubenswrapper[5050]: I1123 14:57:30.403922 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-7454b96578-spv2t"] Nov 23 14:57:30 crc kubenswrapper[5050]: I1123 14:57:30.412336 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-54b5986bb8-8tnhz"] Nov 23 14:57:30 crc kubenswrapper[5050]: I1123 14:57:30.416654 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-767ccfd65f-cp2sk"] Nov 23 14:57:30 crc kubenswrapper[5050]: I1123 14:57:30.422017 5050 kubelet.go:2428] "SyncLoop UPDATE" 
source="api" pods=["openstack-operators/ovn-operator-controller-manager-54fc5f65b7-k9c9p"] Nov 23 14:57:30 crc kubenswrapper[5050]: I1123 14:57:30.425877 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-6dd8864d7c-pc65p"] Nov 23 14:57:30 crc kubenswrapper[5050]: I1123 14:57:30.592771 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-b4c496f69-zdcb4"] Nov 23 14:57:30 crc kubenswrapper[5050]: I1123 14:57:30.614456 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-8c7444f48-zksdq"] Nov 23 14:57:30 crc kubenswrapper[5050]: I1123 14:57:30.628957 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-k4khg"] Nov 23 14:57:30 crc kubenswrapper[5050]: I1123 14:57:30.637712 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-8c6448b9f-m7f26"] Nov 23 14:57:30 crc kubenswrapper[5050]: I1123 14:57:30.643232 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-6d4bf84b58-8wkkq"] Nov 23 14:57:30 crc kubenswrapper[5050]: I1123 14:57:30.647030 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-d656998f4-fjsvj"] Nov 23 14:57:30 crc kubenswrapper[5050]: I1123 14:57:30.655567 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-cfbb9c588-d9m8q"] Nov 23 14:57:30 crc kubenswrapper[5050]: E1123 14:57:30.657071 5050 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:operator,Image:quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2,Command:[/manager],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:metrics,HostPort:0,ContainerPort:9782,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:OPERATOR_NAMESPACE,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{200 -3} {} 200m DecimalSI},memory: {{524288000 0} {} 500Mi BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-b96cz,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cluster-operator-manager-5f97d8c699-k4khg_openstack-operators(14e92b7f-abf0-4e72-b7b0-ff9b8d1e199b): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 23 14:57:30 crc kubenswrapper[5050]: E1123 14:57:30.658367 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-k4khg" podUID="14e92b7f-abf0-4e72-b7b0-ff9b8d1e199b" Nov 23 14:57:30 crc kubenswrapper[5050]: I1123 14:57:30.660147 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-669b8498dc-87h5k"] Nov 23 14:57:30 crc kubenswrapper[5050]: E1123 14:57:30.663844 5050 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/telemetry-operator@sha256:5324a6d2f76fc3041023b0cbd09a733ef2b59f310d390e4d6483d219eb96494f,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-6npjj,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod telemetry-operator-controller-manager-6d4bf84b58-8wkkq_openstack-operators(78533aec-d2a4-4691-8960-a5ca3abd34a8): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 23 14:57:30 crc kubenswrapper[5050]: W1123 14:57:30.665462 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd54a2a99_ffe4_422e_bbae_54371be5a57e.slice/crio-a7680156fb6f60d44305092090e0f3fbf9500fc722c24fdfb15254462ce0305a WatchSource:0}: Error finding container a7680156fb6f60d44305092090e0f3fbf9500fc722c24fdfb15254462ce0305a: Status 404 returned error can't find the container with id a7680156fb6f60d44305092090e0f3fbf9500fc722c24fdfb15254462ce0305a Nov 23 14:57:30 crc kubenswrapper[5050]: E1123 14:57:30.666233 5050 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/test-operator@sha256:82207e753574d4be246f86c4b074500d66cf20214aa80f0a8525cf3287a35e6d,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-q8mpf,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod test-operator-controller-manager-b4c496f69-zdcb4_openstack-operators(4b036715-70f2-493c-9e9c-27228473af65): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 23 14:57:30 crc kubenswrapper[5050]: E1123 14:57:30.669205 5050 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/nova-operator@sha256:c053e34316044f14929e16e4f0d97f9f1b24cb68b5e22b925ca74c66aaaed0a7,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-dssqx,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod nova-operator-controller-manager-cfbb9c588-d9m8q_openstack-operators(d54a2a99-ffe4-422e-bbae-54371be5a57e): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 23 14:57:30 crc kubenswrapper[5050]: W1123 14:57:30.669897 5050 manager.go:1169] Failed to 
process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc937d003_eeeb_49db_b2fe_8bc1ffde5f94.slice/crio-edb9cf0cedfaf77b4166aee365627917299aad4af7428d7bb73cde473294d91d WatchSource:0}: Error finding container edb9cf0cedfaf77b4166aee365627917299aad4af7428d7bb73cde473294d91d: Status 404 returned error can't find the container with id edb9cf0cedfaf77b4166aee365627917299aad4af7428d7bb73cde473294d91d Nov 23 14:57:30 crc kubenswrapper[5050]: W1123 14:57:30.672261 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod68864336_7cb2_4deb_9ef9_159d78b04fc1.slice/crio-cecceff1bd1e13f3db7ca2f83d2c66ed3cbfa4a23d3d21667723aac9afe4f798 WatchSource:0}: Error finding container cecceff1bd1e13f3db7ca2f83d2c66ed3cbfa4a23d3d21667723aac9afe4f798: Status 404 returned error can't find the container with id cecceff1bd1e13f3db7ca2f83d2c66ed3cbfa4a23d3d21667723aac9afe4f798 Nov 23 14:57:30 crc kubenswrapper[5050]: E1123 14:57:30.673642 5050 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/swift-operator@sha256:c0b5f124a37c1538042c0e63f0978429572e2a851d7f3a6eb80de09b86d755a0,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-w6pr7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod swift-operator-controller-manager-d656998f4-fjsvj_openstack-operators(c937d003-eeeb-49db-b2fe-8bc1ffde5f94): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 23 14:57:30 crc kubenswrapper[5050]: W1123 14:57:30.678720 5050 
manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9483bc3e_409c_426c_a0ea_34b75543ac3c.slice/crio-1265b09fd904b085e2d5f1e76ae402d9402a94081052597676bcc6245d0d38cf WatchSource:0}: Error finding container 1265b09fd904b085e2d5f1e76ae402d9402a94081052597676bcc6245d0d38cf: Status 404 returned error can't find the container with id 1265b09fd904b085e2d5f1e76ae402d9402a94081052597676bcc6245d0d38cf Nov 23 14:57:30 crc kubenswrapper[5050]: E1123 14:57:30.685969 5050 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/openstack-baremetal-operator@sha256:78852f8ba332a5756c1551c126157f735279101a0fc3277ba4aa4db3478789dd,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:true,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/openstack-k8s-operators/openstack-baremetal-operator-agent:latest,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_ANSIBLEEE_IMAGE_URL_DEFAULT,Value:quay.io/openstack-k8s-operators/openstack-ansibleee-runner:latest,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AODH_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-aodh-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AODH_EVALUATOR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-aodh-evaluator:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AODH_LISTENER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-aodh-listener:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AODH_NOTIFIER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-aodh-notifier:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_APACHE_IMAGE_URL_DEFAULT,Value:registry.redhat.io/ubi9/httpd-24:latest,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_BARBICAN_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_BARBICAN_KEYSTONE_LISTENER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-barbican-keystone-listener:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_BARBICAN_WORKER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-barbican-worker:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_CENTRAL_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ceilometer-central:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_COMPUTE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ceilometer-compute:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_IPMI_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_MYSQLD_EXPORTER_IMAGE_URL_DEFAULT,Value:quay.io/prometheus/mysqld-exporter:v0.15.1,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_NOTIFICATION_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ceilometer-notification:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_SGCORE_IMAGE_URL_DEFAULT,Value:quay.io/openstack-k8s-operators/sg-core:latest,
ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CINDER_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CINDER_BACKUP_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cinder-backup:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CINDER_SCHEDULER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cinder-scheduler:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CINDER_VOLUME_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cinder-volume:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CLOUDKITTY_API_IMAGE_URL_DEFAULT,Value:quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CLOUDKITTY_PROC_IMAGE_URL_DEFAULT,Value:quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-processor:current,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_BACKENDBIND9_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-backend-bind9:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_CENTRAL_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-central:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_MDNS_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-mdns:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_PRODUCER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-producer:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_UNBOUND_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-unbound:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_WORKER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-worker:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_FRR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-frr:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_ISCSID_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-iscsid:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_KEPLER_IMAGE_URL_DEFAULT,Value:quay.io/sustainable_computing_io/kepler:release-0.7.12,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_LOGROTATE_CROND_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cron:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_MULTIPATHD_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-multipathd:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NEUTRON_DHCP_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-dhcp-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NEUTRON_METADATA_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NEUTRON_OVN_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-ovn-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NEUTRON_SRIOV_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-sriov-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NODE_EXPORTER_IMA
GE_URL_DEFAULT,Value:quay.io/prometheus/node-exporter:v1.5.0,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_OVN_BGP_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-bgp-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_PODMAN_EXPORTER_IMAGE_URL_DEFAULT,Value:quay.io/navidys/prometheus-podman-exporter:v1.10.1,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_GLANCE_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-glance-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_HEAT_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-heat-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_HEAT_CFNAPI_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-heat-api-cfn:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_HEAT_ENGINE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-heat-engine:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_HORIZON_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-horizon:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_INFRA_MEMCACHED_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-memcached:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_INFRA_REDIS_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-redis:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_CONDUCTOR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-conductor:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_INSPECTOR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-inspector:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_NEUTRON_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-neutron-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_PXE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-pxe:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_PYTHON_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/ironic-python-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_KEYSTONE_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-keystone:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_KSM_IMAGE_URL_DEFAULT,Value:registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.15.0,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_MANILA_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-manila-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_MANILA_SCHEDULER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-manila-scheduler:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_MANILA_SHARE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-manila-share:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_MARIADB_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-mariadb:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NET_UTILS_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-netutils:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NEUTRON_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,ValueFrom:nil,},EnvVar{Na
me:RELATED_IMAGE_NOVA_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_COMPUTE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-compute:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_CONDUCTOR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-conductor:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_NOVNC_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-novncproxy:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_SCHEDULER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-scheduler:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-octavia-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_HEALTHMANAGER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-octavia-health-manager:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_HOUSEKEEPING_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-octavia-housekeeping:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_RSYSLOG_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-rsyslog:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_WORKER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-octavia-worker:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OPENSTACK_CLIENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-openstackclient:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OPENSTACK_MUST_GATHER_DEFAULT,Value:quay.io/openstack-k8s-operators/openstack-must-gather:latest,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OPENSTACK_NETWORK_EXPORTER_IMAGE_URL_DEFAULT,Value:quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OS_CONTAINER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/edpm-hardened-uefi:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_CONTROLLER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_CONTROLLER_OVS_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-base:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_NB_DBCLUSTER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-nb-db-server:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_NORTHD_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-northd:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_SB_DBCLUSTER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-sb-db-server:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_PLACEMENT_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-placement-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_RABBITMQ_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_SWIFT_ACCOUNT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-swift-account:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_SWIFT_CONTAINER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-swift-container:current-podified,
ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_SWIFT_OBJECT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-swift-object:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_SWIFT_PROXY_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-swift-proxy-server:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_TEST_TEMPEST_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-tempest-all:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_WATCHER_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-master-centos9/openstack-watcher-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_WATCHER_APPLIER_IMAGE_URL_DEFAULT,Value:quay.io/podified-master-centos9/openstack-watcher-applier:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_WATCHER_DECISION_ENGINE_IMAGE_URL_DEFAULT,Value:quay.io/podified-master-centos9/openstack-watcher-decision-engine:current-podified,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:cert,ReadOnly:true,MountPath:/tmp/k8s-webhook-server/serving-certs,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-dpc9f,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod openstack-baremetal-operator-controller-manager-8c7444f48-zksdq_openstack-operators(9483bc3e-409c-426c-a0ea-34b75543ac3c): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 23 14:57:31 crc kubenswrapper[5050]: E1123 14:57:31.038766 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/swift-operator-controller-manager-d656998f4-fjsvj" podUID="c937d003-eeeb-49db-b2fe-8bc1ffde5f94" Nov 23 14:57:31 crc kubenswrapper[5050]: E1123 14:57:31.040378 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" 
pod="openstack-operators/nova-operator-controller-manager-cfbb9c588-d9m8q" podUID="d54a2a99-ffe4-422e-bbae-54371be5a57e" Nov 23 14:57:31 crc kubenswrapper[5050]: E1123 14:57:31.137190 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/telemetry-operator-controller-manager-6d4bf84b58-8wkkq" podUID="78533aec-d2a4-4691-8960-a5ca3abd34a8" Nov 23 14:57:31 crc kubenswrapper[5050]: E1123 14:57:31.192702 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/test-operator-controller-manager-b4c496f69-zdcb4" podUID="4b036715-70f2-493c-9e9c-27228473af65" Nov 23 14:57:31 crc kubenswrapper[5050]: E1123 14:57:31.248319 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/openstack-baremetal-operator-controller-manager-8c7444f48-zksdq" podUID="9483bc3e-409c-426c-a0ea-34b75543ac3c" Nov 23 14:57:31 crc kubenswrapper[5050]: I1123 14:57:31.254217 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-58f887965d-4856d" event={"ID":"ef8b813f-cf8f-46a0-ad63-259d5aa2dbb2","Type":"ContainerStarted","Data":"c182acfb06fb05369a139a65738d2c4d6c74f8fa324de5fca5ca399c02f2ac51"} Nov 23 14:57:31 crc kubenswrapper[5050]: I1123 14:57:31.277583 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-b4c496f69-zdcb4" event={"ID":"4b036715-70f2-493c-9e9c-27228473af65","Type":"ContainerStarted","Data":"dd56c6916634aa56d5e0f55feeecb865d7399c0514f432c16bd27af097bb145f"} Nov 23 14:57:31 crc kubenswrapper[5050]: I1123 14:57:31.277658 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-b4c496f69-zdcb4" event={"ID":"4b036715-70f2-493c-9e9c-27228473af65","Type":"ContainerStarted","Data":"8454279941dd493dc3ec4b2aaae10ec7c924c95b4ee541228155d4814f33425c"} Nov 23 14:57:31 crc kubenswrapper[5050]: E1123 14:57:31.285402 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/test-operator@sha256:82207e753574d4be246f86c4b074500d66cf20214aa80f0a8525cf3287a35e6d\\\"\"" pod="openstack-operators/test-operator-controller-manager-b4c496f69-zdcb4" podUID="4b036715-70f2-493c-9e9c-27228473af65" Nov 23 14:57:31 crc kubenswrapper[5050]: I1123 14:57:31.285686 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-cfbb9c588-d9m8q" event={"ID":"d54a2a99-ffe4-422e-bbae-54371be5a57e","Type":"ContainerStarted","Data":"d70b9e81b7343fe5749bc0f8a4ba7319dc0f4d75d2d9a129a1df11b4daac368b"} Nov 23 14:57:31 crc kubenswrapper[5050]: I1123 14:57:31.285707 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-cfbb9c588-d9m8q" event={"ID":"d54a2a99-ffe4-422e-bbae-54371be5a57e","Type":"ContainerStarted","Data":"a7680156fb6f60d44305092090e0f3fbf9500fc722c24fdfb15254462ce0305a"} Nov 23 14:57:31 crc kubenswrapper[5050]: E1123 14:57:31.288660 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.io/openstack-k8s-operators/nova-operator@sha256:c053e34316044f14929e16e4f0d97f9f1b24cb68b5e22b925ca74c66aaaed0a7\\\"\"" pod="openstack-operators/nova-operator-controller-manager-cfbb9c588-d9m8q" podUID="d54a2a99-ffe4-422e-bbae-54371be5a57e" Nov 23 14:57:31 crc kubenswrapper[5050]: I1123 14:57:31.289784 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-7454b96578-spv2t" event={"ID":"e9c42c8d-d147-4b90-bcb4-5b2771121504","Type":"ContainerStarted","Data":"0ad301dff13e5789d7fb8ca5896104f63b2cdd96316251733386461e9b493e82"} Nov 23 14:57:31 crc kubenswrapper[5050]: I1123 14:57:31.346824 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-8c7444f48-zksdq" event={"ID":"9483bc3e-409c-426c-a0ea-34b75543ac3c","Type":"ContainerStarted","Data":"1265b09fd904b085e2d5f1e76ae402d9402a94081052597676bcc6245d0d38cf"} Nov 23 14:57:31 crc kubenswrapper[5050]: E1123 14:57:31.348518 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/openstack-baremetal-operator@sha256:78852f8ba332a5756c1551c126157f735279101a0fc3277ba4aa4db3478789dd\\\"\"" pod="openstack-operators/openstack-baremetal-operator-controller-manager-8c7444f48-zksdq" podUID="9483bc3e-409c-426c-a0ea-34b75543ac3c" Nov 23 14:57:31 crc kubenswrapper[5050]: I1123 14:57:31.386763 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-5b797b8dff-xc9s4" event={"ID":"5de89bc7-53c3-4bf9-a791-b9b0b6a30603","Type":"ContainerStarted","Data":"dff1a406386580e58c4958d38801473eff5abc10ab4a381296190aac1c9cee39"} Nov 23 14:57:31 crc kubenswrapper[5050]: I1123 14:57:31.409493 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-k4khg" event={"ID":"14e92b7f-abf0-4e72-b7b0-ff9b8d1e199b","Type":"ContainerStarted","Data":"65adc997fa519c38617910b46fa49f05defbce7f21698a2b054c32cb559f1957"} Nov 23 14:57:31 crc kubenswrapper[5050]: I1123 14:57:31.436598 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-6d4bf84b58-8wkkq" event={"ID":"78533aec-d2a4-4691-8960-a5ca3abd34a8","Type":"ContainerStarted","Data":"c0021ed0a5a9cc86bbaf6cf58afadb16e9bceff933cb0d4f107bbdaddb8419e0"} Nov 23 14:57:31 crc kubenswrapper[5050]: I1123 14:57:31.436660 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-6d4bf84b58-8wkkq" event={"ID":"78533aec-d2a4-4691-8960-a5ca3abd34a8","Type":"ContainerStarted","Data":"3449217155e6b418c5ed8083c21daf2ceb3cd67ea728528e7eae43a7910d08a0"} Nov 23 14:57:31 crc kubenswrapper[5050]: I1123 14:57:31.445915 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-54b5986bb8-8tnhz" event={"ID":"1b42304a-8ccd-4015-9e74-295ea7abd0fd","Type":"ContainerStarted","Data":"89aff97285ea8635a7e1770278e5475e5f2675e01ed6a25a05344880330cba98"} Nov 23 14:57:31 crc kubenswrapper[5050]: I1123 14:57:31.475222 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-8c6448b9f-m7f26" event={"ID":"9e9d6f21-c5ab-438c-99ee-433f1eaa9093","Type":"ContainerStarted","Data":"349df5fc2b6bb3269abb0004eeb0840da06a209800aa5195e106935b94644fc4"} 
Nov 23 14:57:31 crc kubenswrapper[5050]: E1123 14:57:31.476333 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/telemetry-operator@sha256:5324a6d2f76fc3041023b0cbd09a733ef2b59f310d390e4d6483d219eb96494f\\\"\"" pod="openstack-operators/telemetry-operator-controller-manager-6d4bf84b58-8wkkq" podUID="78533aec-d2a4-4691-8960-a5ca3abd34a8" Nov 23 14:57:31 crc kubenswrapper[5050]: E1123 14:57:31.480808 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-k4khg" podUID="14e92b7f-abf0-4e72-b7b0-ff9b8d1e199b" Nov 23 14:57:31 crc kubenswrapper[5050]: I1123 14:57:31.491967 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-54fc5f65b7-k9c9p" event={"ID":"52c3e5f3-3fee-4095-97f2-e7260eb08d3d","Type":"ContainerStarted","Data":"3926dc2330662d3338d567473d3dadb96dd1c1c077ef50acc85a04daf3eb0f0f"} Nov 23 14:57:31 crc kubenswrapper[5050]: I1123 14:57:31.511681 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-d656998f4-fjsvj" event={"ID":"c937d003-eeeb-49db-b2fe-8bc1ffde5f94","Type":"ContainerStarted","Data":"91c8ca1286d09339300cdcd18ad52638313c52cbc5b7557fb51d716e2f3236ed"} Nov 23 14:57:31 crc kubenswrapper[5050]: I1123 14:57:31.511981 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-d656998f4-fjsvj" event={"ID":"c937d003-eeeb-49db-b2fe-8bc1ffde5f94","Type":"ContainerStarted","Data":"edb9cf0cedfaf77b4166aee365627917299aad4af7428d7bb73cde473294d91d"} Nov 23 14:57:31 crc kubenswrapper[5050]: E1123 14:57:31.519678 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/swift-operator@sha256:c0b5f124a37c1538042c0e63f0978429572e2a851d7f3a6eb80de09b86d755a0\\\"\"" pod="openstack-operators/swift-operator-controller-manager-d656998f4-fjsvj" podUID="c937d003-eeeb-49db-b2fe-8bc1ffde5f94" Nov 23 14:57:31 crc kubenswrapper[5050]: I1123 14:57:31.525074 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-6dd8864d7c-pc65p" event={"ID":"09386696-43fd-4bd4-9afd-bba22b85c546","Type":"ContainerStarted","Data":"21baa8927b82b1b5a26f818649b405beb174b0d0eed6f989444ba567d8276854"} Nov 23 14:57:31 crc kubenswrapper[5050]: I1123 14:57:31.531021 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-767ccfd65f-cp2sk" event={"ID":"0f64e767-a4c5-4f8c-9dc9-0c09328b5d83","Type":"ContainerStarted","Data":"177655d56c1bf6b8d641d5e90620ab26ed93c8ca0cc9a38104d2b28e0907a75c"} Nov 23 14:57:31 crc kubenswrapper[5050]: I1123 14:57:31.545539 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-669b8498dc-87h5k" event={"ID":"68864336-7cb2-4deb-9ef9-159d78b04fc1","Type":"ContainerStarted","Data":"8e67334a934af6ba827a1cd0944d34430750c3f0f2d4747bec6e0e399dfdf214"} Nov 23 14:57:31 crc 
kubenswrapper[5050]: I1123 14:57:31.545584 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-669b8498dc-87h5k" event={"ID":"68864336-7cb2-4deb-9ef9-159d78b04fc1","Type":"ContainerStarted","Data":"cecceff1bd1e13f3db7ca2f83d2c66ed3cbfa4a23d3d21667723aac9afe4f798"} Nov 23 14:57:31 crc kubenswrapper[5050]: I1123 14:57:31.547332 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-manager-669b8498dc-87h5k" Nov 23 14:57:31 crc kubenswrapper[5050]: I1123 14:57:31.593587 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-54cfbf4c7d-mzmmd" event={"ID":"b886cbd7-ef95-46b5-b817-d041b5b96cec","Type":"ContainerStarted","Data":"5f1fbf41998c2febcd8bcb7429eb7f7d9cf3d44038dfce7dd4b5b7eac23f4bf9"} Nov 23 14:57:32 crc kubenswrapper[5050]: I1123 14:57:32.612948 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-669b8498dc-87h5k" event={"ID":"68864336-7cb2-4deb-9ef9-159d78b04fc1","Type":"ContainerStarted","Data":"8c9432abe65b10f46075e0bf22e5f9d43632ec41324e5c73997e31ff511fb2ba"} Nov 23 14:57:32 crc kubenswrapper[5050]: I1123 14:57:32.616654 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-8c7444f48-zksdq" event={"ID":"9483bc3e-409c-426c-a0ea-34b75543ac3c","Type":"ContainerStarted","Data":"be1b6399a8d7af3952eb8c257e563b91f3e13199f5777e7b66538a50bbeced16"} Nov 23 14:57:32 crc kubenswrapper[5050]: E1123 14:57:32.619532 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/swift-operator@sha256:c0b5f124a37c1538042c0e63f0978429572e2a851d7f3a6eb80de09b86d755a0\\\"\"" pod="openstack-operators/swift-operator-controller-manager-d656998f4-fjsvj" podUID="c937d003-eeeb-49db-b2fe-8bc1ffde5f94" Nov 23 14:57:32 crc kubenswrapper[5050]: E1123 14:57:32.619559 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-k4khg" podUID="14e92b7f-abf0-4e72-b7b0-ff9b8d1e199b" Nov 23 14:57:32 crc kubenswrapper[5050]: E1123 14:57:32.619917 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/nova-operator@sha256:c053e34316044f14929e16e4f0d97f9f1b24cb68b5e22b925ca74c66aaaed0a7\\\"\"" pod="openstack-operators/nova-operator-controller-manager-cfbb9c588-d9m8q" podUID="d54a2a99-ffe4-422e-bbae-54371be5a57e" Nov 23 14:57:32 crc kubenswrapper[5050]: E1123 14:57:32.621076 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/telemetry-operator@sha256:5324a6d2f76fc3041023b0cbd09a733ef2b59f310d390e4d6483d219eb96494f\\\"\"" pod="openstack-operators/telemetry-operator-controller-manager-6d4bf84b58-8wkkq" podUID="78533aec-d2a4-4691-8960-a5ca3abd34a8" Nov 23 14:57:32 crc kubenswrapper[5050]: E1123 
14:57:32.621205 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/openstack-baremetal-operator@sha256:78852f8ba332a5756c1551c126157f735279101a0fc3277ba4aa4db3478789dd\\\"\"" pod="openstack-operators/openstack-baremetal-operator-controller-manager-8c7444f48-zksdq" podUID="9483bc3e-409c-426c-a0ea-34b75543ac3c" Nov 23 14:57:32 crc kubenswrapper[5050]: E1123 14:57:32.625522 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/test-operator@sha256:82207e753574d4be246f86c4b074500d66cf20214aa80f0a8525cf3287a35e6d\\\"\"" pod="openstack-operators/test-operator-controller-manager-b4c496f69-zdcb4" podUID="4b036715-70f2-493c-9e9c-27228473af65" Nov 23 14:57:32 crc kubenswrapper[5050]: I1123 14:57:32.651965 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-manager-669b8498dc-87h5k" podStartSLOduration=4.651938817 podStartE2EDuration="4.651938817s" podCreationTimestamp="2025-11-23 14:57:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 14:57:31.589890839 +0000 UTC m=+946.756887314" watchObservedRunningTime="2025-11-23 14:57:32.651938817 +0000 UTC m=+947.818935312" Nov 23 14:57:33 crc kubenswrapper[5050]: E1123 14:57:33.626231 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/openstack-baremetal-operator@sha256:78852f8ba332a5756c1551c126157f735279101a0fc3277ba4aa4db3478789dd\\\"\"" pod="openstack-operators/openstack-baremetal-operator-controller-manager-8c7444f48-zksdq" podUID="9483bc3e-409c-426c-a0ea-34b75543ac3c" Nov 23 14:57:39 crc kubenswrapper[5050]: I1123 14:57:39.977298 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-manager-669b8498dc-87h5k" Nov 23 14:57:43 crc kubenswrapper[5050]: E1123 14:57:43.496891 5050 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/octavia-operator@sha256:442c269d79163f8da75505019c02e9f0815837aaadcaddacb8e6c12df297ca13" Nov 23 14:57:43 crc kubenswrapper[5050]: E1123 14:57:43.497543 5050 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/octavia-operator@sha256:442c269d79163f8da75505019c02e9f0815837aaadcaddacb8e6c12df297ca13,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-qwp8g,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod octavia-operator-controller-manager-54cfbf4c7d-mzmmd_openstack-operators(b886cbd7-ef95-46b5-b817-d041b5b96cec): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 23 14:57:51 crc kubenswrapper[5050]: E1123 14:57:51.473548 5050 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/watcher-operator@sha256:4838402d41d42c56613d43dc5041aae475a2b18e6172491d6c4d4a78a580697f" Nov 23 14:57:51 crc kubenswrapper[5050]: E1123 14:57:51.474264 5050 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/watcher-operator@sha256:4838402d41d42c56613d43dc5041aae475a2b18e6172491d6c4d4a78a580697f,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-65v2d,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod watcher-operator-controller-manager-8c6448b9f-m7f26_openstack-operators(9e9d6f21-c5ab-438c-99ee-433f1eaa9093): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 23 14:57:51 crc kubenswrapper[5050]: E1123 14:57:51.482830 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/octavia-operator-controller-manager-54cfbf4c7d-mzmmd" podUID="b886cbd7-ef95-46b5-b817-d041b5b96cec" Nov 23 14:57:51 crc kubenswrapper[5050]: I1123 14:57:51.830857 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-78bd47f458-nqlmd" event={"ID":"8c522f94-22c6-4569-a62b-18f9ed2f3b3f","Type":"ContainerStarted","Data":"7c53b2daf873ac48c230ba752f5d1cad0167ec54cc3f731b494a2d1c64be1df1"} Nov 23 14:57:51 crc kubenswrapper[5050]: I1123 14:57:51.887967 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-767ccfd65f-cp2sk" event={"ID":"0f64e767-a4c5-4f8c-9dc9-0c09328b5d83","Type":"ContainerStarted","Data":"95979162dac06a7d295c0cb26354a0687c3ffca3cdf2e995eb904659c402051b"} Nov 23 14:57:51 crc kubenswrapper[5050]: I1123 14:57:51.900532 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-598f69df5d-t4m5g" event={"ID":"17b553bf-a9b6-4807-be93-845263e46bda","Type":"ContainerStarted","Data":"3f2201342183a3ac28308c48bff1a149aee2c6236acd37195665ea22b7ab8d85"} Nov 23 14:57:51 crc kubenswrapper[5050]: I1123 14:57:51.930881 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-7969689c84-r5lpg" event={"ID":"8f0ad77e-339a-4a2e-a700-c52790877f4c","Type":"ContainerStarted","Data":"c17ac2d42188d2479d2cd80bcca853c20adc06cb9239a24e3fea3dc71ca419ef"} Nov 23 14:57:51 crc kubenswrapper[5050]: I1123 14:57:51.973716 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-58f887965d-4856d" event={"ID":"ef8b813f-cf8f-46a0-ad63-259d5aa2dbb2","Type":"ContainerStarted","Data":"3bb9b45245a8de9deac8703ae1e6b5458a7333801c4131fc34d024b6e53841eb"} Nov 23 14:57:51 crc kubenswrapper[5050]: I1123 14:57:51.984756 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-54cfbf4c7d-mzmmd" event={"ID":"b886cbd7-ef95-46b5-b817-d041b5b96cec","Type":"ContainerStarted","Data":"244a1d47d785ef9e07ad29f5e6c4570e3b15da76f6014af49b0cb2b4776c79d4"} Nov 23 14:57:52 crc kubenswrapper[5050]: I1123 
14:57:52.004100 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-5b797b8dff-xc9s4" event={"ID":"5de89bc7-53c3-4bf9-a791-b9b0b6a30603","Type":"ContainerStarted","Data":"ddff67139b9f3a35fb9a761baed338469ed0a25b939b5b2d5f1127a2bf0511b5"} Nov 23 14:57:52 crc kubenswrapper[5050]: E1123 14:57:52.022598 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/octavia-operator@sha256:442c269d79163f8da75505019c02e9f0815837aaadcaddacb8e6c12df297ca13\\\"\"" pod="openstack-operators/octavia-operator-controller-manager-54cfbf4c7d-mzmmd" podUID="b886cbd7-ef95-46b5-b817-d041b5b96cec" Nov 23 14:57:52 crc kubenswrapper[5050]: I1123 14:57:52.028228 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-6dd8864d7c-pc65p" event={"ID":"09386696-43fd-4bd4-9afd-bba22b85c546","Type":"ContainerStarted","Data":"046b44db283a500867306ab20a8a16cf027597c977d78bf7f8655d5a0cdae565"} Nov 23 14:57:52 crc kubenswrapper[5050]: I1123 14:57:52.051146 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-54fc5f65b7-k9c9p" event={"ID":"52c3e5f3-3fee-4095-97f2-e7260eb08d3d","Type":"ContainerStarted","Data":"998c4830c0461d948da8846300f9f36cbe3897d5633e3cc779ce1c3c87d983bb"} Nov 23 14:57:52 crc kubenswrapper[5050]: I1123 14:57:52.091063 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-6498cbf48f-h5gcg" event={"ID":"b5fbd22d-ccfc-4a42-8b4b-f6566d36a8c1","Type":"ContainerStarted","Data":"e922d82000142ab7533a27231701481fdbcf21e714c07ceda0e817d54410b1d1"} Nov 23 14:57:52 crc kubenswrapper[5050]: I1123 14:57:52.124020 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-56f54d6746-g2g7z" event={"ID":"96580238-85b0-481c-91f2-be0888975fb7","Type":"ContainerStarted","Data":"65aea2c640f3b44c74b2976defe7645bbf8394ffdc00615b3b94740aabc556e9"} Nov 23 14:57:52 crc kubenswrapper[5050]: I1123 14:57:52.124407 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/heat-operator-controller-manager-56f54d6746-g2g7z" Nov 23 14:57:52 crc kubenswrapper[5050]: I1123 14:57:52.161409 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-99b499f4-4p57r" event={"ID":"97c24c4c-3ca8-4c5b-8b81-a64edf49b0bb","Type":"ContainerStarted","Data":"c2a20e31da4adf1bb5527d5489030294091f78c6cdfcfff4650e71fffb9211a6"} Nov 23 14:57:52 crc kubenswrapper[5050]: I1123 14:57:52.181176 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/heat-operator-controller-manager-56f54d6746-g2g7z" podStartSLOduration=4.929147809 podStartE2EDuration="25.181151286s" podCreationTimestamp="2025-11-23 14:57:27 +0000 UTC" firstStartedPulling="2025-11-23 14:57:29.407757706 +0000 UTC m=+944.574754191" lastFinishedPulling="2025-11-23 14:57:49.659761173 +0000 UTC m=+964.826757668" observedRunningTime="2025-11-23 14:57:52.173271638 +0000 UTC m=+967.340268123" watchObservedRunningTime="2025-11-23 14:57:52.181151286 +0000 UTC m=+967.348147761" Nov 23 14:57:52 crc kubenswrapper[5050]: I1123 14:57:52.189303 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack-operators/mariadb-operator-controller-manager-54b5986bb8-8tnhz" event={"ID":"1b42304a-8ccd-4015-9e74-295ea7abd0fd","Type":"ContainerStarted","Data":"5f26047cdfb7fa554317460a7c4280a97d0f2b215ccb8db556997fc0c27bd84d"} Nov 23 14:57:52 crc kubenswrapper[5050]: I1123 14:57:52.189894 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-controller-manager-54b5986bb8-8tnhz" Nov 23 14:57:52 crc kubenswrapper[5050]: I1123 14:57:52.202703 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-75fb479bcc-2qdnf" event={"ID":"e4ff3d33-3a5f-4ba3-ae25-308b7ad6d389","Type":"ContainerStarted","Data":"6110cf858d9ffd0e24e4240dd710ca6e38d1c2692c69c2b08a6290329d4af06a"} Nov 23 14:57:52 crc kubenswrapper[5050]: I1123 14:57:52.216595 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-7454b96578-spv2t" event={"ID":"e9c42c8d-d147-4b90-bcb4-5b2771121504","Type":"ContainerStarted","Data":"03151d7605dc09314005056bd27c4edc973bb5a28e2779cddcb9447b7e9f159b"} Nov 23 14:57:52 crc kubenswrapper[5050]: I1123 14:57:52.243808 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/mariadb-operator-controller-manager-54b5986bb8-8tnhz" podStartSLOduration=4.160611216 podStartE2EDuration="24.243782613s" podCreationTimestamp="2025-11-23 14:57:28 +0000 UTC" firstStartedPulling="2025-11-23 14:57:30.379328499 +0000 UTC m=+945.546324984" lastFinishedPulling="2025-11-23 14:57:50.462499906 +0000 UTC m=+965.629496381" observedRunningTime="2025-11-23 14:57:52.24015492 +0000 UTC m=+967.407151425" watchObservedRunningTime="2025-11-23 14:57:52.243782613 +0000 UTC m=+967.410779098" Nov 23 14:57:52 crc kubenswrapper[5050]: E1123 14:57:52.923670 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/watcher-operator-controller-manager-8c6448b9f-m7f26" podUID="9e9d6f21-c5ab-438c-99ee-433f1eaa9093" Nov 23 14:57:53 crc kubenswrapper[5050]: I1123 14:57:53.313693 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-598f69df5d-t4m5g" event={"ID":"17b553bf-a9b6-4807-be93-845263e46bda","Type":"ContainerStarted","Data":"5da41ea94b49491719445375b942f1474d7808551ba68df5b415a62f9bdac81a"} Nov 23 14:57:53 crc kubenswrapper[5050]: I1123 14:57:53.315007 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/horizon-operator-controller-manager-598f69df5d-t4m5g" Nov 23 14:57:53 crc kubenswrapper[5050]: I1123 14:57:53.319437 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-7454b96578-spv2t" event={"ID":"e9c42c8d-d147-4b90-bcb4-5b2771121504","Type":"ContainerStarted","Data":"69e385e5b874632aecc21aa8bebc7e01ac886505afe1d74b4596a199f50ec46e"} Nov 23 14:57:53 crc kubenswrapper[5050]: I1123 14:57:53.320034 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-controller-manager-7454b96578-spv2t" Nov 23 14:57:53 crc kubenswrapper[5050]: I1123 14:57:53.323389 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-5b797b8dff-xc9s4" 
event={"ID":"5de89bc7-53c3-4bf9-a791-b9b0b6a30603","Type":"ContainerStarted","Data":"166f255fc5b614652c4fc054123e7b5426fb14964d2731d1cb13a4dde3678811"} Nov 23 14:57:53 crc kubenswrapper[5050]: I1123 14:57:53.323900 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/placement-operator-controller-manager-5b797b8dff-xc9s4" Nov 23 14:57:53 crc kubenswrapper[5050]: I1123 14:57:53.326385 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-78bd47f458-nqlmd" event={"ID":"8c522f94-22c6-4569-a62b-18f9ed2f3b3f","Type":"ContainerStarted","Data":"6ee106d76234eb7249c8bcffff968d0d99b642ecb54cb9d64895ad8260b13717"} Nov 23 14:57:53 crc kubenswrapper[5050]: I1123 14:57:53.326945 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/neutron-operator-controller-manager-78bd47f458-nqlmd" Nov 23 14:57:53 crc kubenswrapper[5050]: I1123 14:57:53.332737 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-75fb479bcc-2qdnf" event={"ID":"e4ff3d33-3a5f-4ba3-ae25-308b7ad6d389","Type":"ContainerStarted","Data":"e7dc2bb6a6b5f012bc32670f75339b1c5ed9657a87fda4b16ac3a240e1b6e55f"} Nov 23 14:57:53 crc kubenswrapper[5050]: I1123 14:57:53.333334 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/barbican-operator-controller-manager-75fb479bcc-2qdnf" Nov 23 14:57:53 crc kubenswrapper[5050]: I1123 14:57:53.337230 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-99b499f4-4p57r" event={"ID":"97c24c4c-3ca8-4c5b-8b81-a64edf49b0bb","Type":"ContainerStarted","Data":"24e0924e1bf5ee1384ab4d3ddc5c550bc4c9578cfca104f7a2d86d70a4113320"} Nov 23 14:57:53 crc kubenswrapper[5050]: I1123 14:57:53.337313 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ironic-operator-controller-manager-99b499f4-4p57r" Nov 23 14:57:53 crc kubenswrapper[5050]: I1123 14:57:53.339310 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-54fc5f65b7-k9c9p" event={"ID":"52c3e5f3-3fee-4095-97f2-e7260eb08d3d","Type":"ContainerStarted","Data":"46d658b443ce720d73fd86de2edf8776f1ff4fae347bddc642199a8865f67f32"} Nov 23 14:57:53 crc kubenswrapper[5050]: I1123 14:57:53.339910 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/horizon-operator-controller-manager-598f69df5d-t4m5g" podStartSLOduration=5.53596284 podStartE2EDuration="26.339895786s" podCreationTimestamp="2025-11-23 14:57:27 +0000 UTC" firstStartedPulling="2025-11-23 14:57:29.651945508 +0000 UTC m=+944.818941993" lastFinishedPulling="2025-11-23 14:57:50.455878424 +0000 UTC m=+965.622874939" observedRunningTime="2025-11-23 14:57:53.336640731 +0000 UTC m=+968.503637216" watchObservedRunningTime="2025-11-23 14:57:53.339895786 +0000 UTC m=+968.506892271" Nov 23 14:57:53 crc kubenswrapper[5050]: I1123 14:57:53.340315 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ovn-operator-controller-manager-54fc5f65b7-k9c9p" Nov 23 14:57:53 crc kubenswrapper[5050]: I1123 14:57:53.356921 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-7969689c84-r5lpg" 
event={"ID":"8f0ad77e-339a-4a2e-a700-c52790877f4c","Type":"ContainerStarted","Data":"c524cfc3914f0466bcf1ac866d8044bec600984a6941b28834816857bc48e5d9"} Nov 23 14:57:53 crc kubenswrapper[5050]: I1123 14:57:53.357251 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/glance-operator-controller-manager-7969689c84-r5lpg" Nov 23 14:57:53 crc kubenswrapper[5050]: I1123 14:57:53.360094 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-8c6448b9f-m7f26" event={"ID":"9e9d6f21-c5ab-438c-99ee-433f1eaa9093","Type":"ContainerStarted","Data":"862ca3fbca1385ee390ae89ef82f797ae5c249c3c9a171967db464947d78299e"} Nov 23 14:57:53 crc kubenswrapper[5050]: E1123 14:57:53.363916 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/watcher-operator@sha256:4838402d41d42c56613d43dc5041aae475a2b18e6172491d6c4d4a78a580697f\\\"\"" pod="openstack-operators/watcher-operator-controller-manager-8c6448b9f-m7f26" podUID="9e9d6f21-c5ab-438c-99ee-433f1eaa9093" Nov 23 14:57:53 crc kubenswrapper[5050]: I1123 14:57:53.372622 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-58f887965d-4856d" event={"ID":"ef8b813f-cf8f-46a0-ad63-259d5aa2dbb2","Type":"ContainerStarted","Data":"434b259c4f2706c97dacf112e7b49d3cafdce52b32d90d4ba378162095869f27"} Nov 23 14:57:53 crc kubenswrapper[5050]: I1123 14:57:53.374812 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/placement-operator-controller-manager-5b797b8dff-xc9s4" podStartSLOduration=5.286753811 podStartE2EDuration="25.374779366s" podCreationTimestamp="2025-11-23 14:57:28 +0000 UTC" firstStartedPulling="2025-11-23 14:57:30.365714916 +0000 UTC m=+945.532711401" lastFinishedPulling="2025-11-23 14:57:50.453740441 +0000 UTC m=+965.620736956" observedRunningTime="2025-11-23 14:57:53.355940218 +0000 UTC m=+968.522936703" watchObservedRunningTime="2025-11-23 14:57:53.374779366 +0000 UTC m=+968.541775851" Nov 23 14:57:53 crc kubenswrapper[5050]: I1123 14:57:53.383150 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-767ccfd65f-cp2sk" event={"ID":"0f64e767-a4c5-4f8c-9dc9-0c09328b5d83","Type":"ContainerStarted","Data":"85998a328f757d712405820aebf3b40b30e8305c336dc2fe1320934a0c089d22"} Nov 23 14:57:53 crc kubenswrapper[5050]: I1123 14:57:53.384162 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/designate-operator-controller-manager-767ccfd65f-cp2sk" Nov 23 14:57:53 crc kubenswrapper[5050]: I1123 14:57:53.393449 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/barbican-operator-controller-manager-75fb479bcc-2qdnf" podStartSLOduration=5.286734597 podStartE2EDuration="26.39342405s" podCreationTimestamp="2025-11-23 14:57:27 +0000 UTC" firstStartedPulling="2025-11-23 14:57:29.348997697 +0000 UTC m=+944.515994182" lastFinishedPulling="2025-11-23 14:57:50.45568715 +0000 UTC m=+965.622683635" observedRunningTime="2025-11-23 14:57:53.387666235 +0000 UTC m=+968.554662730" watchObservedRunningTime="2025-11-23 14:57:53.39342405 +0000 UTC m=+968.560420535" Nov 23 14:57:53 crc kubenswrapper[5050]: I1123 14:57:53.397643 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack-operators/cinder-operator-controller-manager-6498cbf48f-h5gcg" event={"ID":"b5fbd22d-ccfc-4a42-8b4b-f6566d36a8c1","Type":"ContainerStarted","Data":"0547f7d51dcef2c1577dd12d8797dbd0247e8df7695d876d8ad1b04bcd9536f4"} Nov 23 14:57:53 crc kubenswrapper[5050]: I1123 14:57:53.397722 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/cinder-operator-controller-manager-6498cbf48f-h5gcg" Nov 23 14:57:53 crc kubenswrapper[5050]: I1123 14:57:53.400948 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-56f54d6746-g2g7z" event={"ID":"96580238-85b0-481c-91f2-be0888975fb7","Type":"ContainerStarted","Data":"7186bd2b80b85a679cb98f7f64c66cffa10aff2fcf93c13ab3be471dfd2a2171"} Nov 23 14:57:53 crc kubenswrapper[5050]: I1123 14:57:53.406162 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-54b5986bb8-8tnhz" event={"ID":"1b42304a-8ccd-4015-9e74-295ea7abd0fd","Type":"ContainerStarted","Data":"1ad8cba80c8e47c1cadd0ef7604e423d54fcedd408cd530a369b820db2ee2ae0"} Nov 23 14:57:53 crc kubenswrapper[5050]: I1123 14:57:53.409294 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-6dd8864d7c-pc65p" event={"ID":"09386696-43fd-4bd4-9afd-bba22b85c546","Type":"ContainerStarted","Data":"e787bfca83dc0156c432980b0c2899594f2a04c92e9319d6c576feb48c1732b6"} Nov 23 14:57:53 crc kubenswrapper[5050]: I1123 14:57:53.409391 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-controller-manager-6dd8864d7c-pc65p" Nov 23 14:57:53 crc kubenswrapper[5050]: E1123 14:57:53.415500 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/octavia-operator@sha256:442c269d79163f8da75505019c02e9f0815837aaadcaddacb8e6c12df297ca13\\\"\"" pod="openstack-operators/octavia-operator-controller-manager-54cfbf4c7d-mzmmd" podUID="b886cbd7-ef95-46b5-b817-d041b5b96cec" Nov 23 14:57:53 crc kubenswrapper[5050]: I1123 14:57:53.441985 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/neutron-operator-controller-manager-78bd47f458-nqlmd" podStartSLOduration=4.954041623 podStartE2EDuration="25.441766791s" podCreationTimestamp="2025-11-23 14:57:28 +0000 UTC" firstStartedPulling="2025-11-23 14:57:29.96788044 +0000 UTC m=+945.134876935" lastFinishedPulling="2025-11-23 14:57:50.455605608 +0000 UTC m=+965.622602103" observedRunningTime="2025-11-23 14:57:53.432292201 +0000 UTC m=+968.599288696" watchObservedRunningTime="2025-11-23 14:57:53.441766791 +0000 UTC m=+968.608763276" Nov 23 14:57:53 crc kubenswrapper[5050]: I1123 14:57:53.442568 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/keystone-operator-controller-manager-7454b96578-spv2t" podStartSLOduration=5.398354101 podStartE2EDuration="25.442559047s" podCreationTimestamp="2025-11-23 14:57:28 +0000 UTC" firstStartedPulling="2025-11-23 14:57:30.410333661 +0000 UTC m=+945.577330146" lastFinishedPulling="2025-11-23 14:57:50.454538597 +0000 UTC m=+965.621535092" observedRunningTime="2025-11-23 14:57:53.413954202 +0000 UTC m=+968.580950687" watchObservedRunningTime="2025-11-23 14:57:53.442559047 +0000 UTC m=+968.609555532" Nov 23 14:57:53 crc kubenswrapper[5050]: I1123 14:57:53.460612 5050 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/designate-operator-controller-manager-767ccfd65f-cp2sk" podStartSLOduration=6.40626545 podStartE2EDuration="26.460591049s" podCreationTimestamp="2025-11-23 14:57:27 +0000 UTC" firstStartedPulling="2025-11-23 14:57:30.429371933 +0000 UTC m=+945.596368408" lastFinishedPulling="2025-11-23 14:57:50.483697492 +0000 UTC m=+965.650694007" observedRunningTime="2025-11-23 14:57:53.451868374 +0000 UTC m=+968.618864859" watchObservedRunningTime="2025-11-23 14:57:53.460591049 +0000 UTC m=+968.627587534" Nov 23 14:57:53 crc kubenswrapper[5050]: I1123 14:57:53.481561 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ironic-operator-controller-manager-99b499f4-4p57r" podStartSLOduration=4.990576316 podStartE2EDuration="25.481534619s" podCreationTimestamp="2025-11-23 14:57:28 +0000 UTC" firstStartedPulling="2025-11-23 14:57:29.96289679 +0000 UTC m=+945.129893275" lastFinishedPulling="2025-11-23 14:57:50.453855073 +0000 UTC m=+965.620851578" observedRunningTime="2025-11-23 14:57:53.47659866 +0000 UTC m=+968.643595145" watchObservedRunningTime="2025-11-23 14:57:53.481534619 +0000 UTC m=+968.648531104" Nov 23 14:57:53 crc kubenswrapper[5050]: I1123 14:57:53.517526 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/glance-operator-controller-manager-7969689c84-r5lpg" podStartSLOduration=6.479797736 podStartE2EDuration="26.517499461s" podCreationTimestamp="2025-11-23 14:57:27 +0000 UTC" firstStartedPulling="2025-11-23 14:57:29.62016691 +0000 UTC m=+944.787163385" lastFinishedPulling="2025-11-23 14:57:49.657868615 +0000 UTC m=+964.824865110" observedRunningTime="2025-11-23 14:57:53.504940049 +0000 UTC m=+968.671936524" watchObservedRunningTime="2025-11-23 14:57:53.517499461 +0000 UTC m=+968.684495946" Nov 23 14:57:53 crc kubenswrapper[5050]: I1123 14:57:53.534274 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/cinder-operator-controller-manager-6498cbf48f-h5gcg" podStartSLOduration=5.7122433489999995 podStartE2EDuration="26.534250737s" podCreationTimestamp="2025-11-23 14:57:27 +0000 UTC" firstStartedPulling="2025-11-23 14:57:29.635008258 +0000 UTC m=+944.802004743" lastFinishedPulling="2025-11-23 14:57:50.457015646 +0000 UTC m=+965.624012131" observedRunningTime="2025-11-23 14:57:53.522678325 +0000 UTC m=+968.689674830" watchObservedRunningTime="2025-11-23 14:57:53.534250737 +0000 UTC m=+968.701247222" Nov 23 14:57:53 crc kubenswrapper[5050]: I1123 14:57:53.582768 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/manila-operator-controller-manager-58f887965d-4856d" podStartSLOduration=5.505704853 podStartE2EDuration="25.582720947s" podCreationTimestamp="2025-11-23 14:57:28 +0000 UTC" firstStartedPulling="2025-11-23 14:57:30.379218007 +0000 UTC m=+945.546214492" lastFinishedPulling="2025-11-23 14:57:50.456234081 +0000 UTC m=+965.623230586" observedRunningTime="2025-11-23 14:57:53.551751538 +0000 UTC m=+968.718748023" watchObservedRunningTime="2025-11-23 14:57:53.582720947 +0000 UTC m=+968.749717432" Nov 23 14:57:53 crc kubenswrapper[5050]: I1123 14:57:53.589660 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/infra-operator-controller-manager-6dd8864d7c-pc65p" podStartSLOduration=5.530226543 podStartE2EDuration="25.589637044s" podCreationTimestamp="2025-11-23 14:57:28 +0000 UTC" 
firstStartedPulling="2025-11-23 14:57:30.397971503 +0000 UTC m=+945.564967988" lastFinishedPulling="2025-11-23 14:57:50.457381994 +0000 UTC m=+965.624378489" observedRunningTime="2025-11-23 14:57:53.573962409 +0000 UTC m=+968.740958894" watchObservedRunningTime="2025-11-23 14:57:53.589637044 +0000 UTC m=+968.756633559" Nov 23 14:57:54 crc kubenswrapper[5050]: I1123 14:57:54.415651 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/manila-operator-controller-manager-58f887965d-4856d" Nov 23 14:57:54 crc kubenswrapper[5050]: E1123 14:57:54.416818 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/watcher-operator@sha256:4838402d41d42c56613d43dc5041aae475a2b18e6172491d6c4d4a78a580697f\\\"\"" pod="openstack-operators/watcher-operator-controller-manager-8c6448b9f-m7f26" podUID="9e9d6f21-c5ab-438c-99ee-433f1eaa9093" Nov 23 14:57:54 crc kubenswrapper[5050]: I1123 14:57:54.430775 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ovn-operator-controller-manager-54fc5f65b7-k9c9p" podStartSLOduration=6.36577505 podStartE2EDuration="26.430745842s" podCreationTimestamp="2025-11-23 14:57:28 +0000 UTC" firstStartedPulling="2025-11-23 14:57:30.389591735 +0000 UTC m=+945.556588220" lastFinishedPulling="2025-11-23 14:57:50.454562527 +0000 UTC m=+965.621559012" observedRunningTime="2025-11-23 14:57:53.626548382 +0000 UTC m=+968.793544867" watchObservedRunningTime="2025-11-23 14:57:54.430745842 +0000 UTC m=+969.597742337" Nov 23 14:57:58 crc kubenswrapper[5050]: I1123 14:57:58.217580 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/barbican-operator-controller-manager-75fb479bcc-2qdnf" Nov 23 14:57:58 crc kubenswrapper[5050]: I1123 14:57:58.220044 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/cinder-operator-controller-manager-6498cbf48f-h5gcg" Nov 23 14:57:58 crc kubenswrapper[5050]: I1123 14:57:58.262325 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/glance-operator-controller-manager-7969689c84-r5lpg" Nov 23 14:57:58 crc kubenswrapper[5050]: I1123 14:57:58.346138 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/heat-operator-controller-manager-56f54d6746-g2g7z" Nov 23 14:57:58 crc kubenswrapper[5050]: I1123 14:57:58.421033 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/horizon-operator-controller-manager-598f69df5d-t4m5g" Nov 23 14:57:58 crc kubenswrapper[5050]: I1123 14:57:58.452332 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-k4khg" event={"ID":"14e92b7f-abf0-4e72-b7b0-ff9b8d1e199b","Type":"ContainerStarted","Data":"97cdbe380a3947ff9992eb18fb4d186806db5d958f4ee9c944b0baaa2b39aa3f"} Nov 23 14:57:58 crc kubenswrapper[5050]: I1123 14:57:58.454634 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-b4c496f69-zdcb4" event={"ID":"4b036715-70f2-493c-9e9c-27228473af65","Type":"ContainerStarted","Data":"f06657a7527b50a6581a05d1f800a3cd2da838cb4e2215e2192ae80c960a7472"} Nov 23 14:57:58 crc kubenswrapper[5050]: I1123 14:57:58.454930 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" 
status="" pod="openstack-operators/test-operator-controller-manager-b4c496f69-zdcb4" Nov 23 14:57:58 crc kubenswrapper[5050]: I1123 14:57:58.458820 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-d656998f4-fjsvj" event={"ID":"c937d003-eeeb-49db-b2fe-8bc1ffde5f94","Type":"ContainerStarted","Data":"aa6fd0672445a659d8d751ae55f685c332a20f42b0d8a06159dca628554c2727"} Nov 23 14:57:58 crc kubenswrapper[5050]: I1123 14:57:58.459204 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/swift-operator-controller-manager-d656998f4-fjsvj" Nov 23 14:57:58 crc kubenswrapper[5050]: I1123 14:57:58.466250 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-cfbb9c588-d9m8q" event={"ID":"d54a2a99-ffe4-422e-bbae-54371be5a57e","Type":"ContainerStarted","Data":"410640e8e4d923604185006c4a0a941d6ec404a4c35a59649e5cc05777d2b371"} Nov 23 14:57:58 crc kubenswrapper[5050]: I1123 14:57:58.467162 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/nova-operator-controller-manager-cfbb9c588-d9m8q" Nov 23 14:57:58 crc kubenswrapper[5050]: I1123 14:57:58.476330 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-k4khg" podStartSLOduration=3.454199673 podStartE2EDuration="30.476312161s" podCreationTimestamp="2025-11-23 14:57:28 +0000 UTC" firstStartedPulling="2025-11-23 14:57:30.65686696 +0000 UTC m=+945.823863445" lastFinishedPulling="2025-11-23 14:57:57.678979398 +0000 UTC m=+972.845975933" observedRunningTime="2025-11-23 14:57:58.475514329 +0000 UTC m=+973.642510824" watchObservedRunningTime="2025-11-23 14:57:58.476312161 +0000 UTC m=+973.643308646" Nov 23 14:57:58 crc kubenswrapper[5050]: I1123 14:57:58.486627 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-6d4bf84b58-8wkkq" event={"ID":"78533aec-d2a4-4691-8960-a5ca3abd34a8","Type":"ContainerStarted","Data":"7da9be774aa47448280046f945b7bf01ecef2279d0e7ce84fc2417d9aba46b75"} Nov 23 14:57:58 crc kubenswrapper[5050]: I1123 14:57:58.487917 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/telemetry-operator-controller-manager-6d4bf84b58-8wkkq" Nov 23 14:57:58 crc kubenswrapper[5050]: I1123 14:57:58.499896 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-8c7444f48-zksdq" event={"ID":"9483bc3e-409c-426c-a0ea-34b75543ac3c","Type":"ContainerStarted","Data":"00f8c4c83e5c91e81394452a9777ae551b9abdd57e105c081958a72f80750fc4"} Nov 23 14:57:58 crc kubenswrapper[5050]: I1123 14:57:58.500226 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-baremetal-operator-controller-manager-8c7444f48-zksdq" Nov 23 14:57:58 crc kubenswrapper[5050]: I1123 14:57:58.506865 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/swift-operator-controller-manager-d656998f4-fjsvj" podStartSLOduration=3.563080598 podStartE2EDuration="30.506831072s" podCreationTimestamp="2025-11-23 14:57:28 +0000 UTC" firstStartedPulling="2025-11-23 14:57:30.673412202 +0000 UTC m=+945.840408687" lastFinishedPulling="2025-11-23 14:57:57.617162636 +0000 UTC m=+972.784159161" observedRunningTime="2025-11-23 14:57:58.499889138 +0000 UTC 
m=+973.666885633" watchObservedRunningTime="2025-11-23 14:57:58.506831072 +0000 UTC m=+973.673827557" Nov 23 14:57:58 crc kubenswrapper[5050]: I1123 14:57:58.526809 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/nova-operator-controller-manager-cfbb9c588-d9m8q" podStartSLOduration=3.578816241 podStartE2EDuration="30.526787458s" podCreationTimestamp="2025-11-23 14:57:28 +0000 UTC" firstStartedPulling="2025-11-23 14:57:30.669105156 +0000 UTC m=+945.836101641" lastFinishedPulling="2025-11-23 14:57:57.617076373 +0000 UTC m=+972.784072858" observedRunningTime="2025-11-23 14:57:58.52111698 +0000 UTC m=+973.688113465" watchObservedRunningTime="2025-11-23 14:57:58.526787458 +0000 UTC m=+973.693783943" Nov 23 14:57:58 crc kubenswrapper[5050]: I1123 14:57:58.535588 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ironic-operator-controller-manager-99b499f4-4p57r" Nov 23 14:57:58 crc kubenswrapper[5050]: I1123 14:57:58.545071 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/test-operator-controller-manager-b4c496f69-zdcb4" podStartSLOduration=3.593476882 podStartE2EDuration="30.545048376s" podCreationTimestamp="2025-11-23 14:57:28 +0000 UTC" firstStartedPulling="2025-11-23 14:57:30.666089545 +0000 UTC m=+945.833086030" lastFinishedPulling="2025-11-23 14:57:57.617661039 +0000 UTC m=+972.784657524" observedRunningTime="2025-11-23 14:57:58.540917351 +0000 UTC m=+973.707913836" watchObservedRunningTime="2025-11-23 14:57:58.545048376 +0000 UTC m=+973.712044861" Nov 23 14:57:58 crc kubenswrapper[5050]: I1123 14:57:58.563103 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/telemetry-operator-controller-manager-6d4bf84b58-8wkkq" podStartSLOduration=3.566102038 podStartE2EDuration="30.563077979s" podCreationTimestamp="2025-11-23 14:57:28 +0000 UTC" firstStartedPulling="2025-11-23 14:57:30.663708567 +0000 UTC m=+945.830705052" lastFinishedPulling="2025-11-23 14:57:57.660684478 +0000 UTC m=+972.827680993" observedRunningTime="2025-11-23 14:57:58.555908139 +0000 UTC m=+973.722904634" watchObservedRunningTime="2025-11-23 14:57:58.563077979 +0000 UTC m=+973.730074464" Nov 23 14:57:58 crc kubenswrapper[5050]: I1123 14:57:58.615817 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/designate-operator-controller-manager-767ccfd65f-cp2sk" Nov 23 14:57:58 crc kubenswrapper[5050]: I1123 14:57:58.626173 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/keystone-operator-controller-manager-7454b96578-spv2t" Nov 23 14:57:58 crc kubenswrapper[5050]: I1123 14:57:58.635201 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-baremetal-operator-controller-manager-8c7444f48-zksdq" podStartSLOduration=3.694935188 podStartE2EDuration="30.635180417s" podCreationTimestamp="2025-11-23 14:57:28 +0000 UTC" firstStartedPulling="2025-11-23 14:57:30.685324431 +0000 UTC m=+945.852320916" lastFinishedPulling="2025-11-23 14:57:57.62556962 +0000 UTC m=+972.792566145" observedRunningTime="2025-11-23 14:57:58.613850533 +0000 UTC m=+973.780847018" watchObservedRunningTime="2025-11-23 14:57:58.635180417 +0000 UTC m=+973.802176922" Nov 23 14:57:58 crc kubenswrapper[5050]: I1123 14:57:58.661927 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openstack-operators/manila-operator-controller-manager-58f887965d-4856d" Nov 23 14:57:58 crc kubenswrapper[5050]: I1123 14:57:58.676725 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/neutron-operator-controller-manager-78bd47f458-nqlmd" Nov 23 14:57:58 crc kubenswrapper[5050]: I1123 14:57:58.682482 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/mariadb-operator-controller-manager-54b5986bb8-8tnhz" Nov 23 14:57:58 crc kubenswrapper[5050]: I1123 14:57:58.878588 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/infra-operator-controller-manager-6dd8864d7c-pc65p" Nov 23 14:57:58 crc kubenswrapper[5050]: I1123 14:57:58.983362 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/placement-operator-controller-manager-5b797b8dff-xc9s4" Nov 23 14:57:59 crc kubenswrapper[5050]: I1123 14:57:59.137217 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ovn-operator-controller-manager-54fc5f65b7-k9c9p" Nov 23 14:58:06 crc kubenswrapper[5050]: I1123 14:58:06.553392 5050 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 23 14:58:07 crc kubenswrapper[5050]: I1123 14:58:07.586439 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-8c6448b9f-m7f26" event={"ID":"9e9d6f21-c5ab-438c-99ee-433f1eaa9093","Type":"ContainerStarted","Data":"12f77c9b737667b19b40e8a0fe92f62602fd3e92892feaa36c99eb5b906c7daf"} Nov 23 14:58:07 crc kubenswrapper[5050]: I1123 14:58:07.588237 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/watcher-operator-controller-manager-8c6448b9f-m7f26" Nov 23 14:58:07 crc kubenswrapper[5050]: I1123 14:58:07.589697 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-54cfbf4c7d-mzmmd" event={"ID":"b886cbd7-ef95-46b5-b817-d041b5b96cec","Type":"ContainerStarted","Data":"dbfcd0c0160f06a85f1e90e3d1cc8399abe3020ce250fb3babec800fb36d4c66"} Nov 23 14:58:07 crc kubenswrapper[5050]: I1123 14:58:07.590197 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/octavia-operator-controller-manager-54cfbf4c7d-mzmmd" Nov 23 14:58:07 crc kubenswrapper[5050]: I1123 14:58:07.627056 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/watcher-operator-controller-manager-8c6448b9f-m7f26" podStartSLOduration=3.237791965 podStartE2EDuration="39.627033307s" podCreationTimestamp="2025-11-23 14:57:28 +0000 UTC" firstStartedPulling="2025-11-23 14:57:30.652904511 +0000 UTC m=+945.819900996" lastFinishedPulling="2025-11-23 14:58:07.042145813 +0000 UTC m=+982.209142338" observedRunningTime="2025-11-23 14:58:07.619049425 +0000 UTC m=+982.786045920" watchObservedRunningTime="2025-11-23 14:58:07.627033307 +0000 UTC m=+982.794029802" Nov 23 14:58:07 crc kubenswrapper[5050]: I1123 14:58:07.647868 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/octavia-operator-controller-manager-54cfbf4c7d-mzmmd" podStartSLOduration=2.995680424 podStartE2EDuration="39.647832507s" podCreationTimestamp="2025-11-23 14:57:28 +0000 UTC" firstStartedPulling="2025-11-23 14:57:30.393043614 +0000 UTC m=+945.560040099" lastFinishedPulling="2025-11-23 
14:58:07.045195657 +0000 UTC m=+982.212192182" observedRunningTime="2025-11-23 14:58:07.63860625 +0000 UTC m=+982.805602745" watchObservedRunningTime="2025-11-23 14:58:07.647832507 +0000 UTC m=+982.814829032" Nov 23 14:58:09 crc kubenswrapper[5050]: I1123 14:58:09.051134 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/nova-operator-controller-manager-cfbb9c588-d9m8q" Nov 23 14:58:09 crc kubenswrapper[5050]: I1123 14:58:09.201361 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/swift-operator-controller-manager-d656998f4-fjsvj" Nov 23 14:58:09 crc kubenswrapper[5050]: I1123 14:58:09.228204 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/telemetry-operator-controller-manager-6d4bf84b58-8wkkq" Nov 23 14:58:09 crc kubenswrapper[5050]: I1123 14:58:09.263000 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/test-operator-controller-manager-b4c496f69-zdcb4" Nov 23 14:58:09 crc kubenswrapper[5050]: I1123 14:58:09.462883 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-baremetal-operator-controller-manager-8c7444f48-zksdq" Nov 23 14:58:18 crc kubenswrapper[5050]: I1123 14:58:18.770885 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/octavia-operator-controller-manager-54cfbf4c7d-mzmmd" Nov 23 14:58:19 crc kubenswrapper[5050]: I1123 14:58:19.289401 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/watcher-operator-controller-manager-8c6448b9f-m7f26" Nov 23 14:58:35 crc kubenswrapper[5050]: I1123 14:58:35.564348 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-gc5w9"] Nov 23 14:58:35 crc kubenswrapper[5050]: I1123 14:58:35.566515 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-gc5w9" Nov 23 14:58:35 crc kubenswrapper[5050]: I1123 14:58:35.569197 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns" Nov 23 14:58:35 crc kubenswrapper[5050]: I1123 14:58:35.571725 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dnsmasq-dns-dockercfg-rz5cb" Nov 23 14:58:35 crc kubenswrapper[5050]: I1123 14:58:35.571924 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openshift-service-ca.crt" Nov 23 14:58:35 crc kubenswrapper[5050]: I1123 14:58:35.572050 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"kube-root-ca.crt" Nov 23 14:58:35 crc kubenswrapper[5050]: I1123 14:58:35.585346 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-gc5w9"] Nov 23 14:58:35 crc kubenswrapper[5050]: I1123 14:58:35.690266 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-jqz2l"] Nov 23 14:58:35 crc kubenswrapper[5050]: I1123 14:58:35.691890 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-jqz2l" Nov 23 14:58:35 crc kubenswrapper[5050]: I1123 14:58:35.694419 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-svc" Nov 23 14:58:35 crc kubenswrapper[5050]: I1123 14:58:35.703534 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-jqz2l"] Nov 23 14:58:35 crc kubenswrapper[5050]: I1123 14:58:35.754824 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6bae002e-fff8-41ed-8e1a-2f8b4f95393f-config\") pod \"dnsmasq-dns-675f4bcbfc-gc5w9\" (UID: \"6bae002e-fff8-41ed-8e1a-2f8b4f95393f\") " pod="openstack/dnsmasq-dns-675f4bcbfc-gc5w9" Nov 23 14:58:35 crc kubenswrapper[5050]: I1123 14:58:35.755937 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jl52z\" (UniqueName: \"kubernetes.io/projected/6bae002e-fff8-41ed-8e1a-2f8b4f95393f-kube-api-access-jl52z\") pod \"dnsmasq-dns-675f4bcbfc-gc5w9\" (UID: \"6bae002e-fff8-41ed-8e1a-2f8b4f95393f\") " pod="openstack/dnsmasq-dns-675f4bcbfc-gc5w9" Nov 23 14:58:35 crc kubenswrapper[5050]: I1123 14:58:35.857558 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e022ef9d-faf7-4dcb-87c8-cde58d2b8575-config\") pod \"dnsmasq-dns-78dd6ddcc-jqz2l\" (UID: \"e022ef9d-faf7-4dcb-87c8-cde58d2b8575\") " pod="openstack/dnsmasq-dns-78dd6ddcc-jqz2l" Nov 23 14:58:35 crc kubenswrapper[5050]: I1123 14:58:35.857632 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e022ef9d-faf7-4dcb-87c8-cde58d2b8575-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-jqz2l\" (UID: \"e022ef9d-faf7-4dcb-87c8-cde58d2b8575\") " pod="openstack/dnsmasq-dns-78dd6ddcc-jqz2l" Nov 23 14:58:35 crc kubenswrapper[5050]: I1123 14:58:35.857668 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6bae002e-fff8-41ed-8e1a-2f8b4f95393f-config\") pod \"dnsmasq-dns-675f4bcbfc-gc5w9\" (UID: \"6bae002e-fff8-41ed-8e1a-2f8b4f95393f\") " pod="openstack/dnsmasq-dns-675f4bcbfc-gc5w9" Nov 23 14:58:35 crc kubenswrapper[5050]: I1123 14:58:35.857722 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jl52z\" (UniqueName: \"kubernetes.io/projected/6bae002e-fff8-41ed-8e1a-2f8b4f95393f-kube-api-access-jl52z\") pod \"dnsmasq-dns-675f4bcbfc-gc5w9\" (UID: \"6bae002e-fff8-41ed-8e1a-2f8b4f95393f\") " pod="openstack/dnsmasq-dns-675f4bcbfc-gc5w9" Nov 23 14:58:35 crc kubenswrapper[5050]: I1123 14:58:35.857741 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-58k2w\" (UniqueName: \"kubernetes.io/projected/e022ef9d-faf7-4dcb-87c8-cde58d2b8575-kube-api-access-58k2w\") pod \"dnsmasq-dns-78dd6ddcc-jqz2l\" (UID: \"e022ef9d-faf7-4dcb-87c8-cde58d2b8575\") " pod="openstack/dnsmasq-dns-78dd6ddcc-jqz2l" Nov 23 14:58:35 crc kubenswrapper[5050]: I1123 14:58:35.858642 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6bae002e-fff8-41ed-8e1a-2f8b4f95393f-config\") pod \"dnsmasq-dns-675f4bcbfc-gc5w9\" (UID: \"6bae002e-fff8-41ed-8e1a-2f8b4f95393f\") " pod="openstack/dnsmasq-dns-675f4bcbfc-gc5w9" Nov 23 
14:58:35 crc kubenswrapper[5050]: I1123 14:58:35.880099 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jl52z\" (UniqueName: \"kubernetes.io/projected/6bae002e-fff8-41ed-8e1a-2f8b4f95393f-kube-api-access-jl52z\") pod \"dnsmasq-dns-675f4bcbfc-gc5w9\" (UID: \"6bae002e-fff8-41ed-8e1a-2f8b4f95393f\") " pod="openstack/dnsmasq-dns-675f4bcbfc-gc5w9" Nov 23 14:58:35 crc kubenswrapper[5050]: I1123 14:58:35.889781 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-gc5w9" Nov 23 14:58:35 crc kubenswrapper[5050]: I1123 14:58:35.958860 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-58k2w\" (UniqueName: \"kubernetes.io/projected/e022ef9d-faf7-4dcb-87c8-cde58d2b8575-kube-api-access-58k2w\") pod \"dnsmasq-dns-78dd6ddcc-jqz2l\" (UID: \"e022ef9d-faf7-4dcb-87c8-cde58d2b8575\") " pod="openstack/dnsmasq-dns-78dd6ddcc-jqz2l" Nov 23 14:58:35 crc kubenswrapper[5050]: I1123 14:58:35.958963 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e022ef9d-faf7-4dcb-87c8-cde58d2b8575-config\") pod \"dnsmasq-dns-78dd6ddcc-jqz2l\" (UID: \"e022ef9d-faf7-4dcb-87c8-cde58d2b8575\") " pod="openstack/dnsmasq-dns-78dd6ddcc-jqz2l" Nov 23 14:58:35 crc kubenswrapper[5050]: I1123 14:58:35.959003 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e022ef9d-faf7-4dcb-87c8-cde58d2b8575-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-jqz2l\" (UID: \"e022ef9d-faf7-4dcb-87c8-cde58d2b8575\") " pod="openstack/dnsmasq-dns-78dd6ddcc-jqz2l" Nov 23 14:58:35 crc kubenswrapper[5050]: I1123 14:58:35.960191 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e022ef9d-faf7-4dcb-87c8-cde58d2b8575-config\") pod \"dnsmasq-dns-78dd6ddcc-jqz2l\" (UID: \"e022ef9d-faf7-4dcb-87c8-cde58d2b8575\") " pod="openstack/dnsmasq-dns-78dd6ddcc-jqz2l" Nov 23 14:58:35 crc kubenswrapper[5050]: I1123 14:58:35.960237 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e022ef9d-faf7-4dcb-87c8-cde58d2b8575-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-jqz2l\" (UID: \"e022ef9d-faf7-4dcb-87c8-cde58d2b8575\") " pod="openstack/dnsmasq-dns-78dd6ddcc-jqz2l" Nov 23 14:58:35 crc kubenswrapper[5050]: I1123 14:58:35.989286 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-58k2w\" (UniqueName: \"kubernetes.io/projected/e022ef9d-faf7-4dcb-87c8-cde58d2b8575-kube-api-access-58k2w\") pod \"dnsmasq-dns-78dd6ddcc-jqz2l\" (UID: \"e022ef9d-faf7-4dcb-87c8-cde58d2b8575\") " pod="openstack/dnsmasq-dns-78dd6ddcc-jqz2l" Nov 23 14:58:36 crc kubenswrapper[5050]: I1123 14:58:36.006935 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-jqz2l" Nov 23 14:58:36 crc kubenswrapper[5050]: I1123 14:58:36.298939 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-gc5w9"] Nov 23 14:58:36 crc kubenswrapper[5050]: I1123 14:58:36.359250 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5ccc8479f9-pc2bl"] Nov 23 14:58:36 crc kubenswrapper[5050]: I1123 14:58:36.366437 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5ccc8479f9-pc2bl" Nov 23 14:58:36 crc kubenswrapper[5050]: I1123 14:58:36.373136 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5ccc8479f9-pc2bl"] Nov 23 14:58:36 crc kubenswrapper[5050]: I1123 14:58:36.377818 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gfj4l\" (UniqueName: \"kubernetes.io/projected/99bf0fd0-57d8-459a-a6b1-f538e878af9b-kube-api-access-gfj4l\") pod \"dnsmasq-dns-5ccc8479f9-pc2bl\" (UID: \"99bf0fd0-57d8-459a-a6b1-f538e878af9b\") " pod="openstack/dnsmasq-dns-5ccc8479f9-pc2bl" Nov 23 14:58:36 crc kubenswrapper[5050]: I1123 14:58:36.377884 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/99bf0fd0-57d8-459a-a6b1-f538e878af9b-config\") pod \"dnsmasq-dns-5ccc8479f9-pc2bl\" (UID: \"99bf0fd0-57d8-459a-a6b1-f538e878af9b\") " pod="openstack/dnsmasq-dns-5ccc8479f9-pc2bl" Nov 23 14:58:36 crc kubenswrapper[5050]: I1123 14:58:36.377908 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/99bf0fd0-57d8-459a-a6b1-f538e878af9b-dns-svc\") pod \"dnsmasq-dns-5ccc8479f9-pc2bl\" (UID: \"99bf0fd0-57d8-459a-a6b1-f538e878af9b\") " pod="openstack/dnsmasq-dns-5ccc8479f9-pc2bl" Nov 23 14:58:36 crc kubenswrapper[5050]: I1123 14:58:36.478901 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/99bf0fd0-57d8-459a-a6b1-f538e878af9b-config\") pod \"dnsmasq-dns-5ccc8479f9-pc2bl\" (UID: \"99bf0fd0-57d8-459a-a6b1-f538e878af9b\") " pod="openstack/dnsmasq-dns-5ccc8479f9-pc2bl" Nov 23 14:58:36 crc kubenswrapper[5050]: I1123 14:58:36.478948 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/99bf0fd0-57d8-459a-a6b1-f538e878af9b-dns-svc\") pod \"dnsmasq-dns-5ccc8479f9-pc2bl\" (UID: \"99bf0fd0-57d8-459a-a6b1-f538e878af9b\") " pod="openstack/dnsmasq-dns-5ccc8479f9-pc2bl" Nov 23 14:58:36 crc kubenswrapper[5050]: I1123 14:58:36.479035 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gfj4l\" (UniqueName: \"kubernetes.io/projected/99bf0fd0-57d8-459a-a6b1-f538e878af9b-kube-api-access-gfj4l\") pod \"dnsmasq-dns-5ccc8479f9-pc2bl\" (UID: \"99bf0fd0-57d8-459a-a6b1-f538e878af9b\") " pod="openstack/dnsmasq-dns-5ccc8479f9-pc2bl" Nov 23 14:58:36 crc kubenswrapper[5050]: I1123 14:58:36.480401 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/99bf0fd0-57d8-459a-a6b1-f538e878af9b-config\") pod \"dnsmasq-dns-5ccc8479f9-pc2bl\" (UID: \"99bf0fd0-57d8-459a-a6b1-f538e878af9b\") " pod="openstack/dnsmasq-dns-5ccc8479f9-pc2bl" Nov 23 14:58:36 crc kubenswrapper[5050]: I1123 14:58:36.480923 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/99bf0fd0-57d8-459a-a6b1-f538e878af9b-dns-svc\") pod \"dnsmasq-dns-5ccc8479f9-pc2bl\" (UID: \"99bf0fd0-57d8-459a-a6b1-f538e878af9b\") " pod="openstack/dnsmasq-dns-5ccc8479f9-pc2bl" Nov 23 14:58:36 crc kubenswrapper[5050]: I1123 14:58:36.505090 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gfj4l\" (UniqueName: 
\"kubernetes.io/projected/99bf0fd0-57d8-459a-a6b1-f538e878af9b-kube-api-access-gfj4l\") pod \"dnsmasq-dns-5ccc8479f9-pc2bl\" (UID: \"99bf0fd0-57d8-459a-a6b1-f538e878af9b\") " pod="openstack/dnsmasq-dns-5ccc8479f9-pc2bl" Nov 23 14:58:36 crc kubenswrapper[5050]: I1123 14:58:36.575064 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-jqz2l"] Nov 23 14:58:36 crc kubenswrapper[5050]: I1123 14:58:36.659733 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-jqz2l"] Nov 23 14:58:36 crc kubenswrapper[5050]: I1123 14:58:36.670328 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-gc5w9"] Nov 23 14:58:36 crc kubenswrapper[5050]: I1123 14:58:36.696723 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-sdz75"] Nov 23 14:58:36 crc kubenswrapper[5050]: I1123 14:58:36.698085 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-sdz75" Nov 23 14:58:36 crc kubenswrapper[5050]: I1123 14:58:36.698956 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5ccc8479f9-pc2bl" Nov 23 14:58:36 crc kubenswrapper[5050]: I1123 14:58:36.706814 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-sdz75"] Nov 23 14:58:36 crc kubenswrapper[5050]: I1123 14:58:36.786952 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/25891e09-f495-4de3-b5d2-496188bf0981-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-sdz75\" (UID: \"25891e09-f495-4de3-b5d2-496188bf0981\") " pod="openstack/dnsmasq-dns-57d769cc4f-sdz75" Nov 23 14:58:36 crc kubenswrapper[5050]: I1123 14:58:36.787106 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dcn7x\" (UniqueName: \"kubernetes.io/projected/25891e09-f495-4de3-b5d2-496188bf0981-kube-api-access-dcn7x\") pod \"dnsmasq-dns-57d769cc4f-sdz75\" (UID: \"25891e09-f495-4de3-b5d2-496188bf0981\") " pod="openstack/dnsmasq-dns-57d769cc4f-sdz75" Nov 23 14:58:36 crc kubenswrapper[5050]: I1123 14:58:36.787169 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/25891e09-f495-4de3-b5d2-496188bf0981-config\") pod \"dnsmasq-dns-57d769cc4f-sdz75\" (UID: \"25891e09-f495-4de3-b5d2-496188bf0981\") " pod="openstack/dnsmasq-dns-57d769cc4f-sdz75" Nov 23 14:58:36 crc kubenswrapper[5050]: I1123 14:58:36.863829 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-675f4bcbfc-gc5w9" event={"ID":"6bae002e-fff8-41ed-8e1a-2f8b4f95393f","Type":"ContainerStarted","Data":"b9e9ebafeed9f1be22ca45ddd430981bc7fe6cd441de571c2bb0e461fd89652b"} Nov 23 14:58:36 crc kubenswrapper[5050]: I1123 14:58:36.864824 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78dd6ddcc-jqz2l" event={"ID":"e022ef9d-faf7-4dcb-87c8-cde58d2b8575","Type":"ContainerStarted","Data":"3771e211c4622c34ab79b70dfeb1f71fa5db5ff11d2cf2872548b4ea6a33e155"} Nov 23 14:58:36 crc kubenswrapper[5050]: I1123 14:58:36.891653 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/25891e09-f495-4de3-b5d2-496188bf0981-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-sdz75\" (UID: 
\"25891e09-f495-4de3-b5d2-496188bf0981\") " pod="openstack/dnsmasq-dns-57d769cc4f-sdz75" Nov 23 14:58:36 crc kubenswrapper[5050]: I1123 14:58:36.891726 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dcn7x\" (UniqueName: \"kubernetes.io/projected/25891e09-f495-4de3-b5d2-496188bf0981-kube-api-access-dcn7x\") pod \"dnsmasq-dns-57d769cc4f-sdz75\" (UID: \"25891e09-f495-4de3-b5d2-496188bf0981\") " pod="openstack/dnsmasq-dns-57d769cc4f-sdz75" Nov 23 14:58:36 crc kubenswrapper[5050]: I1123 14:58:36.891760 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/25891e09-f495-4de3-b5d2-496188bf0981-config\") pod \"dnsmasq-dns-57d769cc4f-sdz75\" (UID: \"25891e09-f495-4de3-b5d2-496188bf0981\") " pod="openstack/dnsmasq-dns-57d769cc4f-sdz75" Nov 23 14:58:36 crc kubenswrapper[5050]: I1123 14:58:36.892747 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/25891e09-f495-4de3-b5d2-496188bf0981-config\") pod \"dnsmasq-dns-57d769cc4f-sdz75\" (UID: \"25891e09-f495-4de3-b5d2-496188bf0981\") " pod="openstack/dnsmasq-dns-57d769cc4f-sdz75" Nov 23 14:58:36 crc kubenswrapper[5050]: I1123 14:58:36.894972 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/25891e09-f495-4de3-b5d2-496188bf0981-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-sdz75\" (UID: \"25891e09-f495-4de3-b5d2-496188bf0981\") " pod="openstack/dnsmasq-dns-57d769cc4f-sdz75" Nov 23 14:58:36 crc kubenswrapper[5050]: I1123 14:58:36.915581 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dcn7x\" (UniqueName: \"kubernetes.io/projected/25891e09-f495-4de3-b5d2-496188bf0981-kube-api-access-dcn7x\") pod \"dnsmasq-dns-57d769cc4f-sdz75\" (UID: \"25891e09-f495-4de3-b5d2-496188bf0981\") " pod="openstack/dnsmasq-dns-57d769cc4f-sdz75" Nov 23 14:58:37 crc kubenswrapper[5050]: I1123 14:58:37.060102 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-sdz75" Nov 23 14:58:37 crc kubenswrapper[5050]: I1123 14:58:37.277821 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5ccc8479f9-pc2bl"] Nov 23 14:58:37 crc kubenswrapper[5050]: W1123 14:58:37.288052 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod99bf0fd0_57d8_459a_a6b1_f538e878af9b.slice/crio-48476f6a79a2bcbf275051d08312278b537ac375b8765449d910029cb3b4f4bf WatchSource:0}: Error finding container 48476f6a79a2bcbf275051d08312278b537ac375b8765449d910029cb3b4f4bf: Status 404 returned error can't find the container with id 48476f6a79a2bcbf275051d08312278b537ac375b8765449d910029cb3b4f4bf Nov 23 14:58:37 crc kubenswrapper[5050]: I1123 14:58:37.515050 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 23 14:58:37 crc kubenswrapper[5050]: I1123 14:58:37.518755 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 23 14:58:37 crc kubenswrapper[5050]: I1123 14:58:37.529550 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Nov 23 14:58:37 crc kubenswrapper[5050]: I1123 14:58:37.529733 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Nov 23 14:58:37 crc kubenswrapper[5050]: I1123 14:58:37.529933 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Nov 23 14:58:37 crc kubenswrapper[5050]: I1123 14:58:37.530191 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Nov 23 14:58:37 crc kubenswrapper[5050]: I1123 14:58:37.530415 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-sbgxw" Nov 23 14:58:37 crc kubenswrapper[5050]: I1123 14:58:37.530625 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Nov 23 14:58:37 crc kubenswrapper[5050]: I1123 14:58:37.530784 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Nov 23 14:58:37 crc kubenswrapper[5050]: I1123 14:58:37.540188 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 23 14:58:37 crc kubenswrapper[5050]: I1123 14:58:37.560236 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-sdz75"] Nov 23 14:58:37 crc kubenswrapper[5050]: W1123 14:58:37.593060 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod25891e09_f495_4de3_b5d2_496188bf0981.slice/crio-05986b26b5382e18fe6493916074f8fc0cb73d0adcf2a5dfb725445c9886395b WatchSource:0}: Error finding container 05986b26b5382e18fe6493916074f8fc0cb73d0adcf2a5dfb725445c9886395b: Status 404 returned error can't find the container with id 05986b26b5382e18fe6493916074f8fc0cb73d0adcf2a5dfb725445c9886395b Nov 23 14:58:37 crc kubenswrapper[5050]: I1123 14:58:37.706747 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/f92353db-5352-4216-ad2d-50242e58dfb7-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"f92353db-5352-4216-ad2d-50242e58dfb7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 23 14:58:37 crc kubenswrapper[5050]: I1123 14:58:37.706814 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/f92353db-5352-4216-ad2d-50242e58dfb7-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"f92353db-5352-4216-ad2d-50242e58dfb7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 23 14:58:37 crc kubenswrapper[5050]: I1123 14:58:37.707076 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/f92353db-5352-4216-ad2d-50242e58dfb7-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"f92353db-5352-4216-ad2d-50242e58dfb7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 23 14:58:37 crc kubenswrapper[5050]: I1123 14:58:37.707219 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q7vgm\" (UniqueName: 
\"kubernetes.io/projected/f92353db-5352-4216-ad2d-50242e58dfb7-kube-api-access-q7vgm\") pod \"rabbitmq-cell1-server-0\" (UID: \"f92353db-5352-4216-ad2d-50242e58dfb7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 23 14:58:37 crc kubenswrapper[5050]: I1123 14:58:37.707263 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/f92353db-5352-4216-ad2d-50242e58dfb7-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"f92353db-5352-4216-ad2d-50242e58dfb7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 23 14:58:37 crc kubenswrapper[5050]: I1123 14:58:37.707304 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/f92353db-5352-4216-ad2d-50242e58dfb7-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"f92353db-5352-4216-ad2d-50242e58dfb7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 23 14:58:37 crc kubenswrapper[5050]: I1123 14:58:37.707392 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"f92353db-5352-4216-ad2d-50242e58dfb7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 23 14:58:37 crc kubenswrapper[5050]: I1123 14:58:37.707540 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f92353db-5352-4216-ad2d-50242e58dfb7-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"f92353db-5352-4216-ad2d-50242e58dfb7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 23 14:58:37 crc kubenswrapper[5050]: I1123 14:58:37.707587 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/f92353db-5352-4216-ad2d-50242e58dfb7-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"f92353db-5352-4216-ad2d-50242e58dfb7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 23 14:58:37 crc kubenswrapper[5050]: I1123 14:58:37.707609 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/f92353db-5352-4216-ad2d-50242e58dfb7-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"f92353db-5352-4216-ad2d-50242e58dfb7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 23 14:58:37 crc kubenswrapper[5050]: I1123 14:58:37.707646 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/f92353db-5352-4216-ad2d-50242e58dfb7-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"f92353db-5352-4216-ad2d-50242e58dfb7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 23 14:58:37 crc kubenswrapper[5050]: I1123 14:58:37.809307 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/f92353db-5352-4216-ad2d-50242e58dfb7-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"f92353db-5352-4216-ad2d-50242e58dfb7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 23 14:58:37 crc kubenswrapper[5050]: I1123 14:58:37.809375 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: 
\"kubernetes.io/downward-api/f92353db-5352-4216-ad2d-50242e58dfb7-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"f92353db-5352-4216-ad2d-50242e58dfb7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 23 14:58:37 crc kubenswrapper[5050]: I1123 14:58:37.809418 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/f92353db-5352-4216-ad2d-50242e58dfb7-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"f92353db-5352-4216-ad2d-50242e58dfb7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 23 14:58:37 crc kubenswrapper[5050]: I1123 14:58:37.809462 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q7vgm\" (UniqueName: \"kubernetes.io/projected/f92353db-5352-4216-ad2d-50242e58dfb7-kube-api-access-q7vgm\") pod \"rabbitmq-cell1-server-0\" (UID: \"f92353db-5352-4216-ad2d-50242e58dfb7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 23 14:58:37 crc kubenswrapper[5050]: I1123 14:58:37.809491 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/f92353db-5352-4216-ad2d-50242e58dfb7-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"f92353db-5352-4216-ad2d-50242e58dfb7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 23 14:58:37 crc kubenswrapper[5050]: I1123 14:58:37.809515 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/f92353db-5352-4216-ad2d-50242e58dfb7-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"f92353db-5352-4216-ad2d-50242e58dfb7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 23 14:58:37 crc kubenswrapper[5050]: I1123 14:58:37.809567 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"f92353db-5352-4216-ad2d-50242e58dfb7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 23 14:58:37 crc kubenswrapper[5050]: I1123 14:58:37.809605 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f92353db-5352-4216-ad2d-50242e58dfb7-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"f92353db-5352-4216-ad2d-50242e58dfb7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 23 14:58:37 crc kubenswrapper[5050]: I1123 14:58:37.809627 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/f92353db-5352-4216-ad2d-50242e58dfb7-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"f92353db-5352-4216-ad2d-50242e58dfb7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 23 14:58:37 crc kubenswrapper[5050]: I1123 14:58:37.809645 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/f92353db-5352-4216-ad2d-50242e58dfb7-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"f92353db-5352-4216-ad2d-50242e58dfb7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 23 14:58:37 crc kubenswrapper[5050]: I1123 14:58:37.809673 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/f92353db-5352-4216-ad2d-50242e58dfb7-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: 
\"f92353db-5352-4216-ad2d-50242e58dfb7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 23 14:58:37 crc kubenswrapper[5050]: I1123 14:58:37.810307 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/f92353db-5352-4216-ad2d-50242e58dfb7-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"f92353db-5352-4216-ad2d-50242e58dfb7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 23 14:58:37 crc kubenswrapper[5050]: I1123 14:58:37.810651 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/f92353db-5352-4216-ad2d-50242e58dfb7-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"f92353db-5352-4216-ad2d-50242e58dfb7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 23 14:58:37 crc kubenswrapper[5050]: I1123 14:58:37.810786 5050 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"f92353db-5352-4216-ad2d-50242e58dfb7\") device mount path \"/mnt/openstack/pv12\"" pod="openstack/rabbitmq-cell1-server-0" Nov 23 14:58:37 crc kubenswrapper[5050]: I1123 14:58:37.810882 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/f92353db-5352-4216-ad2d-50242e58dfb7-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"f92353db-5352-4216-ad2d-50242e58dfb7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 23 14:58:37 crc kubenswrapper[5050]: I1123 14:58:37.811490 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f92353db-5352-4216-ad2d-50242e58dfb7-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"f92353db-5352-4216-ad2d-50242e58dfb7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 23 14:58:37 crc kubenswrapper[5050]: I1123 14:58:37.812542 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/f92353db-5352-4216-ad2d-50242e58dfb7-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"f92353db-5352-4216-ad2d-50242e58dfb7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 23 14:58:37 crc kubenswrapper[5050]: I1123 14:58:37.818418 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/f92353db-5352-4216-ad2d-50242e58dfb7-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"f92353db-5352-4216-ad2d-50242e58dfb7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 23 14:58:37 crc kubenswrapper[5050]: I1123 14:58:37.822512 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/f92353db-5352-4216-ad2d-50242e58dfb7-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"f92353db-5352-4216-ad2d-50242e58dfb7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 23 14:58:37 crc kubenswrapper[5050]: I1123 14:58:37.822591 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/f92353db-5352-4216-ad2d-50242e58dfb7-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"f92353db-5352-4216-ad2d-50242e58dfb7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 23 14:58:37 crc kubenswrapper[5050]: I1123 14:58:37.822766 5050 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/f92353db-5352-4216-ad2d-50242e58dfb7-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"f92353db-5352-4216-ad2d-50242e58dfb7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 23 14:58:37 crc kubenswrapper[5050]: I1123 14:58:37.836772 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Nov 23 14:58:37 crc kubenswrapper[5050]: I1123 14:58:37.839983 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q7vgm\" (UniqueName: \"kubernetes.io/projected/f92353db-5352-4216-ad2d-50242e58dfb7-kube-api-access-q7vgm\") pod \"rabbitmq-cell1-server-0\" (UID: \"f92353db-5352-4216-ad2d-50242e58dfb7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 23 14:58:37 crc kubenswrapper[5050]: I1123 14:58:37.856598 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 23 14:58:37 crc kubenswrapper[5050]: I1123 14:58:37.866055 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Nov 23 14:58:37 crc kubenswrapper[5050]: I1123 14:58:37.867476 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Nov 23 14:58:37 crc kubenswrapper[5050]: I1123 14:58:37.870683 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc" Nov 23 14:58:37 crc kubenswrapper[5050]: I1123 14:58:37.870696 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Nov 23 14:58:37 crc kubenswrapper[5050]: I1123 14:58:37.875869 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data" Nov 23 14:58:37 crc kubenswrapper[5050]: I1123 14:58:37.881207 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-qfp52" Nov 23 14:58:37 crc kubenswrapper[5050]: I1123 14:58:37.882202 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 23 14:58:37 crc kubenswrapper[5050]: I1123 14:58:37.897630 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Nov 23 14:58:37 crc kubenswrapper[5050]: I1123 14:58:37.921656 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"f92353db-5352-4216-ad2d-50242e58dfb7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 23 14:58:37 crc kubenswrapper[5050]: I1123 14:58:37.940487 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5ccc8479f9-pc2bl" event={"ID":"99bf0fd0-57d8-459a-a6b1-f538e878af9b","Type":"ContainerStarted","Data":"48476f6a79a2bcbf275051d08312278b537ac375b8765449d910029cb3b4f4bf"} Nov 23 14:58:37 crc kubenswrapper[5050]: I1123 14:58:37.946426 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-sdz75" event={"ID":"25891e09-f495-4de3-b5d2-496188bf0981","Type":"ContainerStarted","Data":"05986b26b5382e18fe6493916074f8fc0cb73d0adcf2a5dfb725445c9886395b"} Nov 23 14:58:38 crc kubenswrapper[5050]: I1123 14:58:38.025284 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: 
\"kubernetes.io/empty-dir/5eff539e-c641-4873-aeae-450aaf0b4ac8-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"5eff539e-c641-4873-aeae-450aaf0b4ac8\") " pod="openstack/rabbitmq-server-0" Nov 23 14:58:38 crc kubenswrapper[5050]: I1123 14:58:38.025721 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n788x\" (UniqueName: \"kubernetes.io/projected/5eff539e-c641-4873-aeae-450aaf0b4ac8-kube-api-access-n788x\") pod \"rabbitmq-server-0\" (UID: \"5eff539e-c641-4873-aeae-450aaf0b4ac8\") " pod="openstack/rabbitmq-server-0" Nov 23 14:58:38 crc kubenswrapper[5050]: I1123 14:58:38.025927 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/5eff539e-c641-4873-aeae-450aaf0b4ac8-server-conf\") pod \"rabbitmq-server-0\" (UID: \"5eff539e-c641-4873-aeae-450aaf0b4ac8\") " pod="openstack/rabbitmq-server-0" Nov 23 14:58:38 crc kubenswrapper[5050]: I1123 14:58:38.026099 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/5eff539e-c641-4873-aeae-450aaf0b4ac8-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"5eff539e-c641-4873-aeae-450aaf0b4ac8\") " pod="openstack/rabbitmq-server-0" Nov 23 14:58:38 crc kubenswrapper[5050]: I1123 14:58:38.026178 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/5eff539e-c641-4873-aeae-450aaf0b4ac8-config-data\") pod \"rabbitmq-server-0\" (UID: \"5eff539e-c641-4873-aeae-450aaf0b4ac8\") " pod="openstack/rabbitmq-server-0" Nov 23 14:58:38 crc kubenswrapper[5050]: I1123 14:58:38.026262 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/5eff539e-c641-4873-aeae-450aaf0b4ac8-pod-info\") pod \"rabbitmq-server-0\" (UID: \"5eff539e-c641-4873-aeae-450aaf0b4ac8\") " pod="openstack/rabbitmq-server-0" Nov 23 14:58:38 crc kubenswrapper[5050]: I1123 14:58:38.026309 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/5eff539e-c641-4873-aeae-450aaf0b4ac8-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"5eff539e-c641-4873-aeae-450aaf0b4ac8\") " pod="openstack/rabbitmq-server-0" Nov 23 14:58:38 crc kubenswrapper[5050]: I1123 14:58:38.026332 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-server-0\" (UID: \"5eff539e-c641-4873-aeae-450aaf0b4ac8\") " pod="openstack/rabbitmq-server-0" Nov 23 14:58:38 crc kubenswrapper[5050]: I1123 14:58:38.026367 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/5eff539e-c641-4873-aeae-450aaf0b4ac8-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"5eff539e-c641-4873-aeae-450aaf0b4ac8\") " pod="openstack/rabbitmq-server-0" Nov 23 14:58:38 crc kubenswrapper[5050]: I1123 14:58:38.026388 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/5eff539e-c641-4873-aeae-450aaf0b4ac8-plugins-conf\") pod 
\"rabbitmq-server-0\" (UID: \"5eff539e-c641-4873-aeae-450aaf0b4ac8\") " pod="openstack/rabbitmq-server-0" Nov 23 14:58:38 crc kubenswrapper[5050]: I1123 14:58:38.026413 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/5eff539e-c641-4873-aeae-450aaf0b4ac8-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"5eff539e-c641-4873-aeae-450aaf0b4ac8\") " pod="openstack/rabbitmq-server-0" Nov 23 14:58:38 crc kubenswrapper[5050]: I1123 14:58:38.128403 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/5eff539e-c641-4873-aeae-450aaf0b4ac8-server-conf\") pod \"rabbitmq-server-0\" (UID: \"5eff539e-c641-4873-aeae-450aaf0b4ac8\") " pod="openstack/rabbitmq-server-0" Nov 23 14:58:38 crc kubenswrapper[5050]: I1123 14:58:38.128487 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/5eff539e-c641-4873-aeae-450aaf0b4ac8-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"5eff539e-c641-4873-aeae-450aaf0b4ac8\") " pod="openstack/rabbitmq-server-0" Nov 23 14:58:38 crc kubenswrapper[5050]: I1123 14:58:38.128526 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/5eff539e-c641-4873-aeae-450aaf0b4ac8-config-data\") pod \"rabbitmq-server-0\" (UID: \"5eff539e-c641-4873-aeae-450aaf0b4ac8\") " pod="openstack/rabbitmq-server-0" Nov 23 14:58:38 crc kubenswrapper[5050]: I1123 14:58:38.128557 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/5eff539e-c641-4873-aeae-450aaf0b4ac8-pod-info\") pod \"rabbitmq-server-0\" (UID: \"5eff539e-c641-4873-aeae-450aaf0b4ac8\") " pod="openstack/rabbitmq-server-0" Nov 23 14:58:38 crc kubenswrapper[5050]: I1123 14:58:38.128587 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/5eff539e-c641-4873-aeae-450aaf0b4ac8-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"5eff539e-c641-4873-aeae-450aaf0b4ac8\") " pod="openstack/rabbitmq-server-0" Nov 23 14:58:38 crc kubenswrapper[5050]: I1123 14:58:38.128609 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-server-0\" (UID: \"5eff539e-c641-4873-aeae-450aaf0b4ac8\") " pod="openstack/rabbitmq-server-0" Nov 23 14:58:38 crc kubenswrapper[5050]: I1123 14:58:38.128634 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/5eff539e-c641-4873-aeae-450aaf0b4ac8-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"5eff539e-c641-4873-aeae-450aaf0b4ac8\") " pod="openstack/rabbitmq-server-0" Nov 23 14:58:38 crc kubenswrapper[5050]: I1123 14:58:38.128656 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/5eff539e-c641-4873-aeae-450aaf0b4ac8-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"5eff539e-c641-4873-aeae-450aaf0b4ac8\") " pod="openstack/rabbitmq-server-0" Nov 23 14:58:38 crc kubenswrapper[5050]: I1123 14:58:38.128674 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/5eff539e-c641-4873-aeae-450aaf0b4ac8-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"5eff539e-c641-4873-aeae-450aaf0b4ac8\") " pod="openstack/rabbitmq-server-0" Nov 23 14:58:38 crc kubenswrapper[5050]: I1123 14:58:38.128701 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/5eff539e-c641-4873-aeae-450aaf0b4ac8-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"5eff539e-c641-4873-aeae-450aaf0b4ac8\") " pod="openstack/rabbitmq-server-0" Nov 23 14:58:38 crc kubenswrapper[5050]: I1123 14:58:38.128729 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n788x\" (UniqueName: \"kubernetes.io/projected/5eff539e-c641-4873-aeae-450aaf0b4ac8-kube-api-access-n788x\") pod \"rabbitmq-server-0\" (UID: \"5eff539e-c641-4873-aeae-450aaf0b4ac8\") " pod="openstack/rabbitmq-server-0" Nov 23 14:58:38 crc kubenswrapper[5050]: I1123 14:58:38.130393 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/5eff539e-c641-4873-aeae-450aaf0b4ac8-server-conf\") pod \"rabbitmq-server-0\" (UID: \"5eff539e-c641-4873-aeae-450aaf0b4ac8\") " pod="openstack/rabbitmq-server-0" Nov 23 14:58:38 crc kubenswrapper[5050]: I1123 14:58:38.130808 5050 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-server-0\" (UID: \"5eff539e-c641-4873-aeae-450aaf0b4ac8\") device mount path \"/mnt/openstack/pv01\"" pod="openstack/rabbitmq-server-0" Nov 23 14:58:38 crc kubenswrapper[5050]: I1123 14:58:38.131623 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/5eff539e-c641-4873-aeae-450aaf0b4ac8-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"5eff539e-c641-4873-aeae-450aaf0b4ac8\") " pod="openstack/rabbitmq-server-0" Nov 23 14:58:38 crc kubenswrapper[5050]: I1123 14:58:38.132295 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/5eff539e-c641-4873-aeae-450aaf0b4ac8-config-data\") pod \"rabbitmq-server-0\" (UID: \"5eff539e-c641-4873-aeae-450aaf0b4ac8\") " pod="openstack/rabbitmq-server-0" Nov 23 14:58:38 crc kubenswrapper[5050]: I1123 14:58:38.132886 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/5eff539e-c641-4873-aeae-450aaf0b4ac8-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"5eff539e-c641-4873-aeae-450aaf0b4ac8\") " pod="openstack/rabbitmq-server-0" Nov 23 14:58:38 crc kubenswrapper[5050]: I1123 14:58:38.134972 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/5eff539e-c641-4873-aeae-450aaf0b4ac8-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"5eff539e-c641-4873-aeae-450aaf0b4ac8\") " pod="openstack/rabbitmq-server-0" Nov 23 14:58:38 crc kubenswrapper[5050]: I1123 14:58:38.137716 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/5eff539e-c641-4873-aeae-450aaf0b4ac8-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"5eff539e-c641-4873-aeae-450aaf0b4ac8\") " pod="openstack/rabbitmq-server-0" Nov 23 14:58:38 crc 
kubenswrapper[5050]: I1123 14:58:38.138547 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/5eff539e-c641-4873-aeae-450aaf0b4ac8-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"5eff539e-c641-4873-aeae-450aaf0b4ac8\") " pod="openstack/rabbitmq-server-0" Nov 23 14:58:38 crc kubenswrapper[5050]: I1123 14:58:38.144258 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/5eff539e-c641-4873-aeae-450aaf0b4ac8-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"5eff539e-c641-4873-aeae-450aaf0b4ac8\") " pod="openstack/rabbitmq-server-0" Nov 23 14:58:38 crc kubenswrapper[5050]: I1123 14:58:38.150952 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n788x\" (UniqueName: \"kubernetes.io/projected/5eff539e-c641-4873-aeae-450aaf0b4ac8-kube-api-access-n788x\") pod \"rabbitmq-server-0\" (UID: \"5eff539e-c641-4873-aeae-450aaf0b4ac8\") " pod="openstack/rabbitmq-server-0" Nov 23 14:58:38 crc kubenswrapper[5050]: I1123 14:58:38.156831 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/5eff539e-c641-4873-aeae-450aaf0b4ac8-pod-info\") pod \"rabbitmq-server-0\" (UID: \"5eff539e-c641-4873-aeae-450aaf0b4ac8\") " pod="openstack/rabbitmq-server-0" Nov 23 14:58:38 crc kubenswrapper[5050]: I1123 14:58:38.157089 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 23 14:58:38 crc kubenswrapper[5050]: I1123 14:58:38.178857 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-server-0\" (UID: \"5eff539e-c641-4873-aeae-450aaf0b4ac8\") " pod="openstack/rabbitmq-server-0" Nov 23 14:58:38 crc kubenswrapper[5050]: I1123 14:58:38.304496 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 23 14:58:38 crc kubenswrapper[5050]: I1123 14:58:38.746548 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 23 14:58:38 crc kubenswrapper[5050]: I1123 14:58:38.989178 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"f92353db-5352-4216-ad2d-50242e58dfb7","Type":"ContainerStarted","Data":"3acf3f6a01bc622686debb83e7eda59f45ddb4da24b93ae3a1ab1bb2601f52b5"} Nov 23 14:58:39 crc kubenswrapper[5050]: I1123 14:58:39.058095 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 23 14:58:39 crc kubenswrapper[5050]: W1123 14:58:39.089395 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5eff539e_c641_4873_aeae_450aaf0b4ac8.slice/crio-dde128d8d93aaff6ac1a522f7aa4b2c60615a85aeddbadd7ab21a071bac2480a WatchSource:0}: Error finding container dde128d8d93aaff6ac1a522f7aa4b2c60615a85aeddbadd7ab21a071bac2480a: Status 404 returned error can't find the container with id dde128d8d93aaff6ac1a522f7aa4b2c60615a85aeddbadd7ab21a071bac2480a Nov 23 14:58:39 crc kubenswrapper[5050]: I1123 14:58:39.117223 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-galera-0"] Nov 23 14:58:39 crc kubenswrapper[5050]: I1123 14:58:39.130207 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-galera-0" Nov 23 14:58:39 crc kubenswrapper[5050]: I1123 14:58:39.132756 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-svc" Nov 23 14:58:39 crc kubenswrapper[5050]: I1123 14:58:39.137571 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-dockercfg-lrxnn" Nov 23 14:58:39 crc kubenswrapper[5050]: I1123 14:58:39.137896 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config-data" Nov 23 14:58:39 crc kubenswrapper[5050]: I1123 14:58:39.144694 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-scripts" Nov 23 14:58:39 crc kubenswrapper[5050]: I1123 14:58:39.146675 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Nov 23 14:58:39 crc kubenswrapper[5050]: I1123 14:58:39.162623 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"combined-ca-bundle" Nov 23 14:58:39 crc kubenswrapper[5050]: I1123 14:58:39.271296 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/0a256986-024f-45e6-9b86-b5a724ab5f6e-config-data-generated\") pod \"openstack-galera-0\" (UID: \"0a256986-024f-45e6-9b86-b5a724ab5f6e\") " pod="openstack/openstack-galera-0" Nov 23 14:58:39 crc kubenswrapper[5050]: I1123 14:58:39.271345 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"openstack-galera-0\" (UID: \"0a256986-024f-45e6-9b86-b5a724ab5f6e\") " pod="openstack/openstack-galera-0" Nov 23 14:58:39 crc kubenswrapper[5050]: I1123 14:58:39.271554 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/0a256986-024f-45e6-9b86-b5a724ab5f6e-kolla-config\") pod \"openstack-galera-0\" (UID: \"0a256986-024f-45e6-9b86-b5a724ab5f6e\") " pod="openstack/openstack-galera-0" Nov 23 14:58:39 crc kubenswrapper[5050]: I1123 14:58:39.271706 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0a256986-024f-45e6-9b86-b5a724ab5f6e-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"0a256986-024f-45e6-9b86-b5a724ab5f6e\") " pod="openstack/openstack-galera-0" Nov 23 14:58:39 crc kubenswrapper[5050]: I1123 14:58:39.271736 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/0a256986-024f-45e6-9b86-b5a724ab5f6e-config-data-default\") pod \"openstack-galera-0\" (UID: \"0a256986-024f-45e6-9b86-b5a724ab5f6e\") " pod="openstack/openstack-galera-0" Nov 23 14:58:39 crc kubenswrapper[5050]: I1123 14:58:39.271784 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pmfpm\" (UniqueName: \"kubernetes.io/projected/0a256986-024f-45e6-9b86-b5a724ab5f6e-kube-api-access-pmfpm\") pod \"openstack-galera-0\" (UID: \"0a256986-024f-45e6-9b86-b5a724ab5f6e\") " pod="openstack/openstack-galera-0" Nov 23 14:58:39 crc kubenswrapper[5050]: I1123 14:58:39.271823 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started 
for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/0a256986-024f-45e6-9b86-b5a724ab5f6e-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"0a256986-024f-45e6-9b86-b5a724ab5f6e\") " pod="openstack/openstack-galera-0" Nov 23 14:58:39 crc kubenswrapper[5050]: I1123 14:58:39.271844 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0a256986-024f-45e6-9b86-b5a724ab5f6e-operator-scripts\") pod \"openstack-galera-0\" (UID: \"0a256986-024f-45e6-9b86-b5a724ab5f6e\") " pod="openstack/openstack-galera-0" Nov 23 14:58:39 crc kubenswrapper[5050]: I1123 14:58:39.376114 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/0a256986-024f-45e6-9b86-b5a724ab5f6e-kolla-config\") pod \"openstack-galera-0\" (UID: \"0a256986-024f-45e6-9b86-b5a724ab5f6e\") " pod="openstack/openstack-galera-0" Nov 23 14:58:39 crc kubenswrapper[5050]: I1123 14:58:39.376201 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/0a256986-024f-45e6-9b86-b5a724ab5f6e-config-data-default\") pod \"openstack-galera-0\" (UID: \"0a256986-024f-45e6-9b86-b5a724ab5f6e\") " pod="openstack/openstack-galera-0" Nov 23 14:58:39 crc kubenswrapper[5050]: I1123 14:58:39.376224 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0a256986-024f-45e6-9b86-b5a724ab5f6e-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"0a256986-024f-45e6-9b86-b5a724ab5f6e\") " pod="openstack/openstack-galera-0" Nov 23 14:58:39 crc kubenswrapper[5050]: I1123 14:58:39.376263 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pmfpm\" (UniqueName: \"kubernetes.io/projected/0a256986-024f-45e6-9b86-b5a724ab5f6e-kube-api-access-pmfpm\") pod \"openstack-galera-0\" (UID: \"0a256986-024f-45e6-9b86-b5a724ab5f6e\") " pod="openstack/openstack-galera-0" Nov 23 14:58:39 crc kubenswrapper[5050]: I1123 14:58:39.376291 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0a256986-024f-45e6-9b86-b5a724ab5f6e-operator-scripts\") pod \"openstack-galera-0\" (UID: \"0a256986-024f-45e6-9b86-b5a724ab5f6e\") " pod="openstack/openstack-galera-0" Nov 23 14:58:39 crc kubenswrapper[5050]: I1123 14:58:39.376318 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/0a256986-024f-45e6-9b86-b5a724ab5f6e-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"0a256986-024f-45e6-9b86-b5a724ab5f6e\") " pod="openstack/openstack-galera-0" Nov 23 14:58:39 crc kubenswrapper[5050]: I1123 14:58:39.376363 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/0a256986-024f-45e6-9b86-b5a724ab5f6e-config-data-generated\") pod \"openstack-galera-0\" (UID: \"0a256986-024f-45e6-9b86-b5a724ab5f6e\") " pod="openstack/openstack-galera-0" Nov 23 14:58:39 crc kubenswrapper[5050]: I1123 14:58:39.376390 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"openstack-galera-0\" (UID: 
\"0a256986-024f-45e6-9b86-b5a724ab5f6e\") " pod="openstack/openstack-galera-0" Nov 23 14:58:39 crc kubenswrapper[5050]: I1123 14:58:39.376932 5050 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"openstack-galera-0\" (UID: \"0a256986-024f-45e6-9b86-b5a724ab5f6e\") device mount path \"/mnt/openstack/pv06\"" pod="openstack/openstack-galera-0" Nov 23 14:58:39 crc kubenswrapper[5050]: I1123 14:58:39.378544 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/0a256986-024f-45e6-9b86-b5a724ab5f6e-config-data-default\") pod \"openstack-galera-0\" (UID: \"0a256986-024f-45e6-9b86-b5a724ab5f6e\") " pod="openstack/openstack-galera-0" Nov 23 14:58:39 crc kubenswrapper[5050]: I1123 14:58:39.380034 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/0a256986-024f-45e6-9b86-b5a724ab5f6e-kolla-config\") pod \"openstack-galera-0\" (UID: \"0a256986-024f-45e6-9b86-b5a724ab5f6e\") " pod="openstack/openstack-galera-0" Nov 23 14:58:39 crc kubenswrapper[5050]: I1123 14:58:39.380467 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0a256986-024f-45e6-9b86-b5a724ab5f6e-operator-scripts\") pod \"openstack-galera-0\" (UID: \"0a256986-024f-45e6-9b86-b5a724ab5f6e\") " pod="openstack/openstack-galera-0" Nov 23 14:58:39 crc kubenswrapper[5050]: I1123 14:58:39.380798 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/0a256986-024f-45e6-9b86-b5a724ab5f6e-config-data-generated\") pod \"openstack-galera-0\" (UID: \"0a256986-024f-45e6-9b86-b5a724ab5f6e\") " pod="openstack/openstack-galera-0" Nov 23 14:58:39 crc kubenswrapper[5050]: I1123 14:58:39.389417 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0a256986-024f-45e6-9b86-b5a724ab5f6e-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"0a256986-024f-45e6-9b86-b5a724ab5f6e\") " pod="openstack/openstack-galera-0" Nov 23 14:58:39 crc kubenswrapper[5050]: I1123 14:58:39.390075 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/0a256986-024f-45e6-9b86-b5a724ab5f6e-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"0a256986-024f-45e6-9b86-b5a724ab5f6e\") " pod="openstack/openstack-galera-0" Nov 23 14:58:39 crc kubenswrapper[5050]: I1123 14:58:39.396912 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pmfpm\" (UniqueName: \"kubernetes.io/projected/0a256986-024f-45e6-9b86-b5a724ab5f6e-kube-api-access-pmfpm\") pod \"openstack-galera-0\" (UID: \"0a256986-024f-45e6-9b86-b5a724ab5f6e\") " pod="openstack/openstack-galera-0" Nov 23 14:58:39 crc kubenswrapper[5050]: I1123 14:58:39.405914 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"openstack-galera-0\" (UID: \"0a256986-024f-45e6-9b86-b5a724ab5f6e\") " pod="openstack/openstack-galera-0" Nov 23 14:58:39 crc kubenswrapper[5050]: I1123 14:58:39.475058 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-galera-0" Nov 23 14:58:40 crc kubenswrapper[5050]: I1123 14:58:40.007463 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"5eff539e-c641-4873-aeae-450aaf0b4ac8","Type":"ContainerStarted","Data":"dde128d8d93aaff6ac1a522f7aa4b2c60615a85aeddbadd7ab21a071bac2480a"} Nov 23 14:58:40 crc kubenswrapper[5050]: I1123 14:58:40.093416 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Nov 23 14:58:40 crc kubenswrapper[5050]: I1123 14:58:40.459744 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 23 14:58:40 crc kubenswrapper[5050]: I1123 14:58:40.461801 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Nov 23 14:58:40 crc kubenswrapper[5050]: I1123 14:58:40.466159 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-cell1-svc" Nov 23 14:58:40 crc kubenswrapper[5050]: I1123 14:58:40.466309 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-cell1-dockercfg-8fpn5" Nov 23 14:58:40 crc kubenswrapper[5050]: I1123 14:58:40.466995 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-config-data" Nov 23 14:58:40 crc kubenswrapper[5050]: I1123 14:58:40.467397 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 23 14:58:40 crc kubenswrapper[5050]: I1123 14:58:40.468156 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-scripts" Nov 23 14:58:40 crc kubenswrapper[5050]: I1123 14:58:40.604342 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/85ca597a-bf71-43bb-b57c-f840b37f196f-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"85ca597a-bf71-43bb-b57c-f840b37f196f\") " pod="openstack/openstack-cell1-galera-0" Nov 23 14:58:40 crc kubenswrapper[5050]: I1123 14:58:40.604399 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/85ca597a-bf71-43bb-b57c-f840b37f196f-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"85ca597a-bf71-43bb-b57c-f840b37f196f\") " pod="openstack/openstack-cell1-galera-0" Nov 23 14:58:40 crc kubenswrapper[5050]: I1123 14:58:40.604437 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/85ca597a-bf71-43bb-b57c-f840b37f196f-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"85ca597a-bf71-43bb-b57c-f840b37f196f\") " pod="openstack/openstack-cell1-galera-0" Nov 23 14:58:40 crc kubenswrapper[5050]: I1123 14:58:40.604472 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pzhml\" (UniqueName: \"kubernetes.io/projected/85ca597a-bf71-43bb-b57c-f840b37f196f-kube-api-access-pzhml\") pod \"openstack-cell1-galera-0\" (UID: \"85ca597a-bf71-43bb-b57c-f840b37f196f\") " pod="openstack/openstack-cell1-galera-0" Nov 23 14:58:40 crc kubenswrapper[5050]: I1123 14:58:40.604731 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: 
\"kubernetes.io/configmap/85ca597a-bf71-43bb-b57c-f840b37f196f-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"85ca597a-bf71-43bb-b57c-f840b37f196f\") " pod="openstack/openstack-cell1-galera-0" Nov 23 14:58:40 crc kubenswrapper[5050]: I1123 14:58:40.604810 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"openstack-cell1-galera-0\" (UID: \"85ca597a-bf71-43bb-b57c-f840b37f196f\") " pod="openstack/openstack-cell1-galera-0" Nov 23 14:58:40 crc kubenswrapper[5050]: I1123 14:58:40.604951 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/85ca597a-bf71-43bb-b57c-f840b37f196f-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"85ca597a-bf71-43bb-b57c-f840b37f196f\") " pod="openstack/openstack-cell1-galera-0" Nov 23 14:58:40 crc kubenswrapper[5050]: I1123 14:58:40.605033 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/85ca597a-bf71-43bb-b57c-f840b37f196f-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"85ca597a-bf71-43bb-b57c-f840b37f196f\") " pod="openstack/openstack-cell1-galera-0" Nov 23 14:58:40 crc kubenswrapper[5050]: I1123 14:58:40.706519 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/85ca597a-bf71-43bb-b57c-f840b37f196f-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"85ca597a-bf71-43bb-b57c-f840b37f196f\") " pod="openstack/openstack-cell1-galera-0" Nov 23 14:58:40 crc kubenswrapper[5050]: I1123 14:58:40.706596 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pzhml\" (UniqueName: \"kubernetes.io/projected/85ca597a-bf71-43bb-b57c-f840b37f196f-kube-api-access-pzhml\") pod \"openstack-cell1-galera-0\" (UID: \"85ca597a-bf71-43bb-b57c-f840b37f196f\") " pod="openstack/openstack-cell1-galera-0" Nov 23 14:58:40 crc kubenswrapper[5050]: I1123 14:58:40.706651 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/85ca597a-bf71-43bb-b57c-f840b37f196f-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"85ca597a-bf71-43bb-b57c-f840b37f196f\") " pod="openstack/openstack-cell1-galera-0" Nov 23 14:58:40 crc kubenswrapper[5050]: I1123 14:58:40.706684 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"openstack-cell1-galera-0\" (UID: \"85ca597a-bf71-43bb-b57c-f840b37f196f\") " pod="openstack/openstack-cell1-galera-0" Nov 23 14:58:40 crc kubenswrapper[5050]: I1123 14:58:40.706729 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/85ca597a-bf71-43bb-b57c-f840b37f196f-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"85ca597a-bf71-43bb-b57c-f840b37f196f\") " pod="openstack/openstack-cell1-galera-0" Nov 23 14:58:40 crc kubenswrapper[5050]: I1123 14:58:40.706761 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/85ca597a-bf71-43bb-b57c-f840b37f196f-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"85ca597a-bf71-43bb-b57c-f840b37f196f\") " pod="openstack/openstack-cell1-galera-0" Nov 23 14:58:40 crc kubenswrapper[5050]: I1123 14:58:40.706811 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/85ca597a-bf71-43bb-b57c-f840b37f196f-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"85ca597a-bf71-43bb-b57c-f840b37f196f\") " pod="openstack/openstack-cell1-galera-0" Nov 23 14:58:40 crc kubenswrapper[5050]: I1123 14:58:40.706830 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/85ca597a-bf71-43bb-b57c-f840b37f196f-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"85ca597a-bf71-43bb-b57c-f840b37f196f\") " pod="openstack/openstack-cell1-galera-0" Nov 23 14:58:40 crc kubenswrapper[5050]: I1123 14:58:40.707423 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/85ca597a-bf71-43bb-b57c-f840b37f196f-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"85ca597a-bf71-43bb-b57c-f840b37f196f\") " pod="openstack/openstack-cell1-galera-0" Nov 23 14:58:40 crc kubenswrapper[5050]: I1123 14:58:40.709179 5050 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"openstack-cell1-galera-0\" (UID: \"85ca597a-bf71-43bb-b57c-f840b37f196f\") device mount path \"/mnt/openstack/pv07\"" pod="openstack/openstack-cell1-galera-0" Nov 23 14:58:40 crc kubenswrapper[5050]: I1123 14:58:40.710355 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/85ca597a-bf71-43bb-b57c-f840b37f196f-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"85ca597a-bf71-43bb-b57c-f840b37f196f\") " pod="openstack/openstack-cell1-galera-0" Nov 23 14:58:40 crc kubenswrapper[5050]: I1123 14:58:40.710747 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/85ca597a-bf71-43bb-b57c-f840b37f196f-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"85ca597a-bf71-43bb-b57c-f840b37f196f\") " pod="openstack/openstack-cell1-galera-0" Nov 23 14:58:40 crc kubenswrapper[5050]: I1123 14:58:40.712545 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/85ca597a-bf71-43bb-b57c-f840b37f196f-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"85ca597a-bf71-43bb-b57c-f840b37f196f\") " pod="openstack/openstack-cell1-galera-0" Nov 23 14:58:40 crc kubenswrapper[5050]: I1123 14:58:40.718749 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/85ca597a-bf71-43bb-b57c-f840b37f196f-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"85ca597a-bf71-43bb-b57c-f840b37f196f\") " pod="openstack/openstack-cell1-galera-0" Nov 23 14:58:40 crc kubenswrapper[5050]: I1123 14:58:40.719055 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/85ca597a-bf71-43bb-b57c-f840b37f196f-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: 
\"85ca597a-bf71-43bb-b57c-f840b37f196f\") " pod="openstack/openstack-cell1-galera-0" Nov 23 14:58:40 crc kubenswrapper[5050]: I1123 14:58:40.737665 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"openstack-cell1-galera-0\" (UID: \"85ca597a-bf71-43bb-b57c-f840b37f196f\") " pod="openstack/openstack-cell1-galera-0" Nov 23 14:58:40 crc kubenswrapper[5050]: I1123 14:58:40.749090 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pzhml\" (UniqueName: \"kubernetes.io/projected/85ca597a-bf71-43bb-b57c-f840b37f196f-kube-api-access-pzhml\") pod \"openstack-cell1-galera-0\" (UID: \"85ca597a-bf71-43bb-b57c-f840b37f196f\") " pod="openstack/openstack-cell1-galera-0" Nov 23 14:58:40 crc kubenswrapper[5050]: I1123 14:58:40.787616 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Nov 23 14:58:41 crc kubenswrapper[5050]: I1123 14:58:41.021025 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"0a256986-024f-45e6-9b86-b5a724ab5f6e","Type":"ContainerStarted","Data":"cc769eb507d83ac2fbe0b6d010133c9cf92815a976f80934d722bc938295d965"} Nov 23 14:58:41 crc kubenswrapper[5050]: I1123 14:58:41.080246 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/memcached-0"] Nov 23 14:58:41 crc kubenswrapper[5050]: I1123 14:58:41.085629 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Nov 23 14:58:41 crc kubenswrapper[5050]: I1123 14:58:41.106776 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"memcached-memcached-dockercfg-hj2mw" Nov 23 14:58:41 crc kubenswrapper[5050]: I1123 14:58:41.107494 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"memcached-config-data" Nov 23 14:58:41 crc kubenswrapper[5050]: I1123 14:58:41.111572 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-memcached-svc" Nov 23 14:58:41 crc kubenswrapper[5050]: I1123 14:58:41.120331 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Nov 23 14:58:41 crc kubenswrapper[5050]: I1123 14:58:41.214375 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d1ce97bd-1ff7-400e-a741-7e757fbd7e17-config-data\") pod \"memcached-0\" (UID: \"d1ce97bd-1ff7-400e-a741-7e757fbd7e17\") " pod="openstack/memcached-0" Nov 23 14:58:41 crc kubenswrapper[5050]: I1123 14:58:41.214423 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d1ce97bd-1ff7-400e-a741-7e757fbd7e17-combined-ca-bundle\") pod \"memcached-0\" (UID: \"d1ce97bd-1ff7-400e-a741-7e757fbd7e17\") " pod="openstack/memcached-0" Nov 23 14:58:41 crc kubenswrapper[5050]: I1123 14:58:41.214483 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/d1ce97bd-1ff7-400e-a741-7e757fbd7e17-memcached-tls-certs\") pod \"memcached-0\" (UID: \"d1ce97bd-1ff7-400e-a741-7e757fbd7e17\") " pod="openstack/memcached-0" Nov 23 14:58:41 crc kubenswrapper[5050]: I1123 14:58:41.214507 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"kube-api-access-rg5xn\" (UniqueName: \"kubernetes.io/projected/d1ce97bd-1ff7-400e-a741-7e757fbd7e17-kube-api-access-rg5xn\") pod \"memcached-0\" (UID: \"d1ce97bd-1ff7-400e-a741-7e757fbd7e17\") " pod="openstack/memcached-0" Nov 23 14:58:41 crc kubenswrapper[5050]: I1123 14:58:41.214571 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/d1ce97bd-1ff7-400e-a741-7e757fbd7e17-kolla-config\") pod \"memcached-0\" (UID: \"d1ce97bd-1ff7-400e-a741-7e757fbd7e17\") " pod="openstack/memcached-0" Nov 23 14:58:41 crc kubenswrapper[5050]: I1123 14:58:41.318871 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/d1ce97bd-1ff7-400e-a741-7e757fbd7e17-memcached-tls-certs\") pod \"memcached-0\" (UID: \"d1ce97bd-1ff7-400e-a741-7e757fbd7e17\") " pod="openstack/memcached-0" Nov 23 14:58:41 crc kubenswrapper[5050]: I1123 14:58:41.319181 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rg5xn\" (UniqueName: \"kubernetes.io/projected/d1ce97bd-1ff7-400e-a741-7e757fbd7e17-kube-api-access-rg5xn\") pod \"memcached-0\" (UID: \"d1ce97bd-1ff7-400e-a741-7e757fbd7e17\") " pod="openstack/memcached-0" Nov 23 14:58:41 crc kubenswrapper[5050]: I1123 14:58:41.319254 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/d1ce97bd-1ff7-400e-a741-7e757fbd7e17-kolla-config\") pod \"memcached-0\" (UID: \"d1ce97bd-1ff7-400e-a741-7e757fbd7e17\") " pod="openstack/memcached-0" Nov 23 14:58:41 crc kubenswrapper[5050]: I1123 14:58:41.319311 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d1ce97bd-1ff7-400e-a741-7e757fbd7e17-config-data\") pod \"memcached-0\" (UID: \"d1ce97bd-1ff7-400e-a741-7e757fbd7e17\") " pod="openstack/memcached-0" Nov 23 14:58:41 crc kubenswrapper[5050]: I1123 14:58:41.319330 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d1ce97bd-1ff7-400e-a741-7e757fbd7e17-combined-ca-bundle\") pod \"memcached-0\" (UID: \"d1ce97bd-1ff7-400e-a741-7e757fbd7e17\") " pod="openstack/memcached-0" Nov 23 14:58:41 crc kubenswrapper[5050]: I1123 14:58:41.322261 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/d1ce97bd-1ff7-400e-a741-7e757fbd7e17-kolla-config\") pod \"memcached-0\" (UID: \"d1ce97bd-1ff7-400e-a741-7e757fbd7e17\") " pod="openstack/memcached-0" Nov 23 14:58:41 crc kubenswrapper[5050]: I1123 14:58:41.322854 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d1ce97bd-1ff7-400e-a741-7e757fbd7e17-config-data\") pod \"memcached-0\" (UID: \"d1ce97bd-1ff7-400e-a741-7e757fbd7e17\") " pod="openstack/memcached-0" Nov 23 14:58:41 crc kubenswrapper[5050]: I1123 14:58:41.329737 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d1ce97bd-1ff7-400e-a741-7e757fbd7e17-combined-ca-bundle\") pod \"memcached-0\" (UID: \"d1ce97bd-1ff7-400e-a741-7e757fbd7e17\") " pod="openstack/memcached-0" Nov 23 14:58:41 crc kubenswrapper[5050]: I1123 14:58:41.339037 5050 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/d1ce97bd-1ff7-400e-a741-7e757fbd7e17-memcached-tls-certs\") pod \"memcached-0\" (UID: \"d1ce97bd-1ff7-400e-a741-7e757fbd7e17\") " pod="openstack/memcached-0" Nov 23 14:58:41 crc kubenswrapper[5050]: I1123 14:58:41.385021 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rg5xn\" (UniqueName: \"kubernetes.io/projected/d1ce97bd-1ff7-400e-a741-7e757fbd7e17-kube-api-access-rg5xn\") pod \"memcached-0\" (UID: \"d1ce97bd-1ff7-400e-a741-7e757fbd7e17\") " pod="openstack/memcached-0" Nov 23 14:58:41 crc kubenswrapper[5050]: I1123 14:58:41.395463 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 23 14:58:41 crc kubenswrapper[5050]: I1123 14:58:41.426893 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Nov 23 14:58:41 crc kubenswrapper[5050]: I1123 14:58:41.974183 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Nov 23 14:58:42 crc kubenswrapper[5050]: I1123 14:58:42.046008 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"d1ce97bd-1ff7-400e-a741-7e757fbd7e17","Type":"ContainerStarted","Data":"bdf4f973d1ec653e97e4cd376f6610129add9f44484993c9530763194b55afac"} Nov 23 14:58:42 crc kubenswrapper[5050]: I1123 14:58:42.050264 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"85ca597a-bf71-43bb-b57c-f840b37f196f","Type":"ContainerStarted","Data":"4b00faf0a87aa75a385c4aba5cd2b739db65e2441c80af82db041006c53e48cf"} Nov 23 14:58:42 crc kubenswrapper[5050]: I1123 14:58:42.674952 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Nov 23 14:58:42 crc kubenswrapper[5050]: I1123 14:58:42.676144 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 23 14:58:42 crc kubenswrapper[5050]: I1123 14:58:42.681542 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-ceilometer-dockercfg-bmhjg" Nov 23 14:58:42 crc kubenswrapper[5050]: I1123 14:58:42.684609 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 23 14:58:42 crc kubenswrapper[5050]: I1123 14:58:42.769240 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d8dj5\" (UniqueName: \"kubernetes.io/projected/3782bc88-94a6-4434-bd20-a48caad31d31-kube-api-access-d8dj5\") pod \"kube-state-metrics-0\" (UID: \"3782bc88-94a6-4434-bd20-a48caad31d31\") " pod="openstack/kube-state-metrics-0" Nov 23 14:58:42 crc kubenswrapper[5050]: I1123 14:58:42.873356 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d8dj5\" (UniqueName: \"kubernetes.io/projected/3782bc88-94a6-4434-bd20-a48caad31d31-kube-api-access-d8dj5\") pod \"kube-state-metrics-0\" (UID: \"3782bc88-94a6-4434-bd20-a48caad31d31\") " pod="openstack/kube-state-metrics-0" Nov 23 14:58:42 crc kubenswrapper[5050]: I1123 14:58:42.901828 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d8dj5\" (UniqueName: \"kubernetes.io/projected/3782bc88-94a6-4434-bd20-a48caad31d31-kube-api-access-d8dj5\") pod \"kube-state-metrics-0\" (UID: \"3782bc88-94a6-4434-bd20-a48caad31d31\") " pod="openstack/kube-state-metrics-0" Nov 23 14:58:42 crc kubenswrapper[5050]: I1123 14:58:42.999276 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 23 14:58:43 crc kubenswrapper[5050]: I1123 14:58:43.492890 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 23 14:58:44 crc kubenswrapper[5050]: I1123 14:58:44.081820 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"3782bc88-94a6-4434-bd20-a48caad31d31","Type":"ContainerStarted","Data":"bf1e4171575efaa26d2cb4a06de28504e1cfb9153b7ac0e02def9b607bb0f494"} Nov 23 14:58:46 crc kubenswrapper[5050]: I1123 14:58:46.517169 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-tbww8"] Nov 23 14:58:46 crc kubenswrapper[5050]: I1123 14:58:46.519053 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-tbww8" Nov 23 14:58:46 crc kubenswrapper[5050]: I1123 14:58:46.522096 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-scripts" Nov 23 14:58:46 crc kubenswrapper[5050]: I1123 14:58:46.522212 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncontroller-ovncontroller-dockercfg-m55fs" Nov 23 14:58:46 crc kubenswrapper[5050]: I1123 14:58:46.522381 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovncontroller-ovndbs" Nov 23 14:58:46 crc kubenswrapper[5050]: I1123 14:58:46.524798 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-tbww8"] Nov 23 14:58:46 crc kubenswrapper[5050]: I1123 14:58:46.530807 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-ovs-n9v8v"] Nov 23 14:58:46 crc kubenswrapper[5050]: I1123 14:58:46.535809 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-n9v8v"] Nov 23 14:58:46 crc kubenswrapper[5050]: I1123 14:58:46.536111 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-n9v8v" Nov 23 14:58:46 crc kubenswrapper[5050]: I1123 14:58:46.598098 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/33af3c2e-dea7-4448-8b28-b579d77490b9-ovn-controller-tls-certs\") pod \"ovn-controller-tbww8\" (UID: \"33af3c2e-dea7-4448-8b28-b579d77490b9\") " pod="openstack/ovn-controller-tbww8" Nov 23 14:58:46 crc kubenswrapper[5050]: I1123 14:58:46.598184 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/33af3c2e-dea7-4448-8b28-b579d77490b9-combined-ca-bundle\") pod \"ovn-controller-tbww8\" (UID: \"33af3c2e-dea7-4448-8b28-b579d77490b9\") " pod="openstack/ovn-controller-tbww8" Nov 23 14:58:46 crc kubenswrapper[5050]: I1123 14:58:46.598233 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/33af3c2e-dea7-4448-8b28-b579d77490b9-var-run-ovn\") pod \"ovn-controller-tbww8\" (UID: \"33af3c2e-dea7-4448-8b28-b579d77490b9\") " pod="openstack/ovn-controller-tbww8" Nov 23 14:58:46 crc kubenswrapper[5050]: I1123 14:58:46.598259 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/33af3c2e-dea7-4448-8b28-b579d77490b9-var-log-ovn\") pod \"ovn-controller-tbww8\" (UID: \"33af3c2e-dea7-4448-8b28-b579d77490b9\") " pod="openstack/ovn-controller-tbww8" Nov 23 14:58:46 crc kubenswrapper[5050]: I1123 14:58:46.598354 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/33af3c2e-dea7-4448-8b28-b579d77490b9-var-run\") pod \"ovn-controller-tbww8\" (UID: \"33af3c2e-dea7-4448-8b28-b579d77490b9\") " pod="openstack/ovn-controller-tbww8" Nov 23 14:58:46 crc kubenswrapper[5050]: I1123 14:58:46.598391 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/33af3c2e-dea7-4448-8b28-b579d77490b9-scripts\") pod \"ovn-controller-tbww8\" (UID: \"33af3c2e-dea7-4448-8b28-b579d77490b9\") " 
pod="openstack/ovn-controller-tbww8" Nov 23 14:58:46 crc kubenswrapper[5050]: I1123 14:58:46.598428 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2vh8k\" (UniqueName: \"kubernetes.io/projected/33af3c2e-dea7-4448-8b28-b579d77490b9-kube-api-access-2vh8k\") pod \"ovn-controller-tbww8\" (UID: \"33af3c2e-dea7-4448-8b28-b579d77490b9\") " pod="openstack/ovn-controller-tbww8" Nov 23 14:58:46 crc kubenswrapper[5050]: I1123 14:58:46.701892 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/33af3c2e-dea7-4448-8b28-b579d77490b9-combined-ca-bundle\") pod \"ovn-controller-tbww8\" (UID: \"33af3c2e-dea7-4448-8b28-b579d77490b9\") " pod="openstack/ovn-controller-tbww8" Nov 23 14:58:46 crc kubenswrapper[5050]: I1123 14:58:46.702316 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/33af3c2e-dea7-4448-8b28-b579d77490b9-var-run-ovn\") pod \"ovn-controller-tbww8\" (UID: \"33af3c2e-dea7-4448-8b28-b579d77490b9\") " pod="openstack/ovn-controller-tbww8" Nov 23 14:58:46 crc kubenswrapper[5050]: I1123 14:58:46.702340 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/33af3c2e-dea7-4448-8b28-b579d77490b9-var-log-ovn\") pod \"ovn-controller-tbww8\" (UID: \"33af3c2e-dea7-4448-8b28-b579d77490b9\") " pod="openstack/ovn-controller-tbww8" Nov 23 14:58:46 crc kubenswrapper[5050]: I1123 14:58:46.702375 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/c21366ac-a7da-4cac-8332-753659210595-etc-ovs\") pod \"ovn-controller-ovs-n9v8v\" (UID: \"c21366ac-a7da-4cac-8332-753659210595\") " pod="openstack/ovn-controller-ovs-n9v8v" Nov 23 14:58:46 crc kubenswrapper[5050]: I1123 14:58:46.702486 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/c21366ac-a7da-4cac-8332-753659210595-var-log\") pod \"ovn-controller-ovs-n9v8v\" (UID: \"c21366ac-a7da-4cac-8332-753659210595\") " pod="openstack/ovn-controller-ovs-n9v8v" Nov 23 14:58:46 crc kubenswrapper[5050]: I1123 14:58:46.702602 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/c21366ac-a7da-4cac-8332-753659210595-var-run\") pod \"ovn-controller-ovs-n9v8v\" (UID: \"c21366ac-a7da-4cac-8332-753659210595\") " pod="openstack/ovn-controller-ovs-n9v8v" Nov 23 14:58:46 crc kubenswrapper[5050]: I1123 14:58:46.702852 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/33af3c2e-dea7-4448-8b28-b579d77490b9-var-run\") pod \"ovn-controller-tbww8\" (UID: \"33af3c2e-dea7-4448-8b28-b579d77490b9\") " pod="openstack/ovn-controller-tbww8" Nov 23 14:58:46 crc kubenswrapper[5050]: I1123 14:58:46.702897 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b7cr7\" (UniqueName: \"kubernetes.io/projected/c21366ac-a7da-4cac-8332-753659210595-kube-api-access-b7cr7\") pod \"ovn-controller-ovs-n9v8v\" (UID: \"c21366ac-a7da-4cac-8332-753659210595\") " pod="openstack/ovn-controller-ovs-n9v8v" Nov 23 14:58:46 crc kubenswrapper[5050]: I1123 14:58:46.702933 5050 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/33af3c2e-dea7-4448-8b28-b579d77490b9-scripts\") pod \"ovn-controller-tbww8\" (UID: \"33af3c2e-dea7-4448-8b28-b579d77490b9\") " pod="openstack/ovn-controller-tbww8" Nov 23 14:58:46 crc kubenswrapper[5050]: I1123 14:58:46.703069 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2vh8k\" (UniqueName: \"kubernetes.io/projected/33af3c2e-dea7-4448-8b28-b579d77490b9-kube-api-access-2vh8k\") pod \"ovn-controller-tbww8\" (UID: \"33af3c2e-dea7-4448-8b28-b579d77490b9\") " pod="openstack/ovn-controller-tbww8" Nov 23 14:58:46 crc kubenswrapper[5050]: I1123 14:58:46.703147 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/33af3c2e-dea7-4448-8b28-b579d77490b9-var-run-ovn\") pod \"ovn-controller-tbww8\" (UID: \"33af3c2e-dea7-4448-8b28-b579d77490b9\") " pod="openstack/ovn-controller-tbww8" Nov 23 14:58:46 crc kubenswrapper[5050]: I1123 14:58:46.703188 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/c21366ac-a7da-4cac-8332-753659210595-var-lib\") pod \"ovn-controller-ovs-n9v8v\" (UID: \"c21366ac-a7da-4cac-8332-753659210595\") " pod="openstack/ovn-controller-ovs-n9v8v" Nov 23 14:58:46 crc kubenswrapper[5050]: I1123 14:58:46.703232 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/33af3c2e-dea7-4448-8b28-b579d77490b9-var-log-ovn\") pod \"ovn-controller-tbww8\" (UID: \"33af3c2e-dea7-4448-8b28-b579d77490b9\") " pod="openstack/ovn-controller-tbww8" Nov 23 14:58:46 crc kubenswrapper[5050]: I1123 14:58:46.703285 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c21366ac-a7da-4cac-8332-753659210595-scripts\") pod \"ovn-controller-ovs-n9v8v\" (UID: \"c21366ac-a7da-4cac-8332-753659210595\") " pod="openstack/ovn-controller-ovs-n9v8v" Nov 23 14:58:46 crc kubenswrapper[5050]: I1123 14:58:46.703607 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/33af3c2e-dea7-4448-8b28-b579d77490b9-ovn-controller-tls-certs\") pod \"ovn-controller-tbww8\" (UID: \"33af3c2e-dea7-4448-8b28-b579d77490b9\") " pod="openstack/ovn-controller-tbww8" Nov 23 14:58:46 crc kubenswrapper[5050]: I1123 14:58:46.703918 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/33af3c2e-dea7-4448-8b28-b579d77490b9-var-run\") pod \"ovn-controller-tbww8\" (UID: \"33af3c2e-dea7-4448-8b28-b579d77490b9\") " pod="openstack/ovn-controller-tbww8" Nov 23 14:58:46 crc kubenswrapper[5050]: I1123 14:58:46.706368 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/33af3c2e-dea7-4448-8b28-b579d77490b9-scripts\") pod \"ovn-controller-tbww8\" (UID: \"33af3c2e-dea7-4448-8b28-b579d77490b9\") " pod="openstack/ovn-controller-tbww8" Nov 23 14:58:46 crc kubenswrapper[5050]: I1123 14:58:46.709605 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/33af3c2e-dea7-4448-8b28-b579d77490b9-combined-ca-bundle\") pod \"ovn-controller-tbww8\" (UID: 
\"33af3c2e-dea7-4448-8b28-b579d77490b9\") " pod="openstack/ovn-controller-tbww8" Nov 23 14:58:46 crc kubenswrapper[5050]: I1123 14:58:46.710304 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/33af3c2e-dea7-4448-8b28-b579d77490b9-ovn-controller-tls-certs\") pod \"ovn-controller-tbww8\" (UID: \"33af3c2e-dea7-4448-8b28-b579d77490b9\") " pod="openstack/ovn-controller-tbww8" Nov 23 14:58:46 crc kubenswrapper[5050]: I1123 14:58:46.723949 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2vh8k\" (UniqueName: \"kubernetes.io/projected/33af3c2e-dea7-4448-8b28-b579d77490b9-kube-api-access-2vh8k\") pod \"ovn-controller-tbww8\" (UID: \"33af3c2e-dea7-4448-8b28-b579d77490b9\") " pod="openstack/ovn-controller-tbww8" Nov 23 14:58:46 crc kubenswrapper[5050]: I1123 14:58:46.805104 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/c21366ac-a7da-4cac-8332-753659210595-etc-ovs\") pod \"ovn-controller-ovs-n9v8v\" (UID: \"c21366ac-a7da-4cac-8332-753659210595\") " pod="openstack/ovn-controller-ovs-n9v8v" Nov 23 14:58:46 crc kubenswrapper[5050]: I1123 14:58:46.805173 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/c21366ac-a7da-4cac-8332-753659210595-var-log\") pod \"ovn-controller-ovs-n9v8v\" (UID: \"c21366ac-a7da-4cac-8332-753659210595\") " pod="openstack/ovn-controller-ovs-n9v8v" Nov 23 14:58:46 crc kubenswrapper[5050]: I1123 14:58:46.805228 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/c21366ac-a7da-4cac-8332-753659210595-var-run\") pod \"ovn-controller-ovs-n9v8v\" (UID: \"c21366ac-a7da-4cac-8332-753659210595\") " pod="openstack/ovn-controller-ovs-n9v8v" Nov 23 14:58:46 crc kubenswrapper[5050]: I1123 14:58:46.805269 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b7cr7\" (UniqueName: \"kubernetes.io/projected/c21366ac-a7da-4cac-8332-753659210595-kube-api-access-b7cr7\") pod \"ovn-controller-ovs-n9v8v\" (UID: \"c21366ac-a7da-4cac-8332-753659210595\") " pod="openstack/ovn-controller-ovs-n9v8v" Nov 23 14:58:46 crc kubenswrapper[5050]: I1123 14:58:46.805324 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/c21366ac-a7da-4cac-8332-753659210595-var-lib\") pod \"ovn-controller-ovs-n9v8v\" (UID: \"c21366ac-a7da-4cac-8332-753659210595\") " pod="openstack/ovn-controller-ovs-n9v8v" Nov 23 14:58:46 crc kubenswrapper[5050]: I1123 14:58:46.805354 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c21366ac-a7da-4cac-8332-753659210595-scripts\") pod \"ovn-controller-ovs-n9v8v\" (UID: \"c21366ac-a7da-4cac-8332-753659210595\") " pod="openstack/ovn-controller-ovs-n9v8v" Nov 23 14:58:46 crc kubenswrapper[5050]: I1123 14:58:46.805579 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/c21366ac-a7da-4cac-8332-753659210595-etc-ovs\") pod \"ovn-controller-ovs-n9v8v\" (UID: \"c21366ac-a7da-4cac-8332-753659210595\") " pod="openstack/ovn-controller-ovs-n9v8v" Nov 23 14:58:46 crc kubenswrapper[5050]: I1123 14:58:46.805669 5050 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/c21366ac-a7da-4cac-8332-753659210595-var-run\") pod \"ovn-controller-ovs-n9v8v\" (UID: \"c21366ac-a7da-4cac-8332-753659210595\") " pod="openstack/ovn-controller-ovs-n9v8v" Nov 23 14:58:46 crc kubenswrapper[5050]: I1123 14:58:46.805766 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/c21366ac-a7da-4cac-8332-753659210595-var-log\") pod \"ovn-controller-ovs-n9v8v\" (UID: \"c21366ac-a7da-4cac-8332-753659210595\") " pod="openstack/ovn-controller-ovs-n9v8v" Nov 23 14:58:46 crc kubenswrapper[5050]: I1123 14:58:46.806231 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/c21366ac-a7da-4cac-8332-753659210595-var-lib\") pod \"ovn-controller-ovs-n9v8v\" (UID: \"c21366ac-a7da-4cac-8332-753659210595\") " pod="openstack/ovn-controller-ovs-n9v8v" Nov 23 14:58:46 crc kubenswrapper[5050]: I1123 14:58:46.807590 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c21366ac-a7da-4cac-8332-753659210595-scripts\") pod \"ovn-controller-ovs-n9v8v\" (UID: \"c21366ac-a7da-4cac-8332-753659210595\") " pod="openstack/ovn-controller-ovs-n9v8v" Nov 23 14:58:46 crc kubenswrapper[5050]: I1123 14:58:46.824257 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b7cr7\" (UniqueName: \"kubernetes.io/projected/c21366ac-a7da-4cac-8332-753659210595-kube-api-access-b7cr7\") pod \"ovn-controller-ovs-n9v8v\" (UID: \"c21366ac-a7da-4cac-8332-753659210595\") " pod="openstack/ovn-controller-ovs-n9v8v" Nov 23 14:58:46 crc kubenswrapper[5050]: I1123 14:58:46.850745 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-tbww8" Nov 23 14:58:46 crc kubenswrapper[5050]: I1123 14:58:46.861526 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-n9v8v" Nov 23 14:58:47 crc kubenswrapper[5050]: I1123 14:58:47.963657 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 23 14:58:47 crc kubenswrapper[5050]: I1123 14:58:47.966181 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-sb-0" Nov 23 14:58:47 crc kubenswrapper[5050]: I1123 14:58:47.972248 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-sb-ovndbs" Nov 23 14:58:47 crc kubenswrapper[5050]: I1123 14:58:47.972598 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovn-metrics" Nov 23 14:58:47 crc kubenswrapper[5050]: I1123 14:58:47.972749 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-config" Nov 23 14:58:47 crc kubenswrapper[5050]: I1123 14:58:47.976785 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-scripts" Nov 23 14:58:47 crc kubenswrapper[5050]: I1123 14:58:47.976859 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-sb-dockercfg-hsvnv" Nov 23 14:58:48 crc kubenswrapper[5050]: I1123 14:58:48.000295 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 23 14:58:48 crc kubenswrapper[5050]: I1123 14:58:48.147550 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/f81d4e06-4245-4355-89d6-ddc438c06f29-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"f81d4e06-4245-4355-89d6-ddc438c06f29\") " pod="openstack/ovsdbserver-sb-0" Nov 23 14:58:48 crc kubenswrapper[5050]: I1123 14:58:48.147620 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"ovsdbserver-sb-0\" (UID: \"f81d4e06-4245-4355-89d6-ddc438c06f29\") " pod="openstack/ovsdbserver-sb-0" Nov 23 14:58:48 crc kubenswrapper[5050]: I1123 14:58:48.147806 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/f81d4e06-4245-4355-89d6-ddc438c06f29-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"f81d4e06-4245-4355-89d6-ddc438c06f29\") " pod="openstack/ovsdbserver-sb-0" Nov 23 14:58:48 crc kubenswrapper[5050]: I1123 14:58:48.147849 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6n2xs\" (UniqueName: \"kubernetes.io/projected/f81d4e06-4245-4355-89d6-ddc438c06f29-kube-api-access-6n2xs\") pod \"ovsdbserver-sb-0\" (UID: \"f81d4e06-4245-4355-89d6-ddc438c06f29\") " pod="openstack/ovsdbserver-sb-0" Nov 23 14:58:48 crc kubenswrapper[5050]: I1123 14:58:48.147879 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f81d4e06-4245-4355-89d6-ddc438c06f29-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"f81d4e06-4245-4355-89d6-ddc438c06f29\") " pod="openstack/ovsdbserver-sb-0" Nov 23 14:58:48 crc kubenswrapper[5050]: I1123 14:58:48.147919 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f81d4e06-4245-4355-89d6-ddc438c06f29-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"f81d4e06-4245-4355-89d6-ddc438c06f29\") " pod="openstack/ovsdbserver-sb-0" Nov 23 14:58:48 crc kubenswrapper[5050]: I1123 14:58:48.147996 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/f81d4e06-4245-4355-89d6-ddc438c06f29-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"f81d4e06-4245-4355-89d6-ddc438c06f29\") " pod="openstack/ovsdbserver-sb-0" Nov 23 14:58:48 crc kubenswrapper[5050]: I1123 14:58:48.148035 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f81d4e06-4245-4355-89d6-ddc438c06f29-config\") pod \"ovsdbserver-sb-0\" (UID: \"f81d4e06-4245-4355-89d6-ddc438c06f29\") " pod="openstack/ovsdbserver-sb-0" Nov 23 14:58:48 crc kubenswrapper[5050]: I1123 14:58:48.249470 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/f81d4e06-4245-4355-89d6-ddc438c06f29-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"f81d4e06-4245-4355-89d6-ddc438c06f29\") " pod="openstack/ovsdbserver-sb-0" Nov 23 14:58:48 crc kubenswrapper[5050]: I1123 14:58:48.249535 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"ovsdbserver-sb-0\" (UID: \"f81d4e06-4245-4355-89d6-ddc438c06f29\") " pod="openstack/ovsdbserver-sb-0" Nov 23 14:58:48 crc kubenswrapper[5050]: I1123 14:58:48.249683 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/f81d4e06-4245-4355-89d6-ddc438c06f29-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"f81d4e06-4245-4355-89d6-ddc438c06f29\") " pod="openstack/ovsdbserver-sb-0" Nov 23 14:58:48 crc kubenswrapper[5050]: I1123 14:58:48.249720 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6n2xs\" (UniqueName: \"kubernetes.io/projected/f81d4e06-4245-4355-89d6-ddc438c06f29-kube-api-access-6n2xs\") pod \"ovsdbserver-sb-0\" (UID: \"f81d4e06-4245-4355-89d6-ddc438c06f29\") " pod="openstack/ovsdbserver-sb-0" Nov 23 14:58:48 crc kubenswrapper[5050]: I1123 14:58:48.249748 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f81d4e06-4245-4355-89d6-ddc438c06f29-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"f81d4e06-4245-4355-89d6-ddc438c06f29\") " pod="openstack/ovsdbserver-sb-0" Nov 23 14:58:48 crc kubenswrapper[5050]: I1123 14:58:48.249781 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f81d4e06-4245-4355-89d6-ddc438c06f29-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"f81d4e06-4245-4355-89d6-ddc438c06f29\") " pod="openstack/ovsdbserver-sb-0" Nov 23 14:58:48 crc kubenswrapper[5050]: I1123 14:58:48.249841 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/f81d4e06-4245-4355-89d6-ddc438c06f29-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"f81d4e06-4245-4355-89d6-ddc438c06f29\") " pod="openstack/ovsdbserver-sb-0" Nov 23 14:58:48 crc kubenswrapper[5050]: I1123 14:58:48.249865 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f81d4e06-4245-4355-89d6-ddc438c06f29-config\") pod \"ovsdbserver-sb-0\" (UID: \"f81d4e06-4245-4355-89d6-ddc438c06f29\") " pod="openstack/ovsdbserver-sb-0" Nov 23 14:58:48 crc 
kubenswrapper[5050]: I1123 14:58:48.250119 5050 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"ovsdbserver-sb-0\" (UID: \"f81d4e06-4245-4355-89d6-ddc438c06f29\") device mount path \"/mnt/openstack/pv02\"" pod="openstack/ovsdbserver-sb-0" Nov 23 14:58:48 crc kubenswrapper[5050]: I1123 14:58:48.250369 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/f81d4e06-4245-4355-89d6-ddc438c06f29-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"f81d4e06-4245-4355-89d6-ddc438c06f29\") " pod="openstack/ovsdbserver-sb-0" Nov 23 14:58:48 crc kubenswrapper[5050]: I1123 14:58:48.250777 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f81d4e06-4245-4355-89d6-ddc438c06f29-config\") pod \"ovsdbserver-sb-0\" (UID: \"f81d4e06-4245-4355-89d6-ddc438c06f29\") " pod="openstack/ovsdbserver-sb-0" Nov 23 14:58:48 crc kubenswrapper[5050]: I1123 14:58:48.251571 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f81d4e06-4245-4355-89d6-ddc438c06f29-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"f81d4e06-4245-4355-89d6-ddc438c06f29\") " pod="openstack/ovsdbserver-sb-0" Nov 23 14:58:48 crc kubenswrapper[5050]: I1123 14:58:48.256279 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/f81d4e06-4245-4355-89d6-ddc438c06f29-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"f81d4e06-4245-4355-89d6-ddc438c06f29\") " pod="openstack/ovsdbserver-sb-0" Nov 23 14:58:48 crc kubenswrapper[5050]: I1123 14:58:48.257576 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f81d4e06-4245-4355-89d6-ddc438c06f29-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"f81d4e06-4245-4355-89d6-ddc438c06f29\") " pod="openstack/ovsdbserver-sb-0" Nov 23 14:58:48 crc kubenswrapper[5050]: I1123 14:58:48.267845 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/f81d4e06-4245-4355-89d6-ddc438c06f29-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"f81d4e06-4245-4355-89d6-ddc438c06f29\") " pod="openstack/ovsdbserver-sb-0" Nov 23 14:58:48 crc kubenswrapper[5050]: I1123 14:58:48.274863 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6n2xs\" (UniqueName: \"kubernetes.io/projected/f81d4e06-4245-4355-89d6-ddc438c06f29-kube-api-access-6n2xs\") pod \"ovsdbserver-sb-0\" (UID: \"f81d4e06-4245-4355-89d6-ddc438c06f29\") " pod="openstack/ovsdbserver-sb-0" Nov 23 14:58:48 crc kubenswrapper[5050]: I1123 14:58:48.294949 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"ovsdbserver-sb-0\" (UID: \"f81d4e06-4245-4355-89d6-ddc438c06f29\") " pod="openstack/ovsdbserver-sb-0" Nov 23 14:58:48 crc kubenswrapper[5050]: I1123 14:58:48.593352 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-sb-0" Nov 23 14:58:49 crc kubenswrapper[5050]: I1123 14:58:49.609545 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 23 14:58:49 crc kubenswrapper[5050]: I1123 14:58:49.612056 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0" Nov 23 14:58:49 crc kubenswrapper[5050]: I1123 14:58:49.615542 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-nb-ovndbs" Nov 23 14:58:49 crc kubenswrapper[5050]: I1123 14:58:49.615784 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-nb-dockercfg-k24xf" Nov 23 14:58:49 crc kubenswrapper[5050]: I1123 14:58:49.615940 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-config" Nov 23 14:58:49 crc kubenswrapper[5050]: I1123 14:58:49.616185 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-scripts" Nov 23 14:58:49 crc kubenswrapper[5050]: I1123 14:58:49.618570 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 23 14:58:49 crc kubenswrapper[5050]: I1123 14:58:49.710902 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c6t6p\" (UniqueName: \"kubernetes.io/projected/716a14cd-88b9-4e18-a781-6dbfaad7634c-kube-api-access-c6t6p\") pod \"ovsdbserver-nb-0\" (UID: \"716a14cd-88b9-4e18-a781-6dbfaad7634c\") " pod="openstack/ovsdbserver-nb-0" Nov 23 14:58:49 crc kubenswrapper[5050]: I1123 14:58:49.711042 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/716a14cd-88b9-4e18-a781-6dbfaad7634c-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"716a14cd-88b9-4e18-a781-6dbfaad7634c\") " pod="openstack/ovsdbserver-nb-0" Nov 23 14:58:49 crc kubenswrapper[5050]: I1123 14:58:49.711077 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/716a14cd-88b9-4e18-a781-6dbfaad7634c-config\") pod \"ovsdbserver-nb-0\" (UID: \"716a14cd-88b9-4e18-a781-6dbfaad7634c\") " pod="openstack/ovsdbserver-nb-0" Nov 23 14:58:49 crc kubenswrapper[5050]: I1123 14:58:49.711121 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"ovsdbserver-nb-0\" (UID: \"716a14cd-88b9-4e18-a781-6dbfaad7634c\") " pod="openstack/ovsdbserver-nb-0" Nov 23 14:58:49 crc kubenswrapper[5050]: I1123 14:58:49.711207 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/716a14cd-88b9-4e18-a781-6dbfaad7634c-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"716a14cd-88b9-4e18-a781-6dbfaad7634c\") " pod="openstack/ovsdbserver-nb-0" Nov 23 14:58:49 crc kubenswrapper[5050]: I1123 14:58:49.711242 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/716a14cd-88b9-4e18-a781-6dbfaad7634c-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"716a14cd-88b9-4e18-a781-6dbfaad7634c\") " pod="openstack/ovsdbserver-nb-0" Nov 23 14:58:49 crc 
kubenswrapper[5050]: I1123 14:58:49.711344 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/716a14cd-88b9-4e18-a781-6dbfaad7634c-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"716a14cd-88b9-4e18-a781-6dbfaad7634c\") " pod="openstack/ovsdbserver-nb-0" Nov 23 14:58:49 crc kubenswrapper[5050]: I1123 14:58:49.711523 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/716a14cd-88b9-4e18-a781-6dbfaad7634c-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"716a14cd-88b9-4e18-a781-6dbfaad7634c\") " pod="openstack/ovsdbserver-nb-0" Nov 23 14:58:49 crc kubenswrapper[5050]: I1123 14:58:49.812863 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c6t6p\" (UniqueName: \"kubernetes.io/projected/716a14cd-88b9-4e18-a781-6dbfaad7634c-kube-api-access-c6t6p\") pod \"ovsdbserver-nb-0\" (UID: \"716a14cd-88b9-4e18-a781-6dbfaad7634c\") " pod="openstack/ovsdbserver-nb-0" Nov 23 14:58:49 crc kubenswrapper[5050]: I1123 14:58:49.812949 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/716a14cd-88b9-4e18-a781-6dbfaad7634c-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"716a14cd-88b9-4e18-a781-6dbfaad7634c\") " pod="openstack/ovsdbserver-nb-0" Nov 23 14:58:49 crc kubenswrapper[5050]: I1123 14:58:49.812972 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/716a14cd-88b9-4e18-a781-6dbfaad7634c-config\") pod \"ovsdbserver-nb-0\" (UID: \"716a14cd-88b9-4e18-a781-6dbfaad7634c\") " pod="openstack/ovsdbserver-nb-0" Nov 23 14:58:49 crc kubenswrapper[5050]: I1123 14:58:49.813000 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"ovsdbserver-nb-0\" (UID: \"716a14cd-88b9-4e18-a781-6dbfaad7634c\") " pod="openstack/ovsdbserver-nb-0" Nov 23 14:58:49 crc kubenswrapper[5050]: I1123 14:58:49.813037 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/716a14cd-88b9-4e18-a781-6dbfaad7634c-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"716a14cd-88b9-4e18-a781-6dbfaad7634c\") " pod="openstack/ovsdbserver-nb-0" Nov 23 14:58:49 crc kubenswrapper[5050]: I1123 14:58:49.813060 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/716a14cd-88b9-4e18-a781-6dbfaad7634c-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"716a14cd-88b9-4e18-a781-6dbfaad7634c\") " pod="openstack/ovsdbserver-nb-0" Nov 23 14:58:49 crc kubenswrapper[5050]: I1123 14:58:49.813093 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/716a14cd-88b9-4e18-a781-6dbfaad7634c-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"716a14cd-88b9-4e18-a781-6dbfaad7634c\") " pod="openstack/ovsdbserver-nb-0" Nov 23 14:58:49 crc kubenswrapper[5050]: I1123 14:58:49.813138 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: 
\"kubernetes.io/empty-dir/716a14cd-88b9-4e18-a781-6dbfaad7634c-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"716a14cd-88b9-4e18-a781-6dbfaad7634c\") " pod="openstack/ovsdbserver-nb-0" Nov 23 14:58:49 crc kubenswrapper[5050]: I1123 14:58:49.813730 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/716a14cd-88b9-4e18-a781-6dbfaad7634c-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"716a14cd-88b9-4e18-a781-6dbfaad7634c\") " pod="openstack/ovsdbserver-nb-0" Nov 23 14:58:49 crc kubenswrapper[5050]: I1123 14:58:49.815666 5050 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"ovsdbserver-nb-0\" (UID: \"716a14cd-88b9-4e18-a781-6dbfaad7634c\") device mount path \"/mnt/openstack/pv05\"" pod="openstack/ovsdbserver-nb-0" Nov 23 14:58:49 crc kubenswrapper[5050]: I1123 14:58:49.816074 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/716a14cd-88b9-4e18-a781-6dbfaad7634c-config\") pod \"ovsdbserver-nb-0\" (UID: \"716a14cd-88b9-4e18-a781-6dbfaad7634c\") " pod="openstack/ovsdbserver-nb-0" Nov 23 14:58:49 crc kubenswrapper[5050]: I1123 14:58:49.817075 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/716a14cd-88b9-4e18-a781-6dbfaad7634c-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"716a14cd-88b9-4e18-a781-6dbfaad7634c\") " pod="openstack/ovsdbserver-nb-0" Nov 23 14:58:49 crc kubenswrapper[5050]: I1123 14:58:49.835656 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/716a14cd-88b9-4e18-a781-6dbfaad7634c-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"716a14cd-88b9-4e18-a781-6dbfaad7634c\") " pod="openstack/ovsdbserver-nb-0" Nov 23 14:58:49 crc kubenswrapper[5050]: I1123 14:58:49.841323 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/716a14cd-88b9-4e18-a781-6dbfaad7634c-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"716a14cd-88b9-4e18-a781-6dbfaad7634c\") " pod="openstack/ovsdbserver-nb-0" Nov 23 14:58:49 crc kubenswrapper[5050]: I1123 14:58:49.844233 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/716a14cd-88b9-4e18-a781-6dbfaad7634c-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"716a14cd-88b9-4e18-a781-6dbfaad7634c\") " pod="openstack/ovsdbserver-nb-0" Nov 23 14:58:49 crc kubenswrapper[5050]: I1123 14:58:49.887107 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"ovsdbserver-nb-0\" (UID: \"716a14cd-88b9-4e18-a781-6dbfaad7634c\") " pod="openstack/ovsdbserver-nb-0" Nov 23 14:58:49 crc kubenswrapper[5050]: I1123 14:58:49.887380 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c6t6p\" (UniqueName: \"kubernetes.io/projected/716a14cd-88b9-4e18-a781-6dbfaad7634c-kube-api-access-c6t6p\") pod \"ovsdbserver-nb-0\" (UID: \"716a14cd-88b9-4e18-a781-6dbfaad7634c\") " pod="openstack/ovsdbserver-nb-0" Nov 23 14:58:49 crc kubenswrapper[5050]: I1123 14:58:49.977273 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-nb-0" Nov 23 14:59:00 crc kubenswrapper[5050]: E1123 14:59:00.107112 5050 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified" Nov 23 14:59:00 crc kubenswrapper[5050]: E1123 14:59:00.108296 5050 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:setup-container,Image:quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified,Command:[sh -c cp /tmp/erlang-cookie-secret/.erlang.cookie /var/lib/rabbitmq/.erlang.cookie && chmod 600 /var/lib/rabbitmq/.erlang.cookie ; cp /tmp/rabbitmq-plugins/enabled_plugins /operator/enabled_plugins ; echo '[default]' > /var/lib/rabbitmq/.rabbitmqadmin.conf && sed -e 's/default_user/username/' -e 's/default_pass/password/' /tmp/default_user.conf >> /var/lib/rabbitmq/.rabbitmqadmin.conf && chmod 600 /var/lib/rabbitmq/.rabbitmqadmin.conf ; sleep 30],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Requests:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:plugins-conf,ReadOnly:false,MountPath:/tmp/rabbitmq-plugins/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-erlang-cookie,ReadOnly:false,MountPath:/var/lib/rabbitmq/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:erlang-cookie-secret,ReadOnly:false,MountPath:/tmp/erlang-cookie-secret/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-plugins,ReadOnly:false,MountPath:/operator,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:persistence,ReadOnly:false,MountPath:/var/lib/rabbitmq/mnesia/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-confd,ReadOnly:false,MountPath:/tmp/default_user.conf,SubPath:default_user.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-n788x,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-server-0_openstack(5eff539e-c641-4873-aeae-450aaf0b4ac8): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 23 14:59:00 crc kubenswrapper[5050]: E1123 14:59:00.109615 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/rabbitmq-server-0" 
podUID="5eff539e-c641-4873-aeae-450aaf0b4ac8" Nov 23 14:59:00 crc kubenswrapper[5050]: E1123 14:59:00.141235 5050 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified" Nov 23 14:59:00 crc kubenswrapper[5050]: E1123 14:59:00.141603 5050 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:setup-container,Image:quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified,Command:[sh -c cp /tmp/erlang-cookie-secret/.erlang.cookie /var/lib/rabbitmq/.erlang.cookie && chmod 600 /var/lib/rabbitmq/.erlang.cookie ; cp /tmp/rabbitmq-plugins/enabled_plugins /operator/enabled_plugins ; echo '[default]' > /var/lib/rabbitmq/.rabbitmqadmin.conf && sed -e 's/default_user/username/' -e 's/default_pass/password/' /tmp/default_user.conf >> /var/lib/rabbitmq/.rabbitmqadmin.conf && chmod 600 /var/lib/rabbitmq/.rabbitmqadmin.conf ; sleep 30],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Requests:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:plugins-conf,ReadOnly:false,MountPath:/tmp/rabbitmq-plugins/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-erlang-cookie,ReadOnly:false,MountPath:/var/lib/rabbitmq/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:erlang-cookie-secret,ReadOnly:false,MountPath:/tmp/erlang-cookie-secret/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-plugins,ReadOnly:false,MountPath:/operator,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:persistence,ReadOnly:false,MountPath:/var/lib/rabbitmq/mnesia/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-confd,ReadOnly:false,MountPath:/tmp/default_user.conf,SubPath:default_user.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-q7vgm,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cell1-server-0_openstack(f92353db-5352-4216-ad2d-50242e58dfb7): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 23 14:59:00 crc kubenswrapper[5050]: E1123 14:59:00.142993 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/rabbitmq-cell1-server-0" 
podUID="f92353db-5352-4216-ad2d-50242e58dfb7" Nov 23 14:59:00 crc kubenswrapper[5050]: E1123 14:59:00.255104 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified\\\"\"" pod="openstack/rabbitmq-server-0" podUID="5eff539e-c641-4873-aeae-450aaf0b4ac8" Nov 23 14:59:00 crc kubenswrapper[5050]: E1123 14:59:00.255948 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified\\\"\"" pod="openstack/rabbitmq-cell1-server-0" podUID="f92353db-5352-4216-ad2d-50242e58dfb7" Nov 23 14:59:00 crc kubenswrapper[5050]: E1123 14:59:00.863099 5050 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-memcached:current-podified" Nov 23 14:59:00 crc kubenswrapper[5050]: E1123 14:59:00.863473 5050 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:memcached,Image:quay.io/podified-antelope-centos9/openstack-memcached:current-podified,Command:[/usr/bin/dumb-init -- /usr/local/bin/kolla_start],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:memcached,HostPort:0,ContainerPort:11211,Protocol:TCP,HostIP:,},ContainerPort{Name:memcached-tls,HostPort:0,ContainerPort:11212,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},EnvVar{Name:POD_IPS,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIPs,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:CONFIG_HASH,Value:n67fh78h5dch64ch589h88h78h59dh556hddh586h86h545h64dh676h5ddh547h5dbh549h67bh65bh689h687h594h5f4h5c8h5dch95h695h58h564h65bq,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/src,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kolla-config,ReadOnly:true,MountPath:/var/lib/kolla/config_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:memcached-tls-certs,ReadOnly:true,MountPath:/var/lib/config-data/tls/certs/memcached.crt,SubPath:tls.crt,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:memcached-tls-certs,ReadOnly:true,MountPath:/var/lib/config-data/tls/private/memcached.key,SubPath:tls.key,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-rg5xn,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:nil,TCPSocket:&TCPSocketAction{Port:{0 11211 
},Host:,},GRPC:nil,},InitialDelaySeconds:3,TimeoutSeconds:5,PeriodSeconds:3,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:nil,TCPSocket:&TCPSocketAction{Port:{0 11211 },Host:,},GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42457,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42457,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod memcached-0_openstack(d1ce97bd-1ff7-400e-a741-7e757fbd7e17): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 23 14:59:00 crc kubenswrapper[5050]: E1123 14:59:00.864967 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"memcached\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/memcached-0" podUID="d1ce97bd-1ff7-400e-a741-7e757fbd7e17" Nov 23 14:59:01 crc kubenswrapper[5050]: E1123 14:59:01.262571 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"memcached\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-memcached:current-podified\\\"\"" pod="openstack/memcached-0" podUID="d1ce97bd-1ff7-400e-a741-7e757fbd7e17" Nov 23 14:59:02 crc kubenswrapper[5050]: E1123 14:59:02.708951 5050 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-mariadb:current-podified" Nov 23 14:59:02 crc kubenswrapper[5050]: E1123 14:59:02.709194 5050 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:mysql-bootstrap,Image:quay.io/podified-antelope-centos9/openstack-mariadb:current-podified,Command:[bash 
/var/lib/operator-scripts/mysql_bootstrap.sh],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:True,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:mysql-db,ReadOnly:false,MountPath:/var/lib/mysql,SubPath:mysql,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-default,ReadOnly:true,MountPath:/var/lib/config-data/default,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-generated,ReadOnly:false,MountPath:/var/lib/config-data/generated,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:operator-scripts,ReadOnly:true,MountPath:/var/lib/operator-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kolla-config,ReadOnly:true,MountPath:/var/lib/kolla/config_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-pzhml,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod openstack-cell1-galera-0_openstack(85ca597a-bf71-43bb-b57c-f840b37f196f): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 23 14:59:02 crc kubenswrapper[5050]: E1123 14:59:02.710405 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/openstack-cell1-galera-0" podUID="85ca597a-bf71-43bb-b57c-f840b37f196f" Nov 23 14:59:02 crc kubenswrapper[5050]: E1123 14:59:02.713516 5050 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-mariadb:current-podified" Nov 23 14:59:02 crc kubenswrapper[5050]: E1123 14:59:02.713860 5050 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:mysql-bootstrap,Image:quay.io/podified-antelope-centos9/openstack-mariadb:current-podified,Command:[bash 
/var/lib/operator-scripts/mysql_bootstrap.sh],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:True,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:mysql-db,ReadOnly:false,MountPath:/var/lib/mysql,SubPath:mysql,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-default,ReadOnly:true,MountPath:/var/lib/config-data/default,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-generated,ReadOnly:false,MountPath:/var/lib/config-data/generated,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:operator-scripts,ReadOnly:true,MountPath:/var/lib/operator-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kolla-config,ReadOnly:true,MountPath:/var/lib/kolla/config_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-pmfpm,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod openstack-galera-0_openstack(0a256986-024f-45e6-9b86-b5a724ab5f6e): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 23 14:59:02 crc kubenswrapper[5050]: E1123 14:59:02.715026 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/openstack-galera-0" podUID="0a256986-024f-45e6-9b86-b5a724ab5f6e" Nov 23 14:59:03 crc kubenswrapper[5050]: E1123 14:59:03.277963 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-mariadb:current-podified\\\"\"" pod="openstack/openstack-cell1-galera-0" podUID="85ca597a-bf71-43bb-b57c-f840b37f196f" Nov 23 14:59:03 crc kubenswrapper[5050]: E1123 14:59:03.278057 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-mariadb:current-podified\\\"\"" pod="openstack/openstack-galera-0" podUID="0a256986-024f-45e6-9b86-b5a724ab5f6e" Nov 23 14:59:07 crc kubenswrapper[5050]: E1123 14:59:07.814137 5050 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Nov 23 14:59:07 crc kubenswrapper[5050]: E1123 
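Each failed pull above surfaces twice: first as ErrImagePull when the pull itself fails (here "context canceled", i.e. the pull was aborted mid-copy, not rejected by the registry), then as ImagePullBackOff on subsequent sync loops while the kubelet waits before retrying. The delay doubles per attempt up to a cap; the 10s initial value and 5m cap in this sketch are the commonly cited kubelet defaults, assumed here rather than read from this cluster's config:

```go
package main

import (
	"fmt"
	"time"
)

// nextBackoff doubles the delay up to a limit: the shape of the
// kubelet's image-pull backoff (10s initial, 5m cap assumed).
func nextBackoff(cur, limit time.Duration) time.Duration {
	if cur == 0 {
		return 10 * time.Second
	}
	if next := 2 * cur; next < limit {
		return next
	}
	return limit
}

func main() {
	var d time.Duration
	for attempt := 1; attempt <= 8; attempt++ {
		d = nextBackoff(d, 5*time.Minute)
		fmt.Printf("pull attempt %d backed off %v\n", attempt, d)
	}
}
```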
14:59:07.815276 5050 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:ndfhb5h667h568h584h5f9h58dh565h664h587h597h577h64bh5c4h66fh647hbdh68ch5c5h68dh686h5f7h64hd7hc6h55fh57bh98h57fh87h5fh57fq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-58k2w,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-78dd6ddcc-jqz2l_openstack(e022ef9d-faf7-4dcb-87c8-cde58d2b8575): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 23 14:59:07 crc kubenswrapper[5050]: E1123 14:59:07.816411 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-78dd6ddcc-jqz2l" podUID="e022ef9d-faf7-4dcb-87c8-cde58d2b8575" Nov 23 14:59:07 crc kubenswrapper[5050]: E1123 14:59:07.819662 5050 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Nov 23 14:59:07 crc kubenswrapper[5050]: E1123 14:59:07.819851 5050 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries 
--test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nfdh5dfhb6h64h676hc4h78h97h669h54chfbh696hb5h54bh5d4h6bh64h644h677h584h5cbh698h9dh5bbh5f8h5b8hcdh644h5c7h694hbfh589q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-gfj4l,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-5ccc8479f9-pc2bl_openstack(99bf0fd0-57d8-459a-a6b1-f538e878af9b): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 23 14:59:07 crc kubenswrapper[5050]: E1123 14:59:07.821822 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-5ccc8479f9-pc2bl" podUID="99bf0fd0-57d8-459a-a6b1-f538e878af9b" Nov 23 14:59:07 crc kubenswrapper[5050]: E1123 14:59:07.836849 5050 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Nov 23 14:59:07 crc kubenswrapper[5050]: E1123 14:59:07.837046 5050 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries 
--test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nffh5bdhf4h5f8h79h55h77h58fh56dh7bh6fh578hbch55dh68h56bhd9h65dh57ch658hc9h566h666h688h58h65dh684h5d7h6ch575h5d6h88q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-jl52z,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-675f4bcbfc-gc5w9_openstack(6bae002e-fff8-41ed-8e1a-2f8b4f95393f): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 23 14:59:07 crc kubenswrapper[5050]: E1123 14:59:07.838229 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-675f4bcbfc-gc5w9" podUID="6bae002e-fff8-41ed-8e1a-2f8b4f95393f" Nov 23 14:59:08 crc kubenswrapper[5050]: E1123 14:59:08.197082 5050 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Nov 23 14:59:08 crc kubenswrapper[5050]: E1123 14:59:08.197639 5050 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries 
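The dnsmasq init containers above run the daemon with --test, which makes dnsmasq syntax-check its configuration and exit; a bad config therefore fails the pod in Init instead of crash-looping the main container. The same pattern as a minimal Go sketch, assuming an os/exec wrapper (the --conf-dir path mirrors the Args above; everything else is illustrative):

```go
package main

import (
	"fmt"
	"os"
	"os/exec"
)

// A config-check init step: run the daemon binary in test mode and
// exit nonzero if validation fails, keeping the pod in Init state.
// Requires dnsmasq on PATH.
func main() {
	cmd := exec.Command("dnsmasq", "--conf-dir=/etc/dnsmasq.d", "--test")
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	if err := cmd.Run(); err != nil {
		fmt.Fprintln(os.Stderr, "config check failed:", err)
		os.Exit(1)
	}
}
```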
--test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n659h4h664hbh658h587h67ch89h587h8fh679hc6hf9h55fh644h5d5h698h68dh5cdh5ffh669h54ch9h689hb8hd4h5bfhd8h5d7h5fh665h574q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-dcn7x,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-57d769cc4f-sdz75_openstack(25891e09-f495-4de3-b5d2-496188bf0981): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 23 14:59:08 crc kubenswrapper[5050]: E1123 14:59:08.198799 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-57d769cc4f-sdz75" podUID="25891e09-f495-4de3-b5d2-496188bf0981" Nov 23 14:59:08 crc kubenswrapper[5050]: E1123 14:59:08.344578 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified\\\"\"" pod="openstack/dnsmasq-dns-5ccc8479f9-pc2bl" podUID="99bf0fd0-57d8-459a-a6b1-f538e878af9b" Nov 23 14:59:08 crc kubenswrapper[5050]: E1123 14:59:08.344642 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified\\\"\"" pod="openstack/dnsmasq-dns-57d769cc4f-sdz75" podUID="25891e09-f495-4de3-b5d2-496188bf0981" Nov 23 14:59:08 crc kubenswrapper[5050]: I1123 14:59:08.536792 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 23 14:59:08 crc kubenswrapper[5050]: I1123 14:59:08.700790 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-tbww8"] Nov 23 14:59:08 crc kubenswrapper[5050]: W1123 14:59:08.744832 5050 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod33af3c2e_dea7_4448_8b28_b579d77490b9.slice/crio-c3195f1364743c6f2ef8f040c543b8a404bd64e7d77030ed0e05333791fa6afd WatchSource:0}: Error finding container c3195f1364743c6f2ef8f040c543b8a404bd64e7d77030ed0e05333791fa6afd: Status 404 returned error can't find the container with id c3195f1364743c6f2ef8f040c543b8a404bd64e7d77030ed0e05333791fa6afd Nov 23 14:59:09 crc kubenswrapper[5050]: I1123 14:59:09.002225 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-n9v8v"] Nov 23 14:59:09 crc kubenswrapper[5050]: I1123 14:59:09.079826 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 23 14:59:09 crc kubenswrapper[5050]: W1123 14:59:09.307588 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod716a14cd_88b9_4e18_a781_6dbfaad7634c.slice/crio-1eaf8c3d075287d38dfeeb9be67a8d31d35dfb63a94883c205131fdaa82c8b4d WatchSource:0}: Error finding container 1eaf8c3d075287d38dfeeb9be67a8d31d35dfb63a94883c205131fdaa82c8b4d: Status 404 returned error can't find the container with id 1eaf8c3d075287d38dfeeb9be67a8d31d35dfb63a94883c205131fdaa82c8b4d Nov 23 14:59:09 crc kubenswrapper[5050]: W1123 14:59:09.310365 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc21366ac_a7da_4cac_8332_753659210595.slice/crio-4719503c5841ca5b62f9ea359a8f6b09eedc13f56f362e1c8530c0d6c42b7a46 WatchSource:0}: Error finding container 4719503c5841ca5b62f9ea359a8f6b09eedc13f56f362e1c8530c0d6c42b7a46: Status 404 returned error can't find the container with id 4719503c5841ca5b62f9ea359a8f6b09eedc13f56f362e1c8530c0d6c42b7a46 Nov 23 14:59:09 crc kubenswrapper[5050]: I1123 14:59:09.349215 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-n9v8v" event={"ID":"c21366ac-a7da-4cac-8332-753659210595","Type":"ContainerStarted","Data":"4719503c5841ca5b62f9ea359a8f6b09eedc13f56f362e1c8530c0d6c42b7a46"} Nov 23 14:59:09 crc kubenswrapper[5050]: I1123 14:59:09.350543 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-675f4bcbfc-gc5w9" event={"ID":"6bae002e-fff8-41ed-8e1a-2f8b4f95393f","Type":"ContainerDied","Data":"b9e9ebafeed9f1be22ca45ddd430981bc7fe6cd441de571c2bb0e461fd89652b"} Nov 23 14:59:09 crc kubenswrapper[5050]: I1123 14:59:09.350578 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b9e9ebafeed9f1be22ca45ddd430981bc7fe6cd441de571c2bb0e461fd89652b" Nov 23 14:59:09 crc kubenswrapper[5050]: I1123 14:59:09.351898 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78dd6ddcc-jqz2l" event={"ID":"e022ef9d-faf7-4dcb-87c8-cde58d2b8575","Type":"ContainerDied","Data":"3771e211c4622c34ab79b70dfeb1f71fa5db5ff11d2cf2872548b4ea6a33e155"} Nov 23 14:59:09 crc kubenswrapper[5050]: I1123 14:59:09.351959 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3771e211c4622c34ab79b70dfeb1f71fa5db5ff11d2cf2872548b4ea6a33e155" Nov 23 14:59:09 crc kubenswrapper[5050]: I1123 14:59:09.353025 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"f81d4e06-4245-4355-89d6-ddc438c06f29","Type":"ContainerStarted","Data":"26dfadc19806d4c9b9a23efca729410607729d3d5613fa898d8137bbcd345102"} Nov 23 14:59:09 crc kubenswrapper[5050]: I1123 14:59:09.355174 5050 
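The manager.go "Status 404 ... can't find the container" warnings just above are, as far as these logs show, a benign race: the cgroup watcher sees a new cgroup before CRI-O has registered the container, and the event is dropped. The "(PLEG): event for pod" lines that follow come from the pod lifecycle event generator, which periodically relists the runtime's containers and diffs the snapshot against the previous one: a newly running ID yields ContainerStarted, a vanished or exited one ContainerDied. A toy diff in that shape (IDs truncated from the log, states illustrative):

```go
package main

import "fmt"

// state maps containerID -> "running" | "exited".
type state map[string]string

// diff emits lifecycle events for containers whose state changed
// between two relist snapshots, the way PLEG does.
func diff(prev, cur state) {
	for id, s := range cur {
		if prev[id] != s {
			switch s {
			case "running":
				fmt.Println("ContainerStarted", id)
			case "exited":
				fmt.Println("ContainerDied", id)
			}
		}
	}
}

func main() {
	prev := state{"4719503c": "running"}
	cur := state{"4719503c": "running", "d79191eb": "exited"}
	diff(prev, cur)
}
```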
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"716a14cd-88b9-4e18-a781-6dbfaad7634c","Type":"ContainerStarted","Data":"1eaf8c3d075287d38dfeeb9be67a8d31d35dfb63a94883c205131fdaa82c8b4d"} Nov 23 14:59:09 crc kubenswrapper[5050]: I1123 14:59:09.356992 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-tbww8" event={"ID":"33af3c2e-dea7-4448-8b28-b579d77490b9","Type":"ContainerStarted","Data":"c3195f1364743c6f2ef8f040c543b8a404bd64e7d77030ed0e05333791fa6afd"} Nov 23 14:59:09 crc kubenswrapper[5050]: I1123 14:59:09.387123 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-gc5w9" Nov 23 14:59:09 crc kubenswrapper[5050]: I1123 14:59:09.395116 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-jqz2l" Nov 23 14:59:09 crc kubenswrapper[5050]: I1123 14:59:09.513910 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jl52z\" (UniqueName: \"kubernetes.io/projected/6bae002e-fff8-41ed-8e1a-2f8b4f95393f-kube-api-access-jl52z\") pod \"6bae002e-fff8-41ed-8e1a-2f8b4f95393f\" (UID: \"6bae002e-fff8-41ed-8e1a-2f8b4f95393f\") " Nov 23 14:59:09 crc kubenswrapper[5050]: I1123 14:59:09.514198 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e022ef9d-faf7-4dcb-87c8-cde58d2b8575-dns-svc\") pod \"e022ef9d-faf7-4dcb-87c8-cde58d2b8575\" (UID: \"e022ef9d-faf7-4dcb-87c8-cde58d2b8575\") " Nov 23 14:59:09 crc kubenswrapper[5050]: I1123 14:59:09.514273 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-58k2w\" (UniqueName: \"kubernetes.io/projected/e022ef9d-faf7-4dcb-87c8-cde58d2b8575-kube-api-access-58k2w\") pod \"e022ef9d-faf7-4dcb-87c8-cde58d2b8575\" (UID: \"e022ef9d-faf7-4dcb-87c8-cde58d2b8575\") " Nov 23 14:59:09 crc kubenswrapper[5050]: I1123 14:59:09.514331 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6bae002e-fff8-41ed-8e1a-2f8b4f95393f-config\") pod \"6bae002e-fff8-41ed-8e1a-2f8b4f95393f\" (UID: \"6bae002e-fff8-41ed-8e1a-2f8b4f95393f\") " Nov 23 14:59:09 crc kubenswrapper[5050]: I1123 14:59:09.514465 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e022ef9d-faf7-4dcb-87c8-cde58d2b8575-config\") pod \"e022ef9d-faf7-4dcb-87c8-cde58d2b8575\" (UID: \"e022ef9d-faf7-4dcb-87c8-cde58d2b8575\") " Nov 23 14:59:09 crc kubenswrapper[5050]: I1123 14:59:09.515886 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e022ef9d-faf7-4dcb-87c8-cde58d2b8575-config" (OuterVolumeSpecName: "config") pod "e022ef9d-faf7-4dcb-87c8-cde58d2b8575" (UID: "e022ef9d-faf7-4dcb-87c8-cde58d2b8575"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 14:59:09 crc kubenswrapper[5050]: I1123 14:59:09.517594 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e022ef9d-faf7-4dcb-87c8-cde58d2b8575-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "e022ef9d-faf7-4dcb-87c8-cde58d2b8575" (UID: "e022ef9d-faf7-4dcb-87c8-cde58d2b8575"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 14:59:09 crc kubenswrapper[5050]: I1123 14:59:09.518036 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6bae002e-fff8-41ed-8e1a-2f8b4f95393f-config" (OuterVolumeSpecName: "config") pod "6bae002e-fff8-41ed-8e1a-2f8b4f95393f" (UID: "6bae002e-fff8-41ed-8e1a-2f8b4f95393f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 14:59:09 crc kubenswrapper[5050]: I1123 14:59:09.525523 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e022ef9d-faf7-4dcb-87c8-cde58d2b8575-kube-api-access-58k2w" (OuterVolumeSpecName: "kube-api-access-58k2w") pod "e022ef9d-faf7-4dcb-87c8-cde58d2b8575" (UID: "e022ef9d-faf7-4dcb-87c8-cde58d2b8575"). InnerVolumeSpecName "kube-api-access-58k2w". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 14:59:09 crc kubenswrapper[5050]: I1123 14:59:09.533753 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6bae002e-fff8-41ed-8e1a-2f8b4f95393f-kube-api-access-jl52z" (OuterVolumeSpecName: "kube-api-access-jl52z") pod "6bae002e-fff8-41ed-8e1a-2f8b4f95393f" (UID: "6bae002e-fff8-41ed-8e1a-2f8b4f95393f"). InnerVolumeSpecName "kube-api-access-jl52z". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 14:59:09 crc kubenswrapper[5050]: I1123 14:59:09.616329 5050 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e022ef9d-faf7-4dcb-87c8-cde58d2b8575-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 23 14:59:09 crc kubenswrapper[5050]: I1123 14:59:09.616782 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-58k2w\" (UniqueName: \"kubernetes.io/projected/e022ef9d-faf7-4dcb-87c8-cde58d2b8575-kube-api-access-58k2w\") on node \"crc\" DevicePath \"\"" Nov 23 14:59:09 crc kubenswrapper[5050]: I1123 14:59:09.616797 5050 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6bae002e-fff8-41ed-8e1a-2f8b4f95393f-config\") on node \"crc\" DevicePath \"\"" Nov 23 14:59:09 crc kubenswrapper[5050]: I1123 14:59:09.616813 5050 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e022ef9d-faf7-4dcb-87c8-cde58d2b8575-config\") on node \"crc\" DevicePath \"\"" Nov 23 14:59:09 crc kubenswrapper[5050]: I1123 14:59:09.616828 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jl52z\" (UniqueName: \"kubernetes.io/projected/6bae002e-fff8-41ed-8e1a-2f8b4f95393f-kube-api-access-jl52z\") on node \"crc\" DevicePath \"\"" Nov 23 14:59:10 crc kubenswrapper[5050]: I1123 14:59:10.367916 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-gc5w9" Nov 23 14:59:10 crc kubenswrapper[5050]: I1123 14:59:10.367946 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-jqz2l" Nov 23 14:59:10 crc kubenswrapper[5050]: I1123 14:59:10.425782 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-jqz2l"] Nov 23 14:59:10 crc kubenswrapper[5050]: I1123 14:59:10.435405 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-jqz2l"] Nov 23 14:59:10 crc kubenswrapper[5050]: I1123 14:59:10.462132 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-gc5w9"] Nov 23 14:59:10 crc kubenswrapper[5050]: I1123 14:59:10.479848 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-gc5w9"] Nov 23 14:59:11 crc kubenswrapper[5050]: I1123 14:59:11.583126 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6bae002e-fff8-41ed-8e1a-2f8b4f95393f" path="/var/lib/kubelet/pods/6bae002e-fff8-41ed-8e1a-2f8b4f95393f/volumes" Nov 23 14:59:11 crc kubenswrapper[5050]: I1123 14:59:11.584097 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e022ef9d-faf7-4dcb-87c8-cde58d2b8575" path="/var/lib/kubelet/pods/e022ef9d-faf7-4dcb-87c8-cde58d2b8575/volumes" Nov 23 14:59:12 crc kubenswrapper[5050]: I1123 14:59:12.388778 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"3782bc88-94a6-4434-bd20-a48caad31d31","Type":"ContainerStarted","Data":"a325cd944d3e8806653ffd40ad1ee7a791f66ddbf95d52aae6b757339a46df2c"} Nov 23 14:59:12 crc kubenswrapper[5050]: I1123 14:59:12.388948 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Nov 23 14:59:12 crc kubenswrapper[5050]: I1123 14:59:12.408361 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=2.649170011 podStartE2EDuration="30.408343234s" podCreationTimestamp="2025-11-23 14:58:42 +0000 UTC" firstStartedPulling="2025-11-23 14:58:43.507197569 +0000 UTC m=+1018.674194054" lastFinishedPulling="2025-11-23 14:59:11.266370782 +0000 UTC m=+1046.433367277" observedRunningTime="2025-11-23 14:59:12.406536044 +0000 UTC m=+1047.573532549" watchObservedRunningTime="2025-11-23 14:59:12.408343234 +0000 UTC m=+1047.575339719" Nov 23 14:59:14 crc kubenswrapper[5050]: I1123 14:59:14.409965 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-n9v8v" event={"ID":"c21366ac-a7da-4cac-8332-753659210595","Type":"ContainerStarted","Data":"d79191eb437e5f7ec421da5ab0ca602912b38ed265ec750ace6b1b344d1ac58d"} Nov 23 14:59:14 crc kubenswrapper[5050]: I1123 14:59:14.414156 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"d1ce97bd-1ff7-400e-a741-7e757fbd7e17","Type":"ContainerStarted","Data":"10d8f0ce4d54a26bf81a8c27901423a54c9049d1e6deff9e87bd78ed1978474b"} Nov 23 14:59:14 crc kubenswrapper[5050]: I1123 14:59:14.414605 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/memcached-0" Nov 23 14:59:14 crc kubenswrapper[5050]: I1123 14:59:14.417382 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"f81d4e06-4245-4355-89d6-ddc438c06f29","Type":"ContainerStarted","Data":"c0069a68c89482704d61e8c58556300339031d7c4accb976799bb1d562fe9ac9"} Nov 23 14:59:14 crc kubenswrapper[5050]: I1123 14:59:14.419848 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" 
event={"ID":"716a14cd-88b9-4e18-a781-6dbfaad7634c","Type":"ContainerStarted","Data":"3249cf8628f145ad70288b99dd53e1eb58e6ee9953f4d2ed143ddefe3d8d6d1c"} Nov 23 14:59:14 crc kubenswrapper[5050]: I1123 14:59:14.462240 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/memcached-0" podStartSLOduration=2.704537328 podStartE2EDuration="34.462222777s" podCreationTimestamp="2025-11-23 14:58:40 +0000 UTC" firstStartedPulling="2025-11-23 14:58:41.993545108 +0000 UTC m=+1017.160541593" lastFinishedPulling="2025-11-23 14:59:13.751230557 +0000 UTC m=+1048.918227042" observedRunningTime="2025-11-23 14:59:14.456985482 +0000 UTC m=+1049.623981977" watchObservedRunningTime="2025-11-23 14:59:14.462222777 +0000 UTC m=+1049.629219262" Nov 23 14:59:15 crc kubenswrapper[5050]: I1123 14:59:15.437109 5050 generic.go:334] "Generic (PLEG): container finished" podID="c21366ac-a7da-4cac-8332-753659210595" containerID="d79191eb437e5f7ec421da5ab0ca602912b38ed265ec750ace6b1b344d1ac58d" exitCode=0 Nov 23 14:59:15 crc kubenswrapper[5050]: I1123 14:59:15.437211 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-n9v8v" event={"ID":"c21366ac-a7da-4cac-8332-753659210595","Type":"ContainerDied","Data":"d79191eb437e5f7ec421da5ab0ca602912b38ed265ec750ace6b1b344d1ac58d"} Nov 23 14:59:15 crc kubenswrapper[5050]: I1123 14:59:15.440338 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"0a256986-024f-45e6-9b86-b5a724ab5f6e","Type":"ContainerStarted","Data":"ebcffda6c310da77ec9c4ccf0ade9f7141b3251552ce8adddbcfbe4c14dc54c1"} Nov 23 14:59:15 crc kubenswrapper[5050]: I1123 14:59:15.445805 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-tbww8" event={"ID":"33af3c2e-dea7-4448-8b28-b579d77490b9","Type":"ContainerStarted","Data":"849e41c94f217d7fbbec3b38946489dc46e00bb66fcfcb08151c2e3f0587eb24"} Nov 23 14:59:15 crc kubenswrapper[5050]: I1123 14:59:15.484913 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-tbww8" podStartSLOduration=24.491135228 podStartE2EDuration="29.484880511s" podCreationTimestamp="2025-11-23 14:58:46 +0000 UTC" firstStartedPulling="2025-11-23 14:59:08.748082074 +0000 UTC m=+1043.915078559" lastFinishedPulling="2025-11-23 14:59:13.741827357 +0000 UTC m=+1048.908823842" observedRunningTime="2025-11-23 14:59:15.478797423 +0000 UTC m=+1050.645793908" watchObservedRunningTime="2025-11-23 14:59:15.484880511 +0000 UTC m=+1050.651876996" Nov 23 14:59:16 crc kubenswrapper[5050]: I1123 14:59:16.461366 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"5eff539e-c641-4873-aeae-450aaf0b4ac8","Type":"ContainerStarted","Data":"e92ae2c79d385eac2b74e01fa6c265a4e1892dff4fe0ad87c1b8d47c3cad30bd"} Nov 23 14:59:16 crc kubenswrapper[5050]: I1123 14:59:16.463284 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-n9v8v" event={"ID":"c21366ac-a7da-4cac-8332-753659210595","Type":"ContainerStarted","Data":"1b3d79783367fa4b7fae45d92ec2a13146512723daef532131bce7af8af900e8"} Nov 23 14:59:16 crc kubenswrapper[5050]: I1123 14:59:16.465081 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"f92353db-5352-4216-ad2d-50242e58dfb7","Type":"ContainerStarted","Data":"8fa79fa4d3da8bc1f764d8d40be5053a2b08532c54d8bfc67df9434fce12f626"} Nov 23 14:59:16 crc kubenswrapper[5050]: I1123 14:59:16.465305 5050 
kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-tbww8" Nov 23 14:59:17 crc kubenswrapper[5050]: I1123 14:59:17.477834 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"716a14cd-88b9-4e18-a781-6dbfaad7634c","Type":"ContainerStarted","Data":"e0aa36f5771c43f6dd0d81d07cdda1b7e03ac841535ddf7564847c9249d6a707"} Nov 23 14:59:17 crc kubenswrapper[5050]: I1123 14:59:17.484175 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-n9v8v" event={"ID":"c21366ac-a7da-4cac-8332-753659210595","Type":"ContainerStarted","Data":"a0820261b299cc33db42946f12f1f4467f892f1598cff4f933ab45def65d8af9"} Nov 23 14:59:17 crc kubenswrapper[5050]: I1123 14:59:17.484478 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-n9v8v" Nov 23 14:59:17 crc kubenswrapper[5050]: I1123 14:59:17.512073 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-nb-0" podStartSLOduration=21.627100926 podStartE2EDuration="29.512041195s" podCreationTimestamp="2025-11-23 14:58:48 +0000 UTC" firstStartedPulling="2025-11-23 14:59:09.31027304 +0000 UTC m=+1044.477269545" lastFinishedPulling="2025-11-23 14:59:17.195213319 +0000 UTC m=+1052.362209814" observedRunningTime="2025-11-23 14:59:17.503321624 +0000 UTC m=+1052.670318139" watchObservedRunningTime="2025-11-23 14:59:17.512041195 +0000 UTC m=+1052.679037690" Nov 23 14:59:17 crc kubenswrapper[5050]: I1123 14:59:17.533525 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-ovs-n9v8v" podStartSLOduration=27.106613201 podStartE2EDuration="31.533495818s" podCreationTimestamp="2025-11-23 14:58:46 +0000 UTC" firstStartedPulling="2025-11-23 14:59:09.316075571 +0000 UTC m=+1044.483072066" lastFinishedPulling="2025-11-23 14:59:13.742958198 +0000 UTC m=+1048.909954683" observedRunningTime="2025-11-23 14:59:17.531763801 +0000 UTC m=+1052.698760306" watchObservedRunningTime="2025-11-23 14:59:17.533495818 +0000 UTC m=+1052.700492313" Nov 23 14:59:18 crc kubenswrapper[5050]: I1123 14:59:18.509673 5050 generic.go:334] "Generic (PLEG): container finished" podID="0a256986-024f-45e6-9b86-b5a724ab5f6e" containerID="ebcffda6c310da77ec9c4ccf0ade9f7141b3251552ce8adddbcfbe4c14dc54c1" exitCode=0 Nov 23 14:59:18 crc kubenswrapper[5050]: I1123 14:59:18.509828 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"0a256986-024f-45e6-9b86-b5a724ab5f6e","Type":"ContainerDied","Data":"ebcffda6c310da77ec9c4ccf0ade9f7141b3251552ce8adddbcfbe4c14dc54c1"} Nov 23 14:59:18 crc kubenswrapper[5050]: I1123 14:59:18.512517 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"85ca597a-bf71-43bb-b57c-f840b37f196f","Type":"ContainerStarted","Data":"a2b90cc293f875a53ea5d05893281bf2cf0a6970ffc907d52d3260fc55c43311"} Nov 23 14:59:18 crc kubenswrapper[5050]: I1123 14:59:18.513517 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-n9v8v" Nov 23 14:59:19 crc kubenswrapper[5050]: I1123 14:59:19.529287 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"0a256986-024f-45e6-9b86-b5a724ab5f6e","Type":"ContainerStarted","Data":"e05743a215c5ada706341f4409a1a724e3949e0173a53be6eeb47dacec86519e"} Nov 23 14:59:19 crc kubenswrapper[5050]: I1123 14:59:19.533402 5050 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"f81d4e06-4245-4355-89d6-ddc438c06f29","Type":"ContainerStarted","Data":"40c87c219c9a5646179505e0d69951602c9a0948f6df53d02f15fd03bcb735c9"} Nov 23 14:59:19 crc kubenswrapper[5050]: I1123 14:59:19.604387 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-galera-0" podStartSLOduration=7.692426789 podStartE2EDuration="41.604361262s" podCreationTimestamp="2025-11-23 14:58:38 +0000 UTC" firstStartedPulling="2025-11-23 14:58:40.116859305 +0000 UTC m=+1015.283855790" lastFinishedPulling="2025-11-23 14:59:14.028793788 +0000 UTC m=+1049.195790263" observedRunningTime="2025-11-23 14:59:19.572639015 +0000 UTC m=+1054.739635500" watchObservedRunningTime="2025-11-23 14:59:19.604361262 +0000 UTC m=+1054.771357777" Nov 23 14:59:19 crc kubenswrapper[5050]: I1123 14:59:19.607394 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-sb-0" podStartSLOduration=23.545944466 podStartE2EDuration="33.607388396s" podCreationTimestamp="2025-11-23 14:58:46 +0000 UTC" firstStartedPulling="2025-11-23 14:59:08.648607265 +0000 UTC m=+1043.815603760" lastFinishedPulling="2025-11-23 14:59:18.710051195 +0000 UTC m=+1053.877047690" observedRunningTime="2025-11-23 14:59:19.603604411 +0000 UTC m=+1054.770600896" watchObservedRunningTime="2025-11-23 14:59:19.607388396 +0000 UTC m=+1054.774384891" Nov 23 14:59:19 crc kubenswrapper[5050]: I1123 14:59:19.978684 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-nb-0" Nov 23 14:59:19 crc kubenswrapper[5050]: I1123 14:59:19.978757 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-nb-0" Nov 23 14:59:20 crc kubenswrapper[5050]: I1123 14:59:20.053822 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-nb-0" Nov 23 14:59:20 crc kubenswrapper[5050]: I1123 14:59:20.601186 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-nb-0" Nov 23 14:59:20 crc kubenswrapper[5050]: I1123 14:59:20.909853 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-sdz75"] Nov 23 14:59:20 crc kubenswrapper[5050]: I1123 14:59:20.970473 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5bf47b49b7-zhbvv"] Nov 23 14:59:20 crc kubenswrapper[5050]: I1123 14:59:20.985794 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5bf47b49b7-zhbvv" Nov 23 14:59:20 crc kubenswrapper[5050]: I1123 14:59:20.997681 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-nb" Nov 23 14:59:21 crc kubenswrapper[5050]: I1123 14:59:21.035012 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5bf47b49b7-zhbvv"] Nov 23 14:59:21 crc kubenswrapper[5050]: I1123 14:59:21.061700 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-metrics-22cnp"] Nov 23 14:59:21 crc kubenswrapper[5050]: I1123 14:59:21.063098 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-metrics-22cnp" Nov 23 14:59:21 crc kubenswrapper[5050]: I1123 14:59:21.066778 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-metrics-config" Nov 23 14:59:21 crc kubenswrapper[5050]: I1123 14:59:21.074155 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-22cnp"] Nov 23 14:59:21 crc kubenswrapper[5050]: I1123 14:59:21.081235 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4a7ed50c-3990-423d-9fd6-1165da59c5c9-config\") pod \"ovn-controller-metrics-22cnp\" (UID: \"4a7ed50c-3990-423d-9fd6-1165da59c5c9\") " pod="openstack/ovn-controller-metrics-22cnp" Nov 23 14:59:21 crc kubenswrapper[5050]: I1123 14:59:21.081312 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-np29s\" (UniqueName: \"kubernetes.io/projected/4a7ed50c-3990-423d-9fd6-1165da59c5c9-kube-api-access-np29s\") pod \"ovn-controller-metrics-22cnp\" (UID: \"4a7ed50c-3990-423d-9fd6-1165da59c5c9\") " pod="openstack/ovn-controller-metrics-22cnp" Nov 23 14:59:21 crc kubenswrapper[5050]: I1123 14:59:21.081374 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/4a7ed50c-3990-423d-9fd6-1165da59c5c9-ovs-rundir\") pod \"ovn-controller-metrics-22cnp\" (UID: \"4a7ed50c-3990-423d-9fd6-1165da59c5c9\") " pod="openstack/ovn-controller-metrics-22cnp" Nov 23 14:59:21 crc kubenswrapper[5050]: I1123 14:59:21.082374 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bd77ef0c-49e7-44ea-bf7f-383a60ca3012-dns-svc\") pod \"dnsmasq-dns-5bf47b49b7-zhbvv\" (UID: \"bd77ef0c-49e7-44ea-bf7f-383a60ca3012\") " pod="openstack/dnsmasq-dns-5bf47b49b7-zhbvv" Nov 23 14:59:21 crc kubenswrapper[5050]: I1123 14:59:21.082417 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bd77ef0c-49e7-44ea-bf7f-383a60ca3012-ovsdbserver-nb\") pod \"dnsmasq-dns-5bf47b49b7-zhbvv\" (UID: \"bd77ef0c-49e7-44ea-bf7f-383a60ca3012\") " pod="openstack/dnsmasq-dns-5bf47b49b7-zhbvv" Nov 23 14:59:21 crc kubenswrapper[5050]: I1123 14:59:21.082476 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/4a7ed50c-3990-423d-9fd6-1165da59c5c9-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-22cnp\" (UID: \"4a7ed50c-3990-423d-9fd6-1165da59c5c9\") " pod="openstack/ovn-controller-metrics-22cnp" Nov 23 14:59:21 crc kubenswrapper[5050]: I1123 14:59:21.082494 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bd77ef0c-49e7-44ea-bf7f-383a60ca3012-config\") pod \"dnsmasq-dns-5bf47b49b7-zhbvv\" (UID: \"bd77ef0c-49e7-44ea-bf7f-383a60ca3012\") " pod="openstack/dnsmasq-dns-5bf47b49b7-zhbvv" Nov 23 14:59:21 crc kubenswrapper[5050]: I1123 14:59:21.082625 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b4f6m\" (UniqueName: \"kubernetes.io/projected/bd77ef0c-49e7-44ea-bf7f-383a60ca3012-kube-api-access-b4f6m\") pod 
\"dnsmasq-dns-5bf47b49b7-zhbvv\" (UID: \"bd77ef0c-49e7-44ea-bf7f-383a60ca3012\") " pod="openstack/dnsmasq-dns-5bf47b49b7-zhbvv" Nov 23 14:59:21 crc kubenswrapper[5050]: I1123 14:59:21.082704 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/4a7ed50c-3990-423d-9fd6-1165da59c5c9-ovn-rundir\") pod \"ovn-controller-metrics-22cnp\" (UID: \"4a7ed50c-3990-423d-9fd6-1165da59c5c9\") " pod="openstack/ovn-controller-metrics-22cnp" Nov 23 14:59:21 crc kubenswrapper[5050]: I1123 14:59:21.082761 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4a7ed50c-3990-423d-9fd6-1165da59c5c9-combined-ca-bundle\") pod \"ovn-controller-metrics-22cnp\" (UID: \"4a7ed50c-3990-423d-9fd6-1165da59c5c9\") " pod="openstack/ovn-controller-metrics-22cnp" Nov 23 14:59:21 crc kubenswrapper[5050]: I1123 14:59:21.185056 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5ccc8479f9-pc2bl"] Nov 23 14:59:21 crc kubenswrapper[5050]: I1123 14:59:21.202859 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bd77ef0c-49e7-44ea-bf7f-383a60ca3012-ovsdbserver-nb\") pod \"dnsmasq-dns-5bf47b49b7-zhbvv\" (UID: \"bd77ef0c-49e7-44ea-bf7f-383a60ca3012\") " pod="openstack/dnsmasq-dns-5bf47b49b7-zhbvv" Nov 23 14:59:21 crc kubenswrapper[5050]: I1123 14:59:21.202937 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/4a7ed50c-3990-423d-9fd6-1165da59c5c9-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-22cnp\" (UID: \"4a7ed50c-3990-423d-9fd6-1165da59c5c9\") " pod="openstack/ovn-controller-metrics-22cnp" Nov 23 14:59:21 crc kubenswrapper[5050]: I1123 14:59:21.202966 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bd77ef0c-49e7-44ea-bf7f-383a60ca3012-config\") pod \"dnsmasq-dns-5bf47b49b7-zhbvv\" (UID: \"bd77ef0c-49e7-44ea-bf7f-383a60ca3012\") " pod="openstack/dnsmasq-dns-5bf47b49b7-zhbvv" Nov 23 14:59:21 crc kubenswrapper[5050]: I1123 14:59:21.203005 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b4f6m\" (UniqueName: \"kubernetes.io/projected/bd77ef0c-49e7-44ea-bf7f-383a60ca3012-kube-api-access-b4f6m\") pod \"dnsmasq-dns-5bf47b49b7-zhbvv\" (UID: \"bd77ef0c-49e7-44ea-bf7f-383a60ca3012\") " pod="openstack/dnsmasq-dns-5bf47b49b7-zhbvv" Nov 23 14:59:21 crc kubenswrapper[5050]: I1123 14:59:21.203059 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/4a7ed50c-3990-423d-9fd6-1165da59c5c9-ovn-rundir\") pod \"ovn-controller-metrics-22cnp\" (UID: \"4a7ed50c-3990-423d-9fd6-1165da59c5c9\") " pod="openstack/ovn-controller-metrics-22cnp" Nov 23 14:59:21 crc kubenswrapper[5050]: I1123 14:59:21.203093 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4a7ed50c-3990-423d-9fd6-1165da59c5c9-combined-ca-bundle\") pod \"ovn-controller-metrics-22cnp\" (UID: \"4a7ed50c-3990-423d-9fd6-1165da59c5c9\") " pod="openstack/ovn-controller-metrics-22cnp" Nov 23 14:59:21 crc kubenswrapper[5050]: I1123 14:59:21.203133 5050 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4a7ed50c-3990-423d-9fd6-1165da59c5c9-config\") pod \"ovn-controller-metrics-22cnp\" (UID: \"4a7ed50c-3990-423d-9fd6-1165da59c5c9\") " pod="openstack/ovn-controller-metrics-22cnp" Nov 23 14:59:21 crc kubenswrapper[5050]: I1123 14:59:21.203185 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-np29s\" (UniqueName: \"kubernetes.io/projected/4a7ed50c-3990-423d-9fd6-1165da59c5c9-kube-api-access-np29s\") pod \"ovn-controller-metrics-22cnp\" (UID: \"4a7ed50c-3990-423d-9fd6-1165da59c5c9\") " pod="openstack/ovn-controller-metrics-22cnp" Nov 23 14:59:21 crc kubenswrapper[5050]: I1123 14:59:21.203222 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/4a7ed50c-3990-423d-9fd6-1165da59c5c9-ovs-rundir\") pod \"ovn-controller-metrics-22cnp\" (UID: \"4a7ed50c-3990-423d-9fd6-1165da59c5c9\") " pod="openstack/ovn-controller-metrics-22cnp" Nov 23 14:59:21 crc kubenswrapper[5050]: I1123 14:59:21.203279 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bd77ef0c-49e7-44ea-bf7f-383a60ca3012-dns-svc\") pod \"dnsmasq-dns-5bf47b49b7-zhbvv\" (UID: \"bd77ef0c-49e7-44ea-bf7f-383a60ca3012\") " pod="openstack/dnsmasq-dns-5bf47b49b7-zhbvv" Nov 23 14:59:21 crc kubenswrapper[5050]: I1123 14:59:21.203652 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/4a7ed50c-3990-423d-9fd6-1165da59c5c9-ovn-rundir\") pod \"ovn-controller-metrics-22cnp\" (UID: \"4a7ed50c-3990-423d-9fd6-1165da59c5c9\") " pod="openstack/ovn-controller-metrics-22cnp" Nov 23 14:59:21 crc kubenswrapper[5050]: I1123 14:59:21.204378 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bd77ef0c-49e7-44ea-bf7f-383a60ca3012-dns-svc\") pod \"dnsmasq-dns-5bf47b49b7-zhbvv\" (UID: \"bd77ef0c-49e7-44ea-bf7f-383a60ca3012\") " pod="openstack/dnsmasq-dns-5bf47b49b7-zhbvv" Nov 23 14:59:21 crc kubenswrapper[5050]: I1123 14:59:21.204412 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bd77ef0c-49e7-44ea-bf7f-383a60ca3012-ovsdbserver-nb\") pod \"dnsmasq-dns-5bf47b49b7-zhbvv\" (UID: \"bd77ef0c-49e7-44ea-bf7f-383a60ca3012\") " pod="openstack/dnsmasq-dns-5bf47b49b7-zhbvv" Nov 23 14:59:21 crc kubenswrapper[5050]: I1123 14:59:21.205066 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bd77ef0c-49e7-44ea-bf7f-383a60ca3012-config\") pod \"dnsmasq-dns-5bf47b49b7-zhbvv\" (UID: \"bd77ef0c-49e7-44ea-bf7f-383a60ca3012\") " pod="openstack/dnsmasq-dns-5bf47b49b7-zhbvv" Nov 23 14:59:21 crc kubenswrapper[5050]: I1123 14:59:21.205093 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4a7ed50c-3990-423d-9fd6-1165da59c5c9-config\") pod \"ovn-controller-metrics-22cnp\" (UID: \"4a7ed50c-3990-423d-9fd6-1165da59c5c9\") " pod="openstack/ovn-controller-metrics-22cnp" Nov 23 14:59:21 crc kubenswrapper[5050]: I1123 14:59:21.210818 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/4a7ed50c-3990-423d-9fd6-1165da59c5c9-ovs-rundir\") pod 
\"ovn-controller-metrics-22cnp\" (UID: \"4a7ed50c-3990-423d-9fd6-1165da59c5c9\") " pod="openstack/ovn-controller-metrics-22cnp" Nov 23 14:59:21 crc kubenswrapper[5050]: I1123 14:59:21.211642 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/4a7ed50c-3990-423d-9fd6-1165da59c5c9-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-22cnp\" (UID: \"4a7ed50c-3990-423d-9fd6-1165da59c5c9\") " pod="openstack/ovn-controller-metrics-22cnp" Nov 23 14:59:21 crc kubenswrapper[5050]: I1123 14:59:21.241009 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4a7ed50c-3990-423d-9fd6-1165da59c5c9-combined-ca-bundle\") pod \"ovn-controller-metrics-22cnp\" (UID: \"4a7ed50c-3990-423d-9fd6-1165da59c5c9\") " pod="openstack/ovn-controller-metrics-22cnp" Nov 23 14:59:21 crc kubenswrapper[5050]: I1123 14:59:21.266594 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b4f6m\" (UniqueName: \"kubernetes.io/projected/bd77ef0c-49e7-44ea-bf7f-383a60ca3012-kube-api-access-b4f6m\") pod \"dnsmasq-dns-5bf47b49b7-zhbvv\" (UID: \"bd77ef0c-49e7-44ea-bf7f-383a60ca3012\") " pod="openstack/dnsmasq-dns-5bf47b49b7-zhbvv" Nov 23 14:59:21 crc kubenswrapper[5050]: I1123 14:59:21.285995 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-np29s\" (UniqueName: \"kubernetes.io/projected/4a7ed50c-3990-423d-9fd6-1165da59c5c9-kube-api-access-np29s\") pod \"ovn-controller-metrics-22cnp\" (UID: \"4a7ed50c-3990-423d-9fd6-1165da59c5c9\") " pod="openstack/ovn-controller-metrics-22cnp" Nov 23 14:59:21 crc kubenswrapper[5050]: I1123 14:59:21.329230 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-8554648995-n7gcr"] Nov 23 14:59:21 crc kubenswrapper[5050]: I1123 14:59:21.330634 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8554648995-n7gcr" Nov 23 14:59:21 crc kubenswrapper[5050]: I1123 14:59:21.342659 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-sb" Nov 23 14:59:21 crc kubenswrapper[5050]: I1123 14:59:21.353960 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-8554648995-n7gcr"] Nov 23 14:59:21 crc kubenswrapper[5050]: I1123 14:59:21.384721 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5bf47b49b7-zhbvv" Nov 23 14:59:21 crc kubenswrapper[5050]: I1123 14:59:21.408474 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-67vlg\" (UniqueName: \"kubernetes.io/projected/f115d163-3bbf-44ac-8c45-6f8753667208-kube-api-access-67vlg\") pod \"dnsmasq-dns-8554648995-n7gcr\" (UID: \"f115d163-3bbf-44ac-8c45-6f8753667208\") " pod="openstack/dnsmasq-dns-8554648995-n7gcr" Nov 23 14:59:21 crc kubenswrapper[5050]: I1123 14:59:21.408557 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f115d163-3bbf-44ac-8c45-6f8753667208-ovsdbserver-sb\") pod \"dnsmasq-dns-8554648995-n7gcr\" (UID: \"f115d163-3bbf-44ac-8c45-6f8753667208\") " pod="openstack/dnsmasq-dns-8554648995-n7gcr" Nov 23 14:59:21 crc kubenswrapper[5050]: I1123 14:59:21.408580 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f115d163-3bbf-44ac-8c45-6f8753667208-dns-svc\") pod \"dnsmasq-dns-8554648995-n7gcr\" (UID: \"f115d163-3bbf-44ac-8c45-6f8753667208\") " pod="openstack/dnsmasq-dns-8554648995-n7gcr" Nov 23 14:59:21 crc kubenswrapper[5050]: I1123 14:59:21.408716 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f115d163-3bbf-44ac-8c45-6f8753667208-config\") pod \"dnsmasq-dns-8554648995-n7gcr\" (UID: \"f115d163-3bbf-44ac-8c45-6f8753667208\") " pod="openstack/dnsmasq-dns-8554648995-n7gcr" Nov 23 14:59:21 crc kubenswrapper[5050]: I1123 14:59:21.408739 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f115d163-3bbf-44ac-8c45-6f8753667208-ovsdbserver-nb\") pod \"dnsmasq-dns-8554648995-n7gcr\" (UID: \"f115d163-3bbf-44ac-8c45-6f8753667208\") " pod="openstack/dnsmasq-dns-8554648995-n7gcr" Nov 23 14:59:21 crc kubenswrapper[5050]: I1123 14:59:21.430752 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/memcached-0" Nov 23 14:59:21 crc kubenswrapper[5050]: I1123 14:59:21.454953 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-metrics-22cnp" Nov 23 14:59:21 crc kubenswrapper[5050]: I1123 14:59:21.509831 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-67vlg\" (UniqueName: \"kubernetes.io/projected/f115d163-3bbf-44ac-8c45-6f8753667208-kube-api-access-67vlg\") pod \"dnsmasq-dns-8554648995-n7gcr\" (UID: \"f115d163-3bbf-44ac-8c45-6f8753667208\") " pod="openstack/dnsmasq-dns-8554648995-n7gcr" Nov 23 14:59:21 crc kubenswrapper[5050]: I1123 14:59:21.509888 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f115d163-3bbf-44ac-8c45-6f8753667208-ovsdbserver-sb\") pod \"dnsmasq-dns-8554648995-n7gcr\" (UID: \"f115d163-3bbf-44ac-8c45-6f8753667208\") " pod="openstack/dnsmasq-dns-8554648995-n7gcr" Nov 23 14:59:21 crc kubenswrapper[5050]: I1123 14:59:21.509920 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f115d163-3bbf-44ac-8c45-6f8753667208-dns-svc\") pod \"dnsmasq-dns-8554648995-n7gcr\" (UID: \"f115d163-3bbf-44ac-8c45-6f8753667208\") " pod="openstack/dnsmasq-dns-8554648995-n7gcr" Nov 23 14:59:21 crc kubenswrapper[5050]: I1123 14:59:21.510060 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f115d163-3bbf-44ac-8c45-6f8753667208-config\") pod \"dnsmasq-dns-8554648995-n7gcr\" (UID: \"f115d163-3bbf-44ac-8c45-6f8753667208\") " pod="openstack/dnsmasq-dns-8554648995-n7gcr" Nov 23 14:59:21 crc kubenswrapper[5050]: I1123 14:59:21.510079 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f115d163-3bbf-44ac-8c45-6f8753667208-ovsdbserver-nb\") pod \"dnsmasq-dns-8554648995-n7gcr\" (UID: \"f115d163-3bbf-44ac-8c45-6f8753667208\") " pod="openstack/dnsmasq-dns-8554648995-n7gcr" Nov 23 14:59:21 crc kubenswrapper[5050]: I1123 14:59:21.511090 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f115d163-3bbf-44ac-8c45-6f8753667208-ovsdbserver-nb\") pod \"dnsmasq-dns-8554648995-n7gcr\" (UID: \"f115d163-3bbf-44ac-8c45-6f8753667208\") " pod="openstack/dnsmasq-dns-8554648995-n7gcr" Nov 23 14:59:21 crc kubenswrapper[5050]: I1123 14:59:21.511935 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f115d163-3bbf-44ac-8c45-6f8753667208-ovsdbserver-sb\") pod \"dnsmasq-dns-8554648995-n7gcr\" (UID: \"f115d163-3bbf-44ac-8c45-6f8753667208\") " pod="openstack/dnsmasq-dns-8554648995-n7gcr" Nov 23 14:59:21 crc kubenswrapper[5050]: I1123 14:59:21.512461 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f115d163-3bbf-44ac-8c45-6f8753667208-dns-svc\") pod \"dnsmasq-dns-8554648995-n7gcr\" (UID: \"f115d163-3bbf-44ac-8c45-6f8753667208\") " pod="openstack/dnsmasq-dns-8554648995-n7gcr" Nov 23 14:59:21 crc kubenswrapper[5050]: I1123 14:59:21.515049 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f115d163-3bbf-44ac-8c45-6f8753667208-config\") pod \"dnsmasq-dns-8554648995-n7gcr\" (UID: \"f115d163-3bbf-44ac-8c45-6f8753667208\") " pod="openstack/dnsmasq-dns-8554648995-n7gcr" Nov 23 14:59:21 crc kubenswrapper[5050]: I1123 
14:59:21.534058 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-sdz75" Nov 23 14:59:21 crc kubenswrapper[5050]: I1123 14:59:21.535425 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-67vlg\" (UniqueName: \"kubernetes.io/projected/f115d163-3bbf-44ac-8c45-6f8753667208-kube-api-access-67vlg\") pod \"dnsmasq-dns-8554648995-n7gcr\" (UID: \"f115d163-3bbf-44ac-8c45-6f8753667208\") " pod="openstack/dnsmasq-dns-8554648995-n7gcr" Nov 23 14:59:21 crc kubenswrapper[5050]: I1123 14:59:21.564336 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-sdz75" Nov 23 14:59:21 crc kubenswrapper[5050]: I1123 14:59:21.611402 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/25891e09-f495-4de3-b5d2-496188bf0981-config\") pod \"25891e09-f495-4de3-b5d2-496188bf0981\" (UID: \"25891e09-f495-4de3-b5d2-496188bf0981\") " Nov 23 14:59:21 crc kubenswrapper[5050]: I1123 14:59:21.611581 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dcn7x\" (UniqueName: \"kubernetes.io/projected/25891e09-f495-4de3-b5d2-496188bf0981-kube-api-access-dcn7x\") pod \"25891e09-f495-4de3-b5d2-496188bf0981\" (UID: \"25891e09-f495-4de3-b5d2-496188bf0981\") " Nov 23 14:59:21 crc kubenswrapper[5050]: I1123 14:59:21.611692 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/25891e09-f495-4de3-b5d2-496188bf0981-dns-svc\") pod \"25891e09-f495-4de3-b5d2-496188bf0981\" (UID: \"25891e09-f495-4de3-b5d2-496188bf0981\") " Nov 23 14:59:21 crc kubenswrapper[5050]: I1123 14:59:21.611831 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-sb-0" Nov 23 14:59:21 crc kubenswrapper[5050]: I1123 14:59:21.611866 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-sdz75" event={"ID":"25891e09-f495-4de3-b5d2-496188bf0981","Type":"ContainerDied","Data":"05986b26b5382e18fe6493916074f8fc0cb73d0adcf2a5dfb725445c9886395b"} Nov 23 14:59:21 crc kubenswrapper[5050]: I1123 14:59:21.612259 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/25891e09-f495-4de3-b5d2-496188bf0981-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "25891e09-f495-4de3-b5d2-496188bf0981" (UID: "25891e09-f495-4de3-b5d2-496188bf0981"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 14:59:21 crc kubenswrapper[5050]: I1123 14:59:21.612277 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/25891e09-f495-4de3-b5d2-496188bf0981-config" (OuterVolumeSpecName: "config") pod "25891e09-f495-4de3-b5d2-496188bf0981" (UID: "25891e09-f495-4de3-b5d2-496188bf0981"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 14:59:21 crc kubenswrapper[5050]: I1123 14:59:21.613397 5050 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/25891e09-f495-4de3-b5d2-496188bf0981-config\") on node \"crc\" DevicePath \"\"" Nov 23 14:59:21 crc kubenswrapper[5050]: I1123 14:59:21.613413 5050 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/25891e09-f495-4de3-b5d2-496188bf0981-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 23 14:59:21 crc kubenswrapper[5050]: I1123 14:59:21.623835 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25891e09-f495-4de3-b5d2-496188bf0981-kube-api-access-dcn7x" (OuterVolumeSpecName: "kube-api-access-dcn7x") pod "25891e09-f495-4de3-b5d2-496188bf0981" (UID: "25891e09-f495-4de3-b5d2-496188bf0981"). InnerVolumeSpecName "kube-api-access-dcn7x". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 14:59:21 crc kubenswrapper[5050]: I1123 14:59:21.668605 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-sb-0" Nov 23 14:59:21 crc kubenswrapper[5050]: I1123 14:59:21.694742 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8554648995-n7gcr" Nov 23 14:59:21 crc kubenswrapper[5050]: I1123 14:59:21.714569 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dcn7x\" (UniqueName: \"kubernetes.io/projected/25891e09-f495-4de3-b5d2-496188bf0981-kube-api-access-dcn7x\") on node \"crc\" DevicePath \"\"" Nov 23 14:59:21 crc kubenswrapper[5050]: I1123 14:59:21.947872 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-sdz75"] Nov 23 14:59:21 crc kubenswrapper[5050]: I1123 14:59:21.954957 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-sdz75"] Nov 23 14:59:21 crc kubenswrapper[5050]: E1123 14:59:21.979083 5050 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod25891e09_f495_4de3_b5d2_496188bf0981.slice/crio-05986b26b5382e18fe6493916074f8fc0cb73d0adcf2a5dfb725445c9886395b\": RecentStats: unable to find data in memory cache]" Nov 23 14:59:22 crc kubenswrapper[5050]: I1123 14:59:22.021927 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5bf47b49b7-zhbvv"] Nov 23 14:59:22 crc kubenswrapper[5050]: W1123 14:59:22.027059 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbd77ef0c_49e7_44ea_bf7f_383a60ca3012.slice/crio-f60bc5764a710ce746b5e13dec68bab5dfcb23037386a97d08383c83f1129b4e WatchSource:0}: Error finding container f60bc5764a710ce746b5e13dec68bab5dfcb23037386a97d08383c83f1129b4e: Status 404 returned error can't find the container with id f60bc5764a710ce746b5e13dec68bab5dfcb23037386a97d08383c83f1129b4e Nov 23 14:59:22 crc kubenswrapper[5050]: I1123 14:59:22.092336 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-22cnp"] Nov 23 14:59:22 crc kubenswrapper[5050]: W1123 14:59:22.213289 5050 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf115d163_3bbf_44ac_8c45_6f8753667208.slice/crio-feb58f9c6f53de96a8f280741d4fecc1371e663929d4e101b7a292f83ae4da71 WatchSource:0}: Error finding container feb58f9c6f53de96a8f280741d4fecc1371e663929d4e101b7a292f83ae4da71: Status 404 returned error can't find the container with id feb58f9c6f53de96a8f280741d4fecc1371e663929d4e101b7a292f83ae4da71 Nov 23 14:59:22 crc kubenswrapper[5050]: I1123 14:59:22.215950 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-8554648995-n7gcr"] Nov 23 14:59:22 crc kubenswrapper[5050]: I1123 14:59:22.576406 5050 generic.go:334] "Generic (PLEG): container finished" podID="99bf0fd0-57d8-459a-a6b1-f538e878af9b" containerID="26e8ee294644a48a3465452748ebe02568ba72a2301e6c918d4ed998eef51523" exitCode=0 Nov 23 14:59:22 crc kubenswrapper[5050]: I1123 14:59:22.576864 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5ccc8479f9-pc2bl" event={"ID":"99bf0fd0-57d8-459a-a6b1-f538e878af9b","Type":"ContainerDied","Data":"26e8ee294644a48a3465452748ebe02568ba72a2301e6c918d4ed998eef51523"} Nov 23 14:59:22 crc kubenswrapper[5050]: I1123 14:59:22.580369 5050 generic.go:334] "Generic (PLEG): container finished" podID="85ca597a-bf71-43bb-b57c-f840b37f196f" containerID="a2b90cc293f875a53ea5d05893281bf2cf0a6970ffc907d52d3260fc55c43311" exitCode=0 Nov 23 14:59:22 crc kubenswrapper[5050]: I1123 14:59:22.580531 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"85ca597a-bf71-43bb-b57c-f840b37f196f","Type":"ContainerDied","Data":"a2b90cc293f875a53ea5d05893281bf2cf0a6970ffc907d52d3260fc55c43311"} Nov 23 14:59:22 crc kubenswrapper[5050]: I1123 14:59:22.585678 5050 generic.go:334] "Generic (PLEG): container finished" podID="f115d163-3bbf-44ac-8c45-6f8753667208" containerID="a3dccc85a62c2f329e73f72bb946446b9d1b93dc280fbc7f88851bfcf7bacf42" exitCode=0 Nov 23 14:59:22 crc kubenswrapper[5050]: I1123 14:59:22.585834 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8554648995-n7gcr" event={"ID":"f115d163-3bbf-44ac-8c45-6f8753667208","Type":"ContainerDied","Data":"a3dccc85a62c2f329e73f72bb946446b9d1b93dc280fbc7f88851bfcf7bacf42"} Nov 23 14:59:22 crc kubenswrapper[5050]: I1123 14:59:22.585910 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8554648995-n7gcr" event={"ID":"f115d163-3bbf-44ac-8c45-6f8753667208","Type":"ContainerStarted","Data":"feb58f9c6f53de96a8f280741d4fecc1371e663929d4e101b7a292f83ae4da71"} Nov 23 14:59:22 crc kubenswrapper[5050]: I1123 14:59:22.587875 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-22cnp" event={"ID":"4a7ed50c-3990-423d-9fd6-1165da59c5c9","Type":"ContainerStarted","Data":"7a08082700302fc03c17eee134d553b46f2157cc92e9951f31627a535a42e5af"} Nov 23 14:59:22 crc kubenswrapper[5050]: I1123 14:59:22.587999 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-22cnp" event={"ID":"4a7ed50c-3990-423d-9fd6-1165da59c5c9","Type":"ContainerStarted","Data":"429b35bcdb10b05c74108505605b277102d3dfbca26dcd7d7d00ef19354c7d9b"} Nov 23 14:59:22 crc kubenswrapper[5050]: I1123 14:59:22.590924 5050 generic.go:334] "Generic (PLEG): container finished" podID="bd77ef0c-49e7-44ea-bf7f-383a60ca3012" containerID="6492298288de9f874d9f80a4b99954a7040ed7d8739bb3c5f5c29a497762e27e" exitCode=0 Nov 23 14:59:22 crc kubenswrapper[5050]: I1123 14:59:22.592288 5050 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5bf47b49b7-zhbvv" event={"ID":"bd77ef0c-49e7-44ea-bf7f-383a60ca3012","Type":"ContainerDied","Data":"6492298288de9f874d9f80a4b99954a7040ed7d8739bb3c5f5c29a497762e27e"} Nov 23 14:59:22 crc kubenswrapper[5050]: I1123 14:59:22.592340 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-sb-0" Nov 23 14:59:22 crc kubenswrapper[5050]: I1123 14:59:22.592356 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5bf47b49b7-zhbvv" event={"ID":"bd77ef0c-49e7-44ea-bf7f-383a60ca3012","Type":"ContainerStarted","Data":"f60bc5764a710ce746b5e13dec68bab5dfcb23037386a97d08383c83f1129b4e"} Nov 23 14:59:22 crc kubenswrapper[5050]: I1123 14:59:22.657323 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-sb-0" Nov 23 14:59:22 crc kubenswrapper[5050]: I1123 14:59:22.667221 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-metrics-22cnp" podStartSLOduration=2.66719896 podStartE2EDuration="2.66719896s" podCreationTimestamp="2025-11-23 14:59:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 14:59:22.659294692 +0000 UTC m=+1057.826291177" watchObservedRunningTime="2025-11-23 14:59:22.66719896 +0000 UTC m=+1057.834195445" Nov 23 14:59:22 crc kubenswrapper[5050]: I1123 14:59:22.923052 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5ccc8479f9-pc2bl" Nov 23 14:59:22 crc kubenswrapper[5050]: I1123 14:59:22.926437 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-northd-0"] Nov 23 14:59:22 crc kubenswrapper[5050]: E1123 14:59:22.926870 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="99bf0fd0-57d8-459a-a6b1-f538e878af9b" containerName="init" Nov 23 14:59:22 crc kubenswrapper[5050]: I1123 14:59:22.926887 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="99bf0fd0-57d8-459a-a6b1-f538e878af9b" containerName="init" Nov 23 14:59:22 crc kubenswrapper[5050]: I1123 14:59:22.927060 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="99bf0fd0-57d8-459a-a6b1-f538e878af9b" containerName="init" Nov 23 14:59:22 crc kubenswrapper[5050]: I1123 14:59:22.928093 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-northd-0" Nov 23 14:59:22 crc kubenswrapper[5050]: I1123 14:59:22.930510 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovnnorthd-ovnnorthd-dockercfg-c6kv5" Nov 23 14:59:22 crc kubenswrapper[5050]: I1123 14:59:22.930928 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-scripts" Nov 23 14:59:22 crc kubenswrapper[5050]: I1123 14:59:22.932818 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovnnorthd-ovndbs" Nov 23 14:59:22 crc kubenswrapper[5050]: I1123 14:59:22.932991 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-config" Nov 23 14:59:22 crc kubenswrapper[5050]: I1123 14:59:22.955713 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Nov 23 14:59:23 crc kubenswrapper[5050]: I1123 14:59:23.027745 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Nov 23 14:59:23 crc kubenswrapper[5050]: I1123 14:59:23.050027 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/99bf0fd0-57d8-459a-a6b1-f538e878af9b-dns-svc\") pod \"99bf0fd0-57d8-459a-a6b1-f538e878af9b\" (UID: \"99bf0fd0-57d8-459a-a6b1-f538e878af9b\") " Nov 23 14:59:23 crc kubenswrapper[5050]: I1123 14:59:23.050172 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gfj4l\" (UniqueName: \"kubernetes.io/projected/99bf0fd0-57d8-459a-a6b1-f538e878af9b-kube-api-access-gfj4l\") pod \"99bf0fd0-57d8-459a-a6b1-f538e878af9b\" (UID: \"99bf0fd0-57d8-459a-a6b1-f538e878af9b\") " Nov 23 14:59:23 crc kubenswrapper[5050]: I1123 14:59:23.050274 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/99bf0fd0-57d8-459a-a6b1-f538e878af9b-config\") pod \"99bf0fd0-57d8-459a-a6b1-f538e878af9b\" (UID: \"99bf0fd0-57d8-459a-a6b1-f538e878af9b\") " Nov 23 14:59:23 crc kubenswrapper[5050]: I1123 14:59:23.050559 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/27e26dea-41e9-4d40-9adc-f902e5f4791f-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"27e26dea-41e9-4d40-9adc-f902e5f4791f\") " pod="openstack/ovn-northd-0" Nov 23 14:59:23 crc kubenswrapper[5050]: I1123 14:59:23.050602 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/27e26dea-41e9-4d40-9adc-f902e5f4791f-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"27e26dea-41e9-4d40-9adc-f902e5f4791f\") " pod="openstack/ovn-northd-0" Nov 23 14:59:23 crc kubenswrapper[5050]: I1123 14:59:23.050626 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/27e26dea-41e9-4d40-9adc-f902e5f4791f-scripts\") pod \"ovn-northd-0\" (UID: \"27e26dea-41e9-4d40-9adc-f902e5f4791f\") " pod="openstack/ovn-northd-0" Nov 23 14:59:23 crc kubenswrapper[5050]: I1123 14:59:23.050644 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/27e26dea-41e9-4d40-9adc-f902e5f4791f-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: 
\"27e26dea-41e9-4d40-9adc-f902e5f4791f\") " pod="openstack/ovn-northd-0" Nov 23 14:59:23 crc kubenswrapper[5050]: I1123 14:59:23.050682 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/27e26dea-41e9-4d40-9adc-f902e5f4791f-config\") pod \"ovn-northd-0\" (UID: \"27e26dea-41e9-4d40-9adc-f902e5f4791f\") " pod="openstack/ovn-northd-0" Nov 23 14:59:23 crc kubenswrapper[5050]: I1123 14:59:23.050710 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/27e26dea-41e9-4d40-9adc-f902e5f4791f-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"27e26dea-41e9-4d40-9adc-f902e5f4791f\") " pod="openstack/ovn-northd-0" Nov 23 14:59:23 crc kubenswrapper[5050]: I1123 14:59:23.050748 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qtd92\" (UniqueName: \"kubernetes.io/projected/27e26dea-41e9-4d40-9adc-f902e5f4791f-kube-api-access-qtd92\") pod \"ovn-northd-0\" (UID: \"27e26dea-41e9-4d40-9adc-f902e5f4791f\") " pod="openstack/ovn-northd-0" Nov 23 14:59:23 crc kubenswrapper[5050]: I1123 14:59:23.055622 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/99bf0fd0-57d8-459a-a6b1-f538e878af9b-kube-api-access-gfj4l" (OuterVolumeSpecName: "kube-api-access-gfj4l") pod "99bf0fd0-57d8-459a-a6b1-f538e878af9b" (UID: "99bf0fd0-57d8-459a-a6b1-f538e878af9b"). InnerVolumeSpecName "kube-api-access-gfj4l". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 14:59:23 crc kubenswrapper[5050]: I1123 14:59:23.106947 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/99bf0fd0-57d8-459a-a6b1-f538e878af9b-config" (OuterVolumeSpecName: "config") pod "99bf0fd0-57d8-459a-a6b1-f538e878af9b" (UID: "99bf0fd0-57d8-459a-a6b1-f538e878af9b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 14:59:23 crc kubenswrapper[5050]: I1123 14:59:23.114112 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/99bf0fd0-57d8-459a-a6b1-f538e878af9b-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "99bf0fd0-57d8-459a-a6b1-f538e878af9b" (UID: "99bf0fd0-57d8-459a-a6b1-f538e878af9b"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 14:59:23 crc kubenswrapper[5050]: I1123 14:59:23.144421 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5bf47b49b7-zhbvv"] Nov 23 14:59:23 crc kubenswrapper[5050]: I1123 14:59:23.154050 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qtd92\" (UniqueName: \"kubernetes.io/projected/27e26dea-41e9-4d40-9adc-f902e5f4791f-kube-api-access-qtd92\") pod \"ovn-northd-0\" (UID: \"27e26dea-41e9-4d40-9adc-f902e5f4791f\") " pod="openstack/ovn-northd-0" Nov 23 14:59:23 crc kubenswrapper[5050]: I1123 14:59:23.154156 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/27e26dea-41e9-4d40-9adc-f902e5f4791f-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"27e26dea-41e9-4d40-9adc-f902e5f4791f\") " pod="openstack/ovn-northd-0" Nov 23 14:59:23 crc kubenswrapper[5050]: I1123 14:59:23.154187 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/27e26dea-41e9-4d40-9adc-f902e5f4791f-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"27e26dea-41e9-4d40-9adc-f902e5f4791f\") " pod="openstack/ovn-northd-0" Nov 23 14:59:23 crc kubenswrapper[5050]: I1123 14:59:23.154221 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/27e26dea-41e9-4d40-9adc-f902e5f4791f-scripts\") pod \"ovn-northd-0\" (UID: \"27e26dea-41e9-4d40-9adc-f902e5f4791f\") " pod="openstack/ovn-northd-0" Nov 23 14:59:23 crc kubenswrapper[5050]: I1123 14:59:23.154240 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/27e26dea-41e9-4d40-9adc-f902e5f4791f-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"27e26dea-41e9-4d40-9adc-f902e5f4791f\") " pod="openstack/ovn-northd-0" Nov 23 14:59:23 crc kubenswrapper[5050]: I1123 14:59:23.154285 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/27e26dea-41e9-4d40-9adc-f902e5f4791f-config\") pod \"ovn-northd-0\" (UID: \"27e26dea-41e9-4d40-9adc-f902e5f4791f\") " pod="openstack/ovn-northd-0" Nov 23 14:59:23 crc kubenswrapper[5050]: I1123 14:59:23.154320 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/27e26dea-41e9-4d40-9adc-f902e5f4791f-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"27e26dea-41e9-4d40-9adc-f902e5f4791f\") " pod="openstack/ovn-northd-0" Nov 23 14:59:23 crc kubenswrapper[5050]: I1123 14:59:23.154392 5050 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/99bf0fd0-57d8-459a-a6b1-f538e878af9b-config\") on node \"crc\" DevicePath \"\"" Nov 23 14:59:23 crc kubenswrapper[5050]: I1123 14:59:23.154403 5050 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/99bf0fd0-57d8-459a-a6b1-f538e878af9b-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 23 14:59:23 crc kubenswrapper[5050]: I1123 14:59:23.154415 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gfj4l\" (UniqueName: \"kubernetes.io/projected/99bf0fd0-57d8-459a-a6b1-f538e878af9b-kube-api-access-gfj4l\") on node \"crc\" DevicePath \"\"" Nov 23 14:59:23 crc kubenswrapper[5050]: 
I1123 14:59:23.155437 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/27e26dea-41e9-4d40-9adc-f902e5f4791f-scripts\") pod \"ovn-northd-0\" (UID: \"27e26dea-41e9-4d40-9adc-f902e5f4791f\") " pod="openstack/ovn-northd-0" Nov 23 14:59:23 crc kubenswrapper[5050]: I1123 14:59:23.155799 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/27e26dea-41e9-4d40-9adc-f902e5f4791f-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"27e26dea-41e9-4d40-9adc-f902e5f4791f\") " pod="openstack/ovn-northd-0" Nov 23 14:59:23 crc kubenswrapper[5050]: I1123 14:59:23.158590 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/27e26dea-41e9-4d40-9adc-f902e5f4791f-config\") pod \"ovn-northd-0\" (UID: \"27e26dea-41e9-4d40-9adc-f902e5f4791f\") " pod="openstack/ovn-northd-0" Nov 23 14:59:23 crc kubenswrapper[5050]: I1123 14:59:23.160078 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/27e26dea-41e9-4d40-9adc-f902e5f4791f-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"27e26dea-41e9-4d40-9adc-f902e5f4791f\") " pod="openstack/ovn-northd-0" Nov 23 14:59:23 crc kubenswrapper[5050]: I1123 14:59:23.167012 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/27e26dea-41e9-4d40-9adc-f902e5f4791f-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"27e26dea-41e9-4d40-9adc-f902e5f4791f\") " pod="openstack/ovn-northd-0" Nov 23 14:59:23 crc kubenswrapper[5050]: I1123 14:59:23.172382 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/27e26dea-41e9-4d40-9adc-f902e5f4791f-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"27e26dea-41e9-4d40-9adc-f902e5f4791f\") " pod="openstack/ovn-northd-0" Nov 23 14:59:23 crc kubenswrapper[5050]: I1123 14:59:23.193629 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-98kts"] Nov 23 14:59:23 crc kubenswrapper[5050]: I1123 14:59:23.195691 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qtd92\" (UniqueName: \"kubernetes.io/projected/27e26dea-41e9-4d40-9adc-f902e5f4791f-kube-api-access-qtd92\") pod \"ovn-northd-0\" (UID: \"27e26dea-41e9-4d40-9adc-f902e5f4791f\") " pod="openstack/ovn-northd-0" Nov 23 14:59:23 crc kubenswrapper[5050]: I1123 14:59:23.195759 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-b8fbc5445-98kts" Nov 23 14:59:23 crc kubenswrapper[5050]: I1123 14:59:23.217086 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-98kts"] Nov 23 14:59:23 crc kubenswrapper[5050]: I1123 14:59:23.300015 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-northd-0" Nov 23 14:59:23 crc kubenswrapper[5050]: I1123 14:59:23.368576 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8982637f-7dc1-4454-9213-47ea0f43971a-dns-svc\") pod \"dnsmasq-dns-b8fbc5445-98kts\" (UID: \"8982637f-7dc1-4454-9213-47ea0f43971a\") " pod="openstack/dnsmasq-dns-b8fbc5445-98kts" Nov 23 14:59:23 crc kubenswrapper[5050]: I1123 14:59:23.368619 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8982637f-7dc1-4454-9213-47ea0f43971a-ovsdbserver-sb\") pod \"dnsmasq-dns-b8fbc5445-98kts\" (UID: \"8982637f-7dc1-4454-9213-47ea0f43971a\") " pod="openstack/dnsmasq-dns-b8fbc5445-98kts" Nov 23 14:59:23 crc kubenswrapper[5050]: I1123 14:59:23.368644 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8982637f-7dc1-4454-9213-47ea0f43971a-ovsdbserver-nb\") pod \"dnsmasq-dns-b8fbc5445-98kts\" (UID: \"8982637f-7dc1-4454-9213-47ea0f43971a\") " pod="openstack/dnsmasq-dns-b8fbc5445-98kts" Nov 23 14:59:23 crc kubenswrapper[5050]: I1123 14:59:23.368744 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4zrl2\" (UniqueName: \"kubernetes.io/projected/8982637f-7dc1-4454-9213-47ea0f43971a-kube-api-access-4zrl2\") pod \"dnsmasq-dns-b8fbc5445-98kts\" (UID: \"8982637f-7dc1-4454-9213-47ea0f43971a\") " pod="openstack/dnsmasq-dns-b8fbc5445-98kts" Nov 23 14:59:23 crc kubenswrapper[5050]: I1123 14:59:23.368784 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8982637f-7dc1-4454-9213-47ea0f43971a-config\") pod \"dnsmasq-dns-b8fbc5445-98kts\" (UID: \"8982637f-7dc1-4454-9213-47ea0f43971a\") " pod="openstack/dnsmasq-dns-b8fbc5445-98kts" Nov 23 14:59:23 crc kubenswrapper[5050]: I1123 14:59:23.469836 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4zrl2\" (UniqueName: \"kubernetes.io/projected/8982637f-7dc1-4454-9213-47ea0f43971a-kube-api-access-4zrl2\") pod \"dnsmasq-dns-b8fbc5445-98kts\" (UID: \"8982637f-7dc1-4454-9213-47ea0f43971a\") " pod="openstack/dnsmasq-dns-b8fbc5445-98kts" Nov 23 14:59:23 crc kubenswrapper[5050]: I1123 14:59:23.469910 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8982637f-7dc1-4454-9213-47ea0f43971a-config\") pod \"dnsmasq-dns-b8fbc5445-98kts\" (UID: \"8982637f-7dc1-4454-9213-47ea0f43971a\") " pod="openstack/dnsmasq-dns-b8fbc5445-98kts" Nov 23 14:59:23 crc kubenswrapper[5050]: I1123 14:59:23.469944 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8982637f-7dc1-4454-9213-47ea0f43971a-dns-svc\") pod \"dnsmasq-dns-b8fbc5445-98kts\" (UID: \"8982637f-7dc1-4454-9213-47ea0f43971a\") " pod="openstack/dnsmasq-dns-b8fbc5445-98kts" Nov 23 14:59:23 crc kubenswrapper[5050]: I1123 14:59:23.469963 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8982637f-7dc1-4454-9213-47ea0f43971a-ovsdbserver-sb\") pod \"dnsmasq-dns-b8fbc5445-98kts\" (UID: \"8982637f-7dc1-4454-9213-47ea0f43971a\") 
" pod="openstack/dnsmasq-dns-b8fbc5445-98kts" Nov 23 14:59:23 crc kubenswrapper[5050]: I1123 14:59:23.469983 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8982637f-7dc1-4454-9213-47ea0f43971a-ovsdbserver-nb\") pod \"dnsmasq-dns-b8fbc5445-98kts\" (UID: \"8982637f-7dc1-4454-9213-47ea0f43971a\") " pod="openstack/dnsmasq-dns-b8fbc5445-98kts" Nov 23 14:59:23 crc kubenswrapper[5050]: I1123 14:59:23.470991 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8982637f-7dc1-4454-9213-47ea0f43971a-ovsdbserver-nb\") pod \"dnsmasq-dns-b8fbc5445-98kts\" (UID: \"8982637f-7dc1-4454-9213-47ea0f43971a\") " pod="openstack/dnsmasq-dns-b8fbc5445-98kts" Nov 23 14:59:23 crc kubenswrapper[5050]: I1123 14:59:23.472024 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8982637f-7dc1-4454-9213-47ea0f43971a-config\") pod \"dnsmasq-dns-b8fbc5445-98kts\" (UID: \"8982637f-7dc1-4454-9213-47ea0f43971a\") " pod="openstack/dnsmasq-dns-b8fbc5445-98kts" Nov 23 14:59:23 crc kubenswrapper[5050]: I1123 14:59:23.472835 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8982637f-7dc1-4454-9213-47ea0f43971a-ovsdbserver-sb\") pod \"dnsmasq-dns-b8fbc5445-98kts\" (UID: \"8982637f-7dc1-4454-9213-47ea0f43971a\") " pod="openstack/dnsmasq-dns-b8fbc5445-98kts" Nov 23 14:59:23 crc kubenswrapper[5050]: I1123 14:59:23.472966 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8982637f-7dc1-4454-9213-47ea0f43971a-dns-svc\") pod \"dnsmasq-dns-b8fbc5445-98kts\" (UID: \"8982637f-7dc1-4454-9213-47ea0f43971a\") " pod="openstack/dnsmasq-dns-b8fbc5445-98kts" Nov 23 14:59:23 crc kubenswrapper[5050]: I1123 14:59:23.494334 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4zrl2\" (UniqueName: \"kubernetes.io/projected/8982637f-7dc1-4454-9213-47ea0f43971a-kube-api-access-4zrl2\") pod \"dnsmasq-dns-b8fbc5445-98kts\" (UID: \"8982637f-7dc1-4454-9213-47ea0f43971a\") " pod="openstack/dnsmasq-dns-b8fbc5445-98kts" Nov 23 14:59:23 crc kubenswrapper[5050]: I1123 14:59:23.561570 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="25891e09-f495-4de3-b5d2-496188bf0981" path="/var/lib/kubelet/pods/25891e09-f495-4de3-b5d2-496188bf0981/volumes" Nov 23 14:59:23 crc kubenswrapper[5050]: I1123 14:59:23.580239 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-b8fbc5445-98kts" Nov 23 14:59:23 crc kubenswrapper[5050]: I1123 14:59:23.607757 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5ccc8479f9-pc2bl" Nov 23 14:59:23 crc kubenswrapper[5050]: I1123 14:59:23.608559 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5ccc8479f9-pc2bl" event={"ID":"99bf0fd0-57d8-459a-a6b1-f538e878af9b","Type":"ContainerDied","Data":"48476f6a79a2bcbf275051d08312278b537ac375b8765449d910029cb3b4f4bf"} Nov 23 14:59:23 crc kubenswrapper[5050]: I1123 14:59:23.608601 5050 scope.go:117] "RemoveContainer" containerID="26e8ee294644a48a3465452748ebe02568ba72a2301e6c918d4ed998eef51523" Nov 23 14:59:23 crc kubenswrapper[5050]: I1123 14:59:23.630000 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"85ca597a-bf71-43bb-b57c-f840b37f196f","Type":"ContainerStarted","Data":"838adec05740ac7159beefeb2323ec2c83ab99b474e2e4e94e47e803accce920"} Nov 23 14:59:23 crc kubenswrapper[5050]: I1123 14:59:23.661586 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-cell1-galera-0" podStartSLOduration=-9223371992.19321 podStartE2EDuration="44.661566151s" podCreationTimestamp="2025-11-23 14:58:39 +0000 UTC" firstStartedPulling="2025-11-23 14:58:41.448471753 +0000 UTC m=+1016.615468238" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 14:59:23.656686637 +0000 UTC m=+1058.823683132" watchObservedRunningTime="2025-11-23 14:59:23.661566151 +0000 UTC m=+1058.828562636" Nov 23 14:59:23 crc kubenswrapper[5050]: I1123 14:59:23.693011 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8554648995-n7gcr" event={"ID":"f115d163-3bbf-44ac-8c45-6f8753667208","Type":"ContainerStarted","Data":"adff62585b8a79ee0446188a12ee8521f072ef528a14ce65e4dc3e9431fe030a"} Nov 23 14:59:23 crc kubenswrapper[5050]: I1123 14:59:23.694498 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-8554648995-n7gcr" Nov 23 14:59:23 crc kubenswrapper[5050]: I1123 14:59:23.709991 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5bf47b49b7-zhbvv" event={"ID":"bd77ef0c-49e7-44ea-bf7f-383a60ca3012","Type":"ContainerStarted","Data":"4d1b6c6c61b575c6c0aa6947db68d1bcd9ab01330419cb2406597cf12bc7e6b6"} Nov 23 14:59:23 crc kubenswrapper[5050]: I1123 14:59:23.711497 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5bf47b49b7-zhbvv" Nov 23 14:59:23 crc kubenswrapper[5050]: I1123 14:59:23.727587 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5ccc8479f9-pc2bl"] Nov 23 14:59:23 crc kubenswrapper[5050]: I1123 14:59:23.733719 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5ccc8479f9-pc2bl"] Nov 23 14:59:23 crc kubenswrapper[5050]: I1123 14:59:23.737236 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-8554648995-n7gcr" podStartSLOduration=2.737209242 podStartE2EDuration="2.737209242s" podCreationTimestamp="2025-11-23 14:59:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 14:59:23.721626191 +0000 UTC m=+1058.888622676" watchObservedRunningTime="2025-11-23 14:59:23.737209242 +0000 UTC m=+1058.904205727" Nov 23 14:59:23 crc kubenswrapper[5050]: I1123 14:59:23.746695 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5bf47b49b7-zhbvv" 
podStartSLOduration=3.746677644 podStartE2EDuration="3.746677644s" podCreationTimestamp="2025-11-23 14:59:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 14:59:23.743825375 +0000 UTC m=+1058.910821860" watchObservedRunningTime="2025-11-23 14:59:23.746677644 +0000 UTC m=+1058.913674129" Nov 23 14:59:23 crc kubenswrapper[5050]: I1123 14:59:23.804230 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Nov 23 14:59:24 crc kubenswrapper[5050]: I1123 14:59:24.206427 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-98kts"] Nov 23 14:59:24 crc kubenswrapper[5050]: W1123 14:59:24.213651 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8982637f_7dc1_4454_9213_47ea0f43971a.slice/crio-727b1a17874f8c0695ddba66aaf3aed331b1b5652c646f784041f11a6e4207af WatchSource:0}: Error finding container 727b1a17874f8c0695ddba66aaf3aed331b1b5652c646f784041f11a6e4207af: Status 404 returned error can't find the container with id 727b1a17874f8c0695ddba66aaf3aed331b1b5652c646f784041f11a6e4207af Nov 23 14:59:24 crc kubenswrapper[5050]: I1123 14:59:24.283419 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-storage-0"] Nov 23 14:59:24 crc kubenswrapper[5050]: I1123 14:59:24.290383 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-storage-0" Nov 23 14:59:24 crc kubenswrapper[5050]: I1123 14:59:24.294022 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-files" Nov 23 14:59:24 crc kubenswrapper[5050]: I1123 14:59:24.294040 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-storage-config-data" Nov 23 14:59:24 crc kubenswrapper[5050]: I1123 14:59:24.294044 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-swift-dockercfg-67jn2" Nov 23 14:59:24 crc kubenswrapper[5050]: I1123 14:59:24.295881 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-conf" Nov 23 14:59:24 crc kubenswrapper[5050]: I1123 14:59:24.305009 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Nov 23 14:59:24 crc kubenswrapper[5050]: I1123 14:59:24.393435 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"swift-storage-0\" (UID: \"4947b94e-805e-457e-bb17-f7faea3b5fca\") " pod="openstack/swift-storage-0" Nov 23 14:59:24 crc kubenswrapper[5050]: I1123 14:59:24.393553 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/4947b94e-805e-457e-bb17-f7faea3b5fca-etc-swift\") pod \"swift-storage-0\" (UID: \"4947b94e-805e-457e-bb17-f7faea3b5fca\") " pod="openstack/swift-storage-0" Nov 23 14:59:24 crc kubenswrapper[5050]: I1123 14:59:24.393667 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/4947b94e-805e-457e-bb17-f7faea3b5fca-cache\") pod \"swift-storage-0\" (UID: \"4947b94e-805e-457e-bb17-f7faea3b5fca\") " pod="openstack/swift-storage-0" Nov 23 14:59:24 crc kubenswrapper[5050]: I1123 14:59:24.393853 5050 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nmcsm\" (UniqueName: \"kubernetes.io/projected/4947b94e-805e-457e-bb17-f7faea3b5fca-kube-api-access-nmcsm\") pod \"swift-storage-0\" (UID: \"4947b94e-805e-457e-bb17-f7faea3b5fca\") " pod="openstack/swift-storage-0" Nov 23 14:59:24 crc kubenswrapper[5050]: I1123 14:59:24.394139 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/4947b94e-805e-457e-bb17-f7faea3b5fca-lock\") pod \"swift-storage-0\" (UID: \"4947b94e-805e-457e-bb17-f7faea3b5fca\") " pod="openstack/swift-storage-0" Nov 23 14:59:24 crc kubenswrapper[5050]: I1123 14:59:24.495743 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/4947b94e-805e-457e-bb17-f7faea3b5fca-lock\") pod \"swift-storage-0\" (UID: \"4947b94e-805e-457e-bb17-f7faea3b5fca\") " pod="openstack/swift-storage-0" Nov 23 14:59:24 crc kubenswrapper[5050]: I1123 14:59:24.495861 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"swift-storage-0\" (UID: \"4947b94e-805e-457e-bb17-f7faea3b5fca\") " pod="openstack/swift-storage-0" Nov 23 14:59:24 crc kubenswrapper[5050]: I1123 14:59:24.495896 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/4947b94e-805e-457e-bb17-f7faea3b5fca-etc-swift\") pod \"swift-storage-0\" (UID: \"4947b94e-805e-457e-bb17-f7faea3b5fca\") " pod="openstack/swift-storage-0" Nov 23 14:59:24 crc kubenswrapper[5050]: I1123 14:59:24.495939 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/4947b94e-805e-457e-bb17-f7faea3b5fca-cache\") pod \"swift-storage-0\" (UID: \"4947b94e-805e-457e-bb17-f7faea3b5fca\") " pod="openstack/swift-storage-0" Nov 23 14:59:24 crc kubenswrapper[5050]: I1123 14:59:24.495980 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nmcsm\" (UniqueName: \"kubernetes.io/projected/4947b94e-805e-457e-bb17-f7faea3b5fca-kube-api-access-nmcsm\") pod \"swift-storage-0\" (UID: \"4947b94e-805e-457e-bb17-f7faea3b5fca\") " pod="openstack/swift-storage-0" Nov 23 14:59:24 crc kubenswrapper[5050]: I1123 14:59:24.496497 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/4947b94e-805e-457e-bb17-f7faea3b5fca-lock\") pod \"swift-storage-0\" (UID: \"4947b94e-805e-457e-bb17-f7faea3b5fca\") " pod="openstack/swift-storage-0" Nov 23 14:59:24 crc kubenswrapper[5050]: E1123 14:59:24.496660 5050 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 23 14:59:24 crc kubenswrapper[5050]: E1123 14:59:24.496767 5050 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 23 14:59:24 crc kubenswrapper[5050]: I1123 14:59:24.496718 5050 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"swift-storage-0\" (UID: \"4947b94e-805e-457e-bb17-f7faea3b5fca\") device mount path \"/mnt/openstack/pv03\"" pod="openstack/swift-storage-0" Nov 23 
14:59:24 crc kubenswrapper[5050]: E1123 14:59:24.496963 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/4947b94e-805e-457e-bb17-f7faea3b5fca-etc-swift podName:4947b94e-805e-457e-bb17-f7faea3b5fca nodeName:}" failed. No retries permitted until 2025-11-23 14:59:24.996937339 +0000 UTC m=+1060.163933824 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/4947b94e-805e-457e-bb17-f7faea3b5fca-etc-swift") pod "swift-storage-0" (UID: "4947b94e-805e-457e-bb17-f7faea3b5fca") : configmap "swift-ring-files" not found Nov 23 14:59:24 crc kubenswrapper[5050]: I1123 14:59:24.497151 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/4947b94e-805e-457e-bb17-f7faea3b5fca-cache\") pod \"swift-storage-0\" (UID: \"4947b94e-805e-457e-bb17-f7faea3b5fca\") " pod="openstack/swift-storage-0" Nov 23 14:59:24 crc kubenswrapper[5050]: I1123 14:59:24.519583 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"swift-storage-0\" (UID: \"4947b94e-805e-457e-bb17-f7faea3b5fca\") " pod="openstack/swift-storage-0" Nov 23 14:59:24 crc kubenswrapper[5050]: I1123 14:59:24.520230 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nmcsm\" (UniqueName: \"kubernetes.io/projected/4947b94e-805e-457e-bb17-f7faea3b5fca-kube-api-access-nmcsm\") pod \"swift-storage-0\" (UID: \"4947b94e-805e-457e-bb17-f7faea3b5fca\") " pod="openstack/swift-storage-0" Nov 23 14:59:24 crc kubenswrapper[5050]: I1123 14:59:24.717570 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-98kts" event={"ID":"8982637f-7dc1-4454-9213-47ea0f43971a","Type":"ContainerStarted","Data":"727b1a17874f8c0695ddba66aaf3aed331b1b5652c646f784041f11a6e4207af"} Nov 23 14:59:24 crc kubenswrapper[5050]: I1123 14:59:24.719240 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"27e26dea-41e9-4d40-9adc-f902e5f4791f","Type":"ContainerStarted","Data":"c792fa7250904d4d39bdbb042f089fe70ac0de73b9dd4b95958a61bca56e3a3c"} Nov 23 14:59:24 crc kubenswrapper[5050]: I1123 14:59:24.719634 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5bf47b49b7-zhbvv" podUID="bd77ef0c-49e7-44ea-bf7f-383a60ca3012" containerName="dnsmasq-dns" containerID="cri-o://4d1b6c6c61b575c6c0aa6947db68d1bcd9ab01330419cb2406597cf12bc7e6b6" gracePeriod=10 Nov 23 14:59:24 crc kubenswrapper[5050]: I1123 14:59:24.782022 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-ring-rebalance-94h5j"] Nov 23 14:59:24 crc kubenswrapper[5050]: I1123 14:59:24.783235 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-94h5j" Nov 23 14:59:24 crc kubenswrapper[5050]: I1123 14:59:24.785703 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Nov 23 14:59:24 crc kubenswrapper[5050]: I1123 14:59:24.786618 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-config-data" Nov 23 14:59:24 crc kubenswrapper[5050]: I1123 14:59:24.789178 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-scripts" Nov 23 14:59:24 crc kubenswrapper[5050]: I1123 14:59:24.827144 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-ring-rebalance-94h5j"] Nov 23 14:59:24 crc kubenswrapper[5050]: E1123 14:59:24.828141 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[combined-ca-bundle dispersionconf etc-swift kube-api-access-jw69k ring-data-devices scripts swiftconf], unattached volumes=[], failed to process volumes=[combined-ca-bundle dispersionconf etc-swift kube-api-access-jw69k ring-data-devices scripts swiftconf]: context canceled" pod="openstack/swift-ring-rebalance-94h5j" podUID="6fc1584d-8a46-46ab-8e59-9b4d6f2eae67" Nov 23 14:59:24 crc kubenswrapper[5050]: I1123 14:59:24.842678 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-ring-rebalance-99xwj"] Nov 23 14:59:24 crc kubenswrapper[5050]: I1123 14:59:24.844785 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-99xwj" Nov 23 14:59:24 crc kubenswrapper[5050]: I1123 14:59:24.866547 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-ring-rebalance-94h5j"] Nov 23 14:59:24 crc kubenswrapper[5050]: I1123 14:59:24.886171 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-99xwj"] Nov 23 14:59:24 crc kubenswrapper[5050]: I1123 14:59:24.902903 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jw69k\" (UniqueName: \"kubernetes.io/projected/6fc1584d-8a46-46ab-8e59-9b4d6f2eae67-kube-api-access-jw69k\") pod \"swift-ring-rebalance-94h5j\" (UID: \"6fc1584d-8a46-46ab-8e59-9b4d6f2eae67\") " pod="openstack/swift-ring-rebalance-94h5j" Nov 23 14:59:24 crc kubenswrapper[5050]: I1123 14:59:24.902962 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/6fc1584d-8a46-46ab-8e59-9b4d6f2eae67-etc-swift\") pod \"swift-ring-rebalance-94h5j\" (UID: \"6fc1584d-8a46-46ab-8e59-9b4d6f2eae67\") " pod="openstack/swift-ring-rebalance-94h5j" Nov 23 14:59:24 crc kubenswrapper[5050]: I1123 14:59:24.903003 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/f4e77f81-6a4b-4c66-8e27-891159e3da45-swiftconf\") pod \"swift-ring-rebalance-99xwj\" (UID: \"f4e77f81-6a4b-4c66-8e27-891159e3da45\") " pod="openstack/swift-ring-rebalance-99xwj" Nov 23 14:59:24 crc kubenswrapper[5050]: I1123 14:59:24.903033 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/6fc1584d-8a46-46ab-8e59-9b4d6f2eae67-dispersionconf\") pod \"swift-ring-rebalance-94h5j\" (UID: \"6fc1584d-8a46-46ab-8e59-9b4d6f2eae67\") " pod="openstack/swift-ring-rebalance-94h5j" Nov 23 14:59:24 crc kubenswrapper[5050]: 
I1123 14:59:24.903120 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/f4e77f81-6a4b-4c66-8e27-891159e3da45-etc-swift\") pod \"swift-ring-rebalance-99xwj\" (UID: \"f4e77f81-6a4b-4c66-8e27-891159e3da45\") " pod="openstack/swift-ring-rebalance-99xwj"
Nov 23 14:59:24 crc kubenswrapper[5050]: I1123 14:59:24.903163 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f4e77f81-6a4b-4c66-8e27-891159e3da45-combined-ca-bundle\") pod \"swift-ring-rebalance-99xwj\" (UID: \"f4e77f81-6a4b-4c66-8e27-891159e3da45\") " pod="openstack/swift-ring-rebalance-99xwj"
Nov 23 14:59:24 crc kubenswrapper[5050]: I1123 14:59:24.903183 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6fc1584d-8a46-46ab-8e59-9b4d6f2eae67-combined-ca-bundle\") pod \"swift-ring-rebalance-94h5j\" (UID: \"6fc1584d-8a46-46ab-8e59-9b4d6f2eae67\") " pod="openstack/swift-ring-rebalance-94h5j"
Nov 23 14:59:24 crc kubenswrapper[5050]: I1123 14:59:24.903200 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4wf9s\" (UniqueName: \"kubernetes.io/projected/f4e77f81-6a4b-4c66-8e27-891159e3da45-kube-api-access-4wf9s\") pod \"swift-ring-rebalance-99xwj\" (UID: \"f4e77f81-6a4b-4c66-8e27-891159e3da45\") " pod="openstack/swift-ring-rebalance-99xwj"
Nov 23 14:59:24 crc kubenswrapper[5050]: I1123 14:59:24.903284 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f4e77f81-6a4b-4c66-8e27-891159e3da45-scripts\") pod \"swift-ring-rebalance-99xwj\" (UID: \"f4e77f81-6a4b-4c66-8e27-891159e3da45\") " pod="openstack/swift-ring-rebalance-99xwj"
Nov 23 14:59:24 crc kubenswrapper[5050]: I1123 14:59:24.903312 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/f4e77f81-6a4b-4c66-8e27-891159e3da45-ring-data-devices\") pod \"swift-ring-rebalance-99xwj\" (UID: \"f4e77f81-6a4b-4c66-8e27-891159e3da45\") " pod="openstack/swift-ring-rebalance-99xwj"
Nov 23 14:59:24 crc kubenswrapper[5050]: I1123 14:59:24.903357 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6fc1584d-8a46-46ab-8e59-9b4d6f2eae67-scripts\") pod \"swift-ring-rebalance-94h5j\" (UID: \"6fc1584d-8a46-46ab-8e59-9b4d6f2eae67\") " pod="openstack/swift-ring-rebalance-94h5j"
Nov 23 14:59:24 crc kubenswrapper[5050]: I1123 14:59:24.903377 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/f4e77f81-6a4b-4c66-8e27-891159e3da45-dispersionconf\") pod \"swift-ring-rebalance-99xwj\" (UID: \"f4e77f81-6a4b-4c66-8e27-891159e3da45\") " pod="openstack/swift-ring-rebalance-99xwj"
Nov 23 14:59:24 crc kubenswrapper[5050]: I1123 14:59:24.903406 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/6fc1584d-8a46-46ab-8e59-9b4d6f2eae67-ring-data-devices\") pod \"swift-ring-rebalance-94h5j\" (UID: \"6fc1584d-8a46-46ab-8e59-9b4d6f2eae67\") " pod="openstack/swift-ring-rebalance-94h5j"
Nov 23 14:59:24 crc kubenswrapper[5050]: I1123 14:59:24.903580 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/6fc1584d-8a46-46ab-8e59-9b4d6f2eae67-swiftconf\") pod \"swift-ring-rebalance-94h5j\" (UID: \"6fc1584d-8a46-46ab-8e59-9b4d6f2eae67\") " pod="openstack/swift-ring-rebalance-94h5j"
Nov 23 14:59:25 crc kubenswrapper[5050]: I1123 14:59:25.005871 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/6fc1584d-8a46-46ab-8e59-9b4d6f2eae67-swiftconf\") pod \"swift-ring-rebalance-94h5j\" (UID: \"6fc1584d-8a46-46ab-8e59-9b4d6f2eae67\") " pod="openstack/swift-ring-rebalance-94h5j"
Nov 23 14:59:25 crc kubenswrapper[5050]: I1123 14:59:25.005977 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jw69k\" (UniqueName: \"kubernetes.io/projected/6fc1584d-8a46-46ab-8e59-9b4d6f2eae67-kube-api-access-jw69k\") pod \"swift-ring-rebalance-94h5j\" (UID: \"6fc1584d-8a46-46ab-8e59-9b4d6f2eae67\") " pod="openstack/swift-ring-rebalance-94h5j"
Nov 23 14:59:25 crc kubenswrapper[5050]: I1123 14:59:25.006021 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/6fc1584d-8a46-46ab-8e59-9b4d6f2eae67-etc-swift\") pod \"swift-ring-rebalance-94h5j\" (UID: \"6fc1584d-8a46-46ab-8e59-9b4d6f2eae67\") " pod="openstack/swift-ring-rebalance-94h5j"
Nov 23 14:59:25 crc kubenswrapper[5050]: I1123 14:59:25.006059 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/f4e77f81-6a4b-4c66-8e27-891159e3da45-swiftconf\") pod \"swift-ring-rebalance-99xwj\" (UID: \"f4e77f81-6a4b-4c66-8e27-891159e3da45\") " pod="openstack/swift-ring-rebalance-99xwj"
Nov 23 14:59:25 crc kubenswrapper[5050]: I1123 14:59:25.006097 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/6fc1584d-8a46-46ab-8e59-9b4d6f2eae67-dispersionconf\") pod \"swift-ring-rebalance-94h5j\" (UID: \"6fc1584d-8a46-46ab-8e59-9b4d6f2eae67\") " pod="openstack/swift-ring-rebalance-94h5j"
Nov 23 14:59:25 crc kubenswrapper[5050]: I1123 14:59:25.006130 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/f4e77f81-6a4b-4c66-8e27-891159e3da45-etc-swift\") pod \"swift-ring-rebalance-99xwj\" (UID: \"f4e77f81-6a4b-4c66-8e27-891159e3da45\") " pod="openstack/swift-ring-rebalance-99xwj"
Nov 23 14:59:25 crc kubenswrapper[5050]: I1123 14:59:25.006177 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f4e77f81-6a4b-4c66-8e27-891159e3da45-combined-ca-bundle\") pod \"swift-ring-rebalance-99xwj\" (UID: \"f4e77f81-6a4b-4c66-8e27-891159e3da45\") " pod="openstack/swift-ring-rebalance-99xwj"
Nov 23 14:59:25 crc kubenswrapper[5050]: I1123 14:59:25.006197 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6fc1584d-8a46-46ab-8e59-9b4d6f2eae67-combined-ca-bundle\") pod \"swift-ring-rebalance-94h5j\" (UID: \"6fc1584d-8a46-46ab-8e59-9b4d6f2eae67\") " pod="openstack/swift-ring-rebalance-94h5j"
Nov 23 14:59:25 crc kubenswrapper[5050]: I1123 14:59:25.006216 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4wf9s\" (UniqueName: \"kubernetes.io/projected/f4e77f81-6a4b-4c66-8e27-891159e3da45-kube-api-access-4wf9s\") pod \"swift-ring-rebalance-99xwj\" (UID: \"f4e77f81-6a4b-4c66-8e27-891159e3da45\") " pod="openstack/swift-ring-rebalance-99xwj"
Nov 23 14:59:25 crc kubenswrapper[5050]: I1123 14:59:25.006302 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f4e77f81-6a4b-4c66-8e27-891159e3da45-scripts\") pod \"swift-ring-rebalance-99xwj\" (UID: \"f4e77f81-6a4b-4c66-8e27-891159e3da45\") " pod="openstack/swift-ring-rebalance-99xwj"
Nov 23 14:59:25 crc kubenswrapper[5050]: I1123 14:59:25.006345 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/f4e77f81-6a4b-4c66-8e27-891159e3da45-ring-data-devices\") pod \"swift-ring-rebalance-99xwj\" (UID: \"f4e77f81-6a4b-4c66-8e27-891159e3da45\") " pod="openstack/swift-ring-rebalance-99xwj"
Nov 23 14:59:25 crc kubenswrapper[5050]: I1123 14:59:25.006404 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6fc1584d-8a46-46ab-8e59-9b4d6f2eae67-scripts\") pod \"swift-ring-rebalance-94h5j\" (UID: \"6fc1584d-8a46-46ab-8e59-9b4d6f2eae67\") " pod="openstack/swift-ring-rebalance-94h5j"
Nov 23 14:59:25 crc kubenswrapper[5050]: I1123 14:59:25.006425 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/f4e77f81-6a4b-4c66-8e27-891159e3da45-dispersionconf\") pod \"swift-ring-rebalance-99xwj\" (UID: \"f4e77f81-6a4b-4c66-8e27-891159e3da45\") " pod="openstack/swift-ring-rebalance-99xwj"
Nov 23 14:59:25 crc kubenswrapper[5050]: I1123 14:59:25.006469 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/6fc1584d-8a46-46ab-8e59-9b4d6f2eae67-ring-data-devices\") pod \"swift-ring-rebalance-94h5j\" (UID: \"6fc1584d-8a46-46ab-8e59-9b4d6f2eae67\") " pod="openstack/swift-ring-rebalance-94h5j"
Nov 23 14:59:25 crc kubenswrapper[5050]: I1123 14:59:25.006504 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/4947b94e-805e-457e-bb17-f7faea3b5fca-etc-swift\") pod \"swift-storage-0\" (UID: \"4947b94e-805e-457e-bb17-f7faea3b5fca\") " pod="openstack/swift-storage-0"
Nov 23 14:59:25 crc kubenswrapper[5050]: I1123 14:59:25.006927 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/6fc1584d-8a46-46ab-8e59-9b4d6f2eae67-etc-swift\") pod \"swift-ring-rebalance-94h5j\" (UID: \"6fc1584d-8a46-46ab-8e59-9b4d6f2eae67\") " pod="openstack/swift-ring-rebalance-94h5j"
Nov 23 14:59:25 crc kubenswrapper[5050]: I1123 14:59:25.007139 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/f4e77f81-6a4b-4c66-8e27-891159e3da45-etc-swift\") pod \"swift-ring-rebalance-99xwj\" (UID: \"f4e77f81-6a4b-4c66-8e27-891159e3da45\") " pod="openstack/swift-ring-rebalance-99xwj"
Nov 23 14:59:25 crc kubenswrapper[5050]: I1123 14:59:25.007297 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/6fc1584d-8a46-46ab-8e59-9b4d6f2eae67-ring-data-devices\") pod \"swift-ring-rebalance-94h5j\" (UID: \"6fc1584d-8a46-46ab-8e59-9b4d6f2eae67\") " pod="openstack/swift-ring-rebalance-94h5j"
Nov 23 14:59:25 crc kubenswrapper[5050]: I1123 14:59:25.007372 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/f4e77f81-6a4b-4c66-8e27-891159e3da45-ring-data-devices\") pod \"swift-ring-rebalance-99xwj\" (UID: \"f4e77f81-6a4b-4c66-8e27-891159e3da45\") " pod="openstack/swift-ring-rebalance-99xwj"
Nov 23 14:59:25 crc kubenswrapper[5050]: E1123 14:59:25.007879 5050 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found
Nov 23 14:59:25 crc kubenswrapper[5050]: E1123 14:59:25.007901 5050 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found
Nov 23 14:59:25 crc kubenswrapper[5050]: E1123 14:59:25.007975 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/4947b94e-805e-457e-bb17-f7faea3b5fca-etc-swift podName:4947b94e-805e-457e-bb17-f7faea3b5fca nodeName:}" failed. No retries permitted until 2025-11-23 14:59:26.007958392 +0000 UTC m=+1061.174954877 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/4947b94e-805e-457e-bb17-f7faea3b5fca-etc-swift") pod "swift-storage-0" (UID: "4947b94e-805e-457e-bb17-f7faea3b5fca") : configmap "swift-ring-files" not found
Nov 23 14:59:25 crc kubenswrapper[5050]: I1123 14:59:25.008300 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f4e77f81-6a4b-4c66-8e27-891159e3da45-scripts\") pod \"swift-ring-rebalance-99xwj\" (UID: \"f4e77f81-6a4b-4c66-8e27-891159e3da45\") " pod="openstack/swift-ring-rebalance-99xwj"
Nov 23 14:59:25 crc kubenswrapper[5050]: I1123 14:59:25.008744 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6fc1584d-8a46-46ab-8e59-9b4d6f2eae67-scripts\") pod \"swift-ring-rebalance-94h5j\" (UID: \"6fc1584d-8a46-46ab-8e59-9b4d6f2eae67\") " pod="openstack/swift-ring-rebalance-94h5j"
Nov 23 14:59:25 crc kubenswrapper[5050]: I1123 14:59:25.011059 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f4e77f81-6a4b-4c66-8e27-891159e3da45-combined-ca-bundle\") pod \"swift-ring-rebalance-99xwj\" (UID: \"f4e77f81-6a4b-4c66-8e27-891159e3da45\") " pod="openstack/swift-ring-rebalance-99xwj"
Nov 23 14:59:25 crc kubenswrapper[5050]: I1123 14:59:25.011910 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/6fc1584d-8a46-46ab-8e59-9b4d6f2eae67-dispersionconf\") pod \"swift-ring-rebalance-94h5j\" (UID: \"6fc1584d-8a46-46ab-8e59-9b4d6f2eae67\") " pod="openstack/swift-ring-rebalance-94h5j"
Nov 23 14:59:25 crc kubenswrapper[5050]: I1123 14:59:25.013187 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/f4e77f81-6a4b-4c66-8e27-891159e3da45-dispersionconf\") pod \"swift-ring-rebalance-99xwj\" (UID: \"f4e77f81-6a4b-4c66-8e27-891159e3da45\") " pod="openstack/swift-ring-rebalance-99xwj"
Nov 23 14:59:25 crc kubenswrapper[5050]: I1123 14:59:25.020626 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/f4e77f81-6a4b-4c66-8e27-891159e3da45-swiftconf\") pod \"swift-ring-rebalance-99xwj\" (UID: \"f4e77f81-6a4b-4c66-8e27-891159e3da45\") " pod="openstack/swift-ring-rebalance-99xwj"
Nov 23 14:59:25 crc kubenswrapper[5050]: I1123 14:59:25.020626 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/6fc1584d-8a46-46ab-8e59-9b4d6f2eae67-swiftconf\") pod \"swift-ring-rebalance-94h5j\" (UID: \"6fc1584d-8a46-46ab-8e59-9b4d6f2eae67\") " pod="openstack/swift-ring-rebalance-94h5j"
Nov 23 14:59:25 crc kubenswrapper[5050]: I1123 14:59:25.023616 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6fc1584d-8a46-46ab-8e59-9b4d6f2eae67-combined-ca-bundle\") pod \"swift-ring-rebalance-94h5j\" (UID: \"6fc1584d-8a46-46ab-8e59-9b4d6f2eae67\") " pod="openstack/swift-ring-rebalance-94h5j"
Nov 23 14:59:25 crc kubenswrapper[5050]: I1123 14:59:25.023822 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jw69k\" (UniqueName: \"kubernetes.io/projected/6fc1584d-8a46-46ab-8e59-9b4d6f2eae67-kube-api-access-jw69k\") pod \"swift-ring-rebalance-94h5j\" (UID: \"6fc1584d-8a46-46ab-8e59-9b4d6f2eae67\") " pod="openstack/swift-ring-rebalance-94h5j"
Nov 23 14:59:25 crc kubenswrapper[5050]: I1123 14:59:25.024030 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4wf9s\" (UniqueName: \"kubernetes.io/projected/f4e77f81-6a4b-4c66-8e27-891159e3da45-kube-api-access-4wf9s\") pod \"swift-ring-rebalance-99xwj\" (UID: \"f4e77f81-6a4b-4c66-8e27-891159e3da45\") " pod="openstack/swift-ring-rebalance-99xwj"
Nov 23 14:59:25 crc kubenswrapper[5050]: I1123 14:59:25.168520 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-99xwj"
Nov 23 14:59:25 crc kubenswrapper[5050]: I1123 14:59:25.563191 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="99bf0fd0-57d8-459a-a6b1-f538e878af9b" path="/var/lib/kubelet/pods/99bf0fd0-57d8-459a-a6b1-f538e878af9b/volumes"
Nov 23 14:59:25 crc kubenswrapper[5050]: I1123 14:59:25.672134 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-99xwj"]
Nov 23 14:59:25 crc kubenswrapper[5050]: I1123 14:59:25.767088 5050 generic.go:334] "Generic (PLEG): container finished" podID="8982637f-7dc1-4454-9213-47ea0f43971a" containerID="faedd9482b7ef82e8458f6c7533ea9e2546fbb705468e4e4c265eab9f4f12302" exitCode=0
Nov 23 14:59:25 crc kubenswrapper[5050]: I1123 14:59:25.767520 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-98kts" event={"ID":"8982637f-7dc1-4454-9213-47ea0f43971a","Type":"ContainerDied","Data":"faedd9482b7ef82e8458f6c7533ea9e2546fbb705468e4e4c265eab9f4f12302"}
Nov 23 14:59:25 crc kubenswrapper[5050]: I1123 14:59:25.804719 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-99xwj" event={"ID":"f4e77f81-6a4b-4c66-8e27-891159e3da45","Type":"ContainerStarted","Data":"f65487eb6925dad9b190466f20f35976f26db9e3c815b7e59746b34010f1e092"}
Nov 23 14:59:25 crc kubenswrapper[5050]: I1123 14:59:25.840773 5050 generic.go:334] "Generic (PLEG): container finished" podID="bd77ef0c-49e7-44ea-bf7f-383a60ca3012" containerID="4d1b6c6c61b575c6c0aa6947db68d1bcd9ab01330419cb2406597cf12bc7e6b6" exitCode=0
Nov 23 14:59:25 crc kubenswrapper[5050]: I1123 14:59:25.840890 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-94h5j"
Nov 23 14:59:25 crc kubenswrapper[5050]: I1123 14:59:25.840877 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5bf47b49b7-zhbvv" event={"ID":"bd77ef0c-49e7-44ea-bf7f-383a60ca3012","Type":"ContainerDied","Data":"4d1b6c6c61b575c6c0aa6947db68d1bcd9ab01330419cb2406597cf12bc7e6b6"}
Nov 23 14:59:25 crc kubenswrapper[5050]: I1123 14:59:25.840955 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5bf47b49b7-zhbvv" event={"ID":"bd77ef0c-49e7-44ea-bf7f-383a60ca3012","Type":"ContainerDied","Data":"f60bc5764a710ce746b5e13dec68bab5dfcb23037386a97d08383c83f1129b4e"}
Nov 23 14:59:25 crc kubenswrapper[5050]: I1123 14:59:25.840970 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f60bc5764a710ce746b5e13dec68bab5dfcb23037386a97d08383c83f1129b4e"
Nov 23 14:59:25 crc kubenswrapper[5050]: I1123 14:59:25.850657 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5bf47b49b7-zhbvv"
Nov 23 14:59:25 crc kubenswrapper[5050]: I1123 14:59:25.859230 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-94h5j"
Nov 23 14:59:25 crc kubenswrapper[5050]: I1123 14:59:25.932937 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/6fc1584d-8a46-46ab-8e59-9b4d6f2eae67-etc-swift\") pod \"6fc1584d-8a46-46ab-8e59-9b4d6f2eae67\" (UID: \"6fc1584d-8a46-46ab-8e59-9b4d6f2eae67\") "
Nov 23 14:59:25 crc kubenswrapper[5050]: I1123 14:59:25.933119 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bd77ef0c-49e7-44ea-bf7f-383a60ca3012-config\") pod \"bd77ef0c-49e7-44ea-bf7f-383a60ca3012\" (UID: \"bd77ef0c-49e7-44ea-bf7f-383a60ca3012\") "
Nov 23 14:59:25 crc kubenswrapper[5050]: I1123 14:59:25.933158 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bd77ef0c-49e7-44ea-bf7f-383a60ca3012-dns-svc\") pod \"bd77ef0c-49e7-44ea-bf7f-383a60ca3012\" (UID: \"bd77ef0c-49e7-44ea-bf7f-383a60ca3012\") "
Nov 23 14:59:25 crc kubenswrapper[5050]: I1123 14:59:25.933243 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/6fc1584d-8a46-46ab-8e59-9b4d6f2eae67-swiftconf\") pod \"6fc1584d-8a46-46ab-8e59-9b4d6f2eae67\" (UID: \"6fc1584d-8a46-46ab-8e59-9b4d6f2eae67\") "
Nov 23 14:59:25 crc kubenswrapper[5050]: I1123 14:59:25.933288 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bd77ef0c-49e7-44ea-bf7f-383a60ca3012-ovsdbserver-nb\") pod \"bd77ef0c-49e7-44ea-bf7f-383a60ca3012\" (UID: \"bd77ef0c-49e7-44ea-bf7f-383a60ca3012\") "
Nov 23 14:59:25 crc kubenswrapper[5050]: I1123 14:59:25.933350 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jw69k\" (UniqueName: \"kubernetes.io/projected/6fc1584d-8a46-46ab-8e59-9b4d6f2eae67-kube-api-access-jw69k\") pod \"6fc1584d-8a46-46ab-8e59-9b4d6f2eae67\" (UID: \"6fc1584d-8a46-46ab-8e59-9b4d6f2eae67\") "
Nov 23 14:59:25 crc kubenswrapper[5050]: I1123 14:59:25.933415 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/6fc1584d-8a46-46ab-8e59-9b4d6f2eae67-ring-data-devices\") pod \"6fc1584d-8a46-46ab-8e59-9b4d6f2eae67\" (UID: \"6fc1584d-8a46-46ab-8e59-9b4d6f2eae67\") "
Nov 23 14:59:25 crc kubenswrapper[5050]: I1123 14:59:25.933529 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6fc1584d-8a46-46ab-8e59-9b4d6f2eae67-combined-ca-bundle\") pod \"6fc1584d-8a46-46ab-8e59-9b4d6f2eae67\" (UID: \"6fc1584d-8a46-46ab-8e59-9b4d6f2eae67\") "
Nov 23 14:59:25 crc kubenswrapper[5050]: I1123 14:59:25.933595 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6fc1584d-8a46-46ab-8e59-9b4d6f2eae67-scripts\") pod \"6fc1584d-8a46-46ab-8e59-9b4d6f2eae67\" (UID: \"6fc1584d-8a46-46ab-8e59-9b4d6f2eae67\") "
Nov 23 14:59:25 crc kubenswrapper[5050]: I1123 14:59:25.934246 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/6fc1584d-8a46-46ab-8e59-9b4d6f2eae67-dispersionconf\") pod \"6fc1584d-8a46-46ab-8e59-9b4d6f2eae67\" (UID: \"6fc1584d-8a46-46ab-8e59-9b4d6f2eae67\") "
Nov 23 14:59:25 crc kubenswrapper[5050]: I1123 14:59:25.934291 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6fc1584d-8a46-46ab-8e59-9b4d6f2eae67-ring-data-devices" (OuterVolumeSpecName: "ring-data-devices") pod "6fc1584d-8a46-46ab-8e59-9b4d6f2eae67" (UID: "6fc1584d-8a46-46ab-8e59-9b4d6f2eae67"). InnerVolumeSpecName "ring-data-devices". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 23 14:59:25 crc kubenswrapper[5050]: I1123 14:59:25.934320 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b4f6m\" (UniqueName: \"kubernetes.io/projected/bd77ef0c-49e7-44ea-bf7f-383a60ca3012-kube-api-access-b4f6m\") pod \"bd77ef0c-49e7-44ea-bf7f-383a60ca3012\" (UID: \"bd77ef0c-49e7-44ea-bf7f-383a60ca3012\") "
Nov 23 14:59:25 crc kubenswrapper[5050]: I1123 14:59:25.934598 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6fc1584d-8a46-46ab-8e59-9b4d6f2eae67-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "6fc1584d-8a46-46ab-8e59-9b4d6f2eae67" (UID: "6fc1584d-8a46-46ab-8e59-9b4d6f2eae67"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 23 14:59:25 crc kubenswrapper[5050]: I1123 14:59:25.934849 5050 reconciler_common.go:293] "Volume detached for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/6fc1584d-8a46-46ab-8e59-9b4d6f2eae67-ring-data-devices\") on node \"crc\" DevicePath \"\""
Nov 23 14:59:25 crc kubenswrapper[5050]: I1123 14:59:25.934866 5050 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/6fc1584d-8a46-46ab-8e59-9b4d6f2eae67-etc-swift\") on node \"crc\" DevicePath \"\""
Nov 23 14:59:25 crc kubenswrapper[5050]: I1123 14:59:25.934891 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6fc1584d-8a46-46ab-8e59-9b4d6f2eae67-scripts" (OuterVolumeSpecName: "scripts") pod "6fc1584d-8a46-46ab-8e59-9b4d6f2eae67" (UID: "6fc1584d-8a46-46ab-8e59-9b4d6f2eae67"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 23 14:59:25 crc kubenswrapper[5050]: I1123 14:59:25.938999 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6fc1584d-8a46-46ab-8e59-9b4d6f2eae67-kube-api-access-jw69k" (OuterVolumeSpecName: "kube-api-access-jw69k") pod "6fc1584d-8a46-46ab-8e59-9b4d6f2eae67" (UID: "6fc1584d-8a46-46ab-8e59-9b4d6f2eae67"). InnerVolumeSpecName "kube-api-access-jw69k". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 23 14:59:25 crc kubenswrapper[5050]: I1123 14:59:25.941206 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6fc1584d-8a46-46ab-8e59-9b4d6f2eae67-dispersionconf" (OuterVolumeSpecName: "dispersionconf") pod "6fc1584d-8a46-46ab-8e59-9b4d6f2eae67" (UID: "6fc1584d-8a46-46ab-8e59-9b4d6f2eae67"). InnerVolumeSpecName "dispersionconf". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 23 14:59:25 crc kubenswrapper[5050]: I1123 14:59:25.941695 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6fc1584d-8a46-46ab-8e59-9b4d6f2eae67-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6fc1584d-8a46-46ab-8e59-9b4d6f2eae67" (UID: "6fc1584d-8a46-46ab-8e59-9b4d6f2eae67"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 23 14:59:25 crc kubenswrapper[5050]: I1123 14:59:25.944728 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd77ef0c-49e7-44ea-bf7f-383a60ca3012-kube-api-access-b4f6m" (OuterVolumeSpecName: "kube-api-access-b4f6m") pod "bd77ef0c-49e7-44ea-bf7f-383a60ca3012" (UID: "bd77ef0c-49e7-44ea-bf7f-383a60ca3012"). InnerVolumeSpecName "kube-api-access-b4f6m". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 23 14:59:25 crc kubenswrapper[5050]: I1123 14:59:25.946629 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6fc1584d-8a46-46ab-8e59-9b4d6f2eae67-swiftconf" (OuterVolumeSpecName: "swiftconf") pod "6fc1584d-8a46-46ab-8e59-9b4d6f2eae67" (UID: "6fc1584d-8a46-46ab-8e59-9b4d6f2eae67"). InnerVolumeSpecName "swiftconf". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 23 14:59:26 crc kubenswrapper[5050]: I1123 14:59:26.006196 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bd77ef0c-49e7-44ea-bf7f-383a60ca3012-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "bd77ef0c-49e7-44ea-bf7f-383a60ca3012" (UID: "bd77ef0c-49e7-44ea-bf7f-383a60ca3012"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 23 14:59:26 crc kubenswrapper[5050]: I1123 14:59:26.022941 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bd77ef0c-49e7-44ea-bf7f-383a60ca3012-config" (OuterVolumeSpecName: "config") pod "bd77ef0c-49e7-44ea-bf7f-383a60ca3012" (UID: "bd77ef0c-49e7-44ea-bf7f-383a60ca3012"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 23 14:59:26 crc kubenswrapper[5050]: I1123 14:59:26.037302 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/4947b94e-805e-457e-bb17-f7faea3b5fca-etc-swift\") pod \"swift-storage-0\" (UID: \"4947b94e-805e-457e-bb17-f7faea3b5fca\") " pod="openstack/swift-storage-0"
Nov 23 14:59:26 crc kubenswrapper[5050]: I1123 14:59:26.037545 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jw69k\" (UniqueName: \"kubernetes.io/projected/6fc1584d-8a46-46ab-8e59-9b4d6f2eae67-kube-api-access-jw69k\") on node \"crc\" DevicePath \"\""
Nov 23 14:59:26 crc kubenswrapper[5050]: I1123 14:59:26.037561 5050 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6fc1584d-8a46-46ab-8e59-9b4d6f2eae67-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 23 14:59:26 crc kubenswrapper[5050]: I1123 14:59:26.037588 5050 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6fc1584d-8a46-46ab-8e59-9b4d6f2eae67-scripts\") on node \"crc\" DevicePath \"\""
Nov 23 14:59:26 crc kubenswrapper[5050]: I1123 14:59:26.037602 5050 reconciler_common.go:293] "Volume detached for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/6fc1584d-8a46-46ab-8e59-9b4d6f2eae67-dispersionconf\") on node \"crc\" DevicePath \"\""
Nov 23 14:59:26 crc kubenswrapper[5050]: I1123 14:59:26.037614 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b4f6m\" (UniqueName: \"kubernetes.io/projected/bd77ef0c-49e7-44ea-bf7f-383a60ca3012-kube-api-access-b4f6m\") on node \"crc\" DevicePath \"\""
Nov 23 14:59:26 crc kubenswrapper[5050]: I1123 14:59:26.037622 5050 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bd77ef0c-49e7-44ea-bf7f-383a60ca3012-config\") on node \"crc\" DevicePath \"\""
Nov 23 14:59:26 crc kubenswrapper[5050]: I1123 14:59:26.037631 5050 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bd77ef0c-49e7-44ea-bf7f-383a60ca3012-dns-svc\") on node \"crc\" DevicePath \"\""
Nov 23 14:59:26 crc kubenswrapper[5050]: I1123 14:59:26.037639 5050 reconciler_common.go:293] "Volume detached for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/6fc1584d-8a46-46ab-8e59-9b4d6f2eae67-swiftconf\") on node \"crc\" DevicePath \"\""
Nov 23 14:59:26 crc kubenswrapper[5050]: E1123 14:59:26.037768 5050 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found
Nov 23 14:59:26 crc kubenswrapper[5050]: E1123 14:59:26.037783 5050 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found
Nov 23 14:59:26 crc kubenswrapper[5050]: E1123 14:59:26.037831 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/4947b94e-805e-457e-bb17-f7faea3b5fca-etc-swift podName:4947b94e-805e-457e-bb17-f7faea3b5fca nodeName:}" failed. No retries permitted until 2025-11-23 14:59:28.037816135 +0000 UTC m=+1063.204812620 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/4947b94e-805e-457e-bb17-f7faea3b5fca-etc-swift") pod "swift-storage-0" (UID: "4947b94e-805e-457e-bb17-f7faea3b5fca") : configmap "swift-ring-files" not found
Nov 23 14:59:26 crc kubenswrapper[5050]: I1123 14:59:26.050887 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bd77ef0c-49e7-44ea-bf7f-383a60ca3012-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "bd77ef0c-49e7-44ea-bf7f-383a60ca3012" (UID: "bd77ef0c-49e7-44ea-bf7f-383a60ca3012"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 23 14:59:26 crc kubenswrapper[5050]: I1123 14:59:26.139635 5050 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bd77ef0c-49e7-44ea-bf7f-383a60ca3012-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Nov 23 14:59:26 crc kubenswrapper[5050]: I1123 14:59:26.855817 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-98kts" event={"ID":"8982637f-7dc1-4454-9213-47ea0f43971a","Type":"ContainerStarted","Data":"2d7a198938bb1d3a0d558342939a2598c548f53de9bad190abfab99ec80ae0a2"}
Nov 23 14:59:26 crc kubenswrapper[5050]: I1123 14:59:26.856342 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-b8fbc5445-98kts"
Nov 23 14:59:26 crc kubenswrapper[5050]: I1123 14:59:26.857734 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"27e26dea-41e9-4d40-9adc-f902e5f4791f","Type":"ContainerStarted","Data":"8b448c0c28f62c7ec1d3e7f3dd39d921c151ee6325f2379cf0312b266ebedf37"}
Nov 23 14:59:26 crc kubenswrapper[5050]: I1123 14:59:26.857782 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"27e26dea-41e9-4d40-9adc-f902e5f4791f","Type":"ContainerStarted","Data":"deafd7234d6f0ca5e0fe921dff3e5ef022a4b569f1d6016eec6fa8e609b595e2"}
Nov 23 14:59:26 crc kubenswrapper[5050]: I1123 14:59:26.857795 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5bf47b49b7-zhbvv"
Nov 23 14:59:26 crc kubenswrapper[5050]: I1123 14:59:26.857940 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-94h5j"
Nov 23 14:59:26 crc kubenswrapper[5050]: I1123 14:59:26.858214 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-northd-0"
Nov 23 14:59:26 crc kubenswrapper[5050]: I1123 14:59:26.881596 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-b8fbc5445-98kts" podStartSLOduration=3.881573853 podStartE2EDuration="3.881573853s" podCreationTimestamp="2025-11-23 14:59:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 14:59:26.878464618 +0000 UTC m=+1062.045461103" watchObservedRunningTime="2025-11-23 14:59:26.881573853 +0000 UTC m=+1062.048570338"
Nov 23 14:59:26 crc kubenswrapper[5050]: I1123 14:59:26.915062 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-northd-0" podStartSLOduration=2.879321456 podStartE2EDuration="4.915027268s" podCreationTimestamp="2025-11-23 14:59:22 +0000 UTC" firstStartedPulling="2025-11-23 14:59:23.813998384 +0000 UTC m=+1058.980994869" lastFinishedPulling="2025-11-23 14:59:25.849704196 +0000 UTC m=+1061.016700681" observedRunningTime="2025-11-23 14:59:26.901870884 +0000 UTC m=+1062.068867369" watchObservedRunningTime="2025-11-23 14:59:26.915027268 +0000 UTC m=+1062.082023753"
Nov 23 14:59:26 crc kubenswrapper[5050]: I1123 14:59:26.949211 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-ring-rebalance-94h5j"]
Nov 23 14:59:26 crc kubenswrapper[5050]: I1123 14:59:26.954778 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/swift-ring-rebalance-94h5j"]
Nov 23 14:59:26 crc kubenswrapper[5050]: I1123 14:59:26.960314 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5bf47b49b7-zhbvv"]
Nov 23 14:59:26 crc kubenswrapper[5050]: I1123 14:59:26.975368 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5bf47b49b7-zhbvv"]
Nov 23 14:59:27 crc kubenswrapper[5050]: I1123 14:59:27.567591 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6fc1584d-8a46-46ab-8e59-9b4d6f2eae67" path="/var/lib/kubelet/pods/6fc1584d-8a46-46ab-8e59-9b4d6f2eae67/volumes"
Nov 23 14:59:27 crc kubenswrapper[5050]: I1123 14:59:27.568105 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd77ef0c-49e7-44ea-bf7f-383a60ca3012" path="/var/lib/kubelet/pods/bd77ef0c-49e7-44ea-bf7f-383a60ca3012/volumes"
Nov 23 14:59:28 crc kubenswrapper[5050]: I1123 14:59:28.102203 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/4947b94e-805e-457e-bb17-f7faea3b5fca-etc-swift\") pod \"swift-storage-0\" (UID: \"4947b94e-805e-457e-bb17-f7faea3b5fca\") " pod="openstack/swift-storage-0"
Nov 23 14:59:28 crc kubenswrapper[5050]: E1123 14:59:28.102415 5050 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found
Nov 23 14:59:28 crc kubenswrapper[5050]: E1123 14:59:28.102441 5050 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found
Nov 23 14:59:28 crc kubenswrapper[5050]: E1123 14:59:28.102550 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/4947b94e-805e-457e-bb17-f7faea3b5fca-etc-swift podName:4947b94e-805e-457e-bb17-f7faea3b5fca nodeName:}" failed. No retries permitted until 2025-11-23 14:59:32.102524937 +0000 UTC m=+1067.269521422 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/4947b94e-805e-457e-bb17-f7faea3b5fca-etc-swift") pod "swift-storage-0" (UID: "4947b94e-805e-457e-bb17-f7faea3b5fca") : configmap "swift-ring-files" not found
Nov 23 14:59:29 crc kubenswrapper[5050]: I1123 14:59:29.224365 5050 patch_prober.go:28] interesting pod/machine-config-daemon-hlrlq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 23 14:59:29 crc kubenswrapper[5050]: I1123 14:59:29.224955 5050 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 23 14:59:29 crc kubenswrapper[5050]: I1123 14:59:29.476834 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-galera-0"
Nov 23 14:59:29 crc kubenswrapper[5050]: I1123 14:59:29.478909 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-galera-0"
Nov 23 14:59:29 crc kubenswrapper[5050]: I1123 14:59:29.609136 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-galera-0"
Nov 23 14:59:29 crc kubenswrapper[5050]: I1123 14:59:29.889016 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-99xwj" event={"ID":"f4e77f81-6a4b-4c66-8e27-891159e3da45","Type":"ContainerStarted","Data":"e3acb2767a7451b13639ee8d1428b93e74fd54c5a63166ef91e7d202125e96e6"}
Nov 23 14:59:29 crc kubenswrapper[5050]: I1123 14:59:29.922690 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-ring-rebalance-99xwj" podStartSLOduration=2.566816625 podStartE2EDuration="5.922667551s" podCreationTimestamp="2025-11-23 14:59:24 +0000 UTC" firstStartedPulling="2025-11-23 14:59:25.717713488 +0000 UTC m=+1060.884709973" lastFinishedPulling="2025-11-23 14:59:29.073564414 +0000 UTC m=+1064.240560899" observedRunningTime="2025-11-23 14:59:29.915003039 +0000 UTC m=+1065.081999554" watchObservedRunningTime="2025-11-23 14:59:29.922667551 +0000 UTC m=+1065.089664046"
Nov 23 14:59:30 crc kubenswrapper[5050]: I1123 14:59:30.027015 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-galera-0"
Nov 23 14:59:30 crc kubenswrapper[5050]: I1123 14:59:30.789745 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-cell1-galera-0"
Nov 23 14:59:30 crc kubenswrapper[5050]: I1123 14:59:30.789834 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-cell1-galera-0"
Nov 23 14:59:30 crc kubenswrapper[5050]: I1123 14:59:30.926026 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-cell1-galera-0"
Nov 23 14:59:31 crc kubenswrapper[5050]: I1123 14:59:31.042114 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-create-d6xrh"]
Nov 23 14:59:31 crc kubenswrapper[5050]: E1123 14:59:31.042636 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bd77ef0c-49e7-44ea-bf7f-383a60ca3012" containerName="dnsmasq-dns"
Nov 23 14:59:31 crc kubenswrapper[5050]: I1123 14:59:31.042658 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="bd77ef0c-49e7-44ea-bf7f-383a60ca3012" containerName="dnsmasq-dns"
Nov 23 14:59:31 crc kubenswrapper[5050]: E1123 14:59:31.042681 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bd77ef0c-49e7-44ea-bf7f-383a60ca3012" containerName="init"
Nov 23 14:59:31 crc kubenswrapper[5050]: I1123 14:59:31.042690 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="bd77ef0c-49e7-44ea-bf7f-383a60ca3012" containerName="init"
Nov 23 14:59:31 crc kubenswrapper[5050]: I1123 14:59:31.042905 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="bd77ef0c-49e7-44ea-bf7f-383a60ca3012" containerName="dnsmasq-dns"
Nov 23 14:59:31 crc kubenswrapper[5050]: I1123 14:59:31.045910 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-d6xrh"
Nov 23 14:59:31 crc kubenswrapper[5050]: I1123 14:59:31.058497 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-d6xrh"]
Nov 23 14:59:31 crc kubenswrapper[5050]: I1123 14:59:31.073696 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/241ba8d0-d451-4df3-888a-ff7df9129456-operator-scripts\") pod \"keystone-db-create-d6xrh\" (UID: \"241ba8d0-d451-4df3-888a-ff7df9129456\") " pod="openstack/keystone-db-create-d6xrh"
Nov 23 14:59:31 crc kubenswrapper[5050]: I1123 14:59:31.073814 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m5ztp\" (UniqueName: \"kubernetes.io/projected/241ba8d0-d451-4df3-888a-ff7df9129456-kube-api-access-m5ztp\") pod \"keystone-db-create-d6xrh\" (UID: \"241ba8d0-d451-4df3-888a-ff7df9129456\") " pod="openstack/keystone-db-create-d6xrh"
Nov 23 14:59:31 crc kubenswrapper[5050]: I1123 14:59:31.106337 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-cell1-galera-0"
Nov 23 14:59:31 crc kubenswrapper[5050]: I1123 14:59:31.175584 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/241ba8d0-d451-4df3-888a-ff7df9129456-operator-scripts\") pod \"keystone-db-create-d6xrh\" (UID: \"241ba8d0-d451-4df3-888a-ff7df9129456\") " pod="openstack/keystone-db-create-d6xrh"
Nov 23 14:59:31 crc kubenswrapper[5050]: I1123 14:59:31.175706 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m5ztp\" (UniqueName: \"kubernetes.io/projected/241ba8d0-d451-4df3-888a-ff7df9129456-kube-api-access-m5ztp\") pod \"keystone-db-create-d6xrh\" (UID: \"241ba8d0-d451-4df3-888a-ff7df9129456\") " pod="openstack/keystone-db-create-d6xrh"
Nov 23 14:59:31 crc kubenswrapper[5050]: I1123 14:59:31.177207 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/241ba8d0-d451-4df3-888a-ff7df9129456-operator-scripts\") pod \"keystone-db-create-d6xrh\" (UID: \"241ba8d0-d451-4df3-888a-ff7df9129456\") " pod="openstack/keystone-db-create-d6xrh"
Nov 23 14:59:31 crc kubenswrapper[5050]: I1123 14:59:31.218507 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m5ztp\" (UniqueName: \"kubernetes.io/projected/241ba8d0-d451-4df3-888a-ff7df9129456-kube-api-access-m5ztp\") pod \"keystone-db-create-d6xrh\" (UID: \"241ba8d0-d451-4df3-888a-ff7df9129456\") " pod="openstack/keystone-db-create-d6xrh"
Nov 23 14:59:31 crc kubenswrapper[5050]: I1123 14:59:31.251510 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-c056-account-create-tc55d"]
Nov 23 14:59:31 crc kubenswrapper[5050]: I1123 14:59:31.253437 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-c056-account-create-tc55d"
Nov 23 14:59:31 crc kubenswrapper[5050]: I1123 14:59:31.258218 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-c056-account-create-tc55d"]
Nov 23 14:59:31 crc kubenswrapper[5050]: I1123 14:59:31.259049 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-db-secret"
Nov 23 14:59:31 crc kubenswrapper[5050]: I1123 14:59:31.278001 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d52e824b-4a80-4af9-961a-42652860b157-operator-scripts\") pod \"keystone-c056-account-create-tc55d\" (UID: \"d52e824b-4a80-4af9-961a-42652860b157\") " pod="openstack/keystone-c056-account-create-tc55d"
Nov 23 14:59:31 crc kubenswrapper[5050]: I1123 14:59:31.278485 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5qrdd\" (UniqueName: \"kubernetes.io/projected/d52e824b-4a80-4af9-961a-42652860b157-kube-api-access-5qrdd\") pod \"keystone-c056-account-create-tc55d\" (UID: \"d52e824b-4a80-4af9-961a-42652860b157\") " pod="openstack/keystone-c056-account-create-tc55d"
Nov 23 14:59:31 crc kubenswrapper[5050]: I1123 14:59:31.380161 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5qrdd\" (UniqueName: \"kubernetes.io/projected/d52e824b-4a80-4af9-961a-42652860b157-kube-api-access-5qrdd\") pod \"keystone-c056-account-create-tc55d\" (UID: \"d52e824b-4a80-4af9-961a-42652860b157\") " pod="openstack/keystone-c056-account-create-tc55d"
Nov 23 14:59:31 crc kubenswrapper[5050]: I1123 14:59:31.380228 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d52e824b-4a80-4af9-961a-42652860b157-operator-scripts\") pod \"keystone-c056-account-create-tc55d\" (UID: \"d52e824b-4a80-4af9-961a-42652860b157\") " pod="openstack/keystone-c056-account-create-tc55d"
Nov 23 14:59:31 crc kubenswrapper[5050]: I1123 14:59:31.380970 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d52e824b-4a80-4af9-961a-42652860b157-operator-scripts\") pod \"keystone-c056-account-create-tc55d\" (UID: \"d52e824b-4a80-4af9-961a-42652860b157\") " pod="openstack/keystone-c056-account-create-tc55d"
Nov 23 14:59:31 crc kubenswrapper[5050]: I1123 14:59:31.413027 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-d6xrh"
Nov 23 14:59:31 crc kubenswrapper[5050]: I1123 14:59:31.417965 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5qrdd\" (UniqueName: \"kubernetes.io/projected/d52e824b-4a80-4af9-961a-42652860b157-kube-api-access-5qrdd\") pod \"keystone-c056-account-create-tc55d\" (UID: \"d52e824b-4a80-4af9-961a-42652860b157\") " pod="openstack/keystone-c056-account-create-tc55d"
Nov 23 14:59:31 crc kubenswrapper[5050]: I1123 14:59:31.561554 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-create-875vf"]
Nov 23 14:59:31 crc kubenswrapper[5050]: I1123 14:59:31.562864 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-875vf"
Nov 23 14:59:31 crc kubenswrapper[5050]: I1123 14:59:31.563651 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-875vf"]
Nov 23 14:59:31 crc kubenswrapper[5050]: I1123 14:59:31.573743 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-c056-account-create-tc55d"
Nov 23 14:59:31 crc kubenswrapper[5050]: I1123 14:59:31.651864 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-create-ktwq8"]
Nov 23 14:59:31 crc kubenswrapper[5050]: I1123 14:59:31.654425 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-ktwq8"
Nov 23 14:59:31 crc kubenswrapper[5050]: I1123 14:59:31.661217 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-ktwq8"]
Nov 23 14:59:31 crc kubenswrapper[5050]: I1123 14:59:31.693067 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sbnpc\" (UniqueName: \"kubernetes.io/projected/27cbd9dd-3372-49ca-9321-421a05e76922-kube-api-access-sbnpc\") pod \"placement-db-create-875vf\" (UID: \"27cbd9dd-3372-49ca-9321-421a05e76922\") " pod="openstack/placement-db-create-875vf"
Nov 23 14:59:31 crc kubenswrapper[5050]: I1123 14:59:31.693149 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/27cbd9dd-3372-49ca-9321-421a05e76922-operator-scripts\") pod \"placement-db-create-875vf\" (UID: \"27cbd9dd-3372-49ca-9321-421a05e76922\") " pod="openstack/placement-db-create-875vf"
Nov 23 14:59:31 crc kubenswrapper[5050]: I1123 14:59:31.699616 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-8554648995-n7gcr"
Nov 23 14:59:31 crc kubenswrapper[5050]: I1123 14:59:31.767345 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-bb96-account-create-mhdc2"]
Nov 23 14:59:31 crc kubenswrapper[5050]: I1123 14:59:31.782724 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-bb96-account-create-mhdc2"]
Nov 23 14:59:31 crc kubenswrapper[5050]: I1123 14:59:31.785054 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-bb96-account-create-mhdc2"
Nov 23 14:59:31 crc kubenswrapper[5050]: I1123 14:59:31.789961 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-db-secret"
Nov 23 14:59:31 crc kubenswrapper[5050]: I1123 14:59:31.796433 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sbnpc\" (UniqueName: \"kubernetes.io/projected/27cbd9dd-3372-49ca-9321-421a05e76922-kube-api-access-sbnpc\") pod \"placement-db-create-875vf\" (UID: \"27cbd9dd-3372-49ca-9321-421a05e76922\") " pod="openstack/placement-db-create-875vf"
Nov 23 14:59:31 crc kubenswrapper[5050]: I1123 14:59:31.796535 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/27cbd9dd-3372-49ca-9321-421a05e76922-operator-scripts\") pod \"placement-db-create-875vf\" (UID: \"27cbd9dd-3372-49ca-9321-421a05e76922\") " pod="openstack/placement-db-create-875vf"
Nov 23 14:59:31 crc kubenswrapper[5050]: I1123 14:59:31.796596 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lzb9l\" (UniqueName: \"kubernetes.io/projected/e6916e1b-5874-4095-8306-0af203ba7f7d-kube-api-access-lzb9l\") pod \"glance-db-create-ktwq8\" (UID: \"e6916e1b-5874-4095-8306-0af203ba7f7d\") " pod="openstack/glance-db-create-ktwq8"
Nov 23 14:59:31 crc kubenswrapper[5050]: I1123 14:59:31.796704 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e6916e1b-5874-4095-8306-0af203ba7f7d-operator-scripts\") pod \"glance-db-create-ktwq8\" (UID: \"e6916e1b-5874-4095-8306-0af203ba7f7d\") " pod="openstack/glance-db-create-ktwq8"
Nov 23 14:59:31 crc kubenswrapper[5050]: I1123 14:59:31.797737 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/27cbd9dd-3372-49ca-9321-421a05e76922-operator-scripts\") pod \"placement-db-create-875vf\" (UID: \"27cbd9dd-3372-49ca-9321-421a05e76922\") " pod="openstack/placement-db-create-875vf"
Nov 23 14:59:31 crc kubenswrapper[5050]: I1123 14:59:31.818068 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sbnpc\" (UniqueName: \"kubernetes.io/projected/27cbd9dd-3372-49ca-9321-421a05e76922-kube-api-access-sbnpc\") pod \"placement-db-create-875vf\" (UID: \"27cbd9dd-3372-49ca-9321-421a05e76922\") " pod="openstack/placement-db-create-875vf"
Nov 23 14:59:31 crc kubenswrapper[5050]: I1123 14:59:31.860113 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-734b-account-create-8vfn6"]
Nov 23 14:59:31 crc kubenswrapper[5050]: I1123 14:59:31.861200 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-734b-account-create-8vfn6"
Nov 23 14:59:31 crc kubenswrapper[5050]: I1123 14:59:31.867122 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-db-secret"
Nov 23 14:59:31 crc kubenswrapper[5050]: I1123 14:59:31.878463 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-734b-account-create-8vfn6"]
Nov 23 14:59:31 crc kubenswrapper[5050]: I1123 14:59:31.893581 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-875vf"
Nov 23 14:59:31 crc kubenswrapper[5050]: I1123 14:59:31.898394 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e6916e1b-5874-4095-8306-0af203ba7f7d-operator-scripts\") pod \"glance-db-create-ktwq8\" (UID: \"e6916e1b-5874-4095-8306-0af203ba7f7d\") " pod="openstack/glance-db-create-ktwq8"
Nov 23 14:59:31 crc kubenswrapper[5050]: I1123 14:59:31.898538 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4bf9e433-c2f5-434e-8f06-56813a38e287-operator-scripts\") pod \"glance-bb96-account-create-mhdc2\" (UID: \"4bf9e433-c2f5-434e-8f06-56813a38e287\") " pod="openstack/glance-bb96-account-create-mhdc2"
Nov 23 14:59:31 crc kubenswrapper[5050]: I1123 14:59:31.898640 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qw9gn\" (UniqueName: \"kubernetes.io/projected/4bf9e433-c2f5-434e-8f06-56813a38e287-kube-api-access-qw9gn\") pod \"glance-bb96-account-create-mhdc2\" (UID: \"4bf9e433-c2f5-434e-8f06-56813a38e287\") " pod="openstack/glance-bb96-account-create-mhdc2"
Nov 23 14:59:31 crc kubenswrapper[5050]: I1123 14:59:31.898674 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lzb9l\" (UniqueName: \"kubernetes.io/projected/e6916e1b-5874-4095-8306-0af203ba7f7d-kube-api-access-lzb9l\") pod \"glance-db-create-ktwq8\" (UID: \"e6916e1b-5874-4095-8306-0af203ba7f7d\") " pod="openstack/glance-db-create-ktwq8"
Nov 23 14:59:31 crc kubenswrapper[5050]: I1123 14:59:31.899649 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e6916e1b-5874-4095-8306-0af203ba7f7d-operator-scripts\") pod \"glance-db-create-ktwq8\" (UID: \"e6916e1b-5874-4095-8306-0af203ba7f7d\") " pod="openstack/glance-db-create-ktwq8"
Nov 23 14:59:31 crc kubenswrapper[5050]: I1123 14:59:31.920783 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lzb9l\" (UniqueName: \"kubernetes.io/projected/e6916e1b-5874-4095-8306-0af203ba7f7d-kube-api-access-lzb9l\") pod \"glance-db-create-ktwq8\" (UID: \"e6916e1b-5874-4095-8306-0af203ba7f7d\") " pod="openstack/glance-db-create-ktwq8"
Nov 23 14:59:32 crc kubenswrapper[5050]: I1123 14:59:32.004939 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4bf9e433-c2f5-434e-8f06-56813a38e287-operator-scripts\") pod \"glance-bb96-account-create-mhdc2\" (UID: \"4bf9e433-c2f5-434e-8f06-56813a38e287\") " pod="openstack/glance-bb96-account-create-mhdc2"
Nov 23 14:59:32 crc kubenswrapper[5050]: I1123 14:59:32.005026 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qw9gn\" (UniqueName: \"kubernetes.io/projected/4bf9e433-c2f5-434e-8f06-56813a38e287-kube-api-access-qw9gn\") pod \"glance-bb96-account-create-mhdc2\" (UID: \"4bf9e433-c2f5-434e-8f06-56813a38e287\") " pod="openstack/glance-bb96-account-create-mhdc2"
Nov 23 14:59:32 crc kubenswrapper[5050]: I1123 14:59:32.005109 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cr8dv\" (UniqueName: \"kubernetes.io/projected/dc6bbfa7-abb2-4676-814f-7a65c33b2e61-kube-api-access-cr8dv\") pod \"placement-734b-account-create-8vfn6\" (UID: \"dc6bbfa7-abb2-4676-814f-7a65c33b2e61\") " pod="openstack/placement-734b-account-create-8vfn6"
Nov 23 14:59:32 crc kubenswrapper[5050]: I1123 14:59:32.005162 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/dc6bbfa7-abb2-4676-814f-7a65c33b2e61-operator-scripts\") pod \"placement-734b-account-create-8vfn6\" (UID: \"dc6bbfa7-abb2-4676-814f-7a65c33b2e61\") " pod="openstack/placement-734b-account-create-8vfn6"
Nov 23 14:59:32 crc kubenswrapper[5050]: I1123 14:59:32.006548 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4bf9e433-c2f5-434e-8f06-56813a38e287-operator-scripts\") pod \"glance-bb96-account-create-mhdc2\" (UID: \"4bf9e433-c2f5-434e-8f06-56813a38e287\") " pod="openstack/glance-bb96-account-create-mhdc2"
Nov 23 14:59:32 crc kubenswrapper[5050]: I1123 14:59:32.023743 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-ktwq8"
Nov 23 14:59:32 crc kubenswrapper[5050]: I1123 14:59:32.028017 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qw9gn\" (UniqueName: \"kubernetes.io/projected/4bf9e433-c2f5-434e-8f06-56813a38e287-kube-api-access-qw9gn\") pod \"glance-bb96-account-create-mhdc2\" (UID: \"4bf9e433-c2f5-434e-8f06-56813a38e287\") " pod="openstack/glance-bb96-account-create-mhdc2"
Nov 23 14:59:32 crc kubenswrapper[5050]: I1123 14:59:32.110016 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/4947b94e-805e-457e-bb17-f7faea3b5fca-etc-swift\") pod \"swift-storage-0\" (UID: \"4947b94e-805e-457e-bb17-f7faea3b5fca\") " pod="openstack/swift-storage-0"
Nov 23 14:59:32 crc kubenswrapper[5050]: I1123 14:59:32.110139 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cr8dv\" (UniqueName: \"kubernetes.io/projected/dc6bbfa7-abb2-4676-814f-7a65c33b2e61-kube-api-access-cr8dv\") pod \"placement-734b-account-create-8vfn6\" (UID: \"dc6bbfa7-abb2-4676-814f-7a65c33b2e61\") " pod="openstack/placement-734b-account-create-8vfn6"
Nov 23 14:59:32 crc kubenswrapper[5050]: I1123 14:59:32.110281 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/dc6bbfa7-abb2-4676-814f-7a65c33b2e61-operator-scripts\") pod \"placement-734b-account-create-8vfn6\" (UID: \"dc6bbfa7-abb2-4676-814f-7a65c33b2e61\") " pod="openstack/placement-734b-account-create-8vfn6"
Nov 23 14:59:32 crc kubenswrapper[5050]: E1123 14:59:32.113354 5050 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found
Nov 23 14:59:32 crc kubenswrapper[5050]: E1123 14:59:32.113393 5050 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found
Nov 23 14:59:32 crc kubenswrapper[5050]: E1123 14:59:32.113478 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/4947b94e-805e-457e-bb17-f7faea3b5fca-etc-swift podName:4947b94e-805e-457e-bb17-f7faea3b5fca nodeName:}" failed. No retries permitted until 2025-11-23 14:59:40.113436167 +0000 UTC m=+1075.280432652 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/4947b94e-805e-457e-bb17-f7faea3b5fca-etc-swift") pod "swift-storage-0" (UID: "4947b94e-805e-457e-bb17-f7faea3b5fca") : configmap "swift-ring-files" not found
Nov 23 14:59:32 crc kubenswrapper[5050]: I1123 14:59:32.115336 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/dc6bbfa7-abb2-4676-814f-7a65c33b2e61-operator-scripts\") pod \"placement-734b-account-create-8vfn6\" (UID: \"dc6bbfa7-abb2-4676-814f-7a65c33b2e61\") " pod="openstack/placement-734b-account-create-8vfn6"
Nov 23 14:59:32 crc kubenswrapper[5050]: I1123 14:59:32.126819 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-bb96-account-create-mhdc2"
Nov 23 14:59:32 crc kubenswrapper[5050]: I1123 14:59:32.130463 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cr8dv\" (UniqueName: \"kubernetes.io/projected/dc6bbfa7-abb2-4676-814f-7a65c33b2e61-kube-api-access-cr8dv\") pod \"placement-734b-account-create-8vfn6\" (UID: \"dc6bbfa7-abb2-4676-814f-7a65c33b2e61\") " pod="openstack/placement-734b-account-create-8vfn6"
Nov 23 14:59:32 crc kubenswrapper[5050]: I1123 14:59:32.183280 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-734b-account-create-8vfn6"
Nov 23 14:59:32 crc kubenswrapper[5050]: I1123 14:59:32.196779 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-d6xrh"]
Nov 23 14:59:32 crc kubenswrapper[5050]: I1123 14:59:32.221192 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-c056-account-create-tc55d"]
Nov 23 14:59:32 crc kubenswrapper[5050]: I1123 14:59:32.375411 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-ktwq8"]
Nov 23 14:59:32 crc kubenswrapper[5050]: I1123 14:59:32.380347 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-875vf"]
Nov 23 14:59:32 crc kubenswrapper[5050]: I1123 14:59:32.705675 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-bb96-account-create-mhdc2"]
Nov 23 14:59:32 crc kubenswrapper[5050]: W1123 14:59:32.759611 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4bf9e433_c2f5_434e_8f06_56813a38e287.slice/crio-219124bb90baf3a8a4696911a5e1ae9529e46e1fd7fedf72a030917233c31402 WatchSource:0}: Error finding container 219124bb90baf3a8a4696911a5e1ae9529e46e1fd7fedf72a030917233c31402: Status 404 returned error can't find the container with id 219124bb90baf3a8a4696911a5e1ae9529e46e1fd7fedf72a030917233c31402
Nov 23 14:59:32 crc kubenswrapper[5050]: I1123 14:59:32.809904 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-734b-account-create-8vfn6"]
Nov 23 14:59:32 crc kubenswrapper[5050]: W1123 14:59:32.817049 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddc6bbfa7_abb2_4676_814f_7a65c33b2e61.slice/crio-534e5c6de673b565dd597f5b12c18a7f847c670e0c89f9840537b2ee95f3d616 WatchSource:0}: Error finding container 534e5c6de673b565dd597f5b12c18a7f847c670e0c89f9840537b2ee95f3d616: Status 404 returned error can't find the container with id 534e5c6de673b565dd597f5b12c18a7f847c670e0c89f9840537b2ee95f3d616
Nov 23 14:59:32 crc kubenswrapper[5050]: I1123 14:59:32.927460 5050 generic.go:334] "Generic (PLEG): container finished" podID="d52e824b-4a80-4af9-961a-42652860b157" containerID="4406dcd09b9388169d2c1dc5682c54223678311807c5dd014e1a115a16dcbddc" exitCode=0
Nov 23 14:59:32 crc kubenswrapper[5050]: I1123 14:59:32.927545 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-c056-account-create-tc55d" event={"ID":"d52e824b-4a80-4af9-961a-42652860b157","Type":"ContainerDied","Data":"4406dcd09b9388169d2c1dc5682c54223678311807c5dd014e1a115a16dcbddc"}
Nov 23 14:59:32 crc kubenswrapper[5050]: I1123 14:59:32.927575 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-c056-account-create-tc55d" event={"ID":"d52e824b-4a80-4af9-961a-42652860b157","Type":"ContainerStarted","Data":"4d8a04e2ebef03b3ce152caf9c25c6fccd68ccbe9c3bf65f1e895dfcdd43ac92"}
Nov 23 14:59:32 crc kubenswrapper[5050]: I1123 14:59:32.929784 5050 generic.go:334] "Generic (PLEG): container finished" podID="e6916e1b-5874-4095-8306-0af203ba7f7d" containerID="841882bd34efc6f91dc9d992f938beffc9123274287f5c4036ba44df4cda2b87" exitCode=0
Nov 23 14:59:32 crc kubenswrapper[5050]: I1123 14:59:32.929826 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-ktwq8" event={"ID":"e6916e1b-5874-4095-8306-0af203ba7f7d","Type":"ContainerDied","Data":"841882bd34efc6f91dc9d992f938beffc9123274287f5c4036ba44df4cda2b87"}
Nov 23 14:59:32 crc kubenswrapper[5050]: I1123 14:59:32.929842 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-ktwq8" event={"ID":"e6916e1b-5874-4095-8306-0af203ba7f7d","Type":"ContainerStarted","Data":"740fa8dd5579578a92cd81f2fe56ab2d938eb408e6d9dcb8f84cd7573655fdc1"}
Nov 23 14:59:32 crc kubenswrapper[5050]: I1123 14:59:32.934428 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-bb96-account-create-mhdc2" event={"ID":"4bf9e433-c2f5-434e-8f06-56813a38e287","Type":"ContainerStarted","Data":"ab68ce1a21506eded71804ea43eab7e0d792aeeb1a795d53cbc5840aa073d4e2"}
Nov 23 14:59:32 crc kubenswrapper[5050]: I1123 14:59:32.934477 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-bb96-account-create-mhdc2" event={"ID":"4bf9e433-c2f5-434e-8f06-56813a38e287","Type":"ContainerStarted","Data":"219124bb90baf3a8a4696911a5e1ae9529e46e1fd7fedf72a030917233c31402"}
Nov 23 14:59:32 crc kubenswrapper[5050]: I1123 14:59:32.936215 5050 generic.go:334] "Generic (PLEG): container finished" podID="241ba8d0-d451-4df3-888a-ff7df9129456" containerID="17238151ee0c60557cc7e297ae575f8dcc2f0ca8d6df997fe8811099e7fe93ce" exitCode=0
Nov 23 14:59:32 crc kubenswrapper[5050]: I1123 14:59:32.936277 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-d6xrh" event={"ID":"241ba8d0-d451-4df3-888a-ff7df9129456","Type":"ContainerDied","Data":"17238151ee0c60557cc7e297ae575f8dcc2f0ca8d6df997fe8811099e7fe93ce"}
Nov 23 14:59:32 crc kubenswrapper[5050]: I1123 14:59:32.936304 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-d6xrh" event={"ID":"241ba8d0-d451-4df3-888a-ff7df9129456","Type":"ContainerStarted","Data":"b2fe88f00d368039d8ae2b34805da5f08acd04b5657e951bef98224f807b9f1a"}
Nov 23 14:59:32 crc kubenswrapper[5050]: I1123 14:59:32.937726 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-734b-account-create-8vfn6" event={"ID":"dc6bbfa7-abb2-4676-814f-7a65c33b2e61","Type":"ContainerStarted","Data":"534e5c6de673b565dd597f5b12c18a7f847c670e0c89f9840537b2ee95f3d616"}
Nov 23 14:59:32 crc kubenswrapper[5050]: I1123 14:59:32.941432 5050 generic.go:334] "Generic (PLEG): container finished" podID="27cbd9dd-3372-49ca-9321-421a05e76922" containerID="e729bbcb8d7b322a25efb13eeec7403f0b1d628f261b1bbd8be04eea5dc29292" exitCode=0
Nov 23 14:59:32 crc kubenswrapper[5050]: I1123 14:59:32.941496 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-875vf" event={"ID":"27cbd9dd-3372-49ca-9321-421a05e76922","Type":"ContainerDied","Data":"e729bbcb8d7b322a25efb13eeec7403f0b1d628f261b1bbd8be04eea5dc29292"}
Nov 23 14:59:32 crc kubenswrapper[5050]: I1123 14:59:32.941522 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-875vf" event={"ID":"27cbd9dd-3372-49ca-9321-421a05e76922","Type":"ContainerStarted","Data":"5cf836bff6a1fe72403e4324430077649e73d060dc0bfa1ccf4dcc015d5b1cc2"}
Nov 23 14:59:32 crc kubenswrapper[5050]: I1123 14:59:32.970426 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-bb96-account-create-mhdc2" podStartSLOduration=1.970401502 podStartE2EDuration="1.970401502s" podCreationTimestamp="2025-11-23 14:59:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 14:59:32.956915079 +0000 UTC m=+1068.123911594" watchObservedRunningTime="2025-11-23 14:59:32.970401502 +0000 UTC m=+1068.137397987"
Nov 23 14:59:33 crc kubenswrapper[5050]: I1123 14:59:33.589776 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-b8fbc5445-98kts"
Nov 23 14:59:33 crc kubenswrapper[5050]: I1123 14:59:33.698882 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-8554648995-n7gcr"]
Nov 23 14:59:33 crc kubenswrapper[5050]: I1123 14:59:33.699753 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-8554648995-n7gcr" podUID="f115d163-3bbf-44ac-8c45-6f8753667208" containerName="dnsmasq-dns" containerID="cri-o://adff62585b8a79ee0446188a12ee8521f072ef528a14ce65e4dc3e9431fe030a" gracePeriod=10
Nov 23 14:59:33 crc kubenswrapper[5050]: I1123 14:59:33.952011 5050 generic.go:334] "Generic (PLEG): container finished" podID="dc6bbfa7-abb2-4676-814f-7a65c33b2e61" containerID="b844cbd1a4937573b0a13e755f35bcb710017da1977f2f16470e0f9921d64853" exitCode=0
Nov 23 14:59:33 crc kubenswrapper[5050]: I1123 14:59:33.952074 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-734b-account-create-8vfn6" event={"ID":"dc6bbfa7-abb2-4676-814f-7a65c33b2e61","Type":"ContainerDied","Data":"b844cbd1a4937573b0a13e755f35bcb710017da1977f2f16470e0f9921d64853"}
Nov 23 14:59:33 crc kubenswrapper[5050]: I1123 14:59:33.954634 5050 generic.go:334] "Generic (PLEG): container finished" podID="4bf9e433-c2f5-434e-8f06-56813a38e287" containerID="ab68ce1a21506eded71804ea43eab7e0d792aeeb1a795d53cbc5840aa073d4e2" exitCode=0
Nov 23 14:59:33 crc kubenswrapper[5050]: I1123 14:59:33.954749 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-bb96-account-create-mhdc2" event={"ID":"4bf9e433-c2f5-434e-8f06-56813a38e287","Type":"ContainerDied","Data":"ab68ce1a21506eded71804ea43eab7e0d792aeeb1a795d53cbc5840aa073d4e2"}
Nov 23 14:59:33 crc kubenswrapper[5050]: I1123 14:59:33.961791 5050 generic.go:334] "Generic (PLEG): container finished" podID="f115d163-3bbf-44ac-8c45-6f8753667208" containerID="adff62585b8a79ee0446188a12ee8521f072ef528a14ce65e4dc3e9431fe030a" exitCode=0
Nov 23 14:59:33 crc kubenswrapper[5050]: I1123 14:59:33.962050 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8554648995-n7gcr" event={"ID":"f115d163-3bbf-44ac-8c45-6f8753667208","Type":"ContainerDied","Data":"adff62585b8a79ee0446188a12ee8521f072ef528a14ce65e4dc3e9431fe030a"}
Nov 23 14:59:34 crc kubenswrapper[5050]: I1123 14:59:34.219045 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8554648995-n7gcr"
Nov 23 14:59:34 crc kubenswrapper[5050]: I1123 14:59:34.376239 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f115d163-3bbf-44ac-8c45-6f8753667208-config\") pod \"f115d163-3bbf-44ac-8c45-6f8753667208\" (UID: \"f115d163-3bbf-44ac-8c45-6f8753667208\") "
Nov 23 14:59:34 crc kubenswrapper[5050]: I1123 14:59:34.376351 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f115d163-3bbf-44ac-8c45-6f8753667208-dns-svc\") pod \"f115d163-3bbf-44ac-8c45-6f8753667208\" (UID: \"f115d163-3bbf-44ac-8c45-6f8753667208\") "
Nov 23 14:59:34 crc kubenswrapper[5050]: I1123 14:59:34.376373 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f115d163-3bbf-44ac-8c45-6f8753667208-ovsdbserver-nb\") pod \"f115d163-3bbf-44ac-8c45-6f8753667208\" (UID: \"f115d163-3bbf-44ac-8c45-6f8753667208\") "
Nov 23 14:59:34 crc kubenswrapper[5050]: I1123 14:59:34.376534 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-67vlg\" (UniqueName: \"kubernetes.io/projected/f115d163-3bbf-44ac-8c45-6f8753667208-kube-api-access-67vlg\") pod \"f115d163-3bbf-44ac-8c45-6f8753667208\" (UID: \"f115d163-3bbf-44ac-8c45-6f8753667208\") "
Nov 23 14:59:34 crc kubenswrapper[5050]: I1123 14:59:34.376670 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f115d163-3bbf-44ac-8c45-6f8753667208-ovsdbserver-sb\") pod \"f115d163-3bbf-44ac-8c45-6f8753667208\" (UID: \"f115d163-3bbf-44ac-8c45-6f8753667208\") "
Nov 23 14:59:34 crc kubenswrapper[5050]: I1123 14:59:34.401387 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f115d163-3bbf-44ac-8c45-6f8753667208-kube-api-access-67vlg" (OuterVolumeSpecName: "kube-api-access-67vlg") pod "f115d163-3bbf-44ac-8c45-6f8753667208" (UID: "f115d163-3bbf-44ac-8c45-6f8753667208"). InnerVolumeSpecName "kube-api-access-67vlg". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 23 14:59:34 crc kubenswrapper[5050]: I1123 14:59:34.478524 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-67vlg\" (UniqueName: \"kubernetes.io/projected/f115d163-3bbf-44ac-8c45-6f8753667208-kube-api-access-67vlg\") on node \"crc\" DevicePath \"\""
Nov 23 14:59:34 crc kubenswrapper[5050]: I1123 14:59:34.491869 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f115d163-3bbf-44ac-8c45-6f8753667208-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "f115d163-3bbf-44ac-8c45-6f8753667208" (UID: "f115d163-3bbf-44ac-8c45-6f8753667208"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 23 14:59:34 crc kubenswrapper[5050]: I1123 14:59:34.510861 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f115d163-3bbf-44ac-8c45-6f8753667208-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "f115d163-3bbf-44ac-8c45-6f8753667208" (UID: "f115d163-3bbf-44ac-8c45-6f8753667208"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 23 14:59:34 crc kubenswrapper[5050]: I1123 14:59:34.527268 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f115d163-3bbf-44ac-8c45-6f8753667208-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "f115d163-3bbf-44ac-8c45-6f8753667208" (UID: "f115d163-3bbf-44ac-8c45-6f8753667208"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 23 14:59:34 crc kubenswrapper[5050]: I1123 14:59:34.540945 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f115d163-3bbf-44ac-8c45-6f8753667208-config" (OuterVolumeSpecName: "config") pod "f115d163-3bbf-44ac-8c45-6f8753667208" (UID: "f115d163-3bbf-44ac-8c45-6f8753667208"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 23 14:59:34 crc kubenswrapper[5050]: I1123 14:59:34.555092 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-d6xrh"
Nov 23 14:59:34 crc kubenswrapper[5050]: I1123 14:59:34.562341 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-875vf"
Nov 23 14:59:34 crc kubenswrapper[5050]: I1123 14:59:34.574257 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-ktwq8"
Nov 23 14:59:34 crc kubenswrapper[5050]: I1123 14:59:34.580135 5050 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f115d163-3bbf-44ac-8c45-6f8753667208-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Nov 23 14:59:34 crc kubenswrapper[5050]: I1123 14:59:34.580167 5050 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f115d163-3bbf-44ac-8c45-6f8753667208-config\") on node \"crc\" DevicePath \"\""
Nov 23 14:59:34 crc kubenswrapper[5050]: I1123 14:59:34.580178 5050 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f115d163-3bbf-44ac-8c45-6f8753667208-dns-svc\") on node \"crc\" DevicePath \"\""
Nov 23 14:59:34 crc kubenswrapper[5050]: I1123 14:59:34.580186 5050 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f115d163-3bbf-44ac-8c45-6f8753667208-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Nov 23 14:59:34 crc kubenswrapper[5050]: I1123 14:59:34.586616 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-c056-account-create-tc55d"
Nov 23 14:59:34 crc kubenswrapper[5050]: I1123 14:59:34.681917 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5qrdd\" (UniqueName: \"kubernetes.io/projected/d52e824b-4a80-4af9-961a-42652860b157-kube-api-access-5qrdd\") pod \"d52e824b-4a80-4af9-961a-42652860b157\" (UID: \"d52e824b-4a80-4af9-961a-42652860b157\") "
Nov 23 14:59:34 crc kubenswrapper[5050]: I1123 14:59:34.682046 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d52e824b-4a80-4af9-961a-42652860b157-operator-scripts\") pod \"d52e824b-4a80-4af9-961a-42652860b157\" (UID: \"d52e824b-4a80-4af9-961a-42652860b157\") "
Nov 23 14:59:34 crc kubenswrapper[5050]: I1123 14:59:34.682108 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/241ba8d0-d451-4df3-888a-ff7df9129456-operator-scripts\") pod \"241ba8d0-d451-4df3-888a-ff7df9129456\" (UID: \"241ba8d0-d451-4df3-888a-ff7df9129456\") "
Nov 23 14:59:34 crc kubenswrapper[5050]: I1123 14:59:34.682199 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sbnpc\" (UniqueName: \"kubernetes.io/projected/27cbd9dd-3372-49ca-9321-421a05e76922-kube-api-access-sbnpc\") pod \"27cbd9dd-3372-49ca-9321-421a05e76922\" (UID: \"27cbd9dd-3372-49ca-9321-421a05e76922\") "
Nov 23 14:59:34 crc kubenswrapper[5050]: I1123 14:59:34.682240 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lzb9l\" (UniqueName: \"kubernetes.io/projected/e6916e1b-5874-4095-8306-0af203ba7f7d-kube-api-access-lzb9l\") pod \"e6916e1b-5874-4095-8306-0af203ba7f7d\" (UID: \"e6916e1b-5874-4095-8306-0af203ba7f7d\") "
Nov 23 14:59:34 crc kubenswrapper[5050]: I1123 14:59:34.682277 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m5ztp\" (UniqueName: \"kubernetes.io/projected/241ba8d0-d451-4df3-888a-ff7df9129456-kube-api-access-m5ztp\") pod \"241ba8d0-d451-4df3-888a-ff7df9129456\" (UID: \"241ba8d0-d451-4df3-888a-ff7df9129456\") "
Nov 23 14:59:34 crc kubenswrapper[5050]: I1123 14:59:34.682309 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e6916e1b-5874-4095-8306-0af203ba7f7d-operator-scripts\") pod \"e6916e1b-5874-4095-8306-0af203ba7f7d\" (UID: \"e6916e1b-5874-4095-8306-0af203ba7f7d\") "
Nov 23 14:59:34 crc kubenswrapper[5050]: I1123 14:59:34.682341 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/27cbd9dd-3372-49ca-9321-421a05e76922-operator-scripts\") pod \"27cbd9dd-3372-49ca-9321-421a05e76922\" (UID: \"27cbd9dd-3372-49ca-9321-421a05e76922\") "
Nov 23 14:59:34 crc kubenswrapper[5050]: I1123 14:59:34.682992 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d52e824b-4a80-4af9-961a-42652860b157-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "d52e824b-4a80-4af9-961a-42652860b157" (UID: "d52e824b-4a80-4af9-961a-42652860b157"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 23 14:59:34 crc kubenswrapper[5050]: I1123 14:59:34.683018 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e6916e1b-5874-4095-8306-0af203ba7f7d-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "e6916e1b-5874-4095-8306-0af203ba7f7d" (UID: "e6916e1b-5874-4095-8306-0af203ba7f7d"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 23 14:59:34 crc kubenswrapper[5050]: I1123 14:59:34.683383 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/27cbd9dd-3372-49ca-9321-421a05e76922-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "27cbd9dd-3372-49ca-9321-421a05e76922" (UID: "27cbd9dd-3372-49ca-9321-421a05e76922"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 23 14:59:34 crc kubenswrapper[5050]: I1123 14:59:34.683409 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/241ba8d0-d451-4df3-888a-ff7df9129456-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "241ba8d0-d451-4df3-888a-ff7df9129456" (UID: "241ba8d0-d451-4df3-888a-ff7df9129456"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 23 14:59:34 crc kubenswrapper[5050]: I1123 14:59:34.685374 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d52e824b-4a80-4af9-961a-42652860b157-kube-api-access-5qrdd" (OuterVolumeSpecName: "kube-api-access-5qrdd") pod "d52e824b-4a80-4af9-961a-42652860b157" (UID: "d52e824b-4a80-4af9-961a-42652860b157"). InnerVolumeSpecName "kube-api-access-5qrdd". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 23 14:59:34 crc kubenswrapper[5050]: I1123 14:59:34.686885 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e6916e1b-5874-4095-8306-0af203ba7f7d-kube-api-access-lzb9l" (OuterVolumeSpecName: "kube-api-access-lzb9l") pod "e6916e1b-5874-4095-8306-0af203ba7f7d" (UID: "e6916e1b-5874-4095-8306-0af203ba7f7d"). InnerVolumeSpecName "kube-api-access-lzb9l". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 23 14:59:34 crc kubenswrapper[5050]: I1123 14:59:34.687954 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/27cbd9dd-3372-49ca-9321-421a05e76922-kube-api-access-sbnpc" (OuterVolumeSpecName: "kube-api-access-sbnpc") pod "27cbd9dd-3372-49ca-9321-421a05e76922" (UID: "27cbd9dd-3372-49ca-9321-421a05e76922"). InnerVolumeSpecName "kube-api-access-sbnpc". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 23 14:59:34 crc kubenswrapper[5050]: I1123 14:59:34.688045 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/241ba8d0-d451-4df3-888a-ff7df9129456-kube-api-access-m5ztp" (OuterVolumeSpecName: "kube-api-access-m5ztp") pod "241ba8d0-d451-4df3-888a-ff7df9129456" (UID: "241ba8d0-d451-4df3-888a-ff7df9129456"). InnerVolumeSpecName "kube-api-access-m5ztp". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 23 14:59:34 crc kubenswrapper[5050]: I1123 14:59:34.784914 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lzb9l\" (UniqueName: \"kubernetes.io/projected/e6916e1b-5874-4095-8306-0af203ba7f7d-kube-api-access-lzb9l\") on node \"crc\" DevicePath \"\""
Nov 23 14:59:34 crc kubenswrapper[5050]: I1123 14:59:34.784965 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m5ztp\" (UniqueName: \"kubernetes.io/projected/241ba8d0-d451-4df3-888a-ff7df9129456-kube-api-access-m5ztp\") on node \"crc\" DevicePath \"\""
Nov 23 14:59:34 crc kubenswrapper[5050]: I1123 14:59:34.784981 5050 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e6916e1b-5874-4095-8306-0af203ba7f7d-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 23 14:59:34 crc kubenswrapper[5050]: I1123 14:59:34.784995 5050 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/27cbd9dd-3372-49ca-9321-421a05e76922-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 23 14:59:34 crc kubenswrapper[5050]: I1123 14:59:34.785005 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5qrdd\" (UniqueName: \"kubernetes.io/projected/d52e824b-4a80-4af9-961a-42652860b157-kube-api-access-5qrdd\") on node \"crc\" DevicePath \"\""
Nov 23 14:59:34 crc kubenswrapper[5050]: I1123 14:59:34.785016 5050 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d52e824b-4a80-4af9-961a-42652860b157-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 23 14:59:34 crc kubenswrapper[5050]: I1123 14:59:34.785025 5050 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/241ba8d0-d451-4df3-888a-ff7df9129456-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 23 14:59:34 crc kubenswrapper[5050]: I1123 14:59:34.785036 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sbnpc\" (UniqueName: \"kubernetes.io/projected/27cbd9dd-3372-49ca-9321-421a05e76922-kube-api-access-sbnpc\") on node \"crc\" DevicePath \"\""
Nov 23 14:59:34 crc kubenswrapper[5050]: I1123 14:59:34.978149 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8554648995-n7gcr" event={"ID":"f115d163-3bbf-44ac-8c45-6f8753667208","Type":"ContainerDied","Data":"feb58f9c6f53de96a8f280741d4fecc1371e663929d4e101b7a292f83ae4da71"}
Nov 23 14:59:34 crc kubenswrapper[5050]: I1123 14:59:34.978243 5050 scope.go:117] "RemoveContainer" containerID="adff62585b8a79ee0446188a12ee8521f072ef528a14ce65e4dc3e9431fe030a"
Nov 23 14:59:34 crc kubenswrapper[5050]: I1123 14:59:34.978526 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8554648995-n7gcr"
Nov 23 14:59:34 crc kubenswrapper[5050]: I1123 14:59:34.986169 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-d6xrh" event={"ID":"241ba8d0-d451-4df3-888a-ff7df9129456","Type":"ContainerDied","Data":"b2fe88f00d368039d8ae2b34805da5f08acd04b5657e951bef98224f807b9f1a"}
Nov 23 14:59:34 crc kubenswrapper[5050]: I1123 14:59:34.986231 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b2fe88f00d368039d8ae2b34805da5f08acd04b5657e951bef98224f807b9f1a"
Nov 23 14:59:34 crc kubenswrapper[5050]: I1123 14:59:34.986277 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-d6xrh"
Nov 23 14:59:34 crc kubenswrapper[5050]: I1123 14:59:34.993853 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-875vf" event={"ID":"27cbd9dd-3372-49ca-9321-421a05e76922","Type":"ContainerDied","Data":"5cf836bff6a1fe72403e4324430077649e73d060dc0bfa1ccf4dcc015d5b1cc2"}
Nov 23 14:59:34 crc kubenswrapper[5050]: I1123 14:59:34.993895 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5cf836bff6a1fe72403e4324430077649e73d060dc0bfa1ccf4dcc015d5b1cc2"
Nov 23 14:59:34 crc kubenswrapper[5050]: I1123 14:59:34.993900 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-875vf"
Nov 23 14:59:34 crc kubenswrapper[5050]: I1123 14:59:34.996418 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-c056-account-create-tc55d"
Nov 23 14:59:34 crc kubenswrapper[5050]: I1123 14:59:34.996469 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-c056-account-create-tc55d" event={"ID":"d52e824b-4a80-4af9-961a-42652860b157","Type":"ContainerDied","Data":"4d8a04e2ebef03b3ce152caf9c25c6fccd68ccbe9c3bf65f1e895dfcdd43ac92"}
Nov 23 14:59:34 crc kubenswrapper[5050]: I1123 14:59:34.996533 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4d8a04e2ebef03b3ce152caf9c25c6fccd68ccbe9c3bf65f1e895dfcdd43ac92"
Nov 23 14:59:35 crc kubenswrapper[5050]: I1123 14:59:35.003285 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-ktwq8" event={"ID":"e6916e1b-5874-4095-8306-0af203ba7f7d","Type":"ContainerDied","Data":"740fa8dd5579578a92cd81f2fe56ab2d938eb408e6d9dcb8f84cd7573655fdc1"}
Nov 23 14:59:35 crc kubenswrapper[5050]: I1123 14:59:35.003600 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="740fa8dd5579578a92cd81f2fe56ab2d938eb408e6d9dcb8f84cd7573655fdc1"
Nov 23 14:59:35 crc kubenswrapper[5050]: I1123 14:59:35.003693 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-ktwq8"
Nov 23 14:59:35 crc kubenswrapper[5050]: I1123 14:59:35.021860 5050 scope.go:117] "RemoveContainer" containerID="a3dccc85a62c2f329e73f72bb946446b9d1b93dc280fbc7f88851bfcf7bacf42"
Nov 23 14:59:35 crc kubenswrapper[5050]: I1123 14:59:35.042670 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-8554648995-n7gcr"]
Nov 23 14:59:35 crc kubenswrapper[5050]: I1123 14:59:35.052940 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-8554648995-n7gcr"]
Nov 23 14:59:35 crc kubenswrapper[5050]: I1123 14:59:35.346012 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-bb96-account-create-mhdc2"
Nov 23 14:59:35 crc kubenswrapper[5050]: I1123 14:59:35.397300 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qw9gn\" (UniqueName: \"kubernetes.io/projected/4bf9e433-c2f5-434e-8f06-56813a38e287-kube-api-access-qw9gn\") pod \"4bf9e433-c2f5-434e-8f06-56813a38e287\" (UID: \"4bf9e433-c2f5-434e-8f06-56813a38e287\") "
Nov 23 14:59:35 crc kubenswrapper[5050]: I1123 14:59:35.397582 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4bf9e433-c2f5-434e-8f06-56813a38e287-operator-scripts\") pod \"4bf9e433-c2f5-434e-8f06-56813a38e287\" (UID: \"4bf9e433-c2f5-434e-8f06-56813a38e287\") "
Nov 23 14:59:35 crc kubenswrapper[5050]: I1123 14:59:35.398359 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bf9e433-c2f5-434e-8f06-56813a38e287-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "4bf9e433-c2f5-434e-8f06-56813a38e287" (UID: "4bf9e433-c2f5-434e-8f06-56813a38e287"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 23 14:59:35 crc kubenswrapper[5050]: I1123 14:59:35.403166 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4bf9e433-c2f5-434e-8f06-56813a38e287-kube-api-access-qw9gn" (OuterVolumeSpecName: "kube-api-access-qw9gn") pod "4bf9e433-c2f5-434e-8f06-56813a38e287" (UID: "4bf9e433-c2f5-434e-8f06-56813a38e287"). InnerVolumeSpecName "kube-api-access-qw9gn". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 23 14:59:35 crc kubenswrapper[5050]: I1123 14:59:35.474419 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-734b-account-create-8vfn6"
Nov 23 14:59:35 crc kubenswrapper[5050]: I1123 14:59:35.501930 5050 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4bf9e433-c2f5-434e-8f06-56813a38e287-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 23 14:59:35 crc kubenswrapper[5050]: I1123 14:59:35.501984 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qw9gn\" (UniqueName: \"kubernetes.io/projected/4bf9e433-c2f5-434e-8f06-56813a38e287-kube-api-access-qw9gn\") on node \"crc\" DevicePath \"\""
Nov 23 14:59:35 crc kubenswrapper[5050]: I1123 14:59:35.564175 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f115d163-3bbf-44ac-8c45-6f8753667208" path="/var/lib/kubelet/pods/f115d163-3bbf-44ac-8c45-6f8753667208/volumes"
Nov 23 14:59:35 crc kubenswrapper[5050]: I1123 14:59:35.603423 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/dc6bbfa7-abb2-4676-814f-7a65c33b2e61-operator-scripts\") pod \"dc6bbfa7-abb2-4676-814f-7a65c33b2e61\" (UID: \"dc6bbfa7-abb2-4676-814f-7a65c33b2e61\") "
Nov 23 14:59:35 crc kubenswrapper[5050]: I1123 14:59:35.603531 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cr8dv\" (UniqueName: \"kubernetes.io/projected/dc6bbfa7-abb2-4676-814f-7a65c33b2e61-kube-api-access-cr8dv\") pod \"dc6bbfa7-abb2-4676-814f-7a65c33b2e61\" (UID: \"dc6bbfa7-abb2-4676-814f-7a65c33b2e61\") "
Nov 23 14:59:35 crc kubenswrapper[5050]: I1123 14:59:35.604108 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dc6bbfa7-abb2-4676-814f-7a65c33b2e61-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "dc6bbfa7-abb2-4676-814f-7a65c33b2e61" (UID: "dc6bbfa7-abb2-4676-814f-7a65c33b2e61"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 23 14:59:35 crc kubenswrapper[5050]: I1123 14:59:35.607400 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dc6bbfa7-abb2-4676-814f-7a65c33b2e61-kube-api-access-cr8dv" (OuterVolumeSpecName: "kube-api-access-cr8dv") pod "dc6bbfa7-abb2-4676-814f-7a65c33b2e61" (UID: "dc6bbfa7-abb2-4676-814f-7a65c33b2e61"). InnerVolumeSpecName "kube-api-access-cr8dv". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 23 14:59:35 crc kubenswrapper[5050]: I1123 14:59:35.705834 5050 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/dc6bbfa7-abb2-4676-814f-7a65c33b2e61-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 23 14:59:35 crc kubenswrapper[5050]: I1123 14:59:35.705880 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cr8dv\" (UniqueName: \"kubernetes.io/projected/dc6bbfa7-abb2-4676-814f-7a65c33b2e61-kube-api-access-cr8dv\") on node \"crc\" DevicePath \"\""
Nov 23 14:59:36 crc kubenswrapper[5050]: I1123 14:59:36.013856 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-734b-account-create-8vfn6" event={"ID":"dc6bbfa7-abb2-4676-814f-7a65c33b2e61","Type":"ContainerDied","Data":"534e5c6de673b565dd597f5b12c18a7f847c670e0c89f9840537b2ee95f3d616"}
Nov 23 14:59:36 crc kubenswrapper[5050]: I1123 14:59:36.013900 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="534e5c6de673b565dd597f5b12c18a7f847c670e0c89f9840537b2ee95f3d616"
Nov 23 14:59:36 crc kubenswrapper[5050]: I1123 14:59:36.013951 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-734b-account-create-8vfn6"
Nov 23 14:59:36 crc kubenswrapper[5050]: I1123 14:59:36.022322 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-bb96-account-create-mhdc2" event={"ID":"4bf9e433-c2f5-434e-8f06-56813a38e287","Type":"ContainerDied","Data":"219124bb90baf3a8a4696911a5e1ae9529e46e1fd7fedf72a030917233c31402"}
Nov 23 14:59:36 crc kubenswrapper[5050]: I1123 14:59:36.022430 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-bb96-account-create-mhdc2"
Nov 23 14:59:36 crc kubenswrapper[5050]: I1123 14:59:36.022433 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="219124bb90baf3a8a4696911a5e1ae9529e46e1fd7fedf72a030917233c31402"
Nov 23 14:59:37 crc kubenswrapper[5050]: I1123 14:59:37.008105 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-sync-lzdd7"]
Nov 23 14:59:37 crc kubenswrapper[5050]: E1123 14:59:37.008502 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dc6bbfa7-abb2-4676-814f-7a65c33b2e61" containerName="mariadb-account-create"
Nov 23 14:59:37 crc kubenswrapper[5050]: I1123 14:59:37.008518 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="dc6bbfa7-abb2-4676-814f-7a65c33b2e61" containerName="mariadb-account-create"
Nov 23 14:59:37 crc kubenswrapper[5050]: E1123 14:59:37.008528 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="241ba8d0-d451-4df3-888a-ff7df9129456" containerName="mariadb-database-create"
Nov 23 14:59:37 crc kubenswrapper[5050]: I1123 14:59:37.008533 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="241ba8d0-d451-4df3-888a-ff7df9129456" containerName="mariadb-database-create"
Nov 23 14:59:37 crc kubenswrapper[5050]: E1123 14:59:37.008549 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="27cbd9dd-3372-49ca-9321-421a05e76922" containerName="mariadb-database-create"
Nov 23 14:59:37 crc kubenswrapper[5050]: I1123 14:59:37.008555 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="27cbd9dd-3372-49ca-9321-421a05e76922" containerName="mariadb-database-create"
Nov 23 14:59:37 crc kubenswrapper[5050]: E1123 14:59:37.008566 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4bf9e433-c2f5-434e-8f06-56813a38e287" containerName="mariadb-account-create"
Nov 23 14:59:37 crc kubenswrapper[5050]: I1123 14:59:37.008573 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="4bf9e433-c2f5-434e-8f06-56813a38e287" containerName="mariadb-account-create"
Nov 23 14:59:37 crc kubenswrapper[5050]: E1123 14:59:37.008584 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e6916e1b-5874-4095-8306-0af203ba7f7d" containerName="mariadb-database-create"
Nov 23 14:59:37 crc kubenswrapper[5050]: I1123 14:59:37.008591 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="e6916e1b-5874-4095-8306-0af203ba7f7d" containerName="mariadb-database-create"
Nov 23 14:59:37 crc kubenswrapper[5050]: E1123 14:59:37.008608 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f115d163-3bbf-44ac-8c45-6f8753667208" containerName="dnsmasq-dns"
Nov 23 14:59:37 crc kubenswrapper[5050]: I1123 14:59:37.008616 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="f115d163-3bbf-44ac-8c45-6f8753667208" containerName="dnsmasq-dns"
Nov 23 14:59:37 crc kubenswrapper[5050]: E1123 14:59:37.008631 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d52e824b-4a80-4af9-961a-42652860b157" containerName="mariadb-account-create"
Nov 23 14:59:37 crc kubenswrapper[5050]: I1123 14:59:37.008638 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="d52e824b-4a80-4af9-961a-42652860b157" containerName="mariadb-account-create"
Nov 23 14:59:37 crc kubenswrapper[5050]: E1123 14:59:37.008652 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f115d163-3bbf-44ac-8c45-6f8753667208" containerName="init"
Nov 23 14:59:37 crc kubenswrapper[5050]: I1123 14:59:37.008658 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="f115d163-3bbf-44ac-8c45-6f8753667208" containerName="init"
Nov 23 14:59:37 crc kubenswrapper[5050]: I1123 14:59:37.008825 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="4bf9e433-c2f5-434e-8f06-56813a38e287" containerName="mariadb-account-create"
Nov 23 14:59:37 crc kubenswrapper[5050]: I1123 14:59:37.008855 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="dc6bbfa7-abb2-4676-814f-7a65c33b2e61" containerName="mariadb-account-create"
Nov 23 14:59:37 crc kubenswrapper[5050]: I1123 14:59:37.008870 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="e6916e1b-5874-4095-8306-0af203ba7f7d" containerName="mariadb-database-create"
Nov 23 14:59:37 crc kubenswrapper[5050]: I1123 14:59:37.008912 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="27cbd9dd-3372-49ca-9321-421a05e76922" containerName="mariadb-database-create"
Nov 23 14:59:37 crc kubenswrapper[5050]: I1123 14:59:37.008923 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="f115d163-3bbf-44ac-8c45-6f8753667208" containerName="dnsmasq-dns"
Nov 23 14:59:37 crc kubenswrapper[5050]: I1123 14:59:37.009027 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="241ba8d0-d451-4df3-888a-ff7df9129456" containerName="mariadb-database-create"
Nov 23 14:59:37 crc kubenswrapper[5050]: I1123 14:59:37.009044 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="d52e824b-4a80-4af9-961a-42652860b157" containerName="mariadb-account-create"
Nov 23 14:59:37 crc kubenswrapper[5050]: I1123 14:59:37.009826 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-lzdd7"
Nov 23 14:59:37 crc kubenswrapper[5050]: I1123 14:59:37.014937 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-g8gzt"
Nov 23 14:59:37 crc kubenswrapper[5050]: I1123 14:59:37.025960 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-config-data"
Nov 23 14:59:37 crc kubenswrapper[5050]: I1123 14:59:37.074671 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-lzdd7"]
Nov 23 14:59:37 crc kubenswrapper[5050]: I1123 14:59:37.132766 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/b347d677-8b98-4402-b7b8-88fe0b080ac0-db-sync-config-data\") pod \"glance-db-sync-lzdd7\" (UID: \"b347d677-8b98-4402-b7b8-88fe0b080ac0\") " pod="openstack/glance-db-sync-lzdd7"
Nov 23 14:59:37 crc kubenswrapper[5050]: I1123 14:59:37.133135 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b347d677-8b98-4402-b7b8-88fe0b080ac0-config-data\") pod \"glance-db-sync-lzdd7\" (UID: \"b347d677-8b98-4402-b7b8-88fe0b080ac0\") " pod="openstack/glance-db-sync-lzdd7"
Nov 23 14:59:37 crc kubenswrapper[5050]: I1123 14:59:37.133238 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2kwws\" (UniqueName: \"kubernetes.io/projected/b347d677-8b98-4402-b7b8-88fe0b080ac0-kube-api-access-2kwws\") pod \"glance-db-sync-lzdd7\" (UID: \"b347d677-8b98-4402-b7b8-88fe0b080ac0\") " pod="openstack/glance-db-sync-lzdd7"
Nov 23 14:59:37 crc kubenswrapper[5050]: I1123 14:59:37.133691 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b347d677-8b98-4402-b7b8-88fe0b080ac0-combined-ca-bundle\") pod \"glance-db-sync-lzdd7\" (UID: \"b347d677-8b98-4402-b7b8-88fe0b080ac0\") " pod="openstack/glance-db-sync-lzdd7"
Nov 23 14:59:37 crc kubenswrapper[5050]: I1123 14:59:37.253176 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/b347d677-8b98-4402-b7b8-88fe0b080ac0-db-sync-config-data\") pod \"glance-db-sync-lzdd7\" (UID: \"b347d677-8b98-4402-b7b8-88fe0b080ac0\") " pod="openstack/glance-db-sync-lzdd7"
Nov 23 14:59:37 crc kubenswrapper[5050]: I1123 14:59:37.253628 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b347d677-8b98-4402-b7b8-88fe0b080ac0-config-data\") pod \"glance-db-sync-lzdd7\" (UID: \"b347d677-8b98-4402-b7b8-88fe0b080ac0\") " pod="openstack/glance-db-sync-lzdd7"
Nov 23 14:59:37 crc kubenswrapper[5050]: I1123 14:59:37.253680 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2kwws\" (UniqueName: \"kubernetes.io/projected/b347d677-8b98-4402-b7b8-88fe0b080ac0-kube-api-access-2kwws\") pod \"glance-db-sync-lzdd7\" (UID: \"b347d677-8b98-4402-b7b8-88fe0b080ac0\") " pod="openstack/glance-db-sync-lzdd7"
Nov 23 14:59:37 crc kubenswrapper[5050]: I1123 14:59:37.253742 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b347d677-8b98-4402-b7b8-88fe0b080ac0-combined-ca-bundle\") pod \"glance-db-sync-lzdd7\" (UID: \"b347d677-8b98-4402-b7b8-88fe0b080ac0\") " pod="openstack/glance-db-sync-lzdd7"
Nov 23 14:59:37 crc kubenswrapper[5050]: I1123 14:59:37.260585 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b347d677-8b98-4402-b7b8-88fe0b080ac0-config-data\") pod \"glance-db-sync-lzdd7\" (UID: \"b347d677-8b98-4402-b7b8-88fe0b080ac0\") " pod="openstack/glance-db-sync-lzdd7"
Nov 23 14:59:37 crc kubenswrapper[5050]: I1123 14:59:37.261211 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b347d677-8b98-4402-b7b8-88fe0b080ac0-combined-ca-bundle\") pod \"glance-db-sync-lzdd7\" (UID: \"b347d677-8b98-4402-b7b8-88fe0b080ac0\") " pod="openstack/glance-db-sync-lzdd7"
Nov 23 14:59:37 crc kubenswrapper[5050]: I1123 14:59:37.261382 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/b347d677-8b98-4402-b7b8-88fe0b080ac0-db-sync-config-data\") pod \"glance-db-sync-lzdd7\" (UID: \"b347d677-8b98-4402-b7b8-88fe0b080ac0\") " pod="openstack/glance-db-sync-lzdd7"
Nov 23 14:59:37 crc kubenswrapper[5050]: I1123 14:59:37.284503 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2kwws\" (UniqueName: \"kubernetes.io/projected/b347d677-8b98-4402-b7b8-88fe0b080ac0-kube-api-access-2kwws\") pod \"glance-db-sync-lzdd7\" (UID: \"b347d677-8b98-4402-b7b8-88fe0b080ac0\") " pod="openstack/glance-db-sync-lzdd7"
Nov 23 14:59:37 crc kubenswrapper[5050]: I1123 14:59:37.326879 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-lzdd7"
Nov 23 14:59:37 crc kubenswrapper[5050]: I1123 14:59:37.902379 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-lzdd7"]
Nov 23 14:59:37 crc kubenswrapper[5050]: W1123 14:59:37.940582 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb347d677_8b98_4402_b7b8_88fe0b080ac0.slice/crio-fd53c99d558cb19f8f041b810f15159ad9e243dfca1bda3d6b797e01a445079a WatchSource:0}: Error finding container fd53c99d558cb19f8f041b810f15159ad9e243dfca1bda3d6b797e01a445079a: Status 404 returned error can't find the container with id fd53c99d558cb19f8f041b810f15159ad9e243dfca1bda3d6b797e01a445079a
Nov 23 14:59:38 crc kubenswrapper[5050]: I1123 14:59:38.039378 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-lzdd7" event={"ID":"b347d677-8b98-4402-b7b8-88fe0b080ac0","Type":"ContainerStarted","Data":"fd53c99d558cb19f8f041b810f15159ad9e243dfca1bda3d6b797e01a445079a"}
Nov 23 14:59:38 crc kubenswrapper[5050]: I1123 14:59:38.044220 5050 generic.go:334] "Generic (PLEG): container finished" podID="f4e77f81-6a4b-4c66-8e27-891159e3da45" containerID="e3acb2767a7451b13639ee8d1428b93e74fd54c5a63166ef91e7d202125e96e6" exitCode=0
Nov 23 14:59:38 crc kubenswrapper[5050]: I1123 14:59:38.044262 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-99xwj" event={"ID":"f4e77f81-6a4b-4c66-8e27-891159e3da45","Type":"ContainerDied","Data":"e3acb2767a7451b13639ee8d1428b93e74fd54c5a63166ef91e7d202125e96e6"}
Nov 23 14:59:38 crc kubenswrapper[5050]: I1123 14:59:38.392880 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-northd-0"
Nov 23 14:59:39 crc kubenswrapper[5050]: I1123 14:59:39.403387 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-99xwj"
Nov 23 14:59:39 crc kubenswrapper[5050]: I1123 14:59:39.508601 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/f4e77f81-6a4b-4c66-8e27-891159e3da45-etc-swift\") pod \"f4e77f81-6a4b-4c66-8e27-891159e3da45\" (UID: \"f4e77f81-6a4b-4c66-8e27-891159e3da45\") "
Nov 23 14:59:39 crc kubenswrapper[5050]: I1123 14:59:39.508738 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/f4e77f81-6a4b-4c66-8e27-891159e3da45-ring-data-devices\") pod \"f4e77f81-6a4b-4c66-8e27-891159e3da45\" (UID: \"f4e77f81-6a4b-4c66-8e27-891159e3da45\") "
Nov 23 14:59:39 crc kubenswrapper[5050]: I1123 14:59:39.508843 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f4e77f81-6a4b-4c66-8e27-891159e3da45-combined-ca-bundle\") pod \"f4e77f81-6a4b-4c66-8e27-891159e3da45\" (UID: \"f4e77f81-6a4b-4c66-8e27-891159e3da45\") "
Nov 23 14:59:39 crc kubenswrapper[5050]: I1123 14:59:39.508886 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4wf9s\" (UniqueName: \"kubernetes.io/projected/f4e77f81-6a4b-4c66-8e27-891159e3da45-kube-api-access-4wf9s\") pod \"f4e77f81-6a4b-4c66-8e27-891159e3da45\" (UID: \"f4e77f81-6a4b-4c66-8e27-891159e3da45\") "
Nov 23 14:59:39 crc kubenswrapper[5050]: I1123 14:59:39.508924 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/f4e77f81-6a4b-4c66-8e27-891159e3da45-swiftconf\") pod \"f4e77f81-6a4b-4c66-8e27-891159e3da45\" (UID: \"f4e77f81-6a4b-4c66-8e27-891159e3da45\") "
Nov 23 14:59:39 crc kubenswrapper[5050]: I1123 14:59:39.508951 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f4e77f81-6a4b-4c66-8e27-891159e3da45-scripts\") pod \"f4e77f81-6a4b-4c66-8e27-891159e3da45\" (UID: \"f4e77f81-6a4b-4c66-8e27-891159e3da45\") "
Nov 23 14:59:39 crc kubenswrapper[5050]: I1123 14:59:39.508997 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/f4e77f81-6a4b-4c66-8e27-891159e3da45-dispersionconf\") pod \"f4e77f81-6a4b-4c66-8e27-891159e3da45\" (UID: \"f4e77f81-6a4b-4c66-8e27-891159e3da45\") "
Nov 23 14:59:39 crc kubenswrapper[5050]: I1123 14:59:39.510327 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f4e77f81-6a4b-4c66-8e27-891159e3da45-ring-data-devices" (OuterVolumeSpecName: "ring-data-devices") pod "f4e77f81-6a4b-4c66-8e27-891159e3da45" (UID: "f4e77f81-6a4b-4c66-8e27-891159e3da45"). InnerVolumeSpecName "ring-data-devices". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 23 14:59:39 crc kubenswrapper[5050]: I1123 14:59:39.511218 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f4e77f81-6a4b-4c66-8e27-891159e3da45-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "f4e77f81-6a4b-4c66-8e27-891159e3da45" (UID: "f4e77f81-6a4b-4c66-8e27-891159e3da45"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 23 14:59:39 crc kubenswrapper[5050]: I1123 14:59:39.514180 5050 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/f4e77f81-6a4b-4c66-8e27-891159e3da45-etc-swift\") on node \"crc\" DevicePath \"\""
Nov 23 14:59:39 crc kubenswrapper[5050]: I1123 14:59:39.514207 5050 reconciler_common.go:293] "Volume detached for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/f4e77f81-6a4b-4c66-8e27-891159e3da45-ring-data-devices\") on node \"crc\" DevicePath \"\""
Nov 23 14:59:39 crc kubenswrapper[5050]: I1123 14:59:39.517083 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f4e77f81-6a4b-4c66-8e27-891159e3da45-kube-api-access-4wf9s" (OuterVolumeSpecName: "kube-api-access-4wf9s") pod "f4e77f81-6a4b-4c66-8e27-891159e3da45" (UID: "f4e77f81-6a4b-4c66-8e27-891159e3da45"). InnerVolumeSpecName "kube-api-access-4wf9s". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 23 14:59:39 crc kubenswrapper[5050]: I1123 14:59:39.520178 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f4e77f81-6a4b-4c66-8e27-891159e3da45-dispersionconf" (OuterVolumeSpecName: "dispersionconf") pod "f4e77f81-6a4b-4c66-8e27-891159e3da45" (UID: "f4e77f81-6a4b-4c66-8e27-891159e3da45"). InnerVolumeSpecName "dispersionconf". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 23 14:59:39 crc kubenswrapper[5050]: I1123 14:59:39.542469 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f4e77f81-6a4b-4c66-8e27-891159e3da45-swiftconf" (OuterVolumeSpecName: "swiftconf") pod "f4e77f81-6a4b-4c66-8e27-891159e3da45" (UID: "f4e77f81-6a4b-4c66-8e27-891159e3da45"). InnerVolumeSpecName "swiftconf". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 23 14:59:39 crc kubenswrapper[5050]: I1123 14:59:39.546314 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f4e77f81-6a4b-4c66-8e27-891159e3da45-scripts" (OuterVolumeSpecName: "scripts") pod "f4e77f81-6a4b-4c66-8e27-891159e3da45" (UID: "f4e77f81-6a4b-4c66-8e27-891159e3da45"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 23 14:59:39 crc kubenswrapper[5050]: I1123 14:59:39.575042 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f4e77f81-6a4b-4c66-8e27-891159e3da45-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f4e77f81-6a4b-4c66-8e27-891159e3da45" (UID: "f4e77f81-6a4b-4c66-8e27-891159e3da45"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 23 14:59:39 crc kubenswrapper[5050]: I1123 14:59:39.616799 5050 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f4e77f81-6a4b-4c66-8e27-891159e3da45-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 23 14:59:39 crc kubenswrapper[5050]: I1123 14:59:39.616859 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4wf9s\" (UniqueName: \"kubernetes.io/projected/f4e77f81-6a4b-4c66-8e27-891159e3da45-kube-api-access-4wf9s\") on node \"crc\" DevicePath \"\""
Nov 23 14:59:39 crc kubenswrapper[5050]: I1123 14:59:39.616877 5050 reconciler_common.go:293] "Volume detached for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/f4e77f81-6a4b-4c66-8e27-891159e3da45-swiftconf\") on node \"crc\" DevicePath \"\""
Nov 23 14:59:39 crc kubenswrapper[5050]: I1123 14:59:39.616891 5050 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f4e77f81-6a4b-4c66-8e27-891159e3da45-scripts\") on node \"crc\" DevicePath \"\""
Nov 23 14:59:39 crc kubenswrapper[5050]: I1123 14:59:39.616903 5050 reconciler_common.go:293] "Volume detached for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/f4e77f81-6a4b-4c66-8e27-891159e3da45-dispersionconf\") on node \"crc\" DevicePath \"\""
Nov 23 14:59:40 crc kubenswrapper[5050]: I1123 14:59:40.080471 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-99xwj" event={"ID":"f4e77f81-6a4b-4c66-8e27-891159e3da45","Type":"ContainerDied","Data":"f65487eb6925dad9b190466f20f35976f26db9e3c815b7e59746b34010f1e092"}
Nov 23 14:59:40 crc kubenswrapper[5050]: I1123 14:59:40.080521 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f65487eb6925dad9b190466f20f35976f26db9e3c815b7e59746b34010f1e092"
Nov 23 14:59:40 crc kubenswrapper[5050]: I1123 14:59:40.080623 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-99xwj"
Nov 23 14:59:40 crc kubenswrapper[5050]: I1123 14:59:40.129670 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/4947b94e-805e-457e-bb17-f7faea3b5fca-etc-swift\") pod \"swift-storage-0\" (UID: \"4947b94e-805e-457e-bb17-f7faea3b5fca\") " pod="openstack/swift-storage-0"
Nov 23 14:59:40 crc kubenswrapper[5050]: I1123 14:59:40.136304 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/4947b94e-805e-457e-bb17-f7faea3b5fca-etc-swift\") pod \"swift-storage-0\" (UID: \"4947b94e-805e-457e-bb17-f7faea3b5fca\") " pod="openstack/swift-storage-0"
Nov 23 14:59:40 crc kubenswrapper[5050]: I1123 14:59:40.322623 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-storage-0"
Nov 23 14:59:40 crc kubenswrapper[5050]: I1123 14:59:40.917108 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"]
Nov 23 14:59:40 crc kubenswrapper[5050]: W1123 14:59:40.928732 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4947b94e_805e_457e_bb17_f7faea3b5fca.slice/crio-fa7fbbc9ee7bf5c539752bbd4d457f5b3344a2a87f7c2acaa651ed614e052891 WatchSource:0}: Error finding container fa7fbbc9ee7bf5c539752bbd4d457f5b3344a2a87f7c2acaa651ed614e052891: Status 404 returned error can't find the container with id fa7fbbc9ee7bf5c539752bbd4d457f5b3344a2a87f7c2acaa651ed614e052891
Nov 23 14:59:41 crc kubenswrapper[5050]: I1123 14:59:41.091678 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"4947b94e-805e-457e-bb17-f7faea3b5fca","Type":"ContainerStarted","Data":"fa7fbbc9ee7bf5c539752bbd4d457f5b3344a2a87f7c2acaa651ed614e052891"}
Nov 23 14:59:43 crc kubenswrapper[5050]: I1123 14:59:43.110660 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"4947b94e-805e-457e-bb17-f7faea3b5fca","Type":"ContainerStarted","Data":"4211198ac477c9fe45e3ef0bd5dffc47dd9911807782e0d46558842b2d2b48fd"}
Nov 23 14:59:43 crc kubenswrapper[5050]: I1123 14:59:43.111634 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"4947b94e-805e-457e-bb17-f7faea3b5fca","Type":"ContainerStarted","Data":"0001c3500cc85fc0a925e13354d27829e719cc2b2c22f8ba291e2016b3e3b365"}
Nov 23 14:59:44 crc kubenswrapper[5050]: I1123 14:59:44.121305 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"4947b94e-805e-457e-bb17-f7faea3b5fca","Type":"ContainerStarted","Data":"1cec0ebe1304d052bb25cc3f36043c71b0777d6f8cbf3b9d8a22f1e057b1fddb"}
Nov 23 14:59:44 crc kubenswrapper[5050]: I1123 14:59:44.121852 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"4947b94e-805e-457e-bb17-f7faea3b5fca","Type":"ContainerStarted","Data":"9a3ab1117cae597494105d7c65afeef7161729ed4e56b6205f5b1b5b9c2c0980"}
Nov 23 14:59:46 crc kubenswrapper[5050]: I1123 14:59:46.921921 5050 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-tbww8" podUID="33af3c2e-dea7-4448-8b28-b579d77490b9" containerName="ovn-controller" probeResult="failure" output=<
Nov 23 14:59:46 crc kubenswrapper[5050]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status
Nov 23 14:59:46 crc kubenswrapper[5050]: >
Nov 23 14:59:46 crc kubenswrapper[5050]: I1123 14:59:46.930136 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-n9v8v"
Nov 23 14:59:48 crc kubenswrapper[5050]: I1123 14:59:48.176277 5050 generic.go:334] "Generic (PLEG): container finished" podID="f92353db-5352-4216-ad2d-50242e58dfb7" containerID="8fa79fa4d3da8bc1f764d8d40be5053a2b08532c54d8bfc67df9434fce12f626" exitCode=0
Nov 23 14:59:48 crc kubenswrapper[5050]: I1123 14:59:48.176356 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"f92353db-5352-4216-ad2d-50242e58dfb7","Type":"ContainerDied","Data":"8fa79fa4d3da8bc1f764d8d40be5053a2b08532c54d8bfc67df9434fce12f626"}
Nov 23 14:59:49 crc kubenswrapper[5050]: I1123 14:59:49.189335 5050 generic.go:334] "Generic (PLEG): container finished" podID="5eff539e-c641-4873-aeae-450aaf0b4ac8" containerID="e92ae2c79d385eac2b74e01fa6c265a4e1892dff4fe0ad87c1b8d47c3cad30bd" exitCode=0
Nov 23 14:59:49 crc kubenswrapper[5050]: I1123 14:59:49.189489 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"5eff539e-c641-4873-aeae-450aaf0b4ac8","Type":"ContainerDied","Data":"e92ae2c79d385eac2b74e01fa6c265a4e1892dff4fe0ad87c1b8d47c3cad30bd"}
Nov 23 14:59:51 crc kubenswrapper[5050]: I1123 14:59:51.896687 5050 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-tbww8" podUID="33af3c2e-dea7-4448-8b28-b579d77490b9" containerName="ovn-controller" probeResult="failure" output=<
Nov 23 14:59:51 crc kubenswrapper[5050]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status
Nov 23 14:59:51 crc kubenswrapper[5050]: >
Nov 23 14:59:51 crc kubenswrapper[5050]: I1123 14:59:51.916803 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-n9v8v"
Nov 23 14:59:52 crc kubenswrapper[5050]: I1123 14:59:52.123424 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-tbww8-config-j75vm"]
Nov 23 14:59:52 crc kubenswrapper[5050]: E1123 14:59:52.124718 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4e77f81-6a4b-4c66-8e27-891159e3da45" containerName="swift-ring-rebalance"
Nov 23 14:59:52 crc kubenswrapper[5050]: I1123 14:59:52.124860 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4e77f81-6a4b-4c66-8e27-891159e3da45" containerName="swift-ring-rebalance"
Nov 23 14:59:52 crc kubenswrapper[5050]: I1123 14:59:52.125316 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4e77f81-6a4b-4c66-8e27-891159e3da45" containerName="swift-ring-rebalance"
Nov 23 14:59:52 crc kubenswrapper[5050]: I1123 14:59:52.126661 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-tbww8-config-j75vm"
Nov 23 14:59:52 crc kubenswrapper[5050]: I1123 14:59:52.130115 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts"
Nov 23 14:59:52 crc kubenswrapper[5050]: I1123 14:59:52.141968 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-tbww8-config-j75vm"]
Nov 23 14:59:52 crc kubenswrapper[5050]: I1123 14:59:52.203510 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pnwn8\" (UniqueName: \"kubernetes.io/projected/8490479f-8250-4ec9-a133-d0eac2ab3351-kube-api-access-pnwn8\") pod \"ovn-controller-tbww8-config-j75vm\" (UID: \"8490479f-8250-4ec9-a133-d0eac2ab3351\") " pod="openstack/ovn-controller-tbww8-config-j75vm"
Nov 23 14:59:52 crc kubenswrapper[5050]: I1123 14:59:52.203578 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/8490479f-8250-4ec9-a133-d0eac2ab3351-var-run-ovn\") pod \"ovn-controller-tbww8-config-j75vm\" (UID: \"8490479f-8250-4ec9-a133-d0eac2ab3351\") " pod="openstack/ovn-controller-tbww8-config-j75vm"
Nov 23 14:59:52 crc kubenswrapper[5050]: I1123 14:59:52.203614 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/8490479f-8250-4ec9-a133-d0eac2ab3351-var-run\") pod \"ovn-controller-tbww8-config-j75vm\" (UID: \"8490479f-8250-4ec9-a133-d0eac2ab3351\") " pod="openstack/ovn-controller-tbww8-config-j75vm"
Nov 23 14:59:52 crc kubenswrapper[5050]: I1123 14:59:52.203655 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8490479f-8250-4ec9-a133-d0eac2ab3351-scripts\") pod \"ovn-controller-tbww8-config-j75vm\" (UID: \"8490479f-8250-4ec9-a133-d0eac2ab3351\") " pod="openstack/ovn-controller-tbww8-config-j75vm"
Nov 23 14:59:52 crc kubenswrapper[5050]: I1123 14:59:52.203702 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/8490479f-8250-4ec9-a133-d0eac2ab3351-additional-scripts\") pod \"ovn-controller-tbww8-config-j75vm\" (UID: \"8490479f-8250-4ec9-a133-d0eac2ab3351\") " pod="openstack/ovn-controller-tbww8-config-j75vm"
Nov 23 14:59:52 crc kubenswrapper[5050]: I1123 14:59:52.203746 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/8490479f-8250-4ec9-a133-d0eac2ab3351-var-log-ovn\") pod \"ovn-controller-tbww8-config-j75vm\" (UID: \"8490479f-8250-4ec9-a133-d0eac2ab3351\") " pod="openstack/ovn-controller-tbww8-config-j75vm"
Nov 23 14:59:52 crc kubenswrapper[5050]: I1123 14:59:52.305926 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pnwn8\" (UniqueName: \"kubernetes.io/projected/8490479f-8250-4ec9-a133-d0eac2ab3351-kube-api-access-pnwn8\") pod \"ovn-controller-tbww8-config-j75vm\" (UID: \"8490479f-8250-4ec9-a133-d0eac2ab3351\") " pod="openstack/ovn-controller-tbww8-config-j75vm"
Nov 23 14:59:52 crc kubenswrapper[5050]: I1123 14:59:52.306523 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName:
\"kubernetes.io/host-path/8490479f-8250-4ec9-a133-d0eac2ab3351-var-run-ovn\") pod \"ovn-controller-tbww8-config-j75vm\" (UID: \"8490479f-8250-4ec9-a133-d0eac2ab3351\") " pod="openstack/ovn-controller-tbww8-config-j75vm" Nov 23 14:59:52 crc kubenswrapper[5050]: I1123 14:59:52.306589 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/8490479f-8250-4ec9-a133-d0eac2ab3351-var-run\") pod \"ovn-controller-tbww8-config-j75vm\" (UID: \"8490479f-8250-4ec9-a133-d0eac2ab3351\") " pod="openstack/ovn-controller-tbww8-config-j75vm" Nov 23 14:59:52 crc kubenswrapper[5050]: I1123 14:59:52.306640 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8490479f-8250-4ec9-a133-d0eac2ab3351-scripts\") pod \"ovn-controller-tbww8-config-j75vm\" (UID: \"8490479f-8250-4ec9-a133-d0eac2ab3351\") " pod="openstack/ovn-controller-tbww8-config-j75vm" Nov 23 14:59:52 crc kubenswrapper[5050]: I1123 14:59:52.306695 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/8490479f-8250-4ec9-a133-d0eac2ab3351-additional-scripts\") pod \"ovn-controller-tbww8-config-j75vm\" (UID: \"8490479f-8250-4ec9-a133-d0eac2ab3351\") " pod="openstack/ovn-controller-tbww8-config-j75vm" Nov 23 14:59:52 crc kubenswrapper[5050]: I1123 14:59:52.306732 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/8490479f-8250-4ec9-a133-d0eac2ab3351-var-log-ovn\") pod \"ovn-controller-tbww8-config-j75vm\" (UID: \"8490479f-8250-4ec9-a133-d0eac2ab3351\") " pod="openstack/ovn-controller-tbww8-config-j75vm" Nov 23 14:59:52 crc kubenswrapper[5050]: I1123 14:59:52.307220 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/8490479f-8250-4ec9-a133-d0eac2ab3351-var-log-ovn\") pod \"ovn-controller-tbww8-config-j75vm\" (UID: \"8490479f-8250-4ec9-a133-d0eac2ab3351\") " pod="openstack/ovn-controller-tbww8-config-j75vm" Nov 23 14:59:52 crc kubenswrapper[5050]: I1123 14:59:52.307267 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/8490479f-8250-4ec9-a133-d0eac2ab3351-var-run\") pod \"ovn-controller-tbww8-config-j75vm\" (UID: \"8490479f-8250-4ec9-a133-d0eac2ab3351\") " pod="openstack/ovn-controller-tbww8-config-j75vm" Nov 23 14:59:52 crc kubenswrapper[5050]: I1123 14:59:52.307743 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/8490479f-8250-4ec9-a133-d0eac2ab3351-var-run-ovn\") pod \"ovn-controller-tbww8-config-j75vm\" (UID: \"8490479f-8250-4ec9-a133-d0eac2ab3351\") " pod="openstack/ovn-controller-tbww8-config-j75vm" Nov 23 14:59:52 crc kubenswrapper[5050]: I1123 14:59:52.308644 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/8490479f-8250-4ec9-a133-d0eac2ab3351-additional-scripts\") pod \"ovn-controller-tbww8-config-j75vm\" (UID: \"8490479f-8250-4ec9-a133-d0eac2ab3351\") " pod="openstack/ovn-controller-tbww8-config-j75vm" Nov 23 14:59:52 crc kubenswrapper[5050]: I1123 14:59:52.309732 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: 
\"kubernetes.io/configmap/8490479f-8250-4ec9-a133-d0eac2ab3351-scripts\") pod \"ovn-controller-tbww8-config-j75vm\" (UID: \"8490479f-8250-4ec9-a133-d0eac2ab3351\") " pod="openstack/ovn-controller-tbww8-config-j75vm" Nov 23 14:59:52 crc kubenswrapper[5050]: I1123 14:59:52.348366 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pnwn8\" (UniqueName: \"kubernetes.io/projected/8490479f-8250-4ec9-a133-d0eac2ab3351-kube-api-access-pnwn8\") pod \"ovn-controller-tbww8-config-j75vm\" (UID: \"8490479f-8250-4ec9-a133-d0eac2ab3351\") " pod="openstack/ovn-controller-tbww8-config-j75vm" Nov 23 14:59:52 crc kubenswrapper[5050]: I1123 14:59:52.483973 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-tbww8-config-j75vm" Nov 23 14:59:53 crc kubenswrapper[5050]: I1123 14:59:53.112177 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-tbww8-config-j75vm"] Nov 23 14:59:53 crc kubenswrapper[5050]: W1123 14:59:53.113727 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8490479f_8250_4ec9_a133_d0eac2ab3351.slice/crio-3b547c34d3863699c1602a2c4581df25c35a59c690cc2d087c611f96afad49ca WatchSource:0}: Error finding container 3b547c34d3863699c1602a2c4581df25c35a59c690cc2d087c611f96afad49ca: Status 404 returned error can't find the container with id 3b547c34d3863699c1602a2c4581df25c35a59c690cc2d087c611f96afad49ca Nov 23 14:59:53 crc kubenswrapper[5050]: I1123 14:59:53.282612 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"f92353db-5352-4216-ad2d-50242e58dfb7","Type":"ContainerStarted","Data":"fbb98bc62f9119abace6a66eef302292172343ff03acadf76beb838abacc4705"} Nov 23 14:59:53 crc kubenswrapper[5050]: I1123 14:59:53.283892 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Nov 23 14:59:53 crc kubenswrapper[5050]: I1123 14:59:53.286001 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"5eff539e-c641-4873-aeae-450aaf0b4ac8","Type":"ContainerStarted","Data":"a9dbba1cde13781acfcbcff23b41231ce6338e634ff0c22cf2de26d62d3a3f34"} Nov 23 14:59:53 crc kubenswrapper[5050]: I1123 14:59:53.286686 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Nov 23 14:59:53 crc kubenswrapper[5050]: I1123 14:59:53.289072 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-tbww8-config-j75vm" event={"ID":"8490479f-8250-4ec9-a133-d0eac2ab3351","Type":"ContainerStarted","Data":"3b547c34d3863699c1602a2c4581df25c35a59c690cc2d087c611f96afad49ca"} Nov 23 14:59:53 crc kubenswrapper[5050]: I1123 14:59:53.291659 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"4947b94e-805e-457e-bb17-f7faea3b5fca","Type":"ContainerStarted","Data":"f9e7a939140c48c7a80ae5b9b609b9b098e6c60e33ce09927c76177950647981"} Nov 23 14:59:53 crc kubenswrapper[5050]: I1123 14:59:53.313350 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=41.979991691 podStartE2EDuration="1m17.313331145s" podCreationTimestamp="2025-11-23 14:58:36 +0000 UTC" firstStartedPulling="2025-11-23 14:58:38.781816651 +0000 UTC m=+1013.948813136" lastFinishedPulling="2025-11-23 14:59:14.115156115 +0000 UTC m=+1049.282152590" 
observedRunningTime="2025-11-23 14:59:53.30880781 +0000 UTC m=+1088.475804305" watchObservedRunningTime="2025-11-23 14:59:53.313331145 +0000 UTC m=+1088.480327630" Nov 23 14:59:54 crc kubenswrapper[5050]: I1123 14:59:54.305814 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"4947b94e-805e-457e-bb17-f7faea3b5fca","Type":"ContainerStarted","Data":"f9d2cfcc4ee11e5c2631dbc7f3cfcfc91553670b329dfc79c70030ffc3518f25"} Nov 23 14:59:54 crc kubenswrapper[5050]: I1123 14:59:54.306558 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"4947b94e-805e-457e-bb17-f7faea3b5fca","Type":"ContainerStarted","Data":"9d6c10414c5bea42690cbd8da8cba7bb0cf669191242a30c4e88b58e7464d151"} Nov 23 14:59:54 crc kubenswrapper[5050]: I1123 14:59:54.306570 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"4947b94e-805e-457e-bb17-f7faea3b5fca","Type":"ContainerStarted","Data":"b4f43cb835f2029a79a141c233333ef0e2ea5ee6f6d05bd0c246b7323bb38310"} Nov 23 14:59:54 crc kubenswrapper[5050]: I1123 14:59:54.307252 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-lzdd7" event={"ID":"b347d677-8b98-4402-b7b8-88fe0b080ac0","Type":"ContainerStarted","Data":"baa900082661a9828476231a8149439c9dd2f733f95eb32f3e6ed2627e03453b"} Nov 23 14:59:54 crc kubenswrapper[5050]: I1123 14:59:54.310323 5050 generic.go:334] "Generic (PLEG): container finished" podID="8490479f-8250-4ec9-a133-d0eac2ab3351" containerID="655f490e0c7aa13171353d2848ca0d68815a663102d03d5563818ce0757f8067" exitCode=0 Nov 23 14:59:54 crc kubenswrapper[5050]: I1123 14:59:54.310399 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-tbww8-config-j75vm" event={"ID":"8490479f-8250-4ec9-a133-d0eac2ab3351","Type":"ContainerDied","Data":"655f490e0c7aa13171353d2848ca0d68815a663102d03d5563818ce0757f8067"} Nov 23 14:59:54 crc kubenswrapper[5050]: I1123 14:59:54.332324 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-sync-lzdd7" podStartSLOduration=3.421891274 podStartE2EDuration="18.332303887s" podCreationTimestamp="2025-11-23 14:59:36 +0000 UTC" firstStartedPulling="2025-11-23 14:59:37.944241205 +0000 UTC m=+1073.111237700" lastFinishedPulling="2025-11-23 14:59:52.854653828 +0000 UTC m=+1088.021650313" observedRunningTime="2025-11-23 14:59:54.327125454 +0000 UTC m=+1089.494121939" watchObservedRunningTime="2025-11-23 14:59:54.332303887 +0000 UTC m=+1089.499300372" Nov 23 14:59:54 crc kubenswrapper[5050]: I1123 14:59:54.333605 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=-9223371958.521177 podStartE2EDuration="1m18.333598153s" podCreationTimestamp="2025-11-23 14:58:36 +0000 UTC" firstStartedPulling="2025-11-23 14:58:39.092849416 +0000 UTC m=+1014.259845901" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 14:59:53.34207741 +0000 UTC m=+1088.509073895" watchObservedRunningTime="2025-11-23 14:59:54.333598153 +0000 UTC m=+1089.500594638" Nov 23 14:59:55 crc kubenswrapper[5050]: I1123 14:59:55.340612 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"4947b94e-805e-457e-bb17-f7faea3b5fca","Type":"ContainerStarted","Data":"53cc065da29b9c9511ae76a606c6be3934715316c98087e1bceb4aedfb391ba5"} Nov 23 14:59:55 crc kubenswrapper[5050]: I1123 14:59:55.783224 5050 util.go:48] "No 
ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-tbww8-config-j75vm" Nov 23 14:59:55 crc kubenswrapper[5050]: I1123 14:59:55.923256 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8490479f-8250-4ec9-a133-d0eac2ab3351-scripts\") pod \"8490479f-8250-4ec9-a133-d0eac2ab3351\" (UID: \"8490479f-8250-4ec9-a133-d0eac2ab3351\") " Nov 23 14:59:55 crc kubenswrapper[5050]: I1123 14:59:55.923360 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/8490479f-8250-4ec9-a133-d0eac2ab3351-var-run\") pod \"8490479f-8250-4ec9-a133-d0eac2ab3351\" (UID: \"8490479f-8250-4ec9-a133-d0eac2ab3351\") " Nov 23 14:59:55 crc kubenswrapper[5050]: I1123 14:59:55.923540 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/8490479f-8250-4ec9-a133-d0eac2ab3351-additional-scripts\") pod \"8490479f-8250-4ec9-a133-d0eac2ab3351\" (UID: \"8490479f-8250-4ec9-a133-d0eac2ab3351\") " Nov 23 14:59:55 crc kubenswrapper[5050]: I1123 14:59:55.923577 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/8490479f-8250-4ec9-a133-d0eac2ab3351-var-run" (OuterVolumeSpecName: "var-run") pod "8490479f-8250-4ec9-a133-d0eac2ab3351" (UID: "8490479f-8250-4ec9-a133-d0eac2ab3351"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 23 14:59:55 crc kubenswrapper[5050]: I1123 14:59:55.923660 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/8490479f-8250-4ec9-a133-d0eac2ab3351-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "8490479f-8250-4ec9-a133-d0eac2ab3351" (UID: "8490479f-8250-4ec9-a133-d0eac2ab3351"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 23 14:59:55 crc kubenswrapper[5050]: I1123 14:59:55.923627 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/8490479f-8250-4ec9-a133-d0eac2ab3351-var-run-ovn\") pod \"8490479f-8250-4ec9-a133-d0eac2ab3351\" (UID: \"8490479f-8250-4ec9-a133-d0eac2ab3351\") " Nov 23 14:59:55 crc kubenswrapper[5050]: I1123 14:59:55.923851 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/8490479f-8250-4ec9-a133-d0eac2ab3351-var-log-ovn\") pod \"8490479f-8250-4ec9-a133-d0eac2ab3351\" (UID: \"8490479f-8250-4ec9-a133-d0eac2ab3351\") " Nov 23 14:59:55 crc kubenswrapper[5050]: I1123 14:59:55.923887 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pnwn8\" (UniqueName: \"kubernetes.io/projected/8490479f-8250-4ec9-a133-d0eac2ab3351-kube-api-access-pnwn8\") pod \"8490479f-8250-4ec9-a133-d0eac2ab3351\" (UID: \"8490479f-8250-4ec9-a133-d0eac2ab3351\") " Nov 23 14:59:55 crc kubenswrapper[5050]: I1123 14:59:55.923985 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/8490479f-8250-4ec9-a133-d0eac2ab3351-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "8490479f-8250-4ec9-a133-d0eac2ab3351" (UID: "8490479f-8250-4ec9-a133-d0eac2ab3351"). InnerVolumeSpecName "var-log-ovn". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 23 14:59:55 crc kubenswrapper[5050]: I1123 14:59:55.924269 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8490479f-8250-4ec9-a133-d0eac2ab3351-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "8490479f-8250-4ec9-a133-d0eac2ab3351" (UID: "8490479f-8250-4ec9-a133-d0eac2ab3351"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 14:59:55 crc kubenswrapper[5050]: I1123 14:59:55.924382 5050 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/8490479f-8250-4ec9-a133-d0eac2ab3351-var-log-ovn\") on node \"crc\" DevicePath \"\"" Nov 23 14:59:55 crc kubenswrapper[5050]: I1123 14:59:55.924403 5050 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/8490479f-8250-4ec9-a133-d0eac2ab3351-var-run\") on node \"crc\" DevicePath \"\"" Nov 23 14:59:55 crc kubenswrapper[5050]: I1123 14:59:55.924414 5050 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/8490479f-8250-4ec9-a133-d0eac2ab3351-additional-scripts\") on node \"crc\" DevicePath \"\"" Nov 23 14:59:55 crc kubenswrapper[5050]: I1123 14:59:55.924427 5050 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/8490479f-8250-4ec9-a133-d0eac2ab3351-var-run-ovn\") on node \"crc\" DevicePath \"\"" Nov 23 14:59:55 crc kubenswrapper[5050]: I1123 14:59:55.925003 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8490479f-8250-4ec9-a133-d0eac2ab3351-scripts" (OuterVolumeSpecName: "scripts") pod "8490479f-8250-4ec9-a133-d0eac2ab3351" (UID: "8490479f-8250-4ec9-a133-d0eac2ab3351"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 14:59:55 crc kubenswrapper[5050]: I1123 14:59:55.937717 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8490479f-8250-4ec9-a133-d0eac2ab3351-kube-api-access-pnwn8" (OuterVolumeSpecName: "kube-api-access-pnwn8") pod "8490479f-8250-4ec9-a133-d0eac2ab3351" (UID: "8490479f-8250-4ec9-a133-d0eac2ab3351"). InnerVolumeSpecName "kube-api-access-pnwn8". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 14:59:56 crc kubenswrapper[5050]: I1123 14:59:56.026541 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pnwn8\" (UniqueName: \"kubernetes.io/projected/8490479f-8250-4ec9-a133-d0eac2ab3351-kube-api-access-pnwn8\") on node \"crc\" DevicePath \"\"" Nov 23 14:59:56 crc kubenswrapper[5050]: I1123 14:59:56.026603 5050 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8490479f-8250-4ec9-a133-d0eac2ab3351-scripts\") on node \"crc\" DevicePath \"\"" Nov 23 14:59:56 crc kubenswrapper[5050]: I1123 14:59:56.350382 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-tbww8-config-j75vm" event={"ID":"8490479f-8250-4ec9-a133-d0eac2ab3351","Type":"ContainerDied","Data":"3b547c34d3863699c1602a2c4581df25c35a59c690cc2d087c611f96afad49ca"} Nov 23 14:59:56 crc kubenswrapper[5050]: I1123 14:59:56.350458 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3b547c34d3863699c1602a2c4581df25c35a59c690cc2d087c611f96afad49ca" Nov 23 14:59:56 crc kubenswrapper[5050]: I1123 14:59:56.350538 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-tbww8-config-j75vm" Nov 23 14:59:56 crc kubenswrapper[5050]: I1123 14:59:56.360478 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"4947b94e-805e-457e-bb17-f7faea3b5fca","Type":"ContainerStarted","Data":"1fcdee9bec472e24e3ee2fa6e158b77914f69e25bb807a1be9ca536d728a1ddf"} Nov 23 14:59:56 crc kubenswrapper[5050]: I1123 14:59:56.360532 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"4947b94e-805e-457e-bb17-f7faea3b5fca","Type":"ContainerStarted","Data":"e0f8af78c1a802c881b2c04585b63daab17211922673261ca7523f7d84b8e187"} Nov 23 14:59:56 crc kubenswrapper[5050]: I1123 14:59:56.360544 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"4947b94e-805e-457e-bb17-f7faea3b5fca","Type":"ContainerStarted","Data":"8dae616c0d8bc5a23b5cfb9e119b706796daaa7ba5419e533d3fb7176e7bf222"} Nov 23 14:59:56 crc kubenswrapper[5050]: I1123 14:59:56.928036 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-tbww8" Nov 23 14:59:56 crc kubenswrapper[5050]: I1123 14:59:56.945895 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-tbww8-config-j75vm"] Nov 23 14:59:56 crc kubenswrapper[5050]: I1123 14:59:56.951802 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-tbww8-config-j75vm"] Nov 23 14:59:57 crc kubenswrapper[5050]: I1123 14:59:57.562832 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8490479f-8250-4ec9-a133-d0eac2ab3351" path="/var/lib/kubelet/pods/8490479f-8250-4ec9-a133-d0eac2ab3351/volumes" Nov 23 14:59:58 crc kubenswrapper[5050]: I1123 14:59:58.390998 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"4947b94e-805e-457e-bb17-f7faea3b5fca","Type":"ContainerStarted","Data":"b22acf79827fd3ac0a34b514be2b00ed37d6b8a1dac44f85032fc97fb4bd5540"} Nov 23 14:59:58 crc kubenswrapper[5050]: I1123 14:59:58.391552 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" 
event={"ID":"4947b94e-805e-457e-bb17-f7faea3b5fca","Type":"ContainerStarted","Data":"0e36c69aa375c539b26f58a19aaffeb696f806ace3d6046abb778218a238b336"} Nov 23 14:59:59 crc kubenswrapper[5050]: I1123 14:59:59.224467 5050 patch_prober.go:28] interesting pod/machine-config-daemon-hlrlq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 23 14:59:59 crc kubenswrapper[5050]: I1123 14:59:59.224532 5050 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 23 14:59:59 crc kubenswrapper[5050]: I1123 14:59:59.405328 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"4947b94e-805e-457e-bb17-f7faea3b5fca","Type":"ContainerStarted","Data":"61d7218bc7db3ee0defb5601931d237c30251b28aeaf13b1fc22ffcbd83b4e84"} Nov 23 14:59:59 crc kubenswrapper[5050]: I1123 14:59:59.448015 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-storage-0" podStartSLOduration=22.413452863 podStartE2EDuration="36.44799283s" podCreationTimestamp="2025-11-23 14:59:23 +0000 UTC" firstStartedPulling="2025-11-23 14:59:40.932581865 +0000 UTC m=+1076.099578350" lastFinishedPulling="2025-11-23 14:59:54.967121832 +0000 UTC m=+1090.134118317" observedRunningTime="2025-11-23 14:59:59.441262514 +0000 UTC m=+1094.608258999" watchObservedRunningTime="2025-11-23 14:59:59.44799283 +0000 UTC m=+1094.614989325" Nov 23 14:59:59 crc kubenswrapper[5050]: I1123 14:59:59.723617 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5c79d794d7-xsz2b"] Nov 23 14:59:59 crc kubenswrapper[5050]: E1123 14:59:59.724341 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8490479f-8250-4ec9-a133-d0eac2ab3351" containerName="ovn-config" Nov 23 14:59:59 crc kubenswrapper[5050]: I1123 14:59:59.724360 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="8490479f-8250-4ec9-a133-d0eac2ab3351" containerName="ovn-config" Nov 23 14:59:59 crc kubenswrapper[5050]: I1123 14:59:59.724609 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="8490479f-8250-4ec9-a133-d0eac2ab3351" containerName="ovn-config" Nov 23 14:59:59 crc kubenswrapper[5050]: I1123 14:59:59.725557 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5c79d794d7-xsz2b" Nov 23 14:59:59 crc kubenswrapper[5050]: I1123 14:59:59.727313 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-swift-storage-0" Nov 23 14:59:59 crc kubenswrapper[5050]: I1123 14:59:59.742623 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5c79d794d7-xsz2b"] Nov 23 14:59:59 crc kubenswrapper[5050]: I1123 14:59:59.806334 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a8c801eb-e5f2-440d-836f-9cdef0f7c6fa-ovsdbserver-nb\") pod \"dnsmasq-dns-5c79d794d7-xsz2b\" (UID: \"a8c801eb-e5f2-440d-836f-9cdef0f7c6fa\") " pod="openstack/dnsmasq-dns-5c79d794d7-xsz2b" Nov 23 14:59:59 crc kubenswrapper[5050]: I1123 14:59:59.806378 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a8c801eb-e5f2-440d-836f-9cdef0f7c6fa-ovsdbserver-sb\") pod \"dnsmasq-dns-5c79d794d7-xsz2b\" (UID: \"a8c801eb-e5f2-440d-836f-9cdef0f7c6fa\") " pod="openstack/dnsmasq-dns-5c79d794d7-xsz2b" Nov 23 14:59:59 crc kubenswrapper[5050]: I1123 14:59:59.806421 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qxqfr\" (UniqueName: \"kubernetes.io/projected/a8c801eb-e5f2-440d-836f-9cdef0f7c6fa-kube-api-access-qxqfr\") pod \"dnsmasq-dns-5c79d794d7-xsz2b\" (UID: \"a8c801eb-e5f2-440d-836f-9cdef0f7c6fa\") " pod="openstack/dnsmasq-dns-5c79d794d7-xsz2b" Nov 23 14:59:59 crc kubenswrapper[5050]: I1123 14:59:59.806484 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a8c801eb-e5f2-440d-836f-9cdef0f7c6fa-config\") pod \"dnsmasq-dns-5c79d794d7-xsz2b\" (UID: \"a8c801eb-e5f2-440d-836f-9cdef0f7c6fa\") " pod="openstack/dnsmasq-dns-5c79d794d7-xsz2b" Nov 23 14:59:59 crc kubenswrapper[5050]: I1123 14:59:59.806535 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a8c801eb-e5f2-440d-836f-9cdef0f7c6fa-dns-swift-storage-0\") pod \"dnsmasq-dns-5c79d794d7-xsz2b\" (UID: \"a8c801eb-e5f2-440d-836f-9cdef0f7c6fa\") " pod="openstack/dnsmasq-dns-5c79d794d7-xsz2b" Nov 23 14:59:59 crc kubenswrapper[5050]: I1123 14:59:59.806552 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a8c801eb-e5f2-440d-836f-9cdef0f7c6fa-dns-svc\") pod \"dnsmasq-dns-5c79d794d7-xsz2b\" (UID: \"a8c801eb-e5f2-440d-836f-9cdef0f7c6fa\") " pod="openstack/dnsmasq-dns-5c79d794d7-xsz2b" Nov 23 14:59:59 crc kubenswrapper[5050]: I1123 14:59:59.908530 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a8c801eb-e5f2-440d-836f-9cdef0f7c6fa-ovsdbserver-nb\") pod \"dnsmasq-dns-5c79d794d7-xsz2b\" (UID: \"a8c801eb-e5f2-440d-836f-9cdef0f7c6fa\") " pod="openstack/dnsmasq-dns-5c79d794d7-xsz2b" Nov 23 14:59:59 crc kubenswrapper[5050]: I1123 14:59:59.908591 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a8c801eb-e5f2-440d-836f-9cdef0f7c6fa-ovsdbserver-sb\") pod \"dnsmasq-dns-5c79d794d7-xsz2b\" (UID: 
\"a8c801eb-e5f2-440d-836f-9cdef0f7c6fa\") " pod="openstack/dnsmasq-dns-5c79d794d7-xsz2b" Nov 23 14:59:59 crc kubenswrapper[5050]: I1123 14:59:59.908635 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qxqfr\" (UniqueName: \"kubernetes.io/projected/a8c801eb-e5f2-440d-836f-9cdef0f7c6fa-kube-api-access-qxqfr\") pod \"dnsmasq-dns-5c79d794d7-xsz2b\" (UID: \"a8c801eb-e5f2-440d-836f-9cdef0f7c6fa\") " pod="openstack/dnsmasq-dns-5c79d794d7-xsz2b" Nov 23 14:59:59 crc kubenswrapper[5050]: I1123 14:59:59.908691 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a8c801eb-e5f2-440d-836f-9cdef0f7c6fa-config\") pod \"dnsmasq-dns-5c79d794d7-xsz2b\" (UID: \"a8c801eb-e5f2-440d-836f-9cdef0f7c6fa\") " pod="openstack/dnsmasq-dns-5c79d794d7-xsz2b" Nov 23 14:59:59 crc kubenswrapper[5050]: I1123 14:59:59.908728 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a8c801eb-e5f2-440d-836f-9cdef0f7c6fa-dns-swift-storage-0\") pod \"dnsmasq-dns-5c79d794d7-xsz2b\" (UID: \"a8c801eb-e5f2-440d-836f-9cdef0f7c6fa\") " pod="openstack/dnsmasq-dns-5c79d794d7-xsz2b" Nov 23 14:59:59 crc kubenswrapper[5050]: I1123 14:59:59.908747 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a8c801eb-e5f2-440d-836f-9cdef0f7c6fa-dns-svc\") pod \"dnsmasq-dns-5c79d794d7-xsz2b\" (UID: \"a8c801eb-e5f2-440d-836f-9cdef0f7c6fa\") " pod="openstack/dnsmasq-dns-5c79d794d7-xsz2b" Nov 23 14:59:59 crc kubenswrapper[5050]: I1123 14:59:59.909813 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a8c801eb-e5f2-440d-836f-9cdef0f7c6fa-ovsdbserver-sb\") pod \"dnsmasq-dns-5c79d794d7-xsz2b\" (UID: \"a8c801eb-e5f2-440d-836f-9cdef0f7c6fa\") " pod="openstack/dnsmasq-dns-5c79d794d7-xsz2b" Nov 23 14:59:59 crc kubenswrapper[5050]: I1123 14:59:59.909973 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a8c801eb-e5f2-440d-836f-9cdef0f7c6fa-dns-svc\") pod \"dnsmasq-dns-5c79d794d7-xsz2b\" (UID: \"a8c801eb-e5f2-440d-836f-9cdef0f7c6fa\") " pod="openstack/dnsmasq-dns-5c79d794d7-xsz2b" Nov 23 14:59:59 crc kubenswrapper[5050]: I1123 14:59:59.909964 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a8c801eb-e5f2-440d-836f-9cdef0f7c6fa-ovsdbserver-nb\") pod \"dnsmasq-dns-5c79d794d7-xsz2b\" (UID: \"a8c801eb-e5f2-440d-836f-9cdef0f7c6fa\") " pod="openstack/dnsmasq-dns-5c79d794d7-xsz2b" Nov 23 14:59:59 crc kubenswrapper[5050]: I1123 14:59:59.910348 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a8c801eb-e5f2-440d-836f-9cdef0f7c6fa-dns-swift-storage-0\") pod \"dnsmasq-dns-5c79d794d7-xsz2b\" (UID: \"a8c801eb-e5f2-440d-836f-9cdef0f7c6fa\") " pod="openstack/dnsmasq-dns-5c79d794d7-xsz2b" Nov 23 14:59:59 crc kubenswrapper[5050]: I1123 14:59:59.910597 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a8c801eb-e5f2-440d-836f-9cdef0f7c6fa-config\") pod \"dnsmasq-dns-5c79d794d7-xsz2b\" (UID: \"a8c801eb-e5f2-440d-836f-9cdef0f7c6fa\") " pod="openstack/dnsmasq-dns-5c79d794d7-xsz2b" Nov 23 14:59:59 crc 
kubenswrapper[5050]: I1123 14:59:59.944403 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qxqfr\" (UniqueName: \"kubernetes.io/projected/a8c801eb-e5f2-440d-836f-9cdef0f7c6fa-kube-api-access-qxqfr\") pod \"dnsmasq-dns-5c79d794d7-xsz2b\" (UID: \"a8c801eb-e5f2-440d-836f-9cdef0f7c6fa\") " pod="openstack/dnsmasq-dns-5c79d794d7-xsz2b" Nov 23 15:00:00 crc kubenswrapper[5050]: I1123 15:00:00.045790 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5c79d794d7-xsz2b" Nov 23 15:00:00 crc kubenswrapper[5050]: I1123 15:00:00.172691 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29398500-97j52"] Nov 23 15:00:00 crc kubenswrapper[5050]: I1123 15:00:00.174108 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29398500-97j52" Nov 23 15:00:00 crc kubenswrapper[5050]: I1123 15:00:00.180647 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 23 15:00:00 crc kubenswrapper[5050]: I1123 15:00:00.188753 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29398500-97j52"] Nov 23 15:00:00 crc kubenswrapper[5050]: I1123 15:00:00.188998 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 23 15:00:00 crc kubenswrapper[5050]: I1123 15:00:00.315217 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0e3888fc-521a-4f7d-9f93-74ca490639a2-secret-volume\") pod \"collect-profiles-29398500-97j52\" (UID: \"0e3888fc-521a-4f7d-9f93-74ca490639a2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29398500-97j52" Nov 23 15:00:00 crc kubenswrapper[5050]: I1123 15:00:00.315902 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0e3888fc-521a-4f7d-9f93-74ca490639a2-config-volume\") pod \"collect-profiles-29398500-97j52\" (UID: \"0e3888fc-521a-4f7d-9f93-74ca490639a2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29398500-97j52" Nov 23 15:00:00 crc kubenswrapper[5050]: I1123 15:00:00.315974 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k28nr\" (UniqueName: \"kubernetes.io/projected/0e3888fc-521a-4f7d-9f93-74ca490639a2-kube-api-access-k28nr\") pod \"collect-profiles-29398500-97j52\" (UID: \"0e3888fc-521a-4f7d-9f93-74ca490639a2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29398500-97j52" Nov 23 15:00:00 crc kubenswrapper[5050]: I1123 15:00:00.408203 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5c79d794d7-xsz2b"] Nov 23 15:00:00 crc kubenswrapper[5050]: I1123 15:00:00.417723 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c79d794d7-xsz2b" event={"ID":"a8c801eb-e5f2-440d-836f-9cdef0f7c6fa","Type":"ContainerStarted","Data":"d8b8bf43ccb402b25eabc87fbe9eec846cb0eedc4f979ecebc125cffe0720011"} Nov 23 15:00:00 crc kubenswrapper[5050]: I1123 15:00:00.417997 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: 
\"kubernetes.io/configmap/0e3888fc-521a-4f7d-9f93-74ca490639a2-config-volume\") pod \"collect-profiles-29398500-97j52\" (UID: \"0e3888fc-521a-4f7d-9f93-74ca490639a2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29398500-97j52" Nov 23 15:00:00 crc kubenswrapper[5050]: I1123 15:00:00.418110 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k28nr\" (UniqueName: \"kubernetes.io/projected/0e3888fc-521a-4f7d-9f93-74ca490639a2-kube-api-access-k28nr\") pod \"collect-profiles-29398500-97j52\" (UID: \"0e3888fc-521a-4f7d-9f93-74ca490639a2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29398500-97j52" Nov 23 15:00:00 crc kubenswrapper[5050]: I1123 15:00:00.418167 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0e3888fc-521a-4f7d-9f93-74ca490639a2-secret-volume\") pod \"collect-profiles-29398500-97j52\" (UID: \"0e3888fc-521a-4f7d-9f93-74ca490639a2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29398500-97j52" Nov 23 15:00:00 crc kubenswrapper[5050]: I1123 15:00:00.419011 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0e3888fc-521a-4f7d-9f93-74ca490639a2-config-volume\") pod \"collect-profiles-29398500-97j52\" (UID: \"0e3888fc-521a-4f7d-9f93-74ca490639a2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29398500-97j52" Nov 23 15:00:00 crc kubenswrapper[5050]: I1123 15:00:00.422432 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0e3888fc-521a-4f7d-9f93-74ca490639a2-secret-volume\") pod \"collect-profiles-29398500-97j52\" (UID: \"0e3888fc-521a-4f7d-9f93-74ca490639a2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29398500-97j52" Nov 23 15:00:00 crc kubenswrapper[5050]: I1123 15:00:00.436834 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k28nr\" (UniqueName: \"kubernetes.io/projected/0e3888fc-521a-4f7d-9f93-74ca490639a2-kube-api-access-k28nr\") pod \"collect-profiles-29398500-97j52\" (UID: \"0e3888fc-521a-4f7d-9f93-74ca490639a2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29398500-97j52" Nov 23 15:00:00 crc kubenswrapper[5050]: I1123 15:00:00.494872 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29398500-97j52" Nov 23 15:00:00 crc kubenswrapper[5050]: W1123 15:00:00.753545 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0e3888fc_521a_4f7d_9f93_74ca490639a2.slice/crio-99a351c307caeb5226e7f54ddc94c677eff9a5905b642fad284da25d170fa644 WatchSource:0}: Error finding container 99a351c307caeb5226e7f54ddc94c677eff9a5905b642fad284da25d170fa644: Status 404 returned error can't find the container with id 99a351c307caeb5226e7f54ddc94c677eff9a5905b642fad284da25d170fa644 Nov 23 15:00:00 crc kubenswrapper[5050]: I1123 15:00:00.756068 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29398500-97j52"] Nov 23 15:00:01 crc kubenswrapper[5050]: I1123 15:00:01.428840 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29398500-97j52" event={"ID":"0e3888fc-521a-4f7d-9f93-74ca490639a2","Type":"ContainerStarted","Data":"99a351c307caeb5226e7f54ddc94c677eff9a5905b642fad284da25d170fa644"} Nov 23 15:00:02 crc kubenswrapper[5050]: I1123 15:00:02.438350 5050 generic.go:334] "Generic (PLEG): container finished" podID="a8c801eb-e5f2-440d-836f-9cdef0f7c6fa" containerID="58d4bc79fdfa548b8f4d2bf08e5563898ea857d43ee5c0327af29804b140da45" exitCode=0 Nov 23 15:00:02 crc kubenswrapper[5050]: I1123 15:00:02.438407 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c79d794d7-xsz2b" event={"ID":"a8c801eb-e5f2-440d-836f-9cdef0f7c6fa","Type":"ContainerDied","Data":"58d4bc79fdfa548b8f4d2bf08e5563898ea857d43ee5c0327af29804b140da45"} Nov 23 15:00:02 crc kubenswrapper[5050]: I1123 15:00:02.441182 5050 generic.go:334] "Generic (PLEG): container finished" podID="0e3888fc-521a-4f7d-9f93-74ca490639a2" containerID="138037c713ab0fa4192e9ebda67a28af602733a323c753d428a4d54cafb4b619" exitCode=0 Nov 23 15:00:02 crc kubenswrapper[5050]: I1123 15:00:02.441250 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29398500-97j52" event={"ID":"0e3888fc-521a-4f7d-9f93-74ca490639a2","Type":"ContainerDied","Data":"138037c713ab0fa4192e9ebda67a28af602733a323c753d428a4d54cafb4b619"} Nov 23 15:00:03 crc kubenswrapper[5050]: I1123 15:00:03.450950 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c79d794d7-xsz2b" event={"ID":"a8c801eb-e5f2-440d-836f-9cdef0f7c6fa","Type":"ContainerStarted","Data":"4daaea88e92dc85e936a5194161107e5608c9bc7c8b3460f9edf3b56131d5699"} Nov 23 15:00:03 crc kubenswrapper[5050]: I1123 15:00:03.451706 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5c79d794d7-xsz2b" Nov 23 15:00:03 crc kubenswrapper[5050]: I1123 15:00:03.479350 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5c79d794d7-xsz2b" podStartSLOduration=4.479331995 podStartE2EDuration="4.479331995s" podCreationTimestamp="2025-11-23 14:59:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 15:00:03.476540418 +0000 UTC m=+1098.643536903" watchObservedRunningTime="2025-11-23 15:00:03.479331995 +0000 UTC m=+1098.646328480" Nov 23 15:00:03 crc kubenswrapper[5050]: I1123 15:00:03.791827 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29398500-97j52" Nov 23 15:00:03 crc kubenswrapper[5050]: I1123 15:00:03.885095 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0e3888fc-521a-4f7d-9f93-74ca490639a2-config-volume\") pod \"0e3888fc-521a-4f7d-9f93-74ca490639a2\" (UID: \"0e3888fc-521a-4f7d-9f93-74ca490639a2\") " Nov 23 15:00:03 crc kubenswrapper[5050]: I1123 15:00:03.885254 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k28nr\" (UniqueName: \"kubernetes.io/projected/0e3888fc-521a-4f7d-9f93-74ca490639a2-kube-api-access-k28nr\") pod \"0e3888fc-521a-4f7d-9f93-74ca490639a2\" (UID: \"0e3888fc-521a-4f7d-9f93-74ca490639a2\") " Nov 23 15:00:03 crc kubenswrapper[5050]: I1123 15:00:03.885286 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0e3888fc-521a-4f7d-9f93-74ca490639a2-secret-volume\") pod \"0e3888fc-521a-4f7d-9f93-74ca490639a2\" (UID: \"0e3888fc-521a-4f7d-9f93-74ca490639a2\") " Nov 23 15:00:03 crc kubenswrapper[5050]: I1123 15:00:03.886267 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0e3888fc-521a-4f7d-9f93-74ca490639a2-config-volume" (OuterVolumeSpecName: "config-volume") pod "0e3888fc-521a-4f7d-9f93-74ca490639a2" (UID: "0e3888fc-521a-4f7d-9f93-74ca490639a2"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 15:00:03 crc kubenswrapper[5050]: I1123 15:00:03.893204 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0e3888fc-521a-4f7d-9f93-74ca490639a2-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "0e3888fc-521a-4f7d-9f93-74ca490639a2" (UID: "0e3888fc-521a-4f7d-9f93-74ca490639a2"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:00:03 crc kubenswrapper[5050]: I1123 15:00:03.893566 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0e3888fc-521a-4f7d-9f93-74ca490639a2-kube-api-access-k28nr" (OuterVolumeSpecName: "kube-api-access-k28nr") pod "0e3888fc-521a-4f7d-9f93-74ca490639a2" (UID: "0e3888fc-521a-4f7d-9f93-74ca490639a2"). InnerVolumeSpecName "kube-api-access-k28nr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:00:03 crc kubenswrapper[5050]: I1123 15:00:03.988130 5050 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0e3888fc-521a-4f7d-9f93-74ca490639a2-config-volume\") on node \"crc\" DevicePath \"\"" Nov 23 15:00:03 crc kubenswrapper[5050]: I1123 15:00:03.988169 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k28nr\" (UniqueName: \"kubernetes.io/projected/0e3888fc-521a-4f7d-9f93-74ca490639a2-kube-api-access-k28nr\") on node \"crc\" DevicePath \"\"" Nov 23 15:00:03 crc kubenswrapper[5050]: I1123 15:00:03.988184 5050 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0e3888fc-521a-4f7d-9f93-74ca490639a2-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 23 15:00:04 crc kubenswrapper[5050]: I1123 15:00:04.461703 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29398500-97j52" Nov 23 15:00:04 crc kubenswrapper[5050]: I1123 15:00:04.470366 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29398500-97j52" event={"ID":"0e3888fc-521a-4f7d-9f93-74ca490639a2","Type":"ContainerDied","Data":"99a351c307caeb5226e7f54ddc94c677eff9a5905b642fad284da25d170fa644"} Nov 23 15:00:04 crc kubenswrapper[5050]: I1123 15:00:04.470400 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="99a351c307caeb5226e7f54ddc94c677eff9a5905b642fad284da25d170fa644" Nov 23 15:00:06 crc kubenswrapper[5050]: I1123 15:00:06.497328 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-lzdd7" event={"ID":"b347d677-8b98-4402-b7b8-88fe0b080ac0","Type":"ContainerDied","Data":"baa900082661a9828476231a8149439c9dd2f733f95eb32f3e6ed2627e03453b"} Nov 23 15:00:06 crc kubenswrapper[5050]: I1123 15:00:06.497332 5050 generic.go:334] "Generic (PLEG): container finished" podID="b347d677-8b98-4402-b7b8-88fe0b080ac0" containerID="baa900082661a9828476231a8149439c9dd2f733f95eb32f3e6ed2627e03453b" exitCode=0 Nov 23 15:00:07 crc kubenswrapper[5050]: I1123 15:00:07.971477 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-lzdd7" Nov 23 15:00:08 crc kubenswrapper[5050]: I1123 15:00:08.060955 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b347d677-8b98-4402-b7b8-88fe0b080ac0-config-data\") pod \"b347d677-8b98-4402-b7b8-88fe0b080ac0\" (UID: \"b347d677-8b98-4402-b7b8-88fe0b080ac0\") " Nov 23 15:00:08 crc kubenswrapper[5050]: I1123 15:00:08.061480 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/b347d677-8b98-4402-b7b8-88fe0b080ac0-db-sync-config-data\") pod \"b347d677-8b98-4402-b7b8-88fe0b080ac0\" (UID: \"b347d677-8b98-4402-b7b8-88fe0b080ac0\") " Nov 23 15:00:08 crc kubenswrapper[5050]: I1123 15:00:08.061597 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2kwws\" (UniqueName: \"kubernetes.io/projected/b347d677-8b98-4402-b7b8-88fe0b080ac0-kube-api-access-2kwws\") pod \"b347d677-8b98-4402-b7b8-88fe0b080ac0\" (UID: \"b347d677-8b98-4402-b7b8-88fe0b080ac0\") " Nov 23 15:00:08 crc kubenswrapper[5050]: I1123 15:00:08.061654 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b347d677-8b98-4402-b7b8-88fe0b080ac0-combined-ca-bundle\") pod \"b347d677-8b98-4402-b7b8-88fe0b080ac0\" (UID: \"b347d677-8b98-4402-b7b8-88fe0b080ac0\") " Nov 23 15:00:08 crc kubenswrapper[5050]: I1123 15:00:08.068722 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b347d677-8b98-4402-b7b8-88fe0b080ac0-kube-api-access-2kwws" (OuterVolumeSpecName: "kube-api-access-2kwws") pod "b347d677-8b98-4402-b7b8-88fe0b080ac0" (UID: "b347d677-8b98-4402-b7b8-88fe0b080ac0"). InnerVolumeSpecName "kube-api-access-2kwws". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:00:08 crc kubenswrapper[5050]: I1123 15:00:08.070536 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b347d677-8b98-4402-b7b8-88fe0b080ac0-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "b347d677-8b98-4402-b7b8-88fe0b080ac0" (UID: "b347d677-8b98-4402-b7b8-88fe0b080ac0"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:00:08 crc kubenswrapper[5050]: I1123 15:00:08.090943 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b347d677-8b98-4402-b7b8-88fe0b080ac0-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b347d677-8b98-4402-b7b8-88fe0b080ac0" (UID: "b347d677-8b98-4402-b7b8-88fe0b080ac0"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:00:08 crc kubenswrapper[5050]: I1123 15:00:08.114511 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b347d677-8b98-4402-b7b8-88fe0b080ac0-config-data" (OuterVolumeSpecName: "config-data") pod "b347d677-8b98-4402-b7b8-88fe0b080ac0" (UID: "b347d677-8b98-4402-b7b8-88fe0b080ac0"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:00:08 crc kubenswrapper[5050]: I1123 15:00:08.160679 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Nov 23 15:00:08 crc kubenswrapper[5050]: I1123 15:00:08.164640 5050 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b347d677-8b98-4402-b7b8-88fe0b080ac0-config-data\") on node \"crc\" DevicePath \"\"" Nov 23 15:00:08 crc kubenswrapper[5050]: I1123 15:00:08.164698 5050 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/b347d677-8b98-4402-b7b8-88fe0b080ac0-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 23 15:00:08 crc kubenswrapper[5050]: I1123 15:00:08.164720 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2kwws\" (UniqueName: \"kubernetes.io/projected/b347d677-8b98-4402-b7b8-88fe0b080ac0-kube-api-access-2kwws\") on node \"crc\" DevicePath \"\"" Nov 23 15:00:08 crc kubenswrapper[5050]: I1123 15:00:08.164741 5050 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b347d677-8b98-4402-b7b8-88fe0b080ac0-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 15:00:08 crc kubenswrapper[5050]: I1123 15:00:08.308866 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Nov 23 15:00:08 crc kubenswrapper[5050]: I1123 15:00:08.519835 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-lzdd7" event={"ID":"b347d677-8b98-4402-b7b8-88fe0b080ac0","Type":"ContainerDied","Data":"fd53c99d558cb19f8f041b810f15159ad9e243dfca1bda3d6b797e01a445079a"} Nov 23 15:00:08 crc kubenswrapper[5050]: I1123 15:00:08.519886 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fd53c99d558cb19f8f041b810f15159ad9e243dfca1bda3d6b797e01a445079a" Nov 23 15:00:08 crc kubenswrapper[5050]: I1123 15:00:08.520409 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-lzdd7" Nov 23 15:00:08 crc kubenswrapper[5050]: I1123 15:00:08.970224 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5c79d794d7-xsz2b"] Nov 23 15:00:08 crc kubenswrapper[5050]: I1123 15:00:08.971002 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5c79d794d7-xsz2b" podUID="a8c801eb-e5f2-440d-836f-9cdef0f7c6fa" containerName="dnsmasq-dns" containerID="cri-o://4daaea88e92dc85e936a5194161107e5608c9bc7c8b3460f9edf3b56131d5699" gracePeriod=10 Nov 23 15:00:08 crc kubenswrapper[5050]: I1123 15:00:08.973615 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5c79d794d7-xsz2b" Nov 23 15:00:09 crc kubenswrapper[5050]: I1123 15:00:09.028758 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5f59b8f679-58mlp"] Nov 23 15:00:09 crc kubenswrapper[5050]: E1123 15:00:09.029219 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0e3888fc-521a-4f7d-9f93-74ca490639a2" containerName="collect-profiles" Nov 23 15:00:09 crc kubenswrapper[5050]: I1123 15:00:09.029238 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="0e3888fc-521a-4f7d-9f93-74ca490639a2" containerName="collect-profiles" Nov 23 15:00:09 crc kubenswrapper[5050]: E1123 15:00:09.029248 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b347d677-8b98-4402-b7b8-88fe0b080ac0" containerName="glance-db-sync" Nov 23 15:00:09 crc kubenswrapper[5050]: I1123 15:00:09.029254 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="b347d677-8b98-4402-b7b8-88fe0b080ac0" containerName="glance-db-sync" Nov 23 15:00:09 crc kubenswrapper[5050]: I1123 15:00:09.029412 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="b347d677-8b98-4402-b7b8-88fe0b080ac0" containerName="glance-db-sync" Nov 23 15:00:09 crc kubenswrapper[5050]: I1123 15:00:09.029436 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="0e3888fc-521a-4f7d-9f93-74ca490639a2" containerName="collect-profiles" Nov 23 15:00:09 crc kubenswrapper[5050]: I1123 15:00:09.030376 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5f59b8f679-58mlp" Nov 23 15:00:09 crc kubenswrapper[5050]: I1123 15:00:09.049390 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5f59b8f679-58mlp"] Nov 23 15:00:09 crc kubenswrapper[5050]: I1123 15:00:09.086611 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/245f4876-8b58-42ff-864d-a9bda0fd9c32-config\") pod \"dnsmasq-dns-5f59b8f679-58mlp\" (UID: \"245f4876-8b58-42ff-864d-a9bda0fd9c32\") " pod="openstack/dnsmasq-dns-5f59b8f679-58mlp" Nov 23 15:00:09 crc kubenswrapper[5050]: I1123 15:00:09.086677 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/245f4876-8b58-42ff-864d-a9bda0fd9c32-ovsdbserver-nb\") pod \"dnsmasq-dns-5f59b8f679-58mlp\" (UID: \"245f4876-8b58-42ff-864d-a9bda0fd9c32\") " pod="openstack/dnsmasq-dns-5f59b8f679-58mlp" Nov 23 15:00:09 crc kubenswrapper[5050]: I1123 15:00:09.086727 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-64mvb\" (UniqueName: \"kubernetes.io/projected/245f4876-8b58-42ff-864d-a9bda0fd9c32-kube-api-access-64mvb\") pod \"dnsmasq-dns-5f59b8f679-58mlp\" (UID: \"245f4876-8b58-42ff-864d-a9bda0fd9c32\") " pod="openstack/dnsmasq-dns-5f59b8f679-58mlp" Nov 23 15:00:09 crc kubenswrapper[5050]: I1123 15:00:09.086818 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/245f4876-8b58-42ff-864d-a9bda0fd9c32-dns-swift-storage-0\") pod \"dnsmasq-dns-5f59b8f679-58mlp\" (UID: \"245f4876-8b58-42ff-864d-a9bda0fd9c32\") " pod="openstack/dnsmasq-dns-5f59b8f679-58mlp" Nov 23 15:00:09 crc kubenswrapper[5050]: I1123 15:00:09.086911 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/245f4876-8b58-42ff-864d-a9bda0fd9c32-ovsdbserver-sb\") pod \"dnsmasq-dns-5f59b8f679-58mlp\" (UID: \"245f4876-8b58-42ff-864d-a9bda0fd9c32\") " pod="openstack/dnsmasq-dns-5f59b8f679-58mlp" Nov 23 15:00:09 crc kubenswrapper[5050]: I1123 15:00:09.086934 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/245f4876-8b58-42ff-864d-a9bda0fd9c32-dns-svc\") pod \"dnsmasq-dns-5f59b8f679-58mlp\" (UID: \"245f4876-8b58-42ff-864d-a9bda0fd9c32\") " pod="openstack/dnsmasq-dns-5f59b8f679-58mlp" Nov 23 15:00:09 crc kubenswrapper[5050]: I1123 15:00:09.188645 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/245f4876-8b58-42ff-864d-a9bda0fd9c32-dns-swift-storage-0\") pod \"dnsmasq-dns-5f59b8f679-58mlp\" (UID: \"245f4876-8b58-42ff-864d-a9bda0fd9c32\") " pod="openstack/dnsmasq-dns-5f59b8f679-58mlp" Nov 23 15:00:09 crc kubenswrapper[5050]: I1123 15:00:09.188710 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/245f4876-8b58-42ff-864d-a9bda0fd9c32-ovsdbserver-sb\") pod \"dnsmasq-dns-5f59b8f679-58mlp\" (UID: \"245f4876-8b58-42ff-864d-a9bda0fd9c32\") " pod="openstack/dnsmasq-dns-5f59b8f679-58mlp" Nov 23 15:00:09 crc kubenswrapper[5050]: I1123 15:00:09.188732 5050 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/245f4876-8b58-42ff-864d-a9bda0fd9c32-dns-svc\") pod \"dnsmasq-dns-5f59b8f679-58mlp\" (UID: \"245f4876-8b58-42ff-864d-a9bda0fd9c32\") " pod="openstack/dnsmasq-dns-5f59b8f679-58mlp" Nov 23 15:00:09 crc kubenswrapper[5050]: I1123 15:00:09.188827 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/245f4876-8b58-42ff-864d-a9bda0fd9c32-config\") pod \"dnsmasq-dns-5f59b8f679-58mlp\" (UID: \"245f4876-8b58-42ff-864d-a9bda0fd9c32\") " pod="openstack/dnsmasq-dns-5f59b8f679-58mlp" Nov 23 15:00:09 crc kubenswrapper[5050]: I1123 15:00:09.188853 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/245f4876-8b58-42ff-864d-a9bda0fd9c32-ovsdbserver-nb\") pod \"dnsmasq-dns-5f59b8f679-58mlp\" (UID: \"245f4876-8b58-42ff-864d-a9bda0fd9c32\") " pod="openstack/dnsmasq-dns-5f59b8f679-58mlp" Nov 23 15:00:09 crc kubenswrapper[5050]: I1123 15:00:09.188895 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-64mvb\" (UniqueName: \"kubernetes.io/projected/245f4876-8b58-42ff-864d-a9bda0fd9c32-kube-api-access-64mvb\") pod \"dnsmasq-dns-5f59b8f679-58mlp\" (UID: \"245f4876-8b58-42ff-864d-a9bda0fd9c32\") " pod="openstack/dnsmasq-dns-5f59b8f679-58mlp" Nov 23 15:00:09 crc kubenswrapper[5050]: I1123 15:00:09.189867 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/245f4876-8b58-42ff-864d-a9bda0fd9c32-ovsdbserver-sb\") pod \"dnsmasq-dns-5f59b8f679-58mlp\" (UID: \"245f4876-8b58-42ff-864d-a9bda0fd9c32\") " pod="openstack/dnsmasq-dns-5f59b8f679-58mlp" Nov 23 15:00:09 crc kubenswrapper[5050]: I1123 15:00:09.190256 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/245f4876-8b58-42ff-864d-a9bda0fd9c32-dns-swift-storage-0\") pod \"dnsmasq-dns-5f59b8f679-58mlp\" (UID: \"245f4876-8b58-42ff-864d-a9bda0fd9c32\") " pod="openstack/dnsmasq-dns-5f59b8f679-58mlp" Nov 23 15:00:09 crc kubenswrapper[5050]: I1123 15:00:09.196111 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/245f4876-8b58-42ff-864d-a9bda0fd9c32-dns-svc\") pod \"dnsmasq-dns-5f59b8f679-58mlp\" (UID: \"245f4876-8b58-42ff-864d-a9bda0fd9c32\") " pod="openstack/dnsmasq-dns-5f59b8f679-58mlp" Nov 23 15:00:09 crc kubenswrapper[5050]: I1123 15:00:09.198060 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/245f4876-8b58-42ff-864d-a9bda0fd9c32-config\") pod \"dnsmasq-dns-5f59b8f679-58mlp\" (UID: \"245f4876-8b58-42ff-864d-a9bda0fd9c32\") " pod="openstack/dnsmasq-dns-5f59b8f679-58mlp" Nov 23 15:00:09 crc kubenswrapper[5050]: I1123 15:00:09.198271 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/245f4876-8b58-42ff-864d-a9bda0fd9c32-ovsdbserver-nb\") pod \"dnsmasq-dns-5f59b8f679-58mlp\" (UID: \"245f4876-8b58-42ff-864d-a9bda0fd9c32\") " pod="openstack/dnsmasq-dns-5f59b8f679-58mlp" Nov 23 15:00:09 crc kubenswrapper[5050]: I1123 15:00:09.213628 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-64mvb\" (UniqueName: 
\"kubernetes.io/projected/245f4876-8b58-42ff-864d-a9bda0fd9c32-kube-api-access-64mvb\") pod \"dnsmasq-dns-5f59b8f679-58mlp\" (UID: \"245f4876-8b58-42ff-864d-a9bda0fd9c32\") " pod="openstack/dnsmasq-dns-5f59b8f679-58mlp" Nov 23 15:00:09 crc kubenswrapper[5050]: I1123 15:00:09.395576 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5f59b8f679-58mlp" Nov 23 15:00:09 crc kubenswrapper[5050]: I1123 15:00:09.478978 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5c79d794d7-xsz2b" Nov 23 15:00:09 crc kubenswrapper[5050]: I1123 15:00:09.537021 5050 generic.go:334] "Generic (PLEG): container finished" podID="a8c801eb-e5f2-440d-836f-9cdef0f7c6fa" containerID="4daaea88e92dc85e936a5194161107e5608c9bc7c8b3460f9edf3b56131d5699" exitCode=0 Nov 23 15:00:09 crc kubenswrapper[5050]: I1123 15:00:09.537070 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c79d794d7-xsz2b" event={"ID":"a8c801eb-e5f2-440d-836f-9cdef0f7c6fa","Type":"ContainerDied","Data":"4daaea88e92dc85e936a5194161107e5608c9bc7c8b3460f9edf3b56131d5699"} Nov 23 15:00:09 crc kubenswrapper[5050]: I1123 15:00:09.537105 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c79d794d7-xsz2b" event={"ID":"a8c801eb-e5f2-440d-836f-9cdef0f7c6fa","Type":"ContainerDied","Data":"d8b8bf43ccb402b25eabc87fbe9eec846cb0eedc4f979ecebc125cffe0720011"} Nov 23 15:00:09 crc kubenswrapper[5050]: I1123 15:00:09.537105 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5c79d794d7-xsz2b" Nov 23 15:00:09 crc kubenswrapper[5050]: I1123 15:00:09.537123 5050 scope.go:117] "RemoveContainer" containerID="4daaea88e92dc85e936a5194161107e5608c9bc7c8b3460f9edf3b56131d5699" Nov 23 15:00:09 crc kubenswrapper[5050]: I1123 15:00:09.565247 5050 scope.go:117] "RemoveContainer" containerID="58d4bc79fdfa548b8f4d2bf08e5563898ea857d43ee5c0327af29804b140da45" Nov 23 15:00:10 crc kubenswrapper[5050]: I1123 15:00:09.595382 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qxqfr\" (UniqueName: \"kubernetes.io/projected/a8c801eb-e5f2-440d-836f-9cdef0f7c6fa-kube-api-access-qxqfr\") pod \"a8c801eb-e5f2-440d-836f-9cdef0f7c6fa\" (UID: \"a8c801eb-e5f2-440d-836f-9cdef0f7c6fa\") " Nov 23 15:00:10 crc kubenswrapper[5050]: I1123 15:00:09.595497 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a8c801eb-e5f2-440d-836f-9cdef0f7c6fa-config\") pod \"a8c801eb-e5f2-440d-836f-9cdef0f7c6fa\" (UID: \"a8c801eb-e5f2-440d-836f-9cdef0f7c6fa\") " Nov 23 15:00:10 crc kubenswrapper[5050]: I1123 15:00:09.595527 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a8c801eb-e5f2-440d-836f-9cdef0f7c6fa-ovsdbserver-sb\") pod \"a8c801eb-e5f2-440d-836f-9cdef0f7c6fa\" (UID: \"a8c801eb-e5f2-440d-836f-9cdef0f7c6fa\") " Nov 23 15:00:10 crc kubenswrapper[5050]: I1123 15:00:09.595670 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a8c801eb-e5f2-440d-836f-9cdef0f7c6fa-ovsdbserver-nb\") pod \"a8c801eb-e5f2-440d-836f-9cdef0f7c6fa\" (UID: \"a8c801eb-e5f2-440d-836f-9cdef0f7c6fa\") " Nov 23 15:00:10 crc kubenswrapper[5050]: I1123 15:00:09.595698 5050 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a8c801eb-e5f2-440d-836f-9cdef0f7c6fa-dns-svc\") pod \"a8c801eb-e5f2-440d-836f-9cdef0f7c6fa\" (UID: \"a8c801eb-e5f2-440d-836f-9cdef0f7c6fa\") " Nov 23 15:00:10 crc kubenswrapper[5050]: I1123 15:00:09.595717 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a8c801eb-e5f2-440d-836f-9cdef0f7c6fa-dns-swift-storage-0\") pod \"a8c801eb-e5f2-440d-836f-9cdef0f7c6fa\" (UID: \"a8c801eb-e5f2-440d-836f-9cdef0f7c6fa\") " Nov 23 15:00:10 crc kubenswrapper[5050]: I1123 15:00:09.602534 5050 scope.go:117] "RemoveContainer" containerID="4daaea88e92dc85e936a5194161107e5608c9bc7c8b3460f9edf3b56131d5699" Nov 23 15:00:10 crc kubenswrapper[5050]: E1123 15:00:09.603044 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4daaea88e92dc85e936a5194161107e5608c9bc7c8b3460f9edf3b56131d5699\": container with ID starting with 4daaea88e92dc85e936a5194161107e5608c9bc7c8b3460f9edf3b56131d5699 not found: ID does not exist" containerID="4daaea88e92dc85e936a5194161107e5608c9bc7c8b3460f9edf3b56131d5699" Nov 23 15:00:10 crc kubenswrapper[5050]: I1123 15:00:09.603085 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4daaea88e92dc85e936a5194161107e5608c9bc7c8b3460f9edf3b56131d5699"} err="failed to get container status \"4daaea88e92dc85e936a5194161107e5608c9bc7c8b3460f9edf3b56131d5699\": rpc error: code = NotFound desc = could not find container \"4daaea88e92dc85e936a5194161107e5608c9bc7c8b3460f9edf3b56131d5699\": container with ID starting with 4daaea88e92dc85e936a5194161107e5608c9bc7c8b3460f9edf3b56131d5699 not found: ID does not exist" Nov 23 15:00:10 crc kubenswrapper[5050]: I1123 15:00:09.603117 5050 scope.go:117] "RemoveContainer" containerID="58d4bc79fdfa548b8f4d2bf08e5563898ea857d43ee5c0327af29804b140da45" Nov 23 15:00:10 crc kubenswrapper[5050]: E1123 15:00:09.603360 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"58d4bc79fdfa548b8f4d2bf08e5563898ea857d43ee5c0327af29804b140da45\": container with ID starting with 58d4bc79fdfa548b8f4d2bf08e5563898ea857d43ee5c0327af29804b140da45 not found: ID does not exist" containerID="58d4bc79fdfa548b8f4d2bf08e5563898ea857d43ee5c0327af29804b140da45" Nov 23 15:00:10 crc kubenswrapper[5050]: I1123 15:00:09.603388 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"58d4bc79fdfa548b8f4d2bf08e5563898ea857d43ee5c0327af29804b140da45"} err="failed to get container status \"58d4bc79fdfa548b8f4d2bf08e5563898ea857d43ee5c0327af29804b140da45\": rpc error: code = NotFound desc = could not find container \"58d4bc79fdfa548b8f4d2bf08e5563898ea857d43ee5c0327af29804b140da45\": container with ID starting with 58d4bc79fdfa548b8f4d2bf08e5563898ea857d43ee5c0327af29804b140da45 not found: ID does not exist" Nov 23 15:00:10 crc kubenswrapper[5050]: I1123 15:00:09.626235 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a8c801eb-e5f2-440d-836f-9cdef0f7c6fa-kube-api-access-qxqfr" (OuterVolumeSpecName: "kube-api-access-qxqfr") pod "a8c801eb-e5f2-440d-836f-9cdef0f7c6fa" (UID: "a8c801eb-e5f2-440d-836f-9cdef0f7c6fa"). InnerVolumeSpecName "kube-api-access-qxqfr". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:00:10 crc kubenswrapper[5050]: I1123 15:00:09.671635 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a8c801eb-e5f2-440d-836f-9cdef0f7c6fa-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "a8c801eb-e5f2-440d-836f-9cdef0f7c6fa" (UID: "a8c801eb-e5f2-440d-836f-9cdef0f7c6fa"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 15:00:10 crc kubenswrapper[5050]: I1123 15:00:09.723109 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a8c801eb-e5f2-440d-836f-9cdef0f7c6fa-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "a8c801eb-e5f2-440d-836f-9cdef0f7c6fa" (UID: "a8c801eb-e5f2-440d-836f-9cdef0f7c6fa"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 15:00:10 crc kubenswrapper[5050]: I1123 15:00:09.723502 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qxqfr\" (UniqueName: \"kubernetes.io/projected/a8c801eb-e5f2-440d-836f-9cdef0f7c6fa-kube-api-access-qxqfr\") on node \"crc\" DevicePath \"\"" Nov 23 15:00:10 crc kubenswrapper[5050]: I1123 15:00:09.723537 5050 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a8c801eb-e5f2-440d-836f-9cdef0f7c6fa-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 23 15:00:10 crc kubenswrapper[5050]: I1123 15:00:09.723558 5050 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a8c801eb-e5f2-440d-836f-9cdef0f7c6fa-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 23 15:00:10 crc kubenswrapper[5050]: I1123 15:00:09.726847 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a8c801eb-e5f2-440d-836f-9cdef0f7c6fa-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "a8c801eb-e5f2-440d-836f-9cdef0f7c6fa" (UID: "a8c801eb-e5f2-440d-836f-9cdef0f7c6fa"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 15:00:10 crc kubenswrapper[5050]: I1123 15:00:09.746512 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a8c801eb-e5f2-440d-836f-9cdef0f7c6fa-config" (OuterVolumeSpecName: "config") pod "a8c801eb-e5f2-440d-836f-9cdef0f7c6fa" (UID: "a8c801eb-e5f2-440d-836f-9cdef0f7c6fa"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 15:00:10 crc kubenswrapper[5050]: I1123 15:00:09.755339 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a8c801eb-e5f2-440d-836f-9cdef0f7c6fa-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "a8c801eb-e5f2-440d-836f-9cdef0f7c6fa" (UID: "a8c801eb-e5f2-440d-836f-9cdef0f7c6fa"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 15:00:10 crc kubenswrapper[5050]: I1123 15:00:09.825250 5050 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a8c801eb-e5f2-440d-836f-9cdef0f7c6fa-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 23 15:00:10 crc kubenswrapper[5050]: I1123 15:00:09.825321 5050 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a8c801eb-e5f2-440d-836f-9cdef0f7c6fa-config\") on node \"crc\" DevicePath \"\"" Nov 23 15:00:10 crc kubenswrapper[5050]: I1123 15:00:09.825332 5050 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a8c801eb-e5f2-440d-836f-9cdef0f7c6fa-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 23 15:00:10 crc kubenswrapper[5050]: I1123 15:00:09.879021 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5c79d794d7-xsz2b"] Nov 23 15:00:10 crc kubenswrapper[5050]: I1123 15:00:09.889657 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5c79d794d7-xsz2b"] Nov 23 15:00:10 crc kubenswrapper[5050]: I1123 15:00:10.378440 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-create-vsf8d"] Nov 23 15:00:10 crc kubenswrapper[5050]: E1123 15:00:10.378938 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a8c801eb-e5f2-440d-836f-9cdef0f7c6fa" containerName="dnsmasq-dns" Nov 23 15:00:10 crc kubenswrapper[5050]: I1123 15:00:10.378956 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="a8c801eb-e5f2-440d-836f-9cdef0f7c6fa" containerName="dnsmasq-dns" Nov 23 15:00:10 crc kubenswrapper[5050]: E1123 15:00:10.378974 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a8c801eb-e5f2-440d-836f-9cdef0f7c6fa" containerName="init" Nov 23 15:00:10 crc kubenswrapper[5050]: I1123 15:00:10.378980 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="a8c801eb-e5f2-440d-836f-9cdef0f7c6fa" containerName="init" Nov 23 15:00:10 crc kubenswrapper[5050]: I1123 15:00:10.379128 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="a8c801eb-e5f2-440d-836f-9cdef0f7c6fa" containerName="dnsmasq-dns" Nov 23 15:00:10 crc kubenswrapper[5050]: I1123 15:00:10.379876 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-vsf8d" Nov 23 15:00:10 crc kubenswrapper[5050]: I1123 15:00:10.408368 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-vsf8d"] Nov 23 15:00:10 crc kubenswrapper[5050]: I1123 15:00:10.471223 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-create-rzl6b"] Nov 23 15:00:10 crc kubenswrapper[5050]: I1123 15:00:10.476587 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-create-rzl6b" Nov 23 15:00:10 crc kubenswrapper[5050]: I1123 15:00:10.503624 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-rzl6b"] Nov 23 15:00:10 crc kubenswrapper[5050]: I1123 15:00:10.538846 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7f33bb25-e6da-43a1-8afd-08a47636b172-operator-scripts\") pod \"cinder-db-create-vsf8d\" (UID: \"7f33bb25-e6da-43a1-8afd-08a47636b172\") " pod="openstack/cinder-db-create-vsf8d" Nov 23 15:00:10 crc kubenswrapper[5050]: I1123 15:00:10.538952 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jkrks\" (UniqueName: \"kubernetes.io/projected/7f33bb25-e6da-43a1-8afd-08a47636b172-kube-api-access-jkrks\") pod \"cinder-db-create-vsf8d\" (UID: \"7f33bb25-e6da-43a1-8afd-08a47636b172\") " pod="openstack/cinder-db-create-vsf8d" Nov 23 15:00:10 crc kubenswrapper[5050]: I1123 15:00:10.552391 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5f59b8f679-58mlp"] Nov 23 15:00:10 crc kubenswrapper[5050]: I1123 15:00:10.595672 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-255c-account-create-9gdvk"] Nov 23 15:00:10 crc kubenswrapper[5050]: I1123 15:00:10.596910 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-255c-account-create-9gdvk" Nov 23 15:00:10 crc kubenswrapper[5050]: I1123 15:00:10.601231 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-db-secret" Nov 23 15:00:10 crc kubenswrapper[5050]: I1123 15:00:10.620015 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-255c-account-create-9gdvk"] Nov 23 15:00:10 crc kubenswrapper[5050]: I1123 15:00:10.640488 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jkrks\" (UniqueName: \"kubernetes.io/projected/7f33bb25-e6da-43a1-8afd-08a47636b172-kube-api-access-jkrks\") pod \"cinder-db-create-vsf8d\" (UID: \"7f33bb25-e6da-43a1-8afd-08a47636b172\") " pod="openstack/cinder-db-create-vsf8d" Nov 23 15:00:10 crc kubenswrapper[5050]: I1123 15:00:10.640551 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e3ace8e3-1201-45e0-b9f7-7a8e272cef48-operator-scripts\") pod \"barbican-db-create-rzl6b\" (UID: \"e3ace8e3-1201-45e0-b9f7-7a8e272cef48\") " pod="openstack/barbican-db-create-rzl6b" Nov 23 15:00:10 crc kubenswrapper[5050]: I1123 15:00:10.640625 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nlvsz\" (UniqueName: \"kubernetes.io/projected/e3ace8e3-1201-45e0-b9f7-7a8e272cef48-kube-api-access-nlvsz\") pod \"barbican-db-create-rzl6b\" (UID: \"e3ace8e3-1201-45e0-b9f7-7a8e272cef48\") " pod="openstack/barbican-db-create-rzl6b" Nov 23 15:00:10 crc kubenswrapper[5050]: I1123 15:00:10.640697 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7f33bb25-e6da-43a1-8afd-08a47636b172-operator-scripts\") pod \"cinder-db-create-vsf8d\" (UID: \"7f33bb25-e6da-43a1-8afd-08a47636b172\") " pod="openstack/cinder-db-create-vsf8d" Nov 23 15:00:10 crc kubenswrapper[5050]: I1123 15:00:10.641576 5050 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7f33bb25-e6da-43a1-8afd-08a47636b172-operator-scripts\") pod \"cinder-db-create-vsf8d\" (UID: \"7f33bb25-e6da-43a1-8afd-08a47636b172\") " pod="openstack/cinder-db-create-vsf8d" Nov 23 15:00:10 crc kubenswrapper[5050]: I1123 15:00:10.687457 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-6ffa-account-create-pf9hk"] Nov 23 15:00:10 crc kubenswrapper[5050]: I1123 15:00:10.688520 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-6ffa-account-create-pf9hk" Nov 23 15:00:10 crc kubenswrapper[5050]: I1123 15:00:10.693899 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-db-secret" Nov 23 15:00:10 crc kubenswrapper[5050]: I1123 15:00:10.696086 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jkrks\" (UniqueName: \"kubernetes.io/projected/7f33bb25-e6da-43a1-8afd-08a47636b172-kube-api-access-jkrks\") pod \"cinder-db-create-vsf8d\" (UID: \"7f33bb25-e6da-43a1-8afd-08a47636b172\") " pod="openstack/cinder-db-create-vsf8d" Nov 23 15:00:10 crc kubenswrapper[5050]: I1123 15:00:10.715045 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-6ffa-account-create-pf9hk"] Nov 23 15:00:10 crc kubenswrapper[5050]: I1123 15:00:10.717697 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-vsf8d" Nov 23 15:00:10 crc kubenswrapper[5050]: I1123 15:00:10.743513 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e3ace8e3-1201-45e0-b9f7-7a8e272cef48-operator-scripts\") pod \"barbican-db-create-rzl6b\" (UID: \"e3ace8e3-1201-45e0-b9f7-7a8e272cef48\") " pod="openstack/barbican-db-create-rzl6b" Nov 23 15:00:10 crc kubenswrapper[5050]: I1123 15:00:10.743595 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e9b2ddd2-95ef-4086-b1dc-b13acef90cb5-operator-scripts\") pod \"cinder-255c-account-create-9gdvk\" (UID: \"e9b2ddd2-95ef-4086-b1dc-b13acef90cb5\") " pod="openstack/cinder-255c-account-create-9gdvk" Nov 23 15:00:10 crc kubenswrapper[5050]: I1123 15:00:10.743654 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nlvsz\" (UniqueName: \"kubernetes.io/projected/e3ace8e3-1201-45e0-b9f7-7a8e272cef48-kube-api-access-nlvsz\") pod \"barbican-db-create-rzl6b\" (UID: \"e3ace8e3-1201-45e0-b9f7-7a8e272cef48\") " pod="openstack/barbican-db-create-rzl6b" Nov 23 15:00:10 crc kubenswrapper[5050]: I1123 15:00:10.743754 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xqg2s\" (UniqueName: \"kubernetes.io/projected/e9b2ddd2-95ef-4086-b1dc-b13acef90cb5-kube-api-access-xqg2s\") pod \"cinder-255c-account-create-9gdvk\" (UID: \"e9b2ddd2-95ef-4086-b1dc-b13acef90cb5\") " pod="openstack/cinder-255c-account-create-9gdvk" Nov 23 15:00:10 crc kubenswrapper[5050]: I1123 15:00:10.744924 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e3ace8e3-1201-45e0-b9f7-7a8e272cef48-operator-scripts\") pod \"barbican-db-create-rzl6b\" (UID: \"e3ace8e3-1201-45e0-b9f7-7a8e272cef48\") " 
pod="openstack/barbican-db-create-rzl6b" Nov 23 15:00:10 crc kubenswrapper[5050]: I1123 15:00:10.774405 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nlvsz\" (UniqueName: \"kubernetes.io/projected/e3ace8e3-1201-45e0-b9f7-7a8e272cef48-kube-api-access-nlvsz\") pod \"barbican-db-create-rzl6b\" (UID: \"e3ace8e3-1201-45e0-b9f7-7a8e272cef48\") " pod="openstack/barbican-db-create-rzl6b" Nov 23 15:00:10 crc kubenswrapper[5050]: I1123 15:00:10.795166 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-rzl6b" Nov 23 15:00:10 crc kubenswrapper[5050]: I1123 15:00:10.846177 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e9b2ddd2-95ef-4086-b1dc-b13acef90cb5-operator-scripts\") pod \"cinder-255c-account-create-9gdvk\" (UID: \"e9b2ddd2-95ef-4086-b1dc-b13acef90cb5\") " pod="openstack/cinder-255c-account-create-9gdvk" Nov 23 15:00:10 crc kubenswrapper[5050]: I1123 15:00:10.846276 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xqg2s\" (UniqueName: \"kubernetes.io/projected/e9b2ddd2-95ef-4086-b1dc-b13acef90cb5-kube-api-access-xqg2s\") pod \"cinder-255c-account-create-9gdvk\" (UID: \"e9b2ddd2-95ef-4086-b1dc-b13acef90cb5\") " pod="openstack/cinder-255c-account-create-9gdvk" Nov 23 15:00:10 crc kubenswrapper[5050]: I1123 15:00:10.846366 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d1c0ea55-9eb9-4e1c-bc1f-80ccf4c9d2cd-operator-scripts\") pod \"barbican-6ffa-account-create-pf9hk\" (UID: \"d1c0ea55-9eb9-4e1c-bc1f-80ccf4c9d2cd\") " pod="openstack/barbican-6ffa-account-create-pf9hk" Nov 23 15:00:10 crc kubenswrapper[5050]: I1123 15:00:10.846392 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wrktb\" (UniqueName: \"kubernetes.io/projected/d1c0ea55-9eb9-4e1c-bc1f-80ccf4c9d2cd-kube-api-access-wrktb\") pod \"barbican-6ffa-account-create-pf9hk\" (UID: \"d1c0ea55-9eb9-4e1c-bc1f-80ccf4c9d2cd\") " pod="openstack/barbican-6ffa-account-create-pf9hk" Nov 23 15:00:10 crc kubenswrapper[5050]: I1123 15:00:10.847263 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e9b2ddd2-95ef-4086-b1dc-b13acef90cb5-operator-scripts\") pod \"cinder-255c-account-create-9gdvk\" (UID: \"e9b2ddd2-95ef-4086-b1dc-b13acef90cb5\") " pod="openstack/cinder-255c-account-create-9gdvk" Nov 23 15:00:10 crc kubenswrapper[5050]: I1123 15:00:10.874317 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-create-sdk6z"] Nov 23 15:00:10 crc kubenswrapper[5050]: I1123 15:00:10.878240 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-create-sdk6z" Nov 23 15:00:10 crc kubenswrapper[5050]: I1123 15:00:10.886433 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xqg2s\" (UniqueName: \"kubernetes.io/projected/e9b2ddd2-95ef-4086-b1dc-b13acef90cb5-kube-api-access-xqg2s\") pod \"cinder-255c-account-create-9gdvk\" (UID: \"e9b2ddd2-95ef-4086-b1dc-b13acef90cb5\") " pod="openstack/cinder-255c-account-create-9gdvk" Nov 23 15:00:10 crc kubenswrapper[5050]: I1123 15:00:10.907178 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-sdk6z"] Nov 23 15:00:10 crc kubenswrapper[5050]: I1123 15:00:10.919307 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-45e2-account-create-hj4jm"] Nov 23 15:00:10 crc kubenswrapper[5050]: I1123 15:00:10.920881 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-45e2-account-create-hj4jm" Nov 23 15:00:10 crc kubenswrapper[5050]: I1123 15:00:10.922978 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-db-secret" Nov 23 15:00:10 crc kubenswrapper[5050]: I1123 15:00:10.926605 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-45e2-account-create-hj4jm"] Nov 23 15:00:10 crc kubenswrapper[5050]: I1123 15:00:10.957754 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bd7c0f96-8702-4589-b3f5-a6a164a38d4e-operator-scripts\") pod \"neutron-db-create-sdk6z\" (UID: \"bd7c0f96-8702-4589-b3f5-a6a164a38d4e\") " pod="openstack/neutron-db-create-sdk6z" Nov 23 15:00:10 crc kubenswrapper[5050]: I1123 15:00:10.957902 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s7fnn\" (UniqueName: \"kubernetes.io/projected/bd7c0f96-8702-4589-b3f5-a6a164a38d4e-kube-api-access-s7fnn\") pod \"neutron-db-create-sdk6z\" (UID: \"bd7c0f96-8702-4589-b3f5-a6a164a38d4e\") " pod="openstack/neutron-db-create-sdk6z" Nov 23 15:00:10 crc kubenswrapper[5050]: I1123 15:00:10.957950 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d1c0ea55-9eb9-4e1c-bc1f-80ccf4c9d2cd-operator-scripts\") pod \"barbican-6ffa-account-create-pf9hk\" (UID: \"d1c0ea55-9eb9-4e1c-bc1f-80ccf4c9d2cd\") " pod="openstack/barbican-6ffa-account-create-pf9hk" Nov 23 15:00:10 crc kubenswrapper[5050]: I1123 15:00:10.957986 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wrktb\" (UniqueName: \"kubernetes.io/projected/d1c0ea55-9eb9-4e1c-bc1f-80ccf4c9d2cd-kube-api-access-wrktb\") pod \"barbican-6ffa-account-create-pf9hk\" (UID: \"d1c0ea55-9eb9-4e1c-bc1f-80ccf4c9d2cd\") " pod="openstack/barbican-6ffa-account-create-pf9hk" Nov 23 15:00:10 crc kubenswrapper[5050]: I1123 15:00:10.959206 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d1c0ea55-9eb9-4e1c-bc1f-80ccf4c9d2cd-operator-scripts\") pod \"barbican-6ffa-account-create-pf9hk\" (UID: \"d1c0ea55-9eb9-4e1c-bc1f-80ccf4c9d2cd\") " pod="openstack/barbican-6ffa-account-create-pf9hk" Nov 23 15:00:10 crc kubenswrapper[5050]: I1123 15:00:10.987282 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wrktb\" (UniqueName: 
\"kubernetes.io/projected/d1c0ea55-9eb9-4e1c-bc1f-80ccf4c9d2cd-kube-api-access-wrktb\") pod \"barbican-6ffa-account-create-pf9hk\" (UID: \"d1c0ea55-9eb9-4e1c-bc1f-80ccf4c9d2cd\") " pod="openstack/barbican-6ffa-account-create-pf9hk" Nov 23 15:00:10 crc kubenswrapper[5050]: I1123 15:00:10.993491 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-sync-mzzv7"] Nov 23 15:00:11 crc kubenswrapper[5050]: I1123 15:00:10.998103 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-mzzv7" Nov 23 15:00:11 crc kubenswrapper[5050]: I1123 15:00:11.007952 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-hkfk8" Nov 23 15:00:11 crc kubenswrapper[5050]: I1123 15:00:11.008604 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 23 15:00:11 crc kubenswrapper[5050]: I1123 15:00:11.009245 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 23 15:00:11 crc kubenswrapper[5050]: I1123 15:00:11.012663 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 23 15:00:11 crc kubenswrapper[5050]: I1123 15:00:11.058773 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-mzzv7"] Nov 23 15:00:11 crc kubenswrapper[5050]: I1123 15:00:11.063311 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4a2308e0-8430-418a-b943-0f6bc4e22904-operator-scripts\") pod \"neutron-45e2-account-create-hj4jm\" (UID: \"4a2308e0-8430-418a-b943-0f6bc4e22904\") " pod="openstack/neutron-45e2-account-create-hj4jm" Nov 23 15:00:11 crc kubenswrapper[5050]: I1123 15:00:11.063503 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s7fnn\" (UniqueName: \"kubernetes.io/projected/bd7c0f96-8702-4589-b3f5-a6a164a38d4e-kube-api-access-s7fnn\") pod \"neutron-db-create-sdk6z\" (UID: \"bd7c0f96-8702-4589-b3f5-a6a164a38d4e\") " pod="openstack/neutron-db-create-sdk6z" Nov 23 15:00:11 crc kubenswrapper[5050]: I1123 15:00:11.063534 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wqsrr\" (UniqueName: \"kubernetes.io/projected/4a2308e0-8430-418a-b943-0f6bc4e22904-kube-api-access-wqsrr\") pod \"neutron-45e2-account-create-hj4jm\" (UID: \"4a2308e0-8430-418a-b943-0f6bc4e22904\") " pod="openstack/neutron-45e2-account-create-hj4jm" Nov 23 15:00:11 crc kubenswrapper[5050]: I1123 15:00:11.063594 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bd7c0f96-8702-4589-b3f5-a6a164a38d4e-operator-scripts\") pod \"neutron-db-create-sdk6z\" (UID: \"bd7c0f96-8702-4589-b3f5-a6a164a38d4e\") " pod="openstack/neutron-db-create-sdk6z" Nov 23 15:00:11 crc kubenswrapper[5050]: I1123 15:00:11.068260 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-255c-account-create-9gdvk" Nov 23 15:00:11 crc kubenswrapper[5050]: I1123 15:00:11.072472 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bd7c0f96-8702-4589-b3f5-a6a164a38d4e-operator-scripts\") pod \"neutron-db-create-sdk6z\" (UID: \"bd7c0f96-8702-4589-b3f5-a6a164a38d4e\") " pod="openstack/neutron-db-create-sdk6z" Nov 23 15:00:11 crc kubenswrapper[5050]: I1123 15:00:11.095219 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s7fnn\" (UniqueName: \"kubernetes.io/projected/bd7c0f96-8702-4589-b3f5-a6a164a38d4e-kube-api-access-s7fnn\") pod \"neutron-db-create-sdk6z\" (UID: \"bd7c0f96-8702-4589-b3f5-a6a164a38d4e\") " pod="openstack/neutron-db-create-sdk6z" Nov 23 15:00:11 crc kubenswrapper[5050]: I1123 15:00:11.100148 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-6ffa-account-create-pf9hk" Nov 23 15:00:11 crc kubenswrapper[5050]: I1123 15:00:11.165826 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/99652fba-7c5d-4aaa-8de4-e29e38027171-config-data\") pod \"keystone-db-sync-mzzv7\" (UID: \"99652fba-7c5d-4aaa-8de4-e29e38027171\") " pod="openstack/keystone-db-sync-mzzv7" Nov 23 15:00:11 crc kubenswrapper[5050]: I1123 15:00:11.165964 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4a2308e0-8430-418a-b943-0f6bc4e22904-operator-scripts\") pod \"neutron-45e2-account-create-hj4jm\" (UID: \"4a2308e0-8430-418a-b943-0f6bc4e22904\") " pod="openstack/neutron-45e2-account-create-hj4jm" Nov 23 15:00:11 crc kubenswrapper[5050]: I1123 15:00:11.166001 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wqsrr\" (UniqueName: \"kubernetes.io/projected/4a2308e0-8430-418a-b943-0f6bc4e22904-kube-api-access-wqsrr\") pod \"neutron-45e2-account-create-hj4jm\" (UID: \"4a2308e0-8430-418a-b943-0f6bc4e22904\") " pod="openstack/neutron-45e2-account-create-hj4jm" Nov 23 15:00:11 crc kubenswrapper[5050]: I1123 15:00:11.166063 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/99652fba-7c5d-4aaa-8de4-e29e38027171-combined-ca-bundle\") pod \"keystone-db-sync-mzzv7\" (UID: \"99652fba-7c5d-4aaa-8de4-e29e38027171\") " pod="openstack/keystone-db-sync-mzzv7" Nov 23 15:00:11 crc kubenswrapper[5050]: I1123 15:00:11.166088 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-brfb2\" (UniqueName: \"kubernetes.io/projected/99652fba-7c5d-4aaa-8de4-e29e38027171-kube-api-access-brfb2\") pod \"keystone-db-sync-mzzv7\" (UID: \"99652fba-7c5d-4aaa-8de4-e29e38027171\") " pod="openstack/keystone-db-sync-mzzv7" Nov 23 15:00:11 crc kubenswrapper[5050]: I1123 15:00:11.167083 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4a2308e0-8430-418a-b943-0f6bc4e22904-operator-scripts\") pod \"neutron-45e2-account-create-hj4jm\" (UID: \"4a2308e0-8430-418a-b943-0f6bc4e22904\") " pod="openstack/neutron-45e2-account-create-hj4jm" Nov 23 15:00:11 crc kubenswrapper[5050]: I1123 15:00:11.195880 5050 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-wqsrr\" (UniqueName: \"kubernetes.io/projected/4a2308e0-8430-418a-b943-0f6bc4e22904-kube-api-access-wqsrr\") pod \"neutron-45e2-account-create-hj4jm\" (UID: \"4a2308e0-8430-418a-b943-0f6bc4e22904\") " pod="openstack/neutron-45e2-account-create-hj4jm" Nov 23 15:00:11 crc kubenswrapper[5050]: I1123 15:00:11.239934 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-sdk6z" Nov 23 15:00:11 crc kubenswrapper[5050]: I1123 15:00:11.268702 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/99652fba-7c5d-4aaa-8de4-e29e38027171-config-data\") pod \"keystone-db-sync-mzzv7\" (UID: \"99652fba-7c5d-4aaa-8de4-e29e38027171\") " pod="openstack/keystone-db-sync-mzzv7" Nov 23 15:00:11 crc kubenswrapper[5050]: I1123 15:00:11.268779 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/99652fba-7c5d-4aaa-8de4-e29e38027171-combined-ca-bundle\") pod \"keystone-db-sync-mzzv7\" (UID: \"99652fba-7c5d-4aaa-8de4-e29e38027171\") " pod="openstack/keystone-db-sync-mzzv7" Nov 23 15:00:11 crc kubenswrapper[5050]: I1123 15:00:11.268807 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-brfb2\" (UniqueName: \"kubernetes.io/projected/99652fba-7c5d-4aaa-8de4-e29e38027171-kube-api-access-brfb2\") pod \"keystone-db-sync-mzzv7\" (UID: \"99652fba-7c5d-4aaa-8de4-e29e38027171\") " pod="openstack/keystone-db-sync-mzzv7" Nov 23 15:00:11 crc kubenswrapper[5050]: I1123 15:00:11.274805 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/99652fba-7c5d-4aaa-8de4-e29e38027171-config-data\") pod \"keystone-db-sync-mzzv7\" (UID: \"99652fba-7c5d-4aaa-8de4-e29e38027171\") " pod="openstack/keystone-db-sync-mzzv7" Nov 23 15:00:11 crc kubenswrapper[5050]: I1123 15:00:11.275639 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/99652fba-7c5d-4aaa-8de4-e29e38027171-combined-ca-bundle\") pod \"keystone-db-sync-mzzv7\" (UID: \"99652fba-7c5d-4aaa-8de4-e29e38027171\") " pod="openstack/keystone-db-sync-mzzv7" Nov 23 15:00:11 crc kubenswrapper[5050]: I1123 15:00:11.289496 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-brfb2\" (UniqueName: \"kubernetes.io/projected/99652fba-7c5d-4aaa-8de4-e29e38027171-kube-api-access-brfb2\") pod \"keystone-db-sync-mzzv7\" (UID: \"99652fba-7c5d-4aaa-8de4-e29e38027171\") " pod="openstack/keystone-db-sync-mzzv7" Nov 23 15:00:11 crc kubenswrapper[5050]: I1123 15:00:11.298250 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-45e2-account-create-hj4jm" Nov 23 15:00:11 crc kubenswrapper[5050]: I1123 15:00:11.392316 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-mzzv7" Nov 23 15:00:11 crc kubenswrapper[5050]: I1123 15:00:11.449321 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-rzl6b"] Nov 23 15:00:11 crc kubenswrapper[5050]: I1123 15:00:11.531257 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-vsf8d"] Nov 23 15:00:11 crc kubenswrapper[5050]: I1123 15:00:11.577586 5050 generic.go:334] "Generic (PLEG): container finished" podID="245f4876-8b58-42ff-864d-a9bda0fd9c32" containerID="bd22171393bb20d0c3e16b69fe310cf436b5cd98cf076a7e65ca368fc12901b7" exitCode=0 Nov 23 15:00:11 crc kubenswrapper[5050]: I1123 15:00:11.581906 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a8c801eb-e5f2-440d-836f-9cdef0f7c6fa" path="/var/lib/kubelet/pods/a8c801eb-e5f2-440d-836f-9cdef0f7c6fa/volumes" Nov 23 15:00:11 crc kubenswrapper[5050]: I1123 15:00:11.582715 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-vsf8d" event={"ID":"7f33bb25-e6da-43a1-8afd-08a47636b172","Type":"ContainerStarted","Data":"59c41b1206412a90aa68c0ae41e9bea59156b9261176192dd5ae672885e6188a"} Nov 23 15:00:11 crc kubenswrapper[5050]: I1123 15:00:11.583075 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5f59b8f679-58mlp" event={"ID":"245f4876-8b58-42ff-864d-a9bda0fd9c32","Type":"ContainerDied","Data":"bd22171393bb20d0c3e16b69fe310cf436b5cd98cf076a7e65ca368fc12901b7"} Nov 23 15:00:11 crc kubenswrapper[5050]: I1123 15:00:11.583099 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5f59b8f679-58mlp" event={"ID":"245f4876-8b58-42ff-864d-a9bda0fd9c32","Type":"ContainerStarted","Data":"43775857791c54766f7c66c4aed8b9f2c9259bd699c3f717673ee9aa40ae0b23"} Nov 23 15:00:11 crc kubenswrapper[5050]: I1123 15:00:11.590846 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-rzl6b" event={"ID":"e3ace8e3-1201-45e0-b9f7-7a8e272cef48","Type":"ContainerStarted","Data":"6c9c86e60fdedab69042b6f10fed3639f06eb46e3a7b7da7ca81921dd730ea86"} Nov 23 15:00:11 crc kubenswrapper[5050]: I1123 15:00:11.697966 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-255c-account-create-9gdvk"] Nov 23 15:00:11 crc kubenswrapper[5050]: I1123 15:00:11.704074 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-6ffa-account-create-pf9hk"] Nov 23 15:00:11 crc kubenswrapper[5050]: I1123 15:00:11.749190 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-45e2-account-create-hj4jm"] Nov 23 15:00:11 crc kubenswrapper[5050]: W1123 15:00:11.762184 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4a2308e0_8430_418a_b943_0f6bc4e22904.slice/crio-fd65ee88dd4b95368f975f48e03bfb266af7b141bd194cacccfe88eb0ad1e9b7 WatchSource:0}: Error finding container fd65ee88dd4b95368f975f48e03bfb266af7b141bd194cacccfe88eb0ad1e9b7: Status 404 returned error can't find the container with id fd65ee88dd4b95368f975f48e03bfb266af7b141bd194cacccfe88eb0ad1e9b7 Nov 23 15:00:11 crc kubenswrapper[5050]: I1123 15:00:11.861964 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-sdk6z"] Nov 23 15:00:11 crc kubenswrapper[5050]: W1123 15:00:11.878874 5050 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbd7c0f96_8702_4589_b3f5_a6a164a38d4e.slice/crio-bd8bcd2c0997c9510278a29f47d8b019c508e95d535f00a03d8bac635d1108fa WatchSource:0}: Error finding container bd8bcd2c0997c9510278a29f47d8b019c508e95d535f00a03d8bac635d1108fa: Status 404 returned error can't find the container with id bd8bcd2c0997c9510278a29f47d8b019c508e95d535f00a03d8bac635d1108fa Nov 23 15:00:12 crc kubenswrapper[5050]: I1123 15:00:12.020588 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-mzzv7"] Nov 23 15:00:12 crc kubenswrapper[5050]: W1123 15:00:12.034146 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod99652fba_7c5d_4aaa_8de4_e29e38027171.slice/crio-012d6cb564a9df8aae59e345cbfae4ead4fc9fea3f68f5108fb4ab288259b901 WatchSource:0}: Error finding container 012d6cb564a9df8aae59e345cbfae4ead4fc9fea3f68f5108fb4ab288259b901: Status 404 returned error can't find the container with id 012d6cb564a9df8aae59e345cbfae4ead4fc9fea3f68f5108fb4ab288259b901 Nov 23 15:00:12 crc kubenswrapper[5050]: I1123 15:00:12.610486 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5f59b8f679-58mlp" event={"ID":"245f4876-8b58-42ff-864d-a9bda0fd9c32","Type":"ContainerStarted","Data":"e44f54d0198ef92639aceb836d4552afa065a7a78bcc0146999c46d9e307c41a"} Nov 23 15:00:12 crc kubenswrapper[5050]: I1123 15:00:12.611346 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5f59b8f679-58mlp" Nov 23 15:00:12 crc kubenswrapper[5050]: I1123 15:00:12.614360 5050 generic.go:334] "Generic (PLEG): container finished" podID="e9b2ddd2-95ef-4086-b1dc-b13acef90cb5" containerID="727dc0a552ebf37c7874c54993d577aa166a14da4f6f9ca0a707173e2c9665aa" exitCode=0 Nov 23 15:00:12 crc kubenswrapper[5050]: I1123 15:00:12.614496 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-255c-account-create-9gdvk" event={"ID":"e9b2ddd2-95ef-4086-b1dc-b13acef90cb5","Type":"ContainerDied","Data":"727dc0a552ebf37c7874c54993d577aa166a14da4f6f9ca0a707173e2c9665aa"} Nov 23 15:00:12 crc kubenswrapper[5050]: I1123 15:00:12.614556 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-255c-account-create-9gdvk" event={"ID":"e9b2ddd2-95ef-4086-b1dc-b13acef90cb5","Type":"ContainerStarted","Data":"7a400ee30ad222641f1c0c5dbd86be0514455056cb7542abbaaa1b554cf4c0bb"} Nov 23 15:00:12 crc kubenswrapper[5050]: I1123 15:00:12.616903 5050 generic.go:334] "Generic (PLEG): container finished" podID="e3ace8e3-1201-45e0-b9f7-7a8e272cef48" containerID="299161f6b132a8506fc1999a7498e9f4796a83355fc8b6e0420a451e80357c9e" exitCode=0 Nov 23 15:00:12 crc kubenswrapper[5050]: I1123 15:00:12.617017 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-rzl6b" event={"ID":"e3ace8e3-1201-45e0-b9f7-7a8e272cef48","Type":"ContainerDied","Data":"299161f6b132a8506fc1999a7498e9f4796a83355fc8b6e0420a451e80357c9e"} Nov 23 15:00:12 crc kubenswrapper[5050]: I1123 15:00:12.618850 5050 generic.go:334] "Generic (PLEG): container finished" podID="4a2308e0-8430-418a-b943-0f6bc4e22904" containerID="ed687669db467b72cf51858bbabe382365b14403b1f4ed39dea5604437cad30a" exitCode=0 Nov 23 15:00:12 crc kubenswrapper[5050]: I1123 15:00:12.618926 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-45e2-account-create-hj4jm" 
event={"ID":"4a2308e0-8430-418a-b943-0f6bc4e22904","Type":"ContainerDied","Data":"ed687669db467b72cf51858bbabe382365b14403b1f4ed39dea5604437cad30a"} Nov 23 15:00:12 crc kubenswrapper[5050]: I1123 15:00:12.618949 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-45e2-account-create-hj4jm" event={"ID":"4a2308e0-8430-418a-b943-0f6bc4e22904","Type":"ContainerStarted","Data":"fd65ee88dd4b95368f975f48e03bfb266af7b141bd194cacccfe88eb0ad1e9b7"} Nov 23 15:00:12 crc kubenswrapper[5050]: I1123 15:00:12.632253 5050 generic.go:334] "Generic (PLEG): container finished" podID="d1c0ea55-9eb9-4e1c-bc1f-80ccf4c9d2cd" containerID="1894b6fbea8bbd84fe163e12069d94ea74e1467c86f754b3d1bb202e766111df" exitCode=0 Nov 23 15:00:12 crc kubenswrapper[5050]: I1123 15:00:12.632356 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-6ffa-account-create-pf9hk" event={"ID":"d1c0ea55-9eb9-4e1c-bc1f-80ccf4c9d2cd","Type":"ContainerDied","Data":"1894b6fbea8bbd84fe163e12069d94ea74e1467c86f754b3d1bb202e766111df"} Nov 23 15:00:12 crc kubenswrapper[5050]: I1123 15:00:12.632393 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-6ffa-account-create-pf9hk" event={"ID":"d1c0ea55-9eb9-4e1c-bc1f-80ccf4c9d2cd","Type":"ContainerStarted","Data":"f75334c656eba74acff0a488f76503be1d1f7497544bbfc902cccd900834da2c"} Nov 23 15:00:12 crc kubenswrapper[5050]: I1123 15:00:12.637836 5050 generic.go:334] "Generic (PLEG): container finished" podID="bd7c0f96-8702-4589-b3f5-a6a164a38d4e" containerID="b47139886aa07e280e1a2070c1d4af199afdeb3729f1a9c4995f9f5cc7a034cd" exitCode=0 Nov 23 15:00:12 crc kubenswrapper[5050]: I1123 15:00:12.637991 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-sdk6z" event={"ID":"bd7c0f96-8702-4589-b3f5-a6a164a38d4e","Type":"ContainerDied","Data":"b47139886aa07e280e1a2070c1d4af199afdeb3729f1a9c4995f9f5cc7a034cd"} Nov 23 15:00:12 crc kubenswrapper[5050]: I1123 15:00:12.638029 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-sdk6z" event={"ID":"bd7c0f96-8702-4589-b3f5-a6a164a38d4e","Type":"ContainerStarted","Data":"bd8bcd2c0997c9510278a29f47d8b019c508e95d535f00a03d8bac635d1108fa"} Nov 23 15:00:12 crc kubenswrapper[5050]: I1123 15:00:12.639677 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-mzzv7" event={"ID":"99652fba-7c5d-4aaa-8de4-e29e38027171","Type":"ContainerStarted","Data":"012d6cb564a9df8aae59e345cbfae4ead4fc9fea3f68f5108fb4ab288259b901"} Nov 23 15:00:12 crc kubenswrapper[5050]: I1123 15:00:12.647353 5050 generic.go:334] "Generic (PLEG): container finished" podID="7f33bb25-e6da-43a1-8afd-08a47636b172" containerID="1109890e497c0f796b8443638532b27dfc04e6ba90d2aa51286205529a6a9e0b" exitCode=0 Nov 23 15:00:12 crc kubenswrapper[5050]: I1123 15:00:12.647434 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-vsf8d" event={"ID":"7f33bb25-e6da-43a1-8afd-08a47636b172","Type":"ContainerDied","Data":"1109890e497c0f796b8443638532b27dfc04e6ba90d2aa51286205529a6a9e0b"} Nov 23 15:00:12 crc kubenswrapper[5050]: I1123 15:00:12.656347 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5f59b8f679-58mlp" podStartSLOduration=3.656308818 podStartE2EDuration="3.656308818s" podCreationTimestamp="2025-11-23 15:00:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 
15:00:12.647247146 +0000 UTC m=+1107.814243641" watchObservedRunningTime="2025-11-23 15:00:12.656308818 +0000 UTC m=+1107.823305303" Nov 23 15:00:14 crc kubenswrapper[5050]: I1123 15:00:14.049999 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-vsf8d" Nov 23 15:00:14 crc kubenswrapper[5050]: I1123 15:00:14.151562 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jkrks\" (UniqueName: \"kubernetes.io/projected/7f33bb25-e6da-43a1-8afd-08a47636b172-kube-api-access-jkrks\") pod \"7f33bb25-e6da-43a1-8afd-08a47636b172\" (UID: \"7f33bb25-e6da-43a1-8afd-08a47636b172\") " Nov 23 15:00:14 crc kubenswrapper[5050]: I1123 15:00:14.151913 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7f33bb25-e6da-43a1-8afd-08a47636b172-operator-scripts\") pod \"7f33bb25-e6da-43a1-8afd-08a47636b172\" (UID: \"7f33bb25-e6da-43a1-8afd-08a47636b172\") " Nov 23 15:00:14 crc kubenswrapper[5050]: I1123 15:00:14.152748 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7f33bb25-e6da-43a1-8afd-08a47636b172-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "7f33bb25-e6da-43a1-8afd-08a47636b172" (UID: "7f33bb25-e6da-43a1-8afd-08a47636b172"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 15:00:14 crc kubenswrapper[5050]: I1123 15:00:14.165919 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7f33bb25-e6da-43a1-8afd-08a47636b172-kube-api-access-jkrks" (OuterVolumeSpecName: "kube-api-access-jkrks") pod "7f33bb25-e6da-43a1-8afd-08a47636b172" (UID: "7f33bb25-e6da-43a1-8afd-08a47636b172"). InnerVolumeSpecName "kube-api-access-jkrks". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:00:14 crc kubenswrapper[5050]: I1123 15:00:14.254227 5050 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7f33bb25-e6da-43a1-8afd-08a47636b172-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 23 15:00:14 crc kubenswrapper[5050]: I1123 15:00:14.254269 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jkrks\" (UniqueName: \"kubernetes.io/projected/7f33bb25-e6da-43a1-8afd-08a47636b172-kube-api-access-jkrks\") on node \"crc\" DevicePath \"\"" Nov 23 15:00:14 crc kubenswrapper[5050]: I1123 15:00:14.269072 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-6ffa-account-create-pf9hk" Nov 23 15:00:14 crc kubenswrapper[5050]: I1123 15:00:14.278327 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-sdk6z" Nov 23 15:00:14 crc kubenswrapper[5050]: I1123 15:00:14.299854 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-255c-account-create-9gdvk" Nov 23 15:00:14 crc kubenswrapper[5050]: I1123 15:00:14.318892 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-45e2-account-create-hj4jm" Nov 23 15:00:14 crc kubenswrapper[5050]: I1123 15:00:14.324779 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-create-rzl6b" Nov 23 15:00:14 crc kubenswrapper[5050]: I1123 15:00:14.356262 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wrktb\" (UniqueName: \"kubernetes.io/projected/d1c0ea55-9eb9-4e1c-bc1f-80ccf4c9d2cd-kube-api-access-wrktb\") pod \"d1c0ea55-9eb9-4e1c-bc1f-80ccf4c9d2cd\" (UID: \"d1c0ea55-9eb9-4e1c-bc1f-80ccf4c9d2cd\") " Nov 23 15:00:14 crc kubenswrapper[5050]: I1123 15:00:14.356352 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d1c0ea55-9eb9-4e1c-bc1f-80ccf4c9d2cd-operator-scripts\") pod \"d1c0ea55-9eb9-4e1c-bc1f-80ccf4c9d2cd\" (UID: \"d1c0ea55-9eb9-4e1c-bc1f-80ccf4c9d2cd\") " Nov 23 15:00:14 crc kubenswrapper[5050]: I1123 15:00:14.356595 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s7fnn\" (UniqueName: \"kubernetes.io/projected/bd7c0f96-8702-4589-b3f5-a6a164a38d4e-kube-api-access-s7fnn\") pod \"bd7c0f96-8702-4589-b3f5-a6a164a38d4e\" (UID: \"bd7c0f96-8702-4589-b3f5-a6a164a38d4e\") " Nov 23 15:00:14 crc kubenswrapper[5050]: I1123 15:00:14.356632 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bd7c0f96-8702-4589-b3f5-a6a164a38d4e-operator-scripts\") pod \"bd7c0f96-8702-4589-b3f5-a6a164a38d4e\" (UID: \"bd7c0f96-8702-4589-b3f5-a6a164a38d4e\") " Nov 23 15:00:14 crc kubenswrapper[5050]: I1123 15:00:14.357184 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d1c0ea55-9eb9-4e1c-bc1f-80ccf4c9d2cd-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "d1c0ea55-9eb9-4e1c-bc1f-80ccf4c9d2cd" (UID: "d1c0ea55-9eb9-4e1c-bc1f-80ccf4c9d2cd"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 15:00:14 crc kubenswrapper[5050]: I1123 15:00:14.360734 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bd7c0f96-8702-4589-b3f5-a6a164a38d4e-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "bd7c0f96-8702-4589-b3f5-a6a164a38d4e" (UID: "bd7c0f96-8702-4589-b3f5-a6a164a38d4e"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 15:00:14 crc kubenswrapper[5050]: I1123 15:00:14.378636 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd7c0f96-8702-4589-b3f5-a6a164a38d4e-kube-api-access-s7fnn" (OuterVolumeSpecName: "kube-api-access-s7fnn") pod "bd7c0f96-8702-4589-b3f5-a6a164a38d4e" (UID: "bd7c0f96-8702-4589-b3f5-a6a164a38d4e"). InnerVolumeSpecName "kube-api-access-s7fnn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:00:14 crc kubenswrapper[5050]: I1123 15:00:14.378722 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d1c0ea55-9eb9-4e1c-bc1f-80ccf4c9d2cd-kube-api-access-wrktb" (OuterVolumeSpecName: "kube-api-access-wrktb") pod "d1c0ea55-9eb9-4e1c-bc1f-80ccf4c9d2cd" (UID: "d1c0ea55-9eb9-4e1c-bc1f-80ccf4c9d2cd"). InnerVolumeSpecName "kube-api-access-wrktb". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:00:14 crc kubenswrapper[5050]: I1123 15:00:14.460509 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xqg2s\" (UniqueName: \"kubernetes.io/projected/e9b2ddd2-95ef-4086-b1dc-b13acef90cb5-kube-api-access-xqg2s\") pod \"e9b2ddd2-95ef-4086-b1dc-b13acef90cb5\" (UID: \"e9b2ddd2-95ef-4086-b1dc-b13acef90cb5\") " Nov 23 15:00:14 crc kubenswrapper[5050]: I1123 15:00:14.460610 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4a2308e0-8430-418a-b943-0f6bc4e22904-operator-scripts\") pod \"4a2308e0-8430-418a-b943-0f6bc4e22904\" (UID: \"4a2308e0-8430-418a-b943-0f6bc4e22904\") " Nov 23 15:00:14 crc kubenswrapper[5050]: I1123 15:00:14.460744 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nlvsz\" (UniqueName: \"kubernetes.io/projected/e3ace8e3-1201-45e0-b9f7-7a8e272cef48-kube-api-access-nlvsz\") pod \"e3ace8e3-1201-45e0-b9f7-7a8e272cef48\" (UID: \"e3ace8e3-1201-45e0-b9f7-7a8e272cef48\") " Nov 23 15:00:14 crc kubenswrapper[5050]: I1123 15:00:14.460838 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e3ace8e3-1201-45e0-b9f7-7a8e272cef48-operator-scripts\") pod \"e3ace8e3-1201-45e0-b9f7-7a8e272cef48\" (UID: \"e3ace8e3-1201-45e0-b9f7-7a8e272cef48\") " Nov 23 15:00:14 crc kubenswrapper[5050]: I1123 15:00:14.460864 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wqsrr\" (UniqueName: \"kubernetes.io/projected/4a2308e0-8430-418a-b943-0f6bc4e22904-kube-api-access-wqsrr\") pod \"4a2308e0-8430-418a-b943-0f6bc4e22904\" (UID: \"4a2308e0-8430-418a-b943-0f6bc4e22904\") " Nov 23 15:00:14 crc kubenswrapper[5050]: I1123 15:00:14.460902 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e9b2ddd2-95ef-4086-b1dc-b13acef90cb5-operator-scripts\") pod \"e9b2ddd2-95ef-4086-b1dc-b13acef90cb5\" (UID: \"e9b2ddd2-95ef-4086-b1dc-b13acef90cb5\") " Nov 23 15:00:14 crc kubenswrapper[5050]: I1123 15:00:14.461268 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wrktb\" (UniqueName: \"kubernetes.io/projected/d1c0ea55-9eb9-4e1c-bc1f-80ccf4c9d2cd-kube-api-access-wrktb\") on node \"crc\" DevicePath \"\"" Nov 23 15:00:14 crc kubenswrapper[5050]: I1123 15:00:14.461289 5050 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d1c0ea55-9eb9-4e1c-bc1f-80ccf4c9d2cd-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 23 15:00:14 crc kubenswrapper[5050]: I1123 15:00:14.461301 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s7fnn\" (UniqueName: \"kubernetes.io/projected/bd7c0f96-8702-4589-b3f5-a6a164a38d4e-kube-api-access-s7fnn\") on node \"crc\" DevicePath \"\"" Nov 23 15:00:14 crc kubenswrapper[5050]: I1123 15:00:14.461310 5050 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bd7c0f96-8702-4589-b3f5-a6a164a38d4e-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 23 15:00:14 crc kubenswrapper[5050]: I1123 15:00:14.461474 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/configmap/4a2308e0-8430-418a-b943-0f6bc4e22904-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "4a2308e0-8430-418a-b943-0f6bc4e22904" (UID: "4a2308e0-8430-418a-b943-0f6bc4e22904"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 15:00:14 crc kubenswrapper[5050]: I1123 15:00:14.461832 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e9b2ddd2-95ef-4086-b1dc-b13acef90cb5-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "e9b2ddd2-95ef-4086-b1dc-b13acef90cb5" (UID: "e9b2ddd2-95ef-4086-b1dc-b13acef90cb5"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 15:00:14 crc kubenswrapper[5050]: I1123 15:00:14.461898 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e3ace8e3-1201-45e0-b9f7-7a8e272cef48-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "e3ace8e3-1201-45e0-b9f7-7a8e272cef48" (UID: "e3ace8e3-1201-45e0-b9f7-7a8e272cef48"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 15:00:14 crc kubenswrapper[5050]: I1123 15:00:14.464381 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e9b2ddd2-95ef-4086-b1dc-b13acef90cb5-kube-api-access-xqg2s" (OuterVolumeSpecName: "kube-api-access-xqg2s") pod "e9b2ddd2-95ef-4086-b1dc-b13acef90cb5" (UID: "e9b2ddd2-95ef-4086-b1dc-b13acef90cb5"). InnerVolumeSpecName "kube-api-access-xqg2s". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:00:14 crc kubenswrapper[5050]: I1123 15:00:14.465892 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e3ace8e3-1201-45e0-b9f7-7a8e272cef48-kube-api-access-nlvsz" (OuterVolumeSpecName: "kube-api-access-nlvsz") pod "e3ace8e3-1201-45e0-b9f7-7a8e272cef48" (UID: "e3ace8e3-1201-45e0-b9f7-7a8e272cef48"). InnerVolumeSpecName "kube-api-access-nlvsz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:00:14 crc kubenswrapper[5050]: I1123 15:00:14.466608 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4a2308e0-8430-418a-b943-0f6bc4e22904-kube-api-access-wqsrr" (OuterVolumeSpecName: "kube-api-access-wqsrr") pod "4a2308e0-8430-418a-b943-0f6bc4e22904" (UID: "4a2308e0-8430-418a-b943-0f6bc4e22904"). InnerVolumeSpecName "kube-api-access-wqsrr". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:00:14 crc kubenswrapper[5050]: I1123 15:00:14.563634 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xqg2s\" (UniqueName: \"kubernetes.io/projected/e9b2ddd2-95ef-4086-b1dc-b13acef90cb5-kube-api-access-xqg2s\") on node \"crc\" DevicePath \"\"" Nov 23 15:00:14 crc kubenswrapper[5050]: I1123 15:00:14.563677 5050 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4a2308e0-8430-418a-b943-0f6bc4e22904-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 23 15:00:14 crc kubenswrapper[5050]: I1123 15:00:14.563690 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nlvsz\" (UniqueName: \"kubernetes.io/projected/e3ace8e3-1201-45e0-b9f7-7a8e272cef48-kube-api-access-nlvsz\") on node \"crc\" DevicePath \"\"" Nov 23 15:00:14 crc kubenswrapper[5050]: I1123 15:00:14.563718 5050 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e3ace8e3-1201-45e0-b9f7-7a8e272cef48-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 23 15:00:14 crc kubenswrapper[5050]: I1123 15:00:14.563728 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wqsrr\" (UniqueName: \"kubernetes.io/projected/4a2308e0-8430-418a-b943-0f6bc4e22904-kube-api-access-wqsrr\") on node \"crc\" DevicePath \"\"" Nov 23 15:00:14 crc kubenswrapper[5050]: I1123 15:00:14.563737 5050 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e9b2ddd2-95ef-4086-b1dc-b13acef90cb5-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 23 15:00:14 crc kubenswrapper[5050]: I1123 15:00:14.668336 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-6ffa-account-create-pf9hk" event={"ID":"d1c0ea55-9eb9-4e1c-bc1f-80ccf4c9d2cd","Type":"ContainerDied","Data":"f75334c656eba74acff0a488f76503be1d1f7497544bbfc902cccd900834da2c"} Nov 23 15:00:14 crc kubenswrapper[5050]: I1123 15:00:14.668387 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-6ffa-account-create-pf9hk" Nov 23 15:00:14 crc kubenswrapper[5050]: I1123 15:00:14.668408 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f75334c656eba74acff0a488f76503be1d1f7497544bbfc902cccd900834da2c" Nov 23 15:00:14 crc kubenswrapper[5050]: I1123 15:00:14.669798 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-sdk6z" event={"ID":"bd7c0f96-8702-4589-b3f5-a6a164a38d4e","Type":"ContainerDied","Data":"bd8bcd2c0997c9510278a29f47d8b019c508e95d535f00a03d8bac635d1108fa"} Nov 23 15:00:14 crc kubenswrapper[5050]: I1123 15:00:14.669836 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bd8bcd2c0997c9510278a29f47d8b019c508e95d535f00a03d8bac635d1108fa" Nov 23 15:00:14 crc kubenswrapper[5050]: I1123 15:00:14.669838 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-create-sdk6z" Nov 23 15:00:14 crc kubenswrapper[5050]: I1123 15:00:14.672462 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-vsf8d" event={"ID":"7f33bb25-e6da-43a1-8afd-08a47636b172","Type":"ContainerDied","Data":"59c41b1206412a90aa68c0ae41e9bea59156b9261176192dd5ae672885e6188a"} Nov 23 15:00:14 crc kubenswrapper[5050]: I1123 15:00:14.672518 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-vsf8d" Nov 23 15:00:14 crc kubenswrapper[5050]: I1123 15:00:14.672538 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="59c41b1206412a90aa68c0ae41e9bea59156b9261176192dd5ae672885e6188a" Nov 23 15:00:14 crc kubenswrapper[5050]: I1123 15:00:14.674262 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-255c-account-create-9gdvk" event={"ID":"e9b2ddd2-95ef-4086-b1dc-b13acef90cb5","Type":"ContainerDied","Data":"7a400ee30ad222641f1c0c5dbd86be0514455056cb7542abbaaa1b554cf4c0bb"} Nov 23 15:00:14 crc kubenswrapper[5050]: I1123 15:00:14.674302 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7a400ee30ad222641f1c0c5dbd86be0514455056cb7542abbaaa1b554cf4c0bb" Nov 23 15:00:14 crc kubenswrapper[5050]: I1123 15:00:14.674332 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-255c-account-create-9gdvk" Nov 23 15:00:14 crc kubenswrapper[5050]: I1123 15:00:14.675971 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-rzl6b" event={"ID":"e3ace8e3-1201-45e0-b9f7-7a8e272cef48","Type":"ContainerDied","Data":"6c9c86e60fdedab69042b6f10fed3639f06eb46e3a7b7da7ca81921dd730ea86"} Nov 23 15:00:14 crc kubenswrapper[5050]: I1123 15:00:14.676037 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6c9c86e60fdedab69042b6f10fed3639f06eb46e3a7b7da7ca81921dd730ea86" Nov 23 15:00:14 crc kubenswrapper[5050]: I1123 15:00:14.676080 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-rzl6b" Nov 23 15:00:14 crc kubenswrapper[5050]: I1123 15:00:14.679078 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-45e2-account-create-hj4jm" event={"ID":"4a2308e0-8430-418a-b943-0f6bc4e22904","Type":"ContainerDied","Data":"fd65ee88dd4b95368f975f48e03bfb266af7b141bd194cacccfe88eb0ad1e9b7"} Nov 23 15:00:14 crc kubenswrapper[5050]: I1123 15:00:14.679114 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fd65ee88dd4b95368f975f48e03bfb266af7b141bd194cacccfe88eb0ad1e9b7" Nov 23 15:00:14 crc kubenswrapper[5050]: I1123 15:00:14.679181 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-45e2-account-create-hj4jm" Nov 23 15:00:18 crc kubenswrapper[5050]: I1123 15:00:18.736868 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-mzzv7" event={"ID":"99652fba-7c5d-4aaa-8de4-e29e38027171","Type":"ContainerStarted","Data":"bf86ac4b6838a47cd93889d0d9491ce491f000ed4be649f3721e2a86aeb4e9ae"} Nov 23 15:00:18 crc kubenswrapper[5050]: I1123 15:00:18.758025 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-sync-mzzv7" podStartSLOduration=3.058858465 podStartE2EDuration="8.757994374s" podCreationTimestamp="2025-11-23 15:00:10 +0000 UTC" firstStartedPulling="2025-11-23 15:00:12.040783362 +0000 UTC m=+1107.207779847" lastFinishedPulling="2025-11-23 15:00:17.739919281 +0000 UTC m=+1112.906915756" observedRunningTime="2025-11-23 15:00:18.754495077 +0000 UTC m=+1113.921491602" watchObservedRunningTime="2025-11-23 15:00:18.757994374 +0000 UTC m=+1113.924990889" Nov 23 15:00:19 crc kubenswrapper[5050]: I1123 15:00:19.397659 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5f59b8f679-58mlp" Nov 23 15:00:19 crc kubenswrapper[5050]: I1123 15:00:19.470173 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-98kts"] Nov 23 15:00:19 crc kubenswrapper[5050]: I1123 15:00:19.470437 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-b8fbc5445-98kts" podUID="8982637f-7dc1-4454-9213-47ea0f43971a" containerName="dnsmasq-dns" containerID="cri-o://2d7a198938bb1d3a0d558342939a2598c548f53de9bad190abfab99ec80ae0a2" gracePeriod=10 Nov 23 15:00:19 crc kubenswrapper[5050]: I1123 15:00:19.761256 5050 generic.go:334] "Generic (PLEG): container finished" podID="8982637f-7dc1-4454-9213-47ea0f43971a" containerID="2d7a198938bb1d3a0d558342939a2598c548f53de9bad190abfab99ec80ae0a2" exitCode=0 Nov 23 15:00:19 crc kubenswrapper[5050]: I1123 15:00:19.761374 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-98kts" event={"ID":"8982637f-7dc1-4454-9213-47ea0f43971a","Type":"ContainerDied","Data":"2d7a198938bb1d3a0d558342939a2598c548f53de9bad190abfab99ec80ae0a2"} Nov 23 15:00:20 crc kubenswrapper[5050]: I1123 15:00:20.092598 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-b8fbc5445-98kts" Nov 23 15:00:20 crc kubenswrapper[5050]: I1123 15:00:20.177507 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8982637f-7dc1-4454-9213-47ea0f43971a-ovsdbserver-sb\") pod \"8982637f-7dc1-4454-9213-47ea0f43971a\" (UID: \"8982637f-7dc1-4454-9213-47ea0f43971a\") " Nov 23 15:00:20 crc kubenswrapper[5050]: I1123 15:00:20.177636 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4zrl2\" (UniqueName: \"kubernetes.io/projected/8982637f-7dc1-4454-9213-47ea0f43971a-kube-api-access-4zrl2\") pod \"8982637f-7dc1-4454-9213-47ea0f43971a\" (UID: \"8982637f-7dc1-4454-9213-47ea0f43971a\") " Nov 23 15:00:20 crc kubenswrapper[5050]: I1123 15:00:20.177792 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8982637f-7dc1-4454-9213-47ea0f43971a-dns-svc\") pod \"8982637f-7dc1-4454-9213-47ea0f43971a\" (UID: \"8982637f-7dc1-4454-9213-47ea0f43971a\") " Nov 23 15:00:20 crc kubenswrapper[5050]: I1123 15:00:20.177914 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8982637f-7dc1-4454-9213-47ea0f43971a-ovsdbserver-nb\") pod \"8982637f-7dc1-4454-9213-47ea0f43971a\" (UID: \"8982637f-7dc1-4454-9213-47ea0f43971a\") " Nov 23 15:00:20 crc kubenswrapper[5050]: I1123 15:00:20.177952 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8982637f-7dc1-4454-9213-47ea0f43971a-config\") pod \"8982637f-7dc1-4454-9213-47ea0f43971a\" (UID: \"8982637f-7dc1-4454-9213-47ea0f43971a\") " Nov 23 15:00:20 crc kubenswrapper[5050]: I1123 15:00:20.188674 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8982637f-7dc1-4454-9213-47ea0f43971a-kube-api-access-4zrl2" (OuterVolumeSpecName: "kube-api-access-4zrl2") pod "8982637f-7dc1-4454-9213-47ea0f43971a" (UID: "8982637f-7dc1-4454-9213-47ea0f43971a"). InnerVolumeSpecName "kube-api-access-4zrl2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:00:20 crc kubenswrapper[5050]: I1123 15:00:20.237799 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8982637f-7dc1-4454-9213-47ea0f43971a-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "8982637f-7dc1-4454-9213-47ea0f43971a" (UID: "8982637f-7dc1-4454-9213-47ea0f43971a"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 15:00:20 crc kubenswrapper[5050]: I1123 15:00:20.247039 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8982637f-7dc1-4454-9213-47ea0f43971a-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "8982637f-7dc1-4454-9213-47ea0f43971a" (UID: "8982637f-7dc1-4454-9213-47ea0f43971a"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 15:00:20 crc kubenswrapper[5050]: I1123 15:00:20.250039 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8982637f-7dc1-4454-9213-47ea0f43971a-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "8982637f-7dc1-4454-9213-47ea0f43971a" (UID: "8982637f-7dc1-4454-9213-47ea0f43971a"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 15:00:20 crc kubenswrapper[5050]: I1123 15:00:20.258878 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8982637f-7dc1-4454-9213-47ea0f43971a-config" (OuterVolumeSpecName: "config") pod "8982637f-7dc1-4454-9213-47ea0f43971a" (UID: "8982637f-7dc1-4454-9213-47ea0f43971a"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 15:00:20 crc kubenswrapper[5050]: I1123 15:00:20.281466 5050 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8982637f-7dc1-4454-9213-47ea0f43971a-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 23 15:00:20 crc kubenswrapper[5050]: I1123 15:00:20.281562 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4zrl2\" (UniqueName: \"kubernetes.io/projected/8982637f-7dc1-4454-9213-47ea0f43971a-kube-api-access-4zrl2\") on node \"crc\" DevicePath \"\"" Nov 23 15:00:20 crc kubenswrapper[5050]: I1123 15:00:20.281638 5050 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8982637f-7dc1-4454-9213-47ea0f43971a-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 23 15:00:20 crc kubenswrapper[5050]: I1123 15:00:20.281721 5050 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8982637f-7dc1-4454-9213-47ea0f43971a-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 23 15:00:20 crc kubenswrapper[5050]: I1123 15:00:20.281785 5050 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8982637f-7dc1-4454-9213-47ea0f43971a-config\") on node \"crc\" DevicePath \"\"" Nov 23 15:00:20 crc kubenswrapper[5050]: I1123 15:00:20.772626 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-98kts" event={"ID":"8982637f-7dc1-4454-9213-47ea0f43971a","Type":"ContainerDied","Data":"727b1a17874f8c0695ddba66aaf3aed331b1b5652c646f784041f11a6e4207af"} Nov 23 15:00:20 crc kubenswrapper[5050]: I1123 15:00:20.772706 5050 scope.go:117] "RemoveContainer" containerID="2d7a198938bb1d3a0d558342939a2598c548f53de9bad190abfab99ec80ae0a2" Nov 23 15:00:20 crc kubenswrapper[5050]: I1123 15:00:20.772720 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-b8fbc5445-98kts" Nov 23 15:00:20 crc kubenswrapper[5050]: I1123 15:00:20.808747 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-98kts"] Nov 23 15:00:20 crc kubenswrapper[5050]: I1123 15:00:20.817755 5050 scope.go:117] "RemoveContainer" containerID="faedd9482b7ef82e8458f6c7533ea9e2546fbb705468e4e4c265eab9f4f12302" Nov 23 15:00:20 crc kubenswrapper[5050]: I1123 15:00:20.821721 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-98kts"] Nov 23 15:00:21 crc kubenswrapper[5050]: I1123 15:00:21.572595 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8982637f-7dc1-4454-9213-47ea0f43971a" path="/var/lib/kubelet/pods/8982637f-7dc1-4454-9213-47ea0f43971a/volumes" Nov 23 15:00:21 crc kubenswrapper[5050]: I1123 15:00:21.787014 5050 generic.go:334] "Generic (PLEG): container finished" podID="99652fba-7c5d-4aaa-8de4-e29e38027171" containerID="bf86ac4b6838a47cd93889d0d9491ce491f000ed4be649f3721e2a86aeb4e9ae" exitCode=0 Nov 23 15:00:21 crc kubenswrapper[5050]: I1123 15:00:21.787107 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-mzzv7" event={"ID":"99652fba-7c5d-4aaa-8de4-e29e38027171","Type":"ContainerDied","Data":"bf86ac4b6838a47cd93889d0d9491ce491f000ed4be649f3721e2a86aeb4e9ae"} Nov 23 15:00:23 crc kubenswrapper[5050]: I1123 15:00:23.200336 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-mzzv7" Nov 23 15:00:23 crc kubenswrapper[5050]: I1123 15:00:23.348132 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/99652fba-7c5d-4aaa-8de4-e29e38027171-combined-ca-bundle\") pod \"99652fba-7c5d-4aaa-8de4-e29e38027171\" (UID: \"99652fba-7c5d-4aaa-8de4-e29e38027171\") " Nov 23 15:00:23 crc kubenswrapper[5050]: I1123 15:00:23.348307 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/99652fba-7c5d-4aaa-8de4-e29e38027171-config-data\") pod \"99652fba-7c5d-4aaa-8de4-e29e38027171\" (UID: \"99652fba-7c5d-4aaa-8de4-e29e38027171\") " Nov 23 15:00:23 crc kubenswrapper[5050]: I1123 15:00:23.348472 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-brfb2\" (UniqueName: \"kubernetes.io/projected/99652fba-7c5d-4aaa-8de4-e29e38027171-kube-api-access-brfb2\") pod \"99652fba-7c5d-4aaa-8de4-e29e38027171\" (UID: \"99652fba-7c5d-4aaa-8de4-e29e38027171\") " Nov 23 15:00:23 crc kubenswrapper[5050]: I1123 15:00:23.354454 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/99652fba-7c5d-4aaa-8de4-e29e38027171-kube-api-access-brfb2" (OuterVolumeSpecName: "kube-api-access-brfb2") pod "99652fba-7c5d-4aaa-8de4-e29e38027171" (UID: "99652fba-7c5d-4aaa-8de4-e29e38027171"). InnerVolumeSpecName "kube-api-access-brfb2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:00:23 crc kubenswrapper[5050]: I1123 15:00:23.400832 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/99652fba-7c5d-4aaa-8de4-e29e38027171-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "99652fba-7c5d-4aaa-8de4-e29e38027171" (UID: "99652fba-7c5d-4aaa-8de4-e29e38027171"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:00:23 crc kubenswrapper[5050]: I1123 15:00:23.400920 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/99652fba-7c5d-4aaa-8de4-e29e38027171-config-data" (OuterVolumeSpecName: "config-data") pod "99652fba-7c5d-4aaa-8de4-e29e38027171" (UID: "99652fba-7c5d-4aaa-8de4-e29e38027171"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:00:23 crc kubenswrapper[5050]: I1123 15:00:23.450663 5050 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/99652fba-7c5d-4aaa-8de4-e29e38027171-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 15:00:23 crc kubenswrapper[5050]: I1123 15:00:23.450699 5050 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/99652fba-7c5d-4aaa-8de4-e29e38027171-config-data\") on node \"crc\" DevicePath \"\"" Nov 23 15:00:23 crc kubenswrapper[5050]: I1123 15:00:23.450709 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-brfb2\" (UniqueName: \"kubernetes.io/projected/99652fba-7c5d-4aaa-8de4-e29e38027171-kube-api-access-brfb2\") on node \"crc\" DevicePath \"\"" Nov 23 15:00:23 crc kubenswrapper[5050]: I1123 15:00:23.808951 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-mzzv7" event={"ID":"99652fba-7c5d-4aaa-8de4-e29e38027171","Type":"ContainerDied","Data":"012d6cb564a9df8aae59e345cbfae4ead4fc9fea3f68f5108fb4ab288259b901"} Nov 23 15:00:23 crc kubenswrapper[5050]: I1123 15:00:23.808993 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="012d6cb564a9df8aae59e345cbfae4ead4fc9fea3f68f5108fb4ab288259b901" Nov 23 15:00:23 crc kubenswrapper[5050]: I1123 15:00:23.809069 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-mzzv7" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.102130 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-bbf5cc879-qjwdw"] Nov 23 15:00:24 crc kubenswrapper[5050]: E1123 15:00:24.102849 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e3ace8e3-1201-45e0-b9f7-7a8e272cef48" containerName="mariadb-database-create" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.102868 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="e3ace8e3-1201-45e0-b9f7-7a8e272cef48" containerName="mariadb-database-create" Nov 23 15:00:24 crc kubenswrapper[5050]: E1123 15:00:24.102878 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7f33bb25-e6da-43a1-8afd-08a47636b172" containerName="mariadb-database-create" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.102885 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="7f33bb25-e6da-43a1-8afd-08a47636b172" containerName="mariadb-database-create" Nov 23 15:00:24 crc kubenswrapper[5050]: E1123 15:00:24.102896 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4a2308e0-8430-418a-b943-0f6bc4e22904" containerName="mariadb-account-create" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.102902 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="4a2308e0-8430-418a-b943-0f6bc4e22904" containerName="mariadb-account-create" Nov 23 15:00:24 crc kubenswrapper[5050]: E1123 15:00:24.102911 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d1c0ea55-9eb9-4e1c-bc1f-80ccf4c9d2cd" containerName="mariadb-account-create" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.102918 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="d1c0ea55-9eb9-4e1c-bc1f-80ccf4c9d2cd" containerName="mariadb-account-create" Nov 23 15:00:24 crc kubenswrapper[5050]: E1123 15:00:24.102940 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8982637f-7dc1-4454-9213-47ea0f43971a" containerName="init" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.102946 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="8982637f-7dc1-4454-9213-47ea0f43971a" containerName="init" Nov 23 15:00:24 crc kubenswrapper[5050]: E1123 15:00:24.102958 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="99652fba-7c5d-4aaa-8de4-e29e38027171" containerName="keystone-db-sync" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.102965 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="99652fba-7c5d-4aaa-8de4-e29e38027171" containerName="keystone-db-sync" Nov 23 15:00:24 crc kubenswrapper[5050]: E1123 15:00:24.102978 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8982637f-7dc1-4454-9213-47ea0f43971a" containerName="dnsmasq-dns" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.102984 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="8982637f-7dc1-4454-9213-47ea0f43971a" containerName="dnsmasq-dns" Nov 23 15:00:24 crc kubenswrapper[5050]: E1123 15:00:24.102995 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e9b2ddd2-95ef-4086-b1dc-b13acef90cb5" containerName="mariadb-account-create" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.103006 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="e9b2ddd2-95ef-4086-b1dc-b13acef90cb5" containerName="mariadb-account-create" Nov 23 15:00:24 crc kubenswrapper[5050]: E1123 15:00:24.103015 5050 cpu_manager.go:410] "RemoveStaleState: 
removing container" podUID="bd7c0f96-8702-4589-b3f5-a6a164a38d4e" containerName="mariadb-database-create" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.103022 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="bd7c0f96-8702-4589-b3f5-a6a164a38d4e" containerName="mariadb-database-create" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.103190 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="7f33bb25-e6da-43a1-8afd-08a47636b172" containerName="mariadb-database-create" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.103207 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="99652fba-7c5d-4aaa-8de4-e29e38027171" containerName="keystone-db-sync" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.103214 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="bd7c0f96-8702-4589-b3f5-a6a164a38d4e" containerName="mariadb-database-create" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.103226 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="4a2308e0-8430-418a-b943-0f6bc4e22904" containerName="mariadb-account-create" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.103235 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="e3ace8e3-1201-45e0-b9f7-7a8e272cef48" containerName="mariadb-database-create" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.103246 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="d1c0ea55-9eb9-4e1c-bc1f-80ccf4c9d2cd" containerName="mariadb-account-create" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.103259 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="8982637f-7dc1-4454-9213-47ea0f43971a" containerName="dnsmasq-dns" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.103267 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="e9b2ddd2-95ef-4086-b1dc-b13acef90cb5" containerName="mariadb-account-create" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.104634 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-bbf5cc879-qjwdw" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.129499 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-zxtmw"] Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.130688 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-zxtmw" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.134413 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.134557 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.134606 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.134742 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-hkfk8" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.135833 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.142783 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-bbf5cc879-qjwdw"] Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.153828 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-zxtmw"] Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.263825 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a1575caa-7652-4f58-92b8-fc433cc1fd8d-ovsdbserver-sb\") pod \"dnsmasq-dns-bbf5cc879-qjwdw\" (UID: \"a1575caa-7652-4f58-92b8-fc433cc1fd8d\") " pod="openstack/dnsmasq-dns-bbf5cc879-qjwdw" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.264030 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a1575caa-7652-4f58-92b8-fc433cc1fd8d-dns-swift-storage-0\") pod \"dnsmasq-dns-bbf5cc879-qjwdw\" (UID: \"a1575caa-7652-4f58-92b8-fc433cc1fd8d\") " pod="openstack/dnsmasq-dns-bbf5cc879-qjwdw" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.264118 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/7907fcb1-82b6-48bc-8c87-107d9f137c9a-credential-keys\") pod \"keystone-bootstrap-zxtmw\" (UID: \"7907fcb1-82b6-48bc-8c87-107d9f137c9a\") " pod="openstack/keystone-bootstrap-zxtmw" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.264193 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7jjx2\" (UniqueName: \"kubernetes.io/projected/7907fcb1-82b6-48bc-8c87-107d9f137c9a-kube-api-access-7jjx2\") pod \"keystone-bootstrap-zxtmw\" (UID: \"7907fcb1-82b6-48bc-8c87-107d9f137c9a\") " pod="openstack/keystone-bootstrap-zxtmw" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.264243 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wrcjv\" (UniqueName: \"kubernetes.io/projected/a1575caa-7652-4f58-92b8-fc433cc1fd8d-kube-api-access-wrcjv\") pod \"dnsmasq-dns-bbf5cc879-qjwdw\" (UID: \"a1575caa-7652-4f58-92b8-fc433cc1fd8d\") " pod="openstack/dnsmasq-dns-bbf5cc879-qjwdw" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.264280 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a1575caa-7652-4f58-92b8-fc433cc1fd8d-ovsdbserver-nb\") pod 
\"dnsmasq-dns-bbf5cc879-qjwdw\" (UID: \"a1575caa-7652-4f58-92b8-fc433cc1fd8d\") " pod="openstack/dnsmasq-dns-bbf5cc879-qjwdw" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.264300 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a1575caa-7652-4f58-92b8-fc433cc1fd8d-config\") pod \"dnsmasq-dns-bbf5cc879-qjwdw\" (UID: \"a1575caa-7652-4f58-92b8-fc433cc1fd8d\") " pod="openstack/dnsmasq-dns-bbf5cc879-qjwdw" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.264323 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7907fcb1-82b6-48bc-8c87-107d9f137c9a-scripts\") pod \"keystone-bootstrap-zxtmw\" (UID: \"7907fcb1-82b6-48bc-8c87-107d9f137c9a\") " pod="openstack/keystone-bootstrap-zxtmw" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.264342 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/7907fcb1-82b6-48bc-8c87-107d9f137c9a-fernet-keys\") pod \"keystone-bootstrap-zxtmw\" (UID: \"7907fcb1-82b6-48bc-8c87-107d9f137c9a\") " pod="openstack/keystone-bootstrap-zxtmw" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.264372 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a1575caa-7652-4f58-92b8-fc433cc1fd8d-dns-svc\") pod \"dnsmasq-dns-bbf5cc879-qjwdw\" (UID: \"a1575caa-7652-4f58-92b8-fc433cc1fd8d\") " pod="openstack/dnsmasq-dns-bbf5cc879-qjwdw" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.264393 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7907fcb1-82b6-48bc-8c87-107d9f137c9a-combined-ca-bundle\") pod \"keystone-bootstrap-zxtmw\" (UID: \"7907fcb1-82b6-48bc-8c87-107d9f137c9a\") " pod="openstack/keystone-bootstrap-zxtmw" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.264420 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7907fcb1-82b6-48bc-8c87-107d9f137c9a-config-data\") pod \"keystone-bootstrap-zxtmw\" (UID: \"7907fcb1-82b6-48bc-8c87-107d9f137c9a\") " pod="openstack/keystone-bootstrap-zxtmw" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.335282 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.337274 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.343235 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.343423 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.369170 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a1575caa-7652-4f58-92b8-fc433cc1fd8d-config\") pod \"dnsmasq-dns-bbf5cc879-qjwdw\" (UID: \"a1575caa-7652-4f58-92b8-fc433cc1fd8d\") " pod="openstack/dnsmasq-dns-bbf5cc879-qjwdw" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.369230 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a1575caa-7652-4f58-92b8-fc433cc1fd8d-ovsdbserver-nb\") pod \"dnsmasq-dns-bbf5cc879-qjwdw\" (UID: \"a1575caa-7652-4f58-92b8-fc433cc1fd8d\") " pod="openstack/dnsmasq-dns-bbf5cc879-qjwdw" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.369270 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7907fcb1-82b6-48bc-8c87-107d9f137c9a-scripts\") pod \"keystone-bootstrap-zxtmw\" (UID: \"7907fcb1-82b6-48bc-8c87-107d9f137c9a\") " pod="openstack/keystone-bootstrap-zxtmw" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.369290 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/7907fcb1-82b6-48bc-8c87-107d9f137c9a-fernet-keys\") pod \"keystone-bootstrap-zxtmw\" (UID: \"7907fcb1-82b6-48bc-8c87-107d9f137c9a\") " pod="openstack/keystone-bootstrap-zxtmw" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.369314 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a1575caa-7652-4f58-92b8-fc433cc1fd8d-dns-svc\") pod \"dnsmasq-dns-bbf5cc879-qjwdw\" (UID: \"a1575caa-7652-4f58-92b8-fc433cc1fd8d\") " pod="openstack/dnsmasq-dns-bbf5cc879-qjwdw" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.369340 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7907fcb1-82b6-48bc-8c87-107d9f137c9a-combined-ca-bundle\") pod \"keystone-bootstrap-zxtmw\" (UID: \"7907fcb1-82b6-48bc-8c87-107d9f137c9a\") " pod="openstack/keystone-bootstrap-zxtmw" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.369372 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7907fcb1-82b6-48bc-8c87-107d9f137c9a-config-data\") pod \"keystone-bootstrap-zxtmw\" (UID: \"7907fcb1-82b6-48bc-8c87-107d9f137c9a\") " pod="openstack/keystone-bootstrap-zxtmw" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.369427 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a1575caa-7652-4f58-92b8-fc433cc1fd8d-ovsdbserver-sb\") pod \"dnsmasq-dns-bbf5cc879-qjwdw\" (UID: \"a1575caa-7652-4f58-92b8-fc433cc1fd8d\") " pod="openstack/dnsmasq-dns-bbf5cc879-qjwdw" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.369521 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a1575caa-7652-4f58-92b8-fc433cc1fd8d-dns-swift-storage-0\") pod \"dnsmasq-dns-bbf5cc879-qjwdw\" (UID: \"a1575caa-7652-4f58-92b8-fc433cc1fd8d\") " pod="openstack/dnsmasq-dns-bbf5cc879-qjwdw" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.369551 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/7907fcb1-82b6-48bc-8c87-107d9f137c9a-credential-keys\") pod \"keystone-bootstrap-zxtmw\" (UID: \"7907fcb1-82b6-48bc-8c87-107d9f137c9a\") " pod="openstack/keystone-bootstrap-zxtmw" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.369582 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7jjx2\" (UniqueName: \"kubernetes.io/projected/7907fcb1-82b6-48bc-8c87-107d9f137c9a-kube-api-access-7jjx2\") pod \"keystone-bootstrap-zxtmw\" (UID: \"7907fcb1-82b6-48bc-8c87-107d9f137c9a\") " pod="openstack/keystone-bootstrap-zxtmw" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.369613 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wrcjv\" (UniqueName: \"kubernetes.io/projected/a1575caa-7652-4f58-92b8-fc433cc1fd8d-kube-api-access-wrcjv\") pod \"dnsmasq-dns-bbf5cc879-qjwdw\" (UID: \"a1575caa-7652-4f58-92b8-fc433cc1fd8d\") " pod="openstack/dnsmasq-dns-bbf5cc879-qjwdw" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.371908 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a1575caa-7652-4f58-92b8-fc433cc1fd8d-config\") pod \"dnsmasq-dns-bbf5cc879-qjwdw\" (UID: \"a1575caa-7652-4f58-92b8-fc433cc1fd8d\") " pod="openstack/dnsmasq-dns-bbf5cc879-qjwdw" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.372331 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a1575caa-7652-4f58-92b8-fc433cc1fd8d-ovsdbserver-nb\") pod \"dnsmasq-dns-bbf5cc879-qjwdw\" (UID: \"a1575caa-7652-4f58-92b8-fc433cc1fd8d\") " pod="openstack/dnsmasq-dns-bbf5cc879-qjwdw" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.373149 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a1575caa-7652-4f58-92b8-fc433cc1fd8d-dns-svc\") pod \"dnsmasq-dns-bbf5cc879-qjwdw\" (UID: \"a1575caa-7652-4f58-92b8-fc433cc1fd8d\") " pod="openstack/dnsmasq-dns-bbf5cc879-qjwdw" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.373743 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a1575caa-7652-4f58-92b8-fc433cc1fd8d-ovsdbserver-sb\") pod \"dnsmasq-dns-bbf5cc879-qjwdw\" (UID: \"a1575caa-7652-4f58-92b8-fc433cc1fd8d\") " pod="openstack/dnsmasq-dns-bbf5cc879-qjwdw" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.383704 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/7907fcb1-82b6-48bc-8c87-107d9f137c9a-fernet-keys\") pod \"keystone-bootstrap-zxtmw\" (UID: \"7907fcb1-82b6-48bc-8c87-107d9f137c9a\") " pod="openstack/keystone-bootstrap-zxtmw" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.384196 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a1575caa-7652-4f58-92b8-fc433cc1fd8d-dns-swift-storage-0\") pod 
\"dnsmasq-dns-bbf5cc879-qjwdw\" (UID: \"a1575caa-7652-4f58-92b8-fc433cc1fd8d\") " pod="openstack/dnsmasq-dns-bbf5cc879-qjwdw" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.389520 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7907fcb1-82b6-48bc-8c87-107d9f137c9a-combined-ca-bundle\") pod \"keystone-bootstrap-zxtmw\" (UID: \"7907fcb1-82b6-48bc-8c87-107d9f137c9a\") " pod="openstack/keystone-bootstrap-zxtmw" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.399486 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.400748 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7907fcb1-82b6-48bc-8c87-107d9f137c9a-scripts\") pod \"keystone-bootstrap-zxtmw\" (UID: \"7907fcb1-82b6-48bc-8c87-107d9f137c9a\") " pod="openstack/keystone-bootstrap-zxtmw" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.401191 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7907fcb1-82b6-48bc-8c87-107d9f137c9a-config-data\") pod \"keystone-bootstrap-zxtmw\" (UID: \"7907fcb1-82b6-48bc-8c87-107d9f137c9a\") " pod="openstack/keystone-bootstrap-zxtmw" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.416754 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/7907fcb1-82b6-48bc-8c87-107d9f137c9a-credential-keys\") pod \"keystone-bootstrap-zxtmw\" (UID: \"7907fcb1-82b6-48bc-8c87-107d9f137c9a\") " pod="openstack/keystone-bootstrap-zxtmw" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.463283 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wrcjv\" (UniqueName: \"kubernetes.io/projected/a1575caa-7652-4f58-92b8-fc433cc1fd8d-kube-api-access-wrcjv\") pod \"dnsmasq-dns-bbf5cc879-qjwdw\" (UID: \"a1575caa-7652-4f58-92b8-fc433cc1fd8d\") " pod="openstack/dnsmasq-dns-bbf5cc879-qjwdw" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.463648 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7jjx2\" (UniqueName: \"kubernetes.io/projected/7907fcb1-82b6-48bc-8c87-107d9f137c9a-kube-api-access-7jjx2\") pod \"keystone-bootstrap-zxtmw\" (UID: \"7907fcb1-82b6-48bc-8c87-107d9f137c9a\") " pod="openstack/keystone-bootstrap-zxtmw" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.480181 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/940dffda-38d6-469c-8453-5acb4092ed82-log-httpd\") pod \"ceilometer-0\" (UID: \"940dffda-38d6-469c-8453-5acb4092ed82\") " pod="openstack/ceilometer-0" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.480240 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/940dffda-38d6-469c-8453-5acb4092ed82-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"940dffda-38d6-469c-8453-5acb4092ed82\") " pod="openstack/ceilometer-0" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.480261 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/940dffda-38d6-469c-8453-5acb4092ed82-scripts\") pod \"ceilometer-0\" 
(UID: \"940dffda-38d6-469c-8453-5acb4092ed82\") " pod="openstack/ceilometer-0" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.480277 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/940dffda-38d6-469c-8453-5acb4092ed82-run-httpd\") pod \"ceilometer-0\" (UID: \"940dffda-38d6-469c-8453-5acb4092ed82\") " pod="openstack/ceilometer-0" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.480334 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/940dffda-38d6-469c-8453-5acb4092ed82-config-data\") pod \"ceilometer-0\" (UID: \"940dffda-38d6-469c-8453-5acb4092ed82\") " pod="openstack/ceilometer-0" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.480370 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nx4hd\" (UniqueName: \"kubernetes.io/projected/940dffda-38d6-469c-8453-5acb4092ed82-kube-api-access-nx4hd\") pod \"ceilometer-0\" (UID: \"940dffda-38d6-469c-8453-5acb4092ed82\") " pod="openstack/ceilometer-0" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.480389 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/940dffda-38d6-469c-8453-5acb4092ed82-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"940dffda-38d6-469c-8453-5acb4092ed82\") " pod="openstack/ceilometer-0" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.507990 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-sync-ftmg2"] Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.509265 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-ftmg2" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.511317 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-zxtmw" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.512313 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.522903 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-sync-82ms7"] Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.523946 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-hljng" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.524282 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-82ms7" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.538111 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.546689 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.546790 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-7sr8w" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.572201 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-ftmg2"] Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.583368 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/940dffda-38d6-469c-8453-5acb4092ed82-scripts\") pod \"ceilometer-0\" (UID: \"940dffda-38d6-469c-8453-5acb4092ed82\") " pod="openstack/ceilometer-0" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.583419 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/940dffda-38d6-469c-8453-5acb4092ed82-run-httpd\") pod \"ceilometer-0\" (UID: \"940dffda-38d6-469c-8453-5acb4092ed82\") " pod="openstack/ceilometer-0" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.583740 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/940dffda-38d6-469c-8453-5acb4092ed82-config-data\") pod \"ceilometer-0\" (UID: \"940dffda-38d6-469c-8453-5acb4092ed82\") " pod="openstack/ceilometer-0" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.583788 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nx4hd\" (UniqueName: \"kubernetes.io/projected/940dffda-38d6-469c-8453-5acb4092ed82-kube-api-access-nx4hd\") pod \"ceilometer-0\" (UID: \"940dffda-38d6-469c-8453-5acb4092ed82\") " pod="openstack/ceilometer-0" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.583816 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/940dffda-38d6-469c-8453-5acb4092ed82-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"940dffda-38d6-469c-8453-5acb4092ed82\") " pod="openstack/ceilometer-0" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.583856 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/940dffda-38d6-469c-8453-5acb4092ed82-log-httpd\") pod \"ceilometer-0\" (UID: \"940dffda-38d6-469c-8453-5acb4092ed82\") " pod="openstack/ceilometer-0" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.583888 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/940dffda-38d6-469c-8453-5acb4092ed82-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"940dffda-38d6-469c-8453-5acb4092ed82\") " pod="openstack/ceilometer-0" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.593974 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/940dffda-38d6-469c-8453-5acb4092ed82-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"940dffda-38d6-469c-8453-5acb4092ed82\") " pod="openstack/ceilometer-0" Nov 23 
15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.595292 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/940dffda-38d6-469c-8453-5acb4092ed82-run-httpd\") pod \"ceilometer-0\" (UID: \"940dffda-38d6-469c-8453-5acb4092ed82\") " pod="openstack/ceilometer-0" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.595569 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/940dffda-38d6-469c-8453-5acb4092ed82-log-httpd\") pod \"ceilometer-0\" (UID: \"940dffda-38d6-469c-8453-5acb4092ed82\") " pod="openstack/ceilometer-0" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.606717 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/940dffda-38d6-469c-8453-5acb4092ed82-config-data\") pod \"ceilometer-0\" (UID: \"940dffda-38d6-469c-8453-5acb4092ed82\") " pod="openstack/ceilometer-0" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.622280 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/940dffda-38d6-469c-8453-5acb4092ed82-scripts\") pod \"ceilometer-0\" (UID: \"940dffda-38d6-469c-8453-5acb4092ed82\") " pod="openstack/ceilometer-0" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.638046 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/940dffda-38d6-469c-8453-5acb4092ed82-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"940dffda-38d6-469c-8453-5acb4092ed82\") " pod="openstack/ceilometer-0" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.649677 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-82ms7"] Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.681379 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nx4hd\" (UniqueName: \"kubernetes.io/projected/940dffda-38d6-469c-8453-5acb4092ed82-kube-api-access-nx4hd\") pod \"ceilometer-0\" (UID: \"940dffda-38d6-469c-8453-5acb4092ed82\") " pod="openstack/ceilometer-0" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.688767 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j4g9r\" (UniqueName: \"kubernetes.io/projected/f53cab34-8b0f-45cf-8ef9-a524aa3578b4-kube-api-access-j4g9r\") pod \"barbican-db-sync-ftmg2\" (UID: \"f53cab34-8b0f-45cf-8ef9-a524aa3578b4\") " pod="openstack/barbican-db-sync-ftmg2" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.688815 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/93d8639a-cdc9-4e9f-a17f-883debe12333-etc-machine-id\") pod \"cinder-db-sync-82ms7\" (UID: \"93d8639a-cdc9-4e9f-a17f-883debe12333\") " pod="openstack/cinder-db-sync-82ms7" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.688847 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/f53cab34-8b0f-45cf-8ef9-a524aa3578b4-db-sync-config-data\") pod \"barbican-db-sync-ftmg2\" (UID: \"f53cab34-8b0f-45cf-8ef9-a524aa3578b4\") " pod="openstack/barbican-db-sync-ftmg2" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.688893 5050 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/93d8639a-cdc9-4e9f-a17f-883debe12333-scripts\") pod \"cinder-db-sync-82ms7\" (UID: \"93d8639a-cdc9-4e9f-a17f-883debe12333\") " pod="openstack/cinder-db-sync-82ms7" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.688924 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/93d8639a-cdc9-4e9f-a17f-883debe12333-db-sync-config-data\") pod \"cinder-db-sync-82ms7\" (UID: \"93d8639a-cdc9-4e9f-a17f-883debe12333\") " pod="openstack/cinder-db-sync-82ms7" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.689096 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/93d8639a-cdc9-4e9f-a17f-883debe12333-config-data\") pod \"cinder-db-sync-82ms7\" (UID: \"93d8639a-cdc9-4e9f-a17f-883debe12333\") " pod="openstack/cinder-db-sync-82ms7" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.689336 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mb2sl\" (UniqueName: \"kubernetes.io/projected/93d8639a-cdc9-4e9f-a17f-883debe12333-kube-api-access-mb2sl\") pod \"cinder-db-sync-82ms7\" (UID: \"93d8639a-cdc9-4e9f-a17f-883debe12333\") " pod="openstack/cinder-db-sync-82ms7" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.689365 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/93d8639a-cdc9-4e9f-a17f-883debe12333-combined-ca-bundle\") pod \"cinder-db-sync-82ms7\" (UID: \"93d8639a-cdc9-4e9f-a17f-883debe12333\") " pod="openstack/cinder-db-sync-82ms7" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.689384 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f53cab34-8b0f-45cf-8ef9-a524aa3578b4-combined-ca-bundle\") pod \"barbican-db-sync-ftmg2\" (UID: \"f53cab34-8b0f-45cf-8ef9-a524aa3578b4\") " pod="openstack/barbican-db-sync-ftmg2" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.731515 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-sync-sdvpr"] Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.732809 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-sdvpr" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.735608 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-bbf5cc879-qjwdw" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.739856 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.741042 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-mn2cc" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.759903 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.793502 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/93d8639a-cdc9-4e9f-a17f-883debe12333-db-sync-config-data\") pod \"cinder-db-sync-82ms7\" (UID: \"93d8639a-cdc9-4e9f-a17f-883debe12333\") " pod="openstack/cinder-db-sync-82ms7" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.793597 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/93d8639a-cdc9-4e9f-a17f-883debe12333-config-data\") pod \"cinder-db-sync-82ms7\" (UID: \"93d8639a-cdc9-4e9f-a17f-883debe12333\") " pod="openstack/cinder-db-sync-82ms7" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.793651 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mb2sl\" (UniqueName: \"kubernetes.io/projected/93d8639a-cdc9-4e9f-a17f-883debe12333-kube-api-access-mb2sl\") pod \"cinder-db-sync-82ms7\" (UID: \"93d8639a-cdc9-4e9f-a17f-883debe12333\") " pod="openstack/cinder-db-sync-82ms7" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.793698 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/93d8639a-cdc9-4e9f-a17f-883debe12333-combined-ca-bundle\") pod \"cinder-db-sync-82ms7\" (UID: \"93d8639a-cdc9-4e9f-a17f-883debe12333\") " pod="openstack/cinder-db-sync-82ms7" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.793718 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f53cab34-8b0f-45cf-8ef9-a524aa3578b4-combined-ca-bundle\") pod \"barbican-db-sync-ftmg2\" (UID: \"f53cab34-8b0f-45cf-8ef9-a524aa3578b4\") " pod="openstack/barbican-db-sync-ftmg2" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.793777 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j4g9r\" (UniqueName: \"kubernetes.io/projected/f53cab34-8b0f-45cf-8ef9-a524aa3578b4-kube-api-access-j4g9r\") pod \"barbican-db-sync-ftmg2\" (UID: \"f53cab34-8b0f-45cf-8ef9-a524aa3578b4\") " pod="openstack/barbican-db-sync-ftmg2" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.793809 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/93d8639a-cdc9-4e9f-a17f-883debe12333-etc-machine-id\") pod \"cinder-db-sync-82ms7\" (UID: \"93d8639a-cdc9-4e9f-a17f-883debe12333\") " pod="openstack/cinder-db-sync-82ms7" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.793861 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/f53cab34-8b0f-45cf-8ef9-a524aa3578b4-db-sync-config-data\") pod \"barbican-db-sync-ftmg2\" (UID: 
\"f53cab34-8b0f-45cf-8ef9-a524aa3578b4\") " pod="openstack/barbican-db-sync-ftmg2" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.793897 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/93d8639a-cdc9-4e9f-a17f-883debe12333-scripts\") pod \"cinder-db-sync-82ms7\" (UID: \"93d8639a-cdc9-4e9f-a17f-883debe12333\") " pod="openstack/cinder-db-sync-82ms7" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.802209 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/93d8639a-cdc9-4e9f-a17f-883debe12333-etc-machine-id\") pod \"cinder-db-sync-82ms7\" (UID: \"93d8639a-cdc9-4e9f-a17f-883debe12333\") " pod="openstack/cinder-db-sync-82ms7" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.810912 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-sdvpr"] Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.817898 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/93d8639a-cdc9-4e9f-a17f-883debe12333-scripts\") pod \"cinder-db-sync-82ms7\" (UID: \"93d8639a-cdc9-4e9f-a17f-883debe12333\") " pod="openstack/cinder-db-sync-82ms7" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.819199 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f53cab34-8b0f-45cf-8ef9-a524aa3578b4-combined-ca-bundle\") pod \"barbican-db-sync-ftmg2\" (UID: \"f53cab34-8b0f-45cf-8ef9-a524aa3578b4\") " pod="openstack/barbican-db-sync-ftmg2" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.827355 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/f53cab34-8b0f-45cf-8ef9-a524aa3578b4-db-sync-config-data\") pod \"barbican-db-sync-ftmg2\" (UID: \"f53cab34-8b0f-45cf-8ef9-a524aa3578b4\") " pod="openstack/barbican-db-sync-ftmg2" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.827402 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-bbf5cc879-qjwdw"] Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.828837 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/93d8639a-cdc9-4e9f-a17f-883debe12333-combined-ca-bundle\") pod \"cinder-db-sync-82ms7\" (UID: \"93d8639a-cdc9-4e9f-a17f-883debe12333\") " pod="openstack/cinder-db-sync-82ms7" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.831499 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/93d8639a-cdc9-4e9f-a17f-883debe12333-db-sync-config-data\") pod \"cinder-db-sync-82ms7\" (UID: \"93d8639a-cdc9-4e9f-a17f-883debe12333\") " pod="openstack/cinder-db-sync-82ms7" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.845096 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j4g9r\" (UniqueName: \"kubernetes.io/projected/f53cab34-8b0f-45cf-8ef9-a524aa3578b4-kube-api-access-j4g9r\") pod \"barbican-db-sync-ftmg2\" (UID: \"f53cab34-8b0f-45cf-8ef9-a524aa3578b4\") " pod="openstack/barbican-db-sync-ftmg2" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.860471 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/93d8639a-cdc9-4e9f-a17f-883debe12333-config-data\") pod \"cinder-db-sync-82ms7\" (UID: \"93d8639a-cdc9-4e9f-a17f-883debe12333\") " pod="openstack/cinder-db-sync-82ms7" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.868788 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mb2sl\" (UniqueName: \"kubernetes.io/projected/93d8639a-cdc9-4e9f-a17f-883debe12333-kube-api-access-mb2sl\") pod \"cinder-db-sync-82ms7\" (UID: \"93d8639a-cdc9-4e9f-a17f-883debe12333\") " pod="openstack/cinder-db-sync-82ms7" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.898551 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/49ce1b12-d28e-481e-ad05-68355446dd4c-combined-ca-bundle\") pod \"neutron-db-sync-sdvpr\" (UID: \"49ce1b12-d28e-481e-ad05-68355446dd4c\") " pod="openstack/neutron-db-sync-sdvpr" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.898643 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pz5j9\" (UniqueName: \"kubernetes.io/projected/49ce1b12-d28e-481e-ad05-68355446dd4c-kube-api-access-pz5j9\") pod \"neutron-db-sync-sdvpr\" (UID: \"49ce1b12-d28e-481e-ad05-68355446dd4c\") " pod="openstack/neutron-db-sync-sdvpr" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.898680 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/49ce1b12-d28e-481e-ad05-68355446dd4c-config\") pod \"neutron-db-sync-sdvpr\" (UID: \"49ce1b12-d28e-481e-ad05-68355446dd4c\") " pod="openstack/neutron-db-sync-sdvpr" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.906649 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-sync-j7p9t"] Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.907952 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-j7p9t" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.911899 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.912560 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.914084 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-k8v8m" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.930482 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-56df8fb6b7-rl78g"] Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.932040 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-56df8fb6b7-rl78g" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.950512 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-ftmg2" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.956359 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.958540 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-j7p9t"] Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.986036 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-56df8fb6b7-rl78g"] Nov 23 15:00:24 crc kubenswrapper[5050]: I1123 15:00:24.991904 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-82ms7" Nov 23 15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.001511 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3291f014-f18b-4142-a770-d9c33b141d66-config-data\") pod \"placement-db-sync-j7p9t\" (UID: \"3291f014-f18b-4142-a770-d9c33b141d66\") " pod="openstack/placement-db-sync-j7p9t" Nov 23 15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.001591 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/49ce1b12-d28e-481e-ad05-68355446dd4c-combined-ca-bundle\") pod \"neutron-db-sync-sdvpr\" (UID: \"49ce1b12-d28e-481e-ad05-68355446dd4c\") " pod="openstack/neutron-db-sync-sdvpr" Nov 23 15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.001664 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3291f014-f18b-4142-a770-d9c33b141d66-scripts\") pod \"placement-db-sync-j7p9t\" (UID: \"3291f014-f18b-4142-a770-d9c33b141d66\") " pod="openstack/placement-db-sync-j7p9t" Nov 23 15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.001710 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2bvdb\" (UniqueName: \"kubernetes.io/projected/3291f014-f18b-4142-a770-d9c33b141d66-kube-api-access-2bvdb\") pod \"placement-db-sync-j7p9t\" (UID: \"3291f014-f18b-4142-a770-d9c33b141d66\") " pod="openstack/placement-db-sync-j7p9t" Nov 23 15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.001740 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pz5j9\" (UniqueName: \"kubernetes.io/projected/49ce1b12-d28e-481e-ad05-68355446dd4c-kube-api-access-pz5j9\") pod \"neutron-db-sync-sdvpr\" (UID: \"49ce1b12-d28e-481e-ad05-68355446dd4c\") " pod="openstack/neutron-db-sync-sdvpr" Nov 23 15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.001768 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3291f014-f18b-4142-a770-d9c33b141d66-combined-ca-bundle\") pod \"placement-db-sync-j7p9t\" (UID: \"3291f014-f18b-4142-a770-d9c33b141d66\") " pod="openstack/placement-db-sync-j7p9t" Nov 23 15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.001794 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/49ce1b12-d28e-481e-ad05-68355446dd4c-config\") pod \"neutron-db-sync-sdvpr\" (UID: \"49ce1b12-d28e-481e-ad05-68355446dd4c\") " pod="openstack/neutron-db-sync-sdvpr" Nov 23 15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.002548 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3291f014-f18b-4142-a770-d9c33b141d66-logs\") pod \"placement-db-sync-j7p9t\" 
(UID: \"3291f014-f18b-4142-a770-d9c33b141d66\") " pod="openstack/placement-db-sync-j7p9t" Nov 23 15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.008315 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/49ce1b12-d28e-481e-ad05-68355446dd4c-combined-ca-bundle\") pod \"neutron-db-sync-sdvpr\" (UID: \"49ce1b12-d28e-481e-ad05-68355446dd4c\") " pod="openstack/neutron-db-sync-sdvpr" Nov 23 15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.019690 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/49ce1b12-d28e-481e-ad05-68355446dd4c-config\") pod \"neutron-db-sync-sdvpr\" (UID: \"49ce1b12-d28e-481e-ad05-68355446dd4c\") " pod="openstack/neutron-db-sync-sdvpr" Nov 23 15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.025282 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pz5j9\" (UniqueName: \"kubernetes.io/projected/49ce1b12-d28e-481e-ad05-68355446dd4c-kube-api-access-pz5j9\") pod \"neutron-db-sync-sdvpr\" (UID: \"49ce1b12-d28e-481e-ad05-68355446dd4c\") " pod="openstack/neutron-db-sync-sdvpr" Nov 23 15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.094068 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-sdvpr" Nov 23 15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.103801 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3291f014-f18b-4142-a770-d9c33b141d66-config-data\") pod \"placement-db-sync-j7p9t\" (UID: \"3291f014-f18b-4142-a770-d9c33b141d66\") " pod="openstack/placement-db-sync-j7p9t" Nov 23 15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.103898 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/28a444a8-06a7-48ed-8345-f9347485ebfa-ovsdbserver-sb\") pod \"dnsmasq-dns-56df8fb6b7-rl78g\" (UID: \"28a444a8-06a7-48ed-8345-f9347485ebfa\") " pod="openstack/dnsmasq-dns-56df8fb6b7-rl78g" Nov 23 15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.103923 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3291f014-f18b-4142-a770-d9c33b141d66-scripts\") pod \"placement-db-sync-j7p9t\" (UID: \"3291f014-f18b-4142-a770-d9c33b141d66\") " pod="openstack/placement-db-sync-j7p9t" Nov 23 15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.103942 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/28a444a8-06a7-48ed-8345-f9347485ebfa-dns-swift-storage-0\") pod \"dnsmasq-dns-56df8fb6b7-rl78g\" (UID: \"28a444a8-06a7-48ed-8345-f9347485ebfa\") " pod="openstack/dnsmasq-dns-56df8fb6b7-rl78g" Nov 23 15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.103959 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/28a444a8-06a7-48ed-8345-f9347485ebfa-ovsdbserver-nb\") pod \"dnsmasq-dns-56df8fb6b7-rl78g\" (UID: \"28a444a8-06a7-48ed-8345-f9347485ebfa\") " pod="openstack/dnsmasq-dns-56df8fb6b7-rl78g" Nov 23 15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.103982 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2bvdb\" (UniqueName: 
\"kubernetes.io/projected/3291f014-f18b-4142-a770-d9c33b141d66-kube-api-access-2bvdb\") pod \"placement-db-sync-j7p9t\" (UID: \"3291f014-f18b-4142-a770-d9c33b141d66\") " pod="openstack/placement-db-sync-j7p9t" Nov 23 15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.104005 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3291f014-f18b-4142-a770-d9c33b141d66-combined-ca-bundle\") pod \"placement-db-sync-j7p9t\" (UID: \"3291f014-f18b-4142-a770-d9c33b141d66\") " pod="openstack/placement-db-sync-j7p9t" Nov 23 15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.104054 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3291f014-f18b-4142-a770-d9c33b141d66-logs\") pod \"placement-db-sync-j7p9t\" (UID: \"3291f014-f18b-4142-a770-d9c33b141d66\") " pod="openstack/placement-db-sync-j7p9t" Nov 23 15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.104071 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-thvfg\" (UniqueName: \"kubernetes.io/projected/28a444a8-06a7-48ed-8345-f9347485ebfa-kube-api-access-thvfg\") pod \"dnsmasq-dns-56df8fb6b7-rl78g\" (UID: \"28a444a8-06a7-48ed-8345-f9347485ebfa\") " pod="openstack/dnsmasq-dns-56df8fb6b7-rl78g" Nov 23 15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.104106 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/28a444a8-06a7-48ed-8345-f9347485ebfa-config\") pod \"dnsmasq-dns-56df8fb6b7-rl78g\" (UID: \"28a444a8-06a7-48ed-8345-f9347485ebfa\") " pod="openstack/dnsmasq-dns-56df8fb6b7-rl78g" Nov 23 15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.104137 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/28a444a8-06a7-48ed-8345-f9347485ebfa-dns-svc\") pod \"dnsmasq-dns-56df8fb6b7-rl78g\" (UID: \"28a444a8-06a7-48ed-8345-f9347485ebfa\") " pod="openstack/dnsmasq-dns-56df8fb6b7-rl78g" Nov 23 15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.112853 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3291f014-f18b-4142-a770-d9c33b141d66-scripts\") pod \"placement-db-sync-j7p9t\" (UID: \"3291f014-f18b-4142-a770-d9c33b141d66\") " pod="openstack/placement-db-sync-j7p9t" Nov 23 15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.114528 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3291f014-f18b-4142-a770-d9c33b141d66-logs\") pod \"placement-db-sync-j7p9t\" (UID: \"3291f014-f18b-4142-a770-d9c33b141d66\") " pod="openstack/placement-db-sync-j7p9t" Nov 23 15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.115012 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3291f014-f18b-4142-a770-d9c33b141d66-config-data\") pod \"placement-db-sync-j7p9t\" (UID: \"3291f014-f18b-4142-a770-d9c33b141d66\") " pod="openstack/placement-db-sync-j7p9t" Nov 23 15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.117203 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3291f014-f18b-4142-a770-d9c33b141d66-combined-ca-bundle\") pod \"placement-db-sync-j7p9t\" (UID: 
\"3291f014-f18b-4142-a770-d9c33b141d66\") " pod="openstack/placement-db-sync-j7p9t" Nov 23 15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.125256 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2bvdb\" (UniqueName: \"kubernetes.io/projected/3291f014-f18b-4142-a770-d9c33b141d66-kube-api-access-2bvdb\") pod \"placement-db-sync-j7p9t\" (UID: \"3291f014-f18b-4142-a770-d9c33b141d66\") " pod="openstack/placement-db-sync-j7p9t" Nov 23 15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.205666 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/28a444a8-06a7-48ed-8345-f9347485ebfa-ovsdbserver-nb\") pod \"dnsmasq-dns-56df8fb6b7-rl78g\" (UID: \"28a444a8-06a7-48ed-8345-f9347485ebfa\") " pod="openstack/dnsmasq-dns-56df8fb6b7-rl78g" Nov 23 15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.205780 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-thvfg\" (UniqueName: \"kubernetes.io/projected/28a444a8-06a7-48ed-8345-f9347485ebfa-kube-api-access-thvfg\") pod \"dnsmasq-dns-56df8fb6b7-rl78g\" (UID: \"28a444a8-06a7-48ed-8345-f9347485ebfa\") " pod="openstack/dnsmasq-dns-56df8fb6b7-rl78g" Nov 23 15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.205829 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/28a444a8-06a7-48ed-8345-f9347485ebfa-config\") pod \"dnsmasq-dns-56df8fb6b7-rl78g\" (UID: \"28a444a8-06a7-48ed-8345-f9347485ebfa\") " pod="openstack/dnsmasq-dns-56df8fb6b7-rl78g" Nov 23 15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.205867 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/28a444a8-06a7-48ed-8345-f9347485ebfa-dns-svc\") pod \"dnsmasq-dns-56df8fb6b7-rl78g\" (UID: \"28a444a8-06a7-48ed-8345-f9347485ebfa\") " pod="openstack/dnsmasq-dns-56df8fb6b7-rl78g" Nov 23 15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.205947 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/28a444a8-06a7-48ed-8345-f9347485ebfa-ovsdbserver-sb\") pod \"dnsmasq-dns-56df8fb6b7-rl78g\" (UID: \"28a444a8-06a7-48ed-8345-f9347485ebfa\") " pod="openstack/dnsmasq-dns-56df8fb6b7-rl78g" Nov 23 15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.205975 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/28a444a8-06a7-48ed-8345-f9347485ebfa-dns-swift-storage-0\") pod \"dnsmasq-dns-56df8fb6b7-rl78g\" (UID: \"28a444a8-06a7-48ed-8345-f9347485ebfa\") " pod="openstack/dnsmasq-dns-56df8fb6b7-rl78g" Nov 23 15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.206832 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/28a444a8-06a7-48ed-8345-f9347485ebfa-ovsdbserver-nb\") pod \"dnsmasq-dns-56df8fb6b7-rl78g\" (UID: \"28a444a8-06a7-48ed-8345-f9347485ebfa\") " pod="openstack/dnsmasq-dns-56df8fb6b7-rl78g" Nov 23 15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.206914 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/28a444a8-06a7-48ed-8345-f9347485ebfa-config\") pod \"dnsmasq-dns-56df8fb6b7-rl78g\" (UID: \"28a444a8-06a7-48ed-8345-f9347485ebfa\") " pod="openstack/dnsmasq-dns-56df8fb6b7-rl78g" Nov 23 
15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.208831 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/28a444a8-06a7-48ed-8345-f9347485ebfa-dns-swift-storage-0\") pod \"dnsmasq-dns-56df8fb6b7-rl78g\" (UID: \"28a444a8-06a7-48ed-8345-f9347485ebfa\") " pod="openstack/dnsmasq-dns-56df8fb6b7-rl78g" Nov 23 15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.209201 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/28a444a8-06a7-48ed-8345-f9347485ebfa-ovsdbserver-sb\") pod \"dnsmasq-dns-56df8fb6b7-rl78g\" (UID: \"28a444a8-06a7-48ed-8345-f9347485ebfa\") " pod="openstack/dnsmasq-dns-56df8fb6b7-rl78g" Nov 23 15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.210427 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/28a444a8-06a7-48ed-8345-f9347485ebfa-dns-svc\") pod \"dnsmasq-dns-56df8fb6b7-rl78g\" (UID: \"28a444a8-06a7-48ed-8345-f9347485ebfa\") " pod="openstack/dnsmasq-dns-56df8fb6b7-rl78g" Nov 23 15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.227524 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-thvfg\" (UniqueName: \"kubernetes.io/projected/28a444a8-06a7-48ed-8345-f9347485ebfa-kube-api-access-thvfg\") pod \"dnsmasq-dns-56df8fb6b7-rl78g\" (UID: \"28a444a8-06a7-48ed-8345-f9347485ebfa\") " pod="openstack/dnsmasq-dns-56df8fb6b7-rl78g" Nov 23 15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.253712 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Nov 23 15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.261920 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 23 15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.272353 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Nov 23 15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.272368 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-g8gzt" Nov 23 15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.272556 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-j7p9t" Nov 23 15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.272579 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Nov 23 15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.272669 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Nov 23 15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.278219 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 23 15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.278988 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-56df8fb6b7-rl78g" Nov 23 15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.296072 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-zxtmw"] Nov 23 15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.366377 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 23 15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.368282 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 23 15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.372926 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Nov 23 15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.374282 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Nov 23 15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.399314 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 23 15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.412613 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cfff6fc4-80c4-47e9-8bd3-318ad452fff5-config-data\") pod \"glance-default-external-api-0\" (UID: \"cfff6fc4-80c4-47e9-8bd3-318ad452fff5\") " pod="openstack/glance-default-external-api-0" Nov 23 15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.412901 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w5knq\" (UniqueName: \"kubernetes.io/projected/cfff6fc4-80c4-47e9-8bd3-318ad452fff5-kube-api-access-w5knq\") pod \"glance-default-external-api-0\" (UID: \"cfff6fc4-80c4-47e9-8bd3-318ad452fff5\") " pod="openstack/glance-default-external-api-0" Nov 23 15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.413115 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cfff6fc4-80c4-47e9-8bd3-318ad452fff5-logs\") pod \"glance-default-external-api-0\" (UID: \"cfff6fc4-80c4-47e9-8bd3-318ad452fff5\") " pod="openstack/glance-default-external-api-0" Nov 23 15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.413252 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"cfff6fc4-80c4-47e9-8bd3-318ad452fff5\") " pod="openstack/glance-default-external-api-0" Nov 23 15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.413336 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cfff6fc4-80c4-47e9-8bd3-318ad452fff5-scripts\") pod \"glance-default-external-api-0\" (UID: \"cfff6fc4-80c4-47e9-8bd3-318ad452fff5\") " pod="openstack/glance-default-external-api-0" Nov 23 15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.413424 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cfff6fc4-80c4-47e9-8bd3-318ad452fff5-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"cfff6fc4-80c4-47e9-8bd3-318ad452fff5\") " pod="openstack/glance-default-external-api-0" Nov 23 15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.413519 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/cfff6fc4-80c4-47e9-8bd3-318ad452fff5-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"cfff6fc4-80c4-47e9-8bd3-318ad452fff5\") " pod="openstack/glance-default-external-api-0" Nov 23 15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.413616 5050 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/cfff6fc4-80c4-47e9-8bd3-318ad452fff5-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"cfff6fc4-80c4-47e9-8bd3-318ad452fff5\") " pod="openstack/glance-default-external-api-0" Nov 23 15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.515295 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w5knq\" (UniqueName: \"kubernetes.io/projected/cfff6fc4-80c4-47e9-8bd3-318ad452fff5-kube-api-access-w5knq\") pod \"glance-default-external-api-0\" (UID: \"cfff6fc4-80c4-47e9-8bd3-318ad452fff5\") " pod="openstack/glance-default-external-api-0" Nov 23 15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.515354 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cfff6fc4-80c4-47e9-8bd3-318ad452fff5-logs\") pod \"glance-default-external-api-0\" (UID: \"cfff6fc4-80c4-47e9-8bd3-318ad452fff5\") " pod="openstack/glance-default-external-api-0" Nov 23 15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.515397 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3ad391d9-4f42-4252-b17d-55db2e864d06-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"3ad391d9-4f42-4252-b17d-55db2e864d06\") " pod="openstack/glance-default-internal-api-0" Nov 23 15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.515427 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"cfff6fc4-80c4-47e9-8bd3-318ad452fff5\") " pod="openstack/glance-default-external-api-0" Nov 23 15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.515467 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cfff6fc4-80c4-47e9-8bd3-318ad452fff5-scripts\") pod \"glance-default-external-api-0\" (UID: \"cfff6fc4-80c4-47e9-8bd3-318ad452fff5\") " pod="openstack/glance-default-external-api-0" Nov 23 15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.515490 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/cfff6fc4-80c4-47e9-8bd3-318ad452fff5-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"cfff6fc4-80c4-47e9-8bd3-318ad452fff5\") " pod="openstack/glance-default-external-api-0" Nov 23 15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.515504 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cfff6fc4-80c4-47e9-8bd3-318ad452fff5-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"cfff6fc4-80c4-47e9-8bd3-318ad452fff5\") " pod="openstack/glance-default-external-api-0" Nov 23 15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.515553 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/cfff6fc4-80c4-47e9-8bd3-318ad452fff5-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"cfff6fc4-80c4-47e9-8bd3-318ad452fff5\") " pod="openstack/glance-default-external-api-0" Nov 23 15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.515573 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"kube-api-access-mkt4r\" (UniqueName: \"kubernetes.io/projected/3ad391d9-4f42-4252-b17d-55db2e864d06-kube-api-access-mkt4r\") pod \"glance-default-internal-api-0\" (UID: \"3ad391d9-4f42-4252-b17d-55db2e864d06\") " pod="openstack/glance-default-internal-api-0" Nov 23 15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.515599 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3ad391d9-4f42-4252-b17d-55db2e864d06-logs\") pod \"glance-default-internal-api-0\" (UID: \"3ad391d9-4f42-4252-b17d-55db2e864d06\") " pod="openstack/glance-default-internal-api-0" Nov 23 15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.515638 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3ad391d9-4f42-4252-b17d-55db2e864d06-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"3ad391d9-4f42-4252-b17d-55db2e864d06\") " pod="openstack/glance-default-internal-api-0" Nov 23 15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.515662 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3ad391d9-4f42-4252-b17d-55db2e864d06-scripts\") pod \"glance-default-internal-api-0\" (UID: \"3ad391d9-4f42-4252-b17d-55db2e864d06\") " pod="openstack/glance-default-internal-api-0" Nov 23 15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.515698 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-internal-api-0\" (UID: \"3ad391d9-4f42-4252-b17d-55db2e864d06\") " pod="openstack/glance-default-internal-api-0" Nov 23 15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.515718 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/3ad391d9-4f42-4252-b17d-55db2e864d06-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"3ad391d9-4f42-4252-b17d-55db2e864d06\") " pod="openstack/glance-default-internal-api-0" Nov 23 15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.515739 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3ad391d9-4f42-4252-b17d-55db2e864d06-config-data\") pod \"glance-default-internal-api-0\" (UID: \"3ad391d9-4f42-4252-b17d-55db2e864d06\") " pod="openstack/glance-default-internal-api-0" Nov 23 15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.515763 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cfff6fc4-80c4-47e9-8bd3-318ad452fff5-config-data\") pod \"glance-default-external-api-0\" (UID: \"cfff6fc4-80c4-47e9-8bd3-318ad452fff5\") " pod="openstack/glance-default-external-api-0" Nov 23 15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.516518 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/cfff6fc4-80c4-47e9-8bd3-318ad452fff5-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"cfff6fc4-80c4-47e9-8bd3-318ad452fff5\") " pod="openstack/glance-default-external-api-0" Nov 23 15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.517490 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cfff6fc4-80c4-47e9-8bd3-318ad452fff5-logs\") pod \"glance-default-external-api-0\" (UID: \"cfff6fc4-80c4-47e9-8bd3-318ad452fff5\") " pod="openstack/glance-default-external-api-0" Nov 23 15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.517934 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-bbf5cc879-qjwdw"] Nov 23 15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.520557 5050 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"cfff6fc4-80c4-47e9-8bd3-318ad452fff5\") device mount path \"/mnt/openstack/pv09\"" pod="openstack/glance-default-external-api-0" Nov 23 15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.521629 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cfff6fc4-80c4-47e9-8bd3-318ad452fff5-scripts\") pod \"glance-default-external-api-0\" (UID: \"cfff6fc4-80c4-47e9-8bd3-318ad452fff5\") " pod="openstack/glance-default-external-api-0" Nov 23 15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.521985 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/cfff6fc4-80c4-47e9-8bd3-318ad452fff5-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"cfff6fc4-80c4-47e9-8bd3-318ad452fff5\") " pod="openstack/glance-default-external-api-0" Nov 23 15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.524825 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cfff6fc4-80c4-47e9-8bd3-318ad452fff5-config-data\") pod \"glance-default-external-api-0\" (UID: \"cfff6fc4-80c4-47e9-8bd3-318ad452fff5\") " pod="openstack/glance-default-external-api-0" Nov 23 15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.532902 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cfff6fc4-80c4-47e9-8bd3-318ad452fff5-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"cfff6fc4-80c4-47e9-8bd3-318ad452fff5\") " pod="openstack/glance-default-external-api-0" Nov 23 15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.547696 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w5knq\" (UniqueName: \"kubernetes.io/projected/cfff6fc4-80c4-47e9-8bd3-318ad452fff5-kube-api-access-w5knq\") pod \"glance-default-external-api-0\" (UID: \"cfff6fc4-80c4-47e9-8bd3-318ad452fff5\") " pod="openstack/glance-default-external-api-0" Nov 23 15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.630952 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3ad391d9-4f42-4252-b17d-55db2e864d06-config-data\") pod \"glance-default-internal-api-0\" (UID: \"3ad391d9-4f42-4252-b17d-55db2e864d06\") " pod="openstack/glance-default-internal-api-0" Nov 23 15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.631583 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3ad391d9-4f42-4252-b17d-55db2e864d06-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"3ad391d9-4f42-4252-b17d-55db2e864d06\") " pod="openstack/glance-default-internal-api-0" Nov 23 15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.631781 
5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mkt4r\" (UniqueName: \"kubernetes.io/projected/3ad391d9-4f42-4252-b17d-55db2e864d06-kube-api-access-mkt4r\") pod \"glance-default-internal-api-0\" (UID: \"3ad391d9-4f42-4252-b17d-55db2e864d06\") " pod="openstack/glance-default-internal-api-0" Nov 23 15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.631826 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3ad391d9-4f42-4252-b17d-55db2e864d06-logs\") pod \"glance-default-internal-api-0\" (UID: \"3ad391d9-4f42-4252-b17d-55db2e864d06\") " pod="openstack/glance-default-internal-api-0" Nov 23 15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.631935 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3ad391d9-4f42-4252-b17d-55db2e864d06-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"3ad391d9-4f42-4252-b17d-55db2e864d06\") " pod="openstack/glance-default-internal-api-0" Nov 23 15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.632000 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3ad391d9-4f42-4252-b17d-55db2e864d06-scripts\") pod \"glance-default-internal-api-0\" (UID: \"3ad391d9-4f42-4252-b17d-55db2e864d06\") " pod="openstack/glance-default-internal-api-0" Nov 23 15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.632238 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-internal-api-0\" (UID: \"3ad391d9-4f42-4252-b17d-55db2e864d06\") " pod="openstack/glance-default-internal-api-0" Nov 23 15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.632341 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/3ad391d9-4f42-4252-b17d-55db2e864d06-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"3ad391d9-4f42-4252-b17d-55db2e864d06\") " pod="openstack/glance-default-internal-api-0" Nov 23 15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.636128 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3ad391d9-4f42-4252-b17d-55db2e864d06-logs\") pod \"glance-default-internal-api-0\" (UID: \"3ad391d9-4f42-4252-b17d-55db2e864d06\") " pod="openstack/glance-default-internal-api-0" Nov 23 15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.652022 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/3ad391d9-4f42-4252-b17d-55db2e864d06-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"3ad391d9-4f42-4252-b17d-55db2e864d06\") " pod="openstack/glance-default-internal-api-0" Nov 23 15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.652118 5050 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-internal-api-0\" (UID: \"3ad391d9-4f42-4252-b17d-55db2e864d06\") device mount path \"/mnt/openstack/pv11\"" pod="openstack/glance-default-internal-api-0" Nov 23 15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.675192 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-ftmg2"] Nov 23 15:00:25 crc 
kubenswrapper[5050]: I1123 15:00:25.680994 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mkt4r\" (UniqueName: \"kubernetes.io/projected/3ad391d9-4f42-4252-b17d-55db2e864d06-kube-api-access-mkt4r\") pod \"glance-default-internal-api-0\" (UID: \"3ad391d9-4f42-4252-b17d-55db2e864d06\") " pod="openstack/glance-default-internal-api-0" Nov 23 15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.684271 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3ad391d9-4f42-4252-b17d-55db2e864d06-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"3ad391d9-4f42-4252-b17d-55db2e864d06\") " pod="openstack/glance-default-internal-api-0" Nov 23 15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.685696 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"cfff6fc4-80c4-47e9-8bd3-318ad452fff5\") " pod="openstack/glance-default-external-api-0" Nov 23 15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.686016 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3ad391d9-4f42-4252-b17d-55db2e864d06-scripts\") pod \"glance-default-internal-api-0\" (UID: \"3ad391d9-4f42-4252-b17d-55db2e864d06\") " pod="openstack/glance-default-internal-api-0" Nov 23 15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.687911 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3ad391d9-4f42-4252-b17d-55db2e864d06-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"3ad391d9-4f42-4252-b17d-55db2e864d06\") " pod="openstack/glance-default-internal-api-0" Nov 23 15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.694972 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3ad391d9-4f42-4252-b17d-55db2e864d06-config-data\") pod \"glance-default-internal-api-0\" (UID: \"3ad391d9-4f42-4252-b17d-55db2e864d06\") " pod="openstack/glance-default-internal-api-0" Nov 23 15:00:25 crc kubenswrapper[5050]: W1123 15:00:25.700935 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod940dffda_38d6_469c_8453_5acb4092ed82.slice/crio-ad251e1cf70114e00b1f72f6e61734fb9a4d76fe4079c19a50d8a805e9205c40 WatchSource:0}: Error finding container ad251e1cf70114e00b1f72f6e61734fb9a4d76fe4079c19a50d8a805e9205c40: Status 404 returned error can't find the container with id ad251e1cf70114e00b1f72f6e61734fb9a4d76fe4079c19a50d8a805e9205c40 Nov 23 15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.710990 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-internal-api-0\" (UID: \"3ad391d9-4f42-4252-b17d-55db2e864d06\") " pod="openstack/glance-default-internal-api-0" Nov 23 15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.720314 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 23 15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.865701 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-82ms7"] Nov 23 15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.896581 5050 kubelet.go:2428] "SyncLoop 
UPDATE" source="api" pods=["openstack/neutron-db-sync-sdvpr"] Nov 23 15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.900675 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 23 15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.911264 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 23 15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.947602 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bbf5cc879-qjwdw" event={"ID":"a1575caa-7652-4f58-92b8-fc433cc1fd8d","Type":"ContainerStarted","Data":"3ba267450bbb866802f82770ab37b2b52b7398cdb21aae7c99e7d2dd462ec1c8"} Nov 23 15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.947684 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bbf5cc879-qjwdw" event={"ID":"a1575caa-7652-4f58-92b8-fc433cc1fd8d","Type":"ContainerStarted","Data":"737a1e14936934b13edce329d1cdb28b6a16a9285594840047d50b49e8f445e8"} Nov 23 15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.947770 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-bbf5cc879-qjwdw" podUID="a1575caa-7652-4f58-92b8-fc433cc1fd8d" containerName="init" containerID="cri-o://3ba267450bbb866802f82770ab37b2b52b7398cdb21aae7c99e7d2dd462ec1c8" gracePeriod=10 Nov 23 15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.954863 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-82ms7" event={"ID":"93d8639a-cdc9-4e9f-a17f-883debe12333","Type":"ContainerStarted","Data":"fe23d5b7c8411b4828f1b618c7eaec1afd44a5d92788f3f12fd2d7721734c7f7"} Nov 23 15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.959524 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-zxtmw" event={"ID":"7907fcb1-82b6-48bc-8c87-107d9f137c9a","Type":"ContainerStarted","Data":"d8471c24b16d6d4f6103efd54a5561d59050162334738e8d02ec33d90eef6ff9"} Nov 23 15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.959551 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-zxtmw" event={"ID":"7907fcb1-82b6-48bc-8c87-107d9f137c9a","Type":"ContainerStarted","Data":"eb21c37e691b3b226fad2132d3bc14d82851acba8de4df9142984743497d4beb"} Nov 23 15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.967246 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-sdvpr" event={"ID":"49ce1b12-d28e-481e-ad05-68355446dd4c","Type":"ContainerStarted","Data":"1957be852c3f2c65d6ab69178531aec36e53ec68fd2b7c2f0249fd400d3bc094"} Nov 23 15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.971104 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"940dffda-38d6-469c-8453-5acb4092ed82","Type":"ContainerStarted","Data":"ad251e1cf70114e00b1f72f6e61734fb9a4d76fe4079c19a50d8a805e9205c40"} Nov 23 15:00:25 crc kubenswrapper[5050]: I1123 15:00:25.975866 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-ftmg2" event={"ID":"f53cab34-8b0f-45cf-8ef9-a524aa3578b4","Type":"ContainerStarted","Data":"2e3293f7d092705d6004268dbed3216e6a4c27d39c7fcfe40dc811ba5ff5da2e"} Nov 23 15:00:26 crc kubenswrapper[5050]: I1123 15:00:26.003215 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-zxtmw" podStartSLOduration=2.003191865 podStartE2EDuration="2.003191865s" podCreationTimestamp="2025-11-23 15:00:24 +0000 UTC" 
firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 15:00:25.998251277 +0000 UTC m=+1121.165247752" watchObservedRunningTime="2025-11-23 15:00:26.003191865 +0000 UTC m=+1121.170188350" Nov 23 15:00:26 crc kubenswrapper[5050]: I1123 15:00:26.100366 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-56df8fb6b7-rl78g"] Nov 23 15:00:26 crc kubenswrapper[5050]: I1123 15:00:26.157662 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-j7p9t"] Nov 23 15:00:26 crc kubenswrapper[5050]: I1123 15:00:26.555747 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-bbf5cc879-qjwdw" Nov 23 15:00:26 crc kubenswrapper[5050]: I1123 15:00:26.644671 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 23 15:00:26 crc kubenswrapper[5050]: I1123 15:00:26.678274 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a1575caa-7652-4f58-92b8-fc433cc1fd8d-dns-svc\") pod \"a1575caa-7652-4f58-92b8-fc433cc1fd8d\" (UID: \"a1575caa-7652-4f58-92b8-fc433cc1fd8d\") " Nov 23 15:00:26 crc kubenswrapper[5050]: I1123 15:00:26.678407 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a1575caa-7652-4f58-92b8-fc433cc1fd8d-config\") pod \"a1575caa-7652-4f58-92b8-fc433cc1fd8d\" (UID: \"a1575caa-7652-4f58-92b8-fc433cc1fd8d\") " Nov 23 15:00:26 crc kubenswrapper[5050]: I1123 15:00:26.678467 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a1575caa-7652-4f58-92b8-fc433cc1fd8d-ovsdbserver-sb\") pod \"a1575caa-7652-4f58-92b8-fc433cc1fd8d\" (UID: \"a1575caa-7652-4f58-92b8-fc433cc1fd8d\") " Nov 23 15:00:26 crc kubenswrapper[5050]: I1123 15:00:26.678615 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a1575caa-7652-4f58-92b8-fc433cc1fd8d-dns-swift-storage-0\") pod \"a1575caa-7652-4f58-92b8-fc433cc1fd8d\" (UID: \"a1575caa-7652-4f58-92b8-fc433cc1fd8d\") " Nov 23 15:00:26 crc kubenswrapper[5050]: I1123 15:00:26.678708 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wrcjv\" (UniqueName: \"kubernetes.io/projected/a1575caa-7652-4f58-92b8-fc433cc1fd8d-kube-api-access-wrcjv\") pod \"a1575caa-7652-4f58-92b8-fc433cc1fd8d\" (UID: \"a1575caa-7652-4f58-92b8-fc433cc1fd8d\") " Nov 23 15:00:26 crc kubenswrapper[5050]: I1123 15:00:26.678733 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a1575caa-7652-4f58-92b8-fc433cc1fd8d-ovsdbserver-nb\") pod \"a1575caa-7652-4f58-92b8-fc433cc1fd8d\" (UID: \"a1575caa-7652-4f58-92b8-fc433cc1fd8d\") " Nov 23 15:00:26 crc kubenswrapper[5050]: I1123 15:00:26.744501 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 23 15:00:26 crc kubenswrapper[5050]: I1123 15:00:26.766106 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a1575caa-7652-4f58-92b8-fc433cc1fd8d-kube-api-access-wrcjv" (OuterVolumeSpecName: "kube-api-access-wrcjv") pod "a1575caa-7652-4f58-92b8-fc433cc1fd8d" (UID: 
"a1575caa-7652-4f58-92b8-fc433cc1fd8d"). InnerVolumeSpecName "kube-api-access-wrcjv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:00:26 crc kubenswrapper[5050]: I1123 15:00:26.796880 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wrcjv\" (UniqueName: \"kubernetes.io/projected/a1575caa-7652-4f58-92b8-fc433cc1fd8d-kube-api-access-wrcjv\") on node \"crc\" DevicePath \"\"" Nov 23 15:00:26 crc kubenswrapper[5050]: I1123 15:00:26.850683 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 23 15:00:26 crc kubenswrapper[5050]: I1123 15:00:26.905179 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a1575caa-7652-4f58-92b8-fc433cc1fd8d-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "a1575caa-7652-4f58-92b8-fc433cc1fd8d" (UID: "a1575caa-7652-4f58-92b8-fc433cc1fd8d"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 15:00:26 crc kubenswrapper[5050]: I1123 15:00:26.913835 5050 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a1575caa-7652-4f58-92b8-fc433cc1fd8d-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 23 15:00:26 crc kubenswrapper[5050]: I1123 15:00:26.943882 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 23 15:00:26 crc kubenswrapper[5050]: I1123 15:00:26.981126 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a1575caa-7652-4f58-92b8-fc433cc1fd8d-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "a1575caa-7652-4f58-92b8-fc433cc1fd8d" (UID: "a1575caa-7652-4f58-92b8-fc433cc1fd8d"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 15:00:27 crc kubenswrapper[5050]: I1123 15:00:27.011419 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a1575caa-7652-4f58-92b8-fc433cc1fd8d-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "a1575caa-7652-4f58-92b8-fc433cc1fd8d" (UID: "a1575caa-7652-4f58-92b8-fc433cc1fd8d"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 15:00:27 crc kubenswrapper[5050]: I1123 15:00:27.018297 5050 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a1575caa-7652-4f58-92b8-fc433cc1fd8d-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 23 15:00:27 crc kubenswrapper[5050]: I1123 15:00:27.018347 5050 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a1575caa-7652-4f58-92b8-fc433cc1fd8d-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 23 15:00:27 crc kubenswrapper[5050]: I1123 15:00:27.020686 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a1575caa-7652-4f58-92b8-fc433cc1fd8d-config" (OuterVolumeSpecName: "config") pod "a1575caa-7652-4f58-92b8-fc433cc1fd8d" (UID: "a1575caa-7652-4f58-92b8-fc433cc1fd8d"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 15:00:27 crc kubenswrapper[5050]: I1123 15:00:27.026099 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a1575caa-7652-4f58-92b8-fc433cc1fd8d-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "a1575caa-7652-4f58-92b8-fc433cc1fd8d" (UID: "a1575caa-7652-4f58-92b8-fc433cc1fd8d"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 15:00:27 crc kubenswrapper[5050]: I1123 15:00:27.076980 5050 generic.go:334] "Generic (PLEG): container finished" podID="a1575caa-7652-4f58-92b8-fc433cc1fd8d" containerID="3ba267450bbb866802f82770ab37b2b52b7398cdb21aae7c99e7d2dd462ec1c8" exitCode=0 Nov 23 15:00:27 crc kubenswrapper[5050]: I1123 15:00:27.077049 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bbf5cc879-qjwdw" event={"ID":"a1575caa-7652-4f58-92b8-fc433cc1fd8d","Type":"ContainerDied","Data":"3ba267450bbb866802f82770ab37b2b52b7398cdb21aae7c99e7d2dd462ec1c8"} Nov 23 15:00:27 crc kubenswrapper[5050]: I1123 15:00:27.077079 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bbf5cc879-qjwdw" event={"ID":"a1575caa-7652-4f58-92b8-fc433cc1fd8d","Type":"ContainerDied","Data":"737a1e14936934b13edce329d1cdb28b6a16a9285594840047d50b49e8f445e8"} Nov 23 15:00:27 crc kubenswrapper[5050]: I1123 15:00:27.077095 5050 scope.go:117] "RemoveContainer" containerID="3ba267450bbb866802f82770ab37b2b52b7398cdb21aae7c99e7d2dd462ec1c8" Nov 23 15:00:27 crc kubenswrapper[5050]: I1123 15:00:27.077219 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-bbf5cc879-qjwdw" Nov 23 15:00:27 crc kubenswrapper[5050]: I1123 15:00:27.120912 5050 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a1575caa-7652-4f58-92b8-fc433cc1fd8d-config\") on node \"crc\" DevicePath \"\"" Nov 23 15:00:27 crc kubenswrapper[5050]: I1123 15:00:27.120945 5050 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a1575caa-7652-4f58-92b8-fc433cc1fd8d-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 23 15:00:27 crc kubenswrapper[5050]: I1123 15:00:27.124346 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-56df8fb6b7-rl78g" event={"ID":"28a444a8-06a7-48ed-8345-f9347485ebfa","Type":"ContainerStarted","Data":"c0b47f1f58e85cee2fd4d1974dda4e74f5ad23dabd704f8af41eb7b29eb0d40b"} Nov 23 15:00:27 crc kubenswrapper[5050]: I1123 15:00:27.130049 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-sdvpr" event={"ID":"49ce1b12-d28e-481e-ad05-68355446dd4c","Type":"ContainerStarted","Data":"096bc79c344ebb77c5036294dbf1470f2dcc544c7a30af4d159b48bf4fc5d85b"} Nov 23 15:00:27 crc kubenswrapper[5050]: I1123 15:00:27.131413 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 23 15:00:27 crc kubenswrapper[5050]: I1123 15:00:27.168765 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"cfff6fc4-80c4-47e9-8bd3-318ad452fff5","Type":"ContainerStarted","Data":"8a81af822658866d1f7ccca01409f9c39283fa8f981e6cbe5fe0a43fb06cb0c8"} Nov 23 15:00:27 crc kubenswrapper[5050]: I1123 15:00:27.177741 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-db-sync-sdvpr" 
podStartSLOduration=3.177697982 podStartE2EDuration="3.177697982s" podCreationTimestamp="2025-11-23 15:00:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 15:00:27.166172641 +0000 UTC m=+1122.333169126" watchObservedRunningTime="2025-11-23 15:00:27.177697982 +0000 UTC m=+1122.344694477" Nov 23 15:00:27 crc kubenswrapper[5050]: I1123 15:00:27.186550 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-j7p9t" event={"ID":"3291f014-f18b-4142-a770-d9c33b141d66","Type":"ContainerStarted","Data":"0cc63ef349d09c6556e87b3ace86a609349c743b2081912477b2a5dc52b2b180"} Nov 23 15:00:27 crc kubenswrapper[5050]: I1123 15:00:27.240162 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-bbf5cc879-qjwdw"] Nov 23 15:00:27 crc kubenswrapper[5050]: I1123 15:00:27.245798 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-bbf5cc879-qjwdw"] Nov 23 15:00:27 crc kubenswrapper[5050]: I1123 15:00:27.284186 5050 scope.go:117] "RemoveContainer" containerID="3ba267450bbb866802f82770ab37b2b52b7398cdb21aae7c99e7d2dd462ec1c8" Nov 23 15:00:27 crc kubenswrapper[5050]: E1123 15:00:27.284849 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3ba267450bbb866802f82770ab37b2b52b7398cdb21aae7c99e7d2dd462ec1c8\": container with ID starting with 3ba267450bbb866802f82770ab37b2b52b7398cdb21aae7c99e7d2dd462ec1c8 not found: ID does not exist" containerID="3ba267450bbb866802f82770ab37b2b52b7398cdb21aae7c99e7d2dd462ec1c8" Nov 23 15:00:27 crc kubenswrapper[5050]: I1123 15:00:27.284908 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3ba267450bbb866802f82770ab37b2b52b7398cdb21aae7c99e7d2dd462ec1c8"} err="failed to get container status \"3ba267450bbb866802f82770ab37b2b52b7398cdb21aae7c99e7d2dd462ec1c8\": rpc error: code = NotFound desc = could not find container \"3ba267450bbb866802f82770ab37b2b52b7398cdb21aae7c99e7d2dd462ec1c8\": container with ID starting with 3ba267450bbb866802f82770ab37b2b52b7398cdb21aae7c99e7d2dd462ec1c8 not found: ID does not exist" Nov 23 15:00:27 crc kubenswrapper[5050]: I1123 15:00:27.596520 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a1575caa-7652-4f58-92b8-fc433cc1fd8d" path="/var/lib/kubelet/pods/a1575caa-7652-4f58-92b8-fc433cc1fd8d/volumes" Nov 23 15:00:28 crc kubenswrapper[5050]: I1123 15:00:28.206850 5050 generic.go:334] "Generic (PLEG): container finished" podID="28a444a8-06a7-48ed-8345-f9347485ebfa" containerID="2aade2d185488851e452454b8878ea31301b4c4305f66363f75542431a99143e" exitCode=0 Nov 23 15:00:28 crc kubenswrapper[5050]: I1123 15:00:28.207357 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-56df8fb6b7-rl78g" event={"ID":"28a444a8-06a7-48ed-8345-f9347485ebfa","Type":"ContainerDied","Data":"2aade2d185488851e452454b8878ea31301b4c4305f66363f75542431a99143e"} Nov 23 15:00:28 crc kubenswrapper[5050]: I1123 15:00:28.212207 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"3ad391d9-4f42-4252-b17d-55db2e864d06","Type":"ContainerStarted","Data":"2d329242fe529ebd4d99f6d436f7b088f35f092a79202c0af2aea286b0f6b865"} Nov 23 15:00:29 crc kubenswrapper[5050]: I1123 15:00:29.224394 5050 patch_prober.go:28] interesting pod/machine-config-daemon-hlrlq 
container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 23 15:00:29 crc kubenswrapper[5050]: I1123 15:00:29.225193 5050 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 23 15:00:29 crc kubenswrapper[5050]: I1123 15:00:29.224604 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"cfff6fc4-80c4-47e9-8bd3-318ad452fff5","Type":"ContainerStarted","Data":"30500673840a9c9b3232b4360359427b3e0012e6878149a851b8a4644671e568"} Nov 23 15:00:29 crc kubenswrapper[5050]: I1123 15:00:29.225260 5050 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" Nov 23 15:00:29 crc kubenswrapper[5050]: I1123 15:00:29.226186 5050 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"2f1968a148f9134c159c0b58dbe311e0c835edf7fb66133145fa860c5ae063e0"} pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 23 15:00:29 crc kubenswrapper[5050]: I1123 15:00:29.226240 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" containerID="cri-o://2f1968a148f9134c159c0b58dbe311e0c835edf7fb66133145fa860c5ae063e0" gracePeriod=600 Nov 23 15:00:29 crc kubenswrapper[5050]: I1123 15:00:29.230661 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-56df8fb6b7-rl78g" event={"ID":"28a444a8-06a7-48ed-8345-f9347485ebfa","Type":"ContainerStarted","Data":"47d6afd8f6513ed2566179d5d2eda050443088c0ead87dd9a060c6ce15b54b2b"} Nov 23 15:00:29 crc kubenswrapper[5050]: I1123 15:00:29.230731 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-56df8fb6b7-rl78g" Nov 23 15:00:29 crc kubenswrapper[5050]: I1123 15:00:29.235140 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"3ad391d9-4f42-4252-b17d-55db2e864d06","Type":"ContainerStarted","Data":"e0e0889e4ba6af9d7a9a2a08c49f65b1b68a38893a92ed91943dc840547247d8"} Nov 23 15:00:29 crc kubenswrapper[5050]: I1123 15:00:29.265571 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-56df8fb6b7-rl78g" podStartSLOduration=5.265514525 podStartE2EDuration="5.265514525s" podCreationTimestamp="2025-11-23 15:00:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 15:00:29.259064345 +0000 UTC m=+1124.426060830" watchObservedRunningTime="2025-11-23 15:00:29.265514525 +0000 UTC m=+1124.432511010" Nov 23 15:00:30 crc kubenswrapper[5050]: I1123 15:00:30.251169 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" 
event={"ID":"cfff6fc4-80c4-47e9-8bd3-318ad452fff5","Type":"ContainerStarted","Data":"dfa3c1520eff7561f76d667fac369eb10b90aa2651f76e0a640aeb5b68c240d5"} Nov 23 15:00:30 crc kubenswrapper[5050]: I1123 15:00:30.251456 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="cfff6fc4-80c4-47e9-8bd3-318ad452fff5" containerName="glance-log" containerID="cri-o://30500673840a9c9b3232b4360359427b3e0012e6878149a851b8a4644671e568" gracePeriod=30 Nov 23 15:00:30 crc kubenswrapper[5050]: I1123 15:00:30.251604 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="cfff6fc4-80c4-47e9-8bd3-318ad452fff5" containerName="glance-httpd" containerID="cri-o://dfa3c1520eff7561f76d667fac369eb10b90aa2651f76e0a640aeb5b68c240d5" gracePeriod=30 Nov 23 15:00:30 crc kubenswrapper[5050]: I1123 15:00:30.254058 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"3ad391d9-4f42-4252-b17d-55db2e864d06","Type":"ContainerStarted","Data":"7aba863ec17643ee15f6cc0b758c8a11c0990bbcb2edcb00dc5e320abde88305"} Nov 23 15:00:30 crc kubenswrapper[5050]: I1123 15:00:30.254259 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="3ad391d9-4f42-4252-b17d-55db2e864d06" containerName="glance-log" containerID="cri-o://e0e0889e4ba6af9d7a9a2a08c49f65b1b68a38893a92ed91943dc840547247d8" gracePeriod=30 Nov 23 15:00:30 crc kubenswrapper[5050]: I1123 15:00:30.254359 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="3ad391d9-4f42-4252-b17d-55db2e864d06" containerName="glance-httpd" containerID="cri-o://7aba863ec17643ee15f6cc0b758c8a11c0990bbcb2edcb00dc5e320abde88305" gracePeriod=30 Nov 23 15:00:30 crc kubenswrapper[5050]: I1123 15:00:30.262487 5050 generic.go:334] "Generic (PLEG): container finished" podID="1d998909-9470-47ef-87e8-b34f0473682f" containerID="2f1968a148f9134c159c0b58dbe311e0c835edf7fb66133145fa860c5ae063e0" exitCode=0 Nov 23 15:00:30 crc kubenswrapper[5050]: I1123 15:00:30.263083 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" event={"ID":"1d998909-9470-47ef-87e8-b34f0473682f","Type":"ContainerDied","Data":"2f1968a148f9134c159c0b58dbe311e0c835edf7fb66133145fa860c5ae063e0"} Nov 23 15:00:30 crc kubenswrapper[5050]: I1123 15:00:30.263141 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" event={"ID":"1d998909-9470-47ef-87e8-b34f0473682f","Type":"ContainerStarted","Data":"f369955faef6b1a99a27dfa755c0fc81c7c636113d3009cdb4311b4b6c0018d4"} Nov 23 15:00:30 crc kubenswrapper[5050]: I1123 15:00:30.263164 5050 scope.go:117] "RemoveContainer" containerID="1b270f8e19ea537b9c90473c9519c04a4a641a83e215048ece7d9c5619e16b60" Nov 23 15:00:30 crc kubenswrapper[5050]: I1123 15:00:30.281402 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=6.281381456 podStartE2EDuration="6.281381456s" podCreationTimestamp="2025-11-23 15:00:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 15:00:30.272429597 +0000 UTC m=+1125.439426082" watchObservedRunningTime="2025-11-23 
15:00:30.281381456 +0000 UTC m=+1125.448377931" Nov 23 15:00:30 crc kubenswrapper[5050]: I1123 15:00:30.328255 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=6.328236641 podStartE2EDuration="6.328236641s" podCreationTimestamp="2025-11-23 15:00:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 15:00:30.315774714 +0000 UTC m=+1125.482771199" watchObservedRunningTime="2025-11-23 15:00:30.328236641 +0000 UTC m=+1125.495233126" Nov 23 15:00:31 crc kubenswrapper[5050]: I1123 15:00:31.279861 5050 generic.go:334] "Generic (PLEG): container finished" podID="3ad391d9-4f42-4252-b17d-55db2e864d06" containerID="7aba863ec17643ee15f6cc0b758c8a11c0990bbcb2edcb00dc5e320abde88305" exitCode=0 Nov 23 15:00:31 crc kubenswrapper[5050]: I1123 15:00:31.280333 5050 generic.go:334] "Generic (PLEG): container finished" podID="3ad391d9-4f42-4252-b17d-55db2e864d06" containerID="e0e0889e4ba6af9d7a9a2a08c49f65b1b68a38893a92ed91943dc840547247d8" exitCode=143 Nov 23 15:00:31 crc kubenswrapper[5050]: I1123 15:00:31.279956 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"3ad391d9-4f42-4252-b17d-55db2e864d06","Type":"ContainerDied","Data":"7aba863ec17643ee15f6cc0b758c8a11c0990bbcb2edcb00dc5e320abde88305"} Nov 23 15:00:31 crc kubenswrapper[5050]: I1123 15:00:31.280421 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"3ad391d9-4f42-4252-b17d-55db2e864d06","Type":"ContainerDied","Data":"e0e0889e4ba6af9d7a9a2a08c49f65b1b68a38893a92ed91943dc840547247d8"} Nov 23 15:00:31 crc kubenswrapper[5050]: I1123 15:00:31.286342 5050 generic.go:334] "Generic (PLEG): container finished" podID="7907fcb1-82b6-48bc-8c87-107d9f137c9a" containerID="d8471c24b16d6d4f6103efd54a5561d59050162334738e8d02ec33d90eef6ff9" exitCode=0 Nov 23 15:00:31 crc kubenswrapper[5050]: I1123 15:00:31.286417 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-zxtmw" event={"ID":"7907fcb1-82b6-48bc-8c87-107d9f137c9a","Type":"ContainerDied","Data":"d8471c24b16d6d4f6103efd54a5561d59050162334738e8d02ec33d90eef6ff9"} Nov 23 15:00:31 crc kubenswrapper[5050]: I1123 15:00:31.291823 5050 generic.go:334] "Generic (PLEG): container finished" podID="cfff6fc4-80c4-47e9-8bd3-318ad452fff5" containerID="30500673840a9c9b3232b4360359427b3e0012e6878149a851b8a4644671e568" exitCode=143 Nov 23 15:00:31 crc kubenswrapper[5050]: I1123 15:00:31.291853 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"cfff6fc4-80c4-47e9-8bd3-318ad452fff5","Type":"ContainerDied","Data":"30500673840a9c9b3232b4360359427b3e0012e6878149a851b8a4644671e568"} Nov 23 15:00:32 crc kubenswrapper[5050]: I1123 15:00:32.307249 5050 generic.go:334] "Generic (PLEG): container finished" podID="cfff6fc4-80c4-47e9-8bd3-318ad452fff5" containerID="dfa3c1520eff7561f76d667fac369eb10b90aa2651f76e0a640aeb5b68c240d5" exitCode=0 Nov 23 15:00:32 crc kubenswrapper[5050]: I1123 15:00:32.307291 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"cfff6fc4-80c4-47e9-8bd3-318ad452fff5","Type":"ContainerDied","Data":"dfa3c1520eff7561f76d667fac369eb10b90aa2651f76e0a640aeb5b68c240d5"} Nov 23 15:00:35 crc kubenswrapper[5050]: I1123 15:00:35.280807 5050 kubelet.go:2542] 
"SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-56df8fb6b7-rl78g" Nov 23 15:00:35 crc kubenswrapper[5050]: I1123 15:00:35.377240 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5f59b8f679-58mlp"] Nov 23 15:00:35 crc kubenswrapper[5050]: I1123 15:00:35.378121 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5f59b8f679-58mlp" podUID="245f4876-8b58-42ff-864d-a9bda0fd9c32" containerName="dnsmasq-dns" containerID="cri-o://e44f54d0198ef92639aceb836d4552afa065a7a78bcc0146999c46d9e307c41a" gracePeriod=10 Nov 23 15:00:35 crc kubenswrapper[5050]: I1123 15:00:35.902654 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-zxtmw" Nov 23 15:00:36 crc kubenswrapper[5050]: I1123 15:00:36.031621 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7907fcb1-82b6-48bc-8c87-107d9f137c9a-config-data\") pod \"7907fcb1-82b6-48bc-8c87-107d9f137c9a\" (UID: \"7907fcb1-82b6-48bc-8c87-107d9f137c9a\") " Nov 23 15:00:36 crc kubenswrapper[5050]: I1123 15:00:36.031804 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7907fcb1-82b6-48bc-8c87-107d9f137c9a-scripts\") pod \"7907fcb1-82b6-48bc-8c87-107d9f137c9a\" (UID: \"7907fcb1-82b6-48bc-8c87-107d9f137c9a\") " Nov 23 15:00:36 crc kubenswrapper[5050]: I1123 15:00:36.031866 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7907fcb1-82b6-48bc-8c87-107d9f137c9a-combined-ca-bundle\") pod \"7907fcb1-82b6-48bc-8c87-107d9f137c9a\" (UID: \"7907fcb1-82b6-48bc-8c87-107d9f137c9a\") " Nov 23 15:00:36 crc kubenswrapper[5050]: I1123 15:00:36.031895 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7jjx2\" (UniqueName: \"kubernetes.io/projected/7907fcb1-82b6-48bc-8c87-107d9f137c9a-kube-api-access-7jjx2\") pod \"7907fcb1-82b6-48bc-8c87-107d9f137c9a\" (UID: \"7907fcb1-82b6-48bc-8c87-107d9f137c9a\") " Nov 23 15:00:36 crc kubenswrapper[5050]: I1123 15:00:36.031989 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/7907fcb1-82b6-48bc-8c87-107d9f137c9a-credential-keys\") pod \"7907fcb1-82b6-48bc-8c87-107d9f137c9a\" (UID: \"7907fcb1-82b6-48bc-8c87-107d9f137c9a\") " Nov 23 15:00:36 crc kubenswrapper[5050]: I1123 15:00:36.032082 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/7907fcb1-82b6-48bc-8c87-107d9f137c9a-fernet-keys\") pod \"7907fcb1-82b6-48bc-8c87-107d9f137c9a\" (UID: \"7907fcb1-82b6-48bc-8c87-107d9f137c9a\") " Nov 23 15:00:36 crc kubenswrapper[5050]: I1123 15:00:36.039787 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7907fcb1-82b6-48bc-8c87-107d9f137c9a-scripts" (OuterVolumeSpecName: "scripts") pod "7907fcb1-82b6-48bc-8c87-107d9f137c9a" (UID: "7907fcb1-82b6-48bc-8c87-107d9f137c9a"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:00:36 crc kubenswrapper[5050]: I1123 15:00:36.040253 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7907fcb1-82b6-48bc-8c87-107d9f137c9a-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "7907fcb1-82b6-48bc-8c87-107d9f137c9a" (UID: "7907fcb1-82b6-48bc-8c87-107d9f137c9a"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:00:36 crc kubenswrapper[5050]: I1123 15:00:36.042356 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7907fcb1-82b6-48bc-8c87-107d9f137c9a-kube-api-access-7jjx2" (OuterVolumeSpecName: "kube-api-access-7jjx2") pod "7907fcb1-82b6-48bc-8c87-107d9f137c9a" (UID: "7907fcb1-82b6-48bc-8c87-107d9f137c9a"). InnerVolumeSpecName "kube-api-access-7jjx2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:00:36 crc kubenswrapper[5050]: I1123 15:00:36.049707 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7907fcb1-82b6-48bc-8c87-107d9f137c9a-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "7907fcb1-82b6-48bc-8c87-107d9f137c9a" (UID: "7907fcb1-82b6-48bc-8c87-107d9f137c9a"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:00:36 crc kubenswrapper[5050]: I1123 15:00:36.105730 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7907fcb1-82b6-48bc-8c87-107d9f137c9a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7907fcb1-82b6-48bc-8c87-107d9f137c9a" (UID: "7907fcb1-82b6-48bc-8c87-107d9f137c9a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:00:36 crc kubenswrapper[5050]: I1123 15:00:36.115365 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7907fcb1-82b6-48bc-8c87-107d9f137c9a-config-data" (OuterVolumeSpecName: "config-data") pod "7907fcb1-82b6-48bc-8c87-107d9f137c9a" (UID: "7907fcb1-82b6-48bc-8c87-107d9f137c9a"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:00:36 crc kubenswrapper[5050]: I1123 15:00:36.135085 5050 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7907fcb1-82b6-48bc-8c87-107d9f137c9a-scripts\") on node \"crc\" DevicePath \"\"" Nov 23 15:00:36 crc kubenswrapper[5050]: I1123 15:00:36.135133 5050 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7907fcb1-82b6-48bc-8c87-107d9f137c9a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 15:00:36 crc kubenswrapper[5050]: I1123 15:00:36.135176 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7jjx2\" (UniqueName: \"kubernetes.io/projected/7907fcb1-82b6-48bc-8c87-107d9f137c9a-kube-api-access-7jjx2\") on node \"crc\" DevicePath \"\"" Nov 23 15:00:36 crc kubenswrapper[5050]: I1123 15:00:36.135189 5050 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/7907fcb1-82b6-48bc-8c87-107d9f137c9a-credential-keys\") on node \"crc\" DevicePath \"\"" Nov 23 15:00:36 crc kubenswrapper[5050]: I1123 15:00:36.135200 5050 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/7907fcb1-82b6-48bc-8c87-107d9f137c9a-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 23 15:00:36 crc kubenswrapper[5050]: I1123 15:00:36.135211 5050 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7907fcb1-82b6-48bc-8c87-107d9f137c9a-config-data\") on node \"crc\" DevicePath \"\"" Nov 23 15:00:36 crc kubenswrapper[5050]: I1123 15:00:36.360860 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-zxtmw" event={"ID":"7907fcb1-82b6-48bc-8c87-107d9f137c9a","Type":"ContainerDied","Data":"eb21c37e691b3b226fad2132d3bc14d82851acba8de4df9142984743497d4beb"} Nov 23 15:00:36 crc kubenswrapper[5050]: I1123 15:00:36.360908 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-zxtmw" Nov 23 15:00:36 crc kubenswrapper[5050]: I1123 15:00:36.360927 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="eb21c37e691b3b226fad2132d3bc14d82851acba8de4df9142984743497d4beb" Nov 23 15:00:36 crc kubenswrapper[5050]: I1123 15:00:36.364306 5050 generic.go:334] "Generic (PLEG): container finished" podID="245f4876-8b58-42ff-864d-a9bda0fd9c32" containerID="e44f54d0198ef92639aceb836d4552afa065a7a78bcc0146999c46d9e307c41a" exitCode=0 Nov 23 15:00:36 crc kubenswrapper[5050]: I1123 15:00:36.364355 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5f59b8f679-58mlp" event={"ID":"245f4876-8b58-42ff-864d-a9bda0fd9c32","Type":"ContainerDied","Data":"e44f54d0198ef92639aceb836d4552afa065a7a78bcc0146999c46d9e307c41a"} Nov 23 15:00:37 crc kubenswrapper[5050]: I1123 15:00:37.016012 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-zxtmw"] Nov 23 15:00:37 crc kubenswrapper[5050]: I1123 15:00:37.025052 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-zxtmw"] Nov 23 15:00:37 crc kubenswrapper[5050]: I1123 15:00:37.109262 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-zskrr"] Nov 23 15:00:37 crc kubenswrapper[5050]: E1123 15:00:37.109699 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a1575caa-7652-4f58-92b8-fc433cc1fd8d" containerName="init" Nov 23 15:00:37 crc kubenswrapper[5050]: I1123 15:00:37.109717 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="a1575caa-7652-4f58-92b8-fc433cc1fd8d" containerName="init" Nov 23 15:00:37 crc kubenswrapper[5050]: E1123 15:00:37.109756 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7907fcb1-82b6-48bc-8c87-107d9f137c9a" containerName="keystone-bootstrap" Nov 23 15:00:37 crc kubenswrapper[5050]: I1123 15:00:37.109764 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="7907fcb1-82b6-48bc-8c87-107d9f137c9a" containerName="keystone-bootstrap" Nov 23 15:00:37 crc kubenswrapper[5050]: I1123 15:00:37.109922 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="a1575caa-7652-4f58-92b8-fc433cc1fd8d" containerName="init" Nov 23 15:00:37 crc kubenswrapper[5050]: I1123 15:00:37.109938 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="7907fcb1-82b6-48bc-8c87-107d9f137c9a" containerName="keystone-bootstrap" Nov 23 15:00:37 crc kubenswrapper[5050]: I1123 15:00:37.110606 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-zskrr" Nov 23 15:00:37 crc kubenswrapper[5050]: I1123 15:00:37.116602 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Nov 23 15:00:37 crc kubenswrapper[5050]: I1123 15:00:37.116602 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 23 15:00:37 crc kubenswrapper[5050]: I1123 15:00:37.116681 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 23 15:00:37 crc kubenswrapper[5050]: I1123 15:00:37.116608 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 23 15:00:37 crc kubenswrapper[5050]: I1123 15:00:37.117417 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-hkfk8" Nov 23 15:00:37 crc kubenswrapper[5050]: I1123 15:00:37.126580 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-zskrr"] Nov 23 15:00:37 crc kubenswrapper[5050]: I1123 15:00:37.170244 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/889f2b40-a1f4-45e2-b8e1-0fc72a7b55ca-credential-keys\") pod \"keystone-bootstrap-zskrr\" (UID: \"889f2b40-a1f4-45e2-b8e1-0fc72a7b55ca\") " pod="openstack/keystone-bootstrap-zskrr" Nov 23 15:00:37 crc kubenswrapper[5050]: I1123 15:00:37.170368 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/889f2b40-a1f4-45e2-b8e1-0fc72a7b55ca-fernet-keys\") pod \"keystone-bootstrap-zskrr\" (UID: \"889f2b40-a1f4-45e2-b8e1-0fc72a7b55ca\") " pod="openstack/keystone-bootstrap-zskrr" Nov 23 15:00:37 crc kubenswrapper[5050]: I1123 15:00:37.170693 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gn47q\" (UniqueName: \"kubernetes.io/projected/889f2b40-a1f4-45e2-b8e1-0fc72a7b55ca-kube-api-access-gn47q\") pod \"keystone-bootstrap-zskrr\" (UID: \"889f2b40-a1f4-45e2-b8e1-0fc72a7b55ca\") " pod="openstack/keystone-bootstrap-zskrr" Nov 23 15:00:37 crc kubenswrapper[5050]: I1123 15:00:37.170869 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/889f2b40-a1f4-45e2-b8e1-0fc72a7b55ca-scripts\") pod \"keystone-bootstrap-zskrr\" (UID: \"889f2b40-a1f4-45e2-b8e1-0fc72a7b55ca\") " pod="openstack/keystone-bootstrap-zskrr" Nov 23 15:00:37 crc kubenswrapper[5050]: I1123 15:00:37.170984 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/889f2b40-a1f4-45e2-b8e1-0fc72a7b55ca-config-data\") pod \"keystone-bootstrap-zskrr\" (UID: \"889f2b40-a1f4-45e2-b8e1-0fc72a7b55ca\") " pod="openstack/keystone-bootstrap-zskrr" Nov 23 15:00:37 crc kubenswrapper[5050]: I1123 15:00:37.171188 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/889f2b40-a1f4-45e2-b8e1-0fc72a7b55ca-combined-ca-bundle\") pod \"keystone-bootstrap-zskrr\" (UID: \"889f2b40-a1f4-45e2-b8e1-0fc72a7b55ca\") " pod="openstack/keystone-bootstrap-zskrr" Nov 23 15:00:37 crc kubenswrapper[5050]: I1123 15:00:37.273190 5050 reconciler_common.go:218] "operationExecutor.MountVolume started 
for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/889f2b40-a1f4-45e2-b8e1-0fc72a7b55ca-credential-keys\") pod \"keystone-bootstrap-zskrr\" (UID: \"889f2b40-a1f4-45e2-b8e1-0fc72a7b55ca\") " pod="openstack/keystone-bootstrap-zskrr" Nov 23 15:00:37 crc kubenswrapper[5050]: I1123 15:00:37.273832 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/889f2b40-a1f4-45e2-b8e1-0fc72a7b55ca-fernet-keys\") pod \"keystone-bootstrap-zskrr\" (UID: \"889f2b40-a1f4-45e2-b8e1-0fc72a7b55ca\") " pod="openstack/keystone-bootstrap-zskrr" Nov 23 15:00:37 crc kubenswrapper[5050]: I1123 15:00:37.273903 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gn47q\" (UniqueName: \"kubernetes.io/projected/889f2b40-a1f4-45e2-b8e1-0fc72a7b55ca-kube-api-access-gn47q\") pod \"keystone-bootstrap-zskrr\" (UID: \"889f2b40-a1f4-45e2-b8e1-0fc72a7b55ca\") " pod="openstack/keystone-bootstrap-zskrr" Nov 23 15:00:37 crc kubenswrapper[5050]: I1123 15:00:37.273974 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/889f2b40-a1f4-45e2-b8e1-0fc72a7b55ca-scripts\") pod \"keystone-bootstrap-zskrr\" (UID: \"889f2b40-a1f4-45e2-b8e1-0fc72a7b55ca\") " pod="openstack/keystone-bootstrap-zskrr" Nov 23 15:00:37 crc kubenswrapper[5050]: I1123 15:00:37.274031 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/889f2b40-a1f4-45e2-b8e1-0fc72a7b55ca-config-data\") pod \"keystone-bootstrap-zskrr\" (UID: \"889f2b40-a1f4-45e2-b8e1-0fc72a7b55ca\") " pod="openstack/keystone-bootstrap-zskrr" Nov 23 15:00:37 crc kubenswrapper[5050]: I1123 15:00:37.274108 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/889f2b40-a1f4-45e2-b8e1-0fc72a7b55ca-combined-ca-bundle\") pod \"keystone-bootstrap-zskrr\" (UID: \"889f2b40-a1f4-45e2-b8e1-0fc72a7b55ca\") " pod="openstack/keystone-bootstrap-zskrr" Nov 23 15:00:37 crc kubenswrapper[5050]: I1123 15:00:37.283599 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/889f2b40-a1f4-45e2-b8e1-0fc72a7b55ca-scripts\") pod \"keystone-bootstrap-zskrr\" (UID: \"889f2b40-a1f4-45e2-b8e1-0fc72a7b55ca\") " pod="openstack/keystone-bootstrap-zskrr" Nov 23 15:00:37 crc kubenswrapper[5050]: I1123 15:00:37.283703 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/889f2b40-a1f4-45e2-b8e1-0fc72a7b55ca-credential-keys\") pod \"keystone-bootstrap-zskrr\" (UID: \"889f2b40-a1f4-45e2-b8e1-0fc72a7b55ca\") " pod="openstack/keystone-bootstrap-zskrr" Nov 23 15:00:37 crc kubenswrapper[5050]: I1123 15:00:37.284285 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/889f2b40-a1f4-45e2-b8e1-0fc72a7b55ca-config-data\") pod \"keystone-bootstrap-zskrr\" (UID: \"889f2b40-a1f4-45e2-b8e1-0fc72a7b55ca\") " pod="openstack/keystone-bootstrap-zskrr" Nov 23 15:00:37 crc kubenswrapper[5050]: I1123 15:00:37.284466 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/889f2b40-a1f4-45e2-b8e1-0fc72a7b55ca-combined-ca-bundle\") pod \"keystone-bootstrap-zskrr\" (UID: 
\"889f2b40-a1f4-45e2-b8e1-0fc72a7b55ca\") " pod="openstack/keystone-bootstrap-zskrr" Nov 23 15:00:37 crc kubenswrapper[5050]: I1123 15:00:37.285058 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/889f2b40-a1f4-45e2-b8e1-0fc72a7b55ca-fernet-keys\") pod \"keystone-bootstrap-zskrr\" (UID: \"889f2b40-a1f4-45e2-b8e1-0fc72a7b55ca\") " pod="openstack/keystone-bootstrap-zskrr" Nov 23 15:00:37 crc kubenswrapper[5050]: I1123 15:00:37.296489 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gn47q\" (UniqueName: \"kubernetes.io/projected/889f2b40-a1f4-45e2-b8e1-0fc72a7b55ca-kube-api-access-gn47q\") pod \"keystone-bootstrap-zskrr\" (UID: \"889f2b40-a1f4-45e2-b8e1-0fc72a7b55ca\") " pod="openstack/keystone-bootstrap-zskrr" Nov 23 15:00:37 crc kubenswrapper[5050]: I1123 15:00:37.439510 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-zskrr" Nov 23 15:00:37 crc kubenswrapper[5050]: I1123 15:00:37.562349 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7907fcb1-82b6-48bc-8c87-107d9f137c9a" path="/var/lib/kubelet/pods/7907fcb1-82b6-48bc-8c87-107d9f137c9a/volumes" Nov 23 15:00:44 crc kubenswrapper[5050]: I1123 15:00:44.397114 5050 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-5f59b8f679-58mlp" podUID="245f4876-8b58-42ff-864d-a9bda0fd9c32" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.131:5353: i/o timeout" Nov 23 15:00:45 crc kubenswrapper[5050]: I1123 15:00:45.983399 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.003333 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5f59b8f679-58mlp" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.041268 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.077709 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/245f4876-8b58-42ff-864d-a9bda0fd9c32-ovsdbserver-nb\") pod \"245f4876-8b58-42ff-864d-a9bda0fd9c32\" (UID: \"245f4876-8b58-42ff-864d-a9bda0fd9c32\") " Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.077765 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"cfff6fc4-80c4-47e9-8bd3-318ad452fff5\" (UID: \"cfff6fc4-80c4-47e9-8bd3-318ad452fff5\") " Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.077818 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/245f4876-8b58-42ff-864d-a9bda0fd9c32-ovsdbserver-sb\") pod \"245f4876-8b58-42ff-864d-a9bda0fd9c32\" (UID: \"245f4876-8b58-42ff-864d-a9bda0fd9c32\") " Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.077854 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-64mvb\" (UniqueName: \"kubernetes.io/projected/245f4876-8b58-42ff-864d-a9bda0fd9c32-kube-api-access-64mvb\") pod \"245f4876-8b58-42ff-864d-a9bda0fd9c32\" (UID: \"245f4876-8b58-42ff-864d-a9bda0fd9c32\") " Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.077887 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w5knq\" (UniqueName: \"kubernetes.io/projected/cfff6fc4-80c4-47e9-8bd3-318ad452fff5-kube-api-access-w5knq\") pod \"cfff6fc4-80c4-47e9-8bd3-318ad452fff5\" (UID: \"cfff6fc4-80c4-47e9-8bd3-318ad452fff5\") " Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.077906 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/245f4876-8b58-42ff-864d-a9bda0fd9c32-dns-swift-storage-0\") pod \"245f4876-8b58-42ff-864d-a9bda0fd9c32\" (UID: \"245f4876-8b58-42ff-864d-a9bda0fd9c32\") " Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.077935 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cfff6fc4-80c4-47e9-8bd3-318ad452fff5-scripts\") pod \"cfff6fc4-80c4-47e9-8bd3-318ad452fff5\" (UID: \"cfff6fc4-80c4-47e9-8bd3-318ad452fff5\") " Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.078020 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/245f4876-8b58-42ff-864d-a9bda0fd9c32-dns-svc\") pod \"245f4876-8b58-42ff-864d-a9bda0fd9c32\" (UID: \"245f4876-8b58-42ff-864d-a9bda0fd9c32\") " Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.078060 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cfff6fc4-80c4-47e9-8bd3-318ad452fff5-logs\") pod \"cfff6fc4-80c4-47e9-8bd3-318ad452fff5\" (UID: \"cfff6fc4-80c4-47e9-8bd3-318ad452fff5\") " Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.078095 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/cfff6fc4-80c4-47e9-8bd3-318ad452fff5-public-tls-certs\") pod \"cfff6fc4-80c4-47e9-8bd3-318ad452fff5\" (UID: 
\"cfff6fc4-80c4-47e9-8bd3-318ad452fff5\") " Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.078142 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/245f4876-8b58-42ff-864d-a9bda0fd9c32-config\") pod \"245f4876-8b58-42ff-864d-a9bda0fd9c32\" (UID: \"245f4876-8b58-42ff-864d-a9bda0fd9c32\") " Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.078196 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cfff6fc4-80c4-47e9-8bd3-318ad452fff5-config-data\") pod \"cfff6fc4-80c4-47e9-8bd3-318ad452fff5\" (UID: \"cfff6fc4-80c4-47e9-8bd3-318ad452fff5\") " Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.078245 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cfff6fc4-80c4-47e9-8bd3-318ad452fff5-combined-ca-bundle\") pod \"cfff6fc4-80c4-47e9-8bd3-318ad452fff5\" (UID: \"cfff6fc4-80c4-47e9-8bd3-318ad452fff5\") " Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.078270 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/cfff6fc4-80c4-47e9-8bd3-318ad452fff5-httpd-run\") pod \"cfff6fc4-80c4-47e9-8bd3-318ad452fff5\" (UID: \"cfff6fc4-80c4-47e9-8bd3-318ad452fff5\") " Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.078867 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cfff6fc4-80c4-47e9-8bd3-318ad452fff5-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "cfff6fc4-80c4-47e9-8bd3-318ad452fff5" (UID: "cfff6fc4-80c4-47e9-8bd3-318ad452fff5"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.079068 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cfff6fc4-80c4-47e9-8bd3-318ad452fff5-logs" (OuterVolumeSpecName: "logs") pod "cfff6fc4-80c4-47e9-8bd3-318ad452fff5" (UID: "cfff6fc4-80c4-47e9-8bd3-318ad452fff5"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.087637 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cfff6fc4-80c4-47e9-8bd3-318ad452fff5-scripts" (OuterVolumeSpecName: "scripts") pod "cfff6fc4-80c4-47e9-8bd3-318ad452fff5" (UID: "cfff6fc4-80c4-47e9-8bd3-318ad452fff5"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.098754 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage09-crc" (OuterVolumeSpecName: "glance") pod "cfff6fc4-80c4-47e9-8bd3-318ad452fff5" (UID: "cfff6fc4-80c4-47e9-8bd3-318ad452fff5"). InnerVolumeSpecName "local-storage09-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.099557 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cfff6fc4-80c4-47e9-8bd3-318ad452fff5-kube-api-access-w5knq" (OuterVolumeSpecName: "kube-api-access-w5knq") pod "cfff6fc4-80c4-47e9-8bd3-318ad452fff5" (UID: "cfff6fc4-80c4-47e9-8bd3-318ad452fff5"). InnerVolumeSpecName "kube-api-access-w5knq". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.106305 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/245f4876-8b58-42ff-864d-a9bda0fd9c32-kube-api-access-64mvb" (OuterVolumeSpecName: "kube-api-access-64mvb") pod "245f4876-8b58-42ff-864d-a9bda0fd9c32" (UID: "245f4876-8b58-42ff-864d-a9bda0fd9c32"). InnerVolumeSpecName "kube-api-access-64mvb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.118275 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cfff6fc4-80c4-47e9-8bd3-318ad452fff5-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "cfff6fc4-80c4-47e9-8bd3-318ad452fff5" (UID: "cfff6fc4-80c4-47e9-8bd3-318ad452fff5"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.136329 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/245f4876-8b58-42ff-864d-a9bda0fd9c32-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "245f4876-8b58-42ff-864d-a9bda0fd9c32" (UID: "245f4876-8b58-42ff-864d-a9bda0fd9c32"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.143737 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/245f4876-8b58-42ff-864d-a9bda0fd9c32-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "245f4876-8b58-42ff-864d-a9bda0fd9c32" (UID: "245f4876-8b58-42ff-864d-a9bda0fd9c32"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.162255 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/245f4876-8b58-42ff-864d-a9bda0fd9c32-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "245f4876-8b58-42ff-864d-a9bda0fd9c32" (UID: "245f4876-8b58-42ff-864d-a9bda0fd9c32"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.171497 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cfff6fc4-80c4-47e9-8bd3-318ad452fff5-config-data" (OuterVolumeSpecName: "config-data") pod "cfff6fc4-80c4-47e9-8bd3-318ad452fff5" (UID: "cfff6fc4-80c4-47e9-8bd3-318ad452fff5"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.176520 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cfff6fc4-80c4-47e9-8bd3-318ad452fff5-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "cfff6fc4-80c4-47e9-8bd3-318ad452fff5" (UID: "cfff6fc4-80c4-47e9-8bd3-318ad452fff5"). InnerVolumeSpecName "public-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.184339 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3ad391d9-4f42-4252-b17d-55db2e864d06-combined-ca-bundle\") pod \"3ad391d9-4f42-4252-b17d-55db2e864d06\" (UID: \"3ad391d9-4f42-4252-b17d-55db2e864d06\") " Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.184411 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3ad391d9-4f42-4252-b17d-55db2e864d06-logs\") pod \"3ad391d9-4f42-4252-b17d-55db2e864d06\" (UID: \"3ad391d9-4f42-4252-b17d-55db2e864d06\") " Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.184617 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3ad391d9-4f42-4252-b17d-55db2e864d06-scripts\") pod \"3ad391d9-4f42-4252-b17d-55db2e864d06\" (UID: \"3ad391d9-4f42-4252-b17d-55db2e864d06\") " Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.184677 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3ad391d9-4f42-4252-b17d-55db2e864d06-internal-tls-certs\") pod \"3ad391d9-4f42-4252-b17d-55db2e864d06\" (UID: \"3ad391d9-4f42-4252-b17d-55db2e864d06\") " Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.184884 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3ad391d9-4f42-4252-b17d-55db2e864d06-logs" (OuterVolumeSpecName: "logs") pod "3ad391d9-4f42-4252-b17d-55db2e864d06" (UID: "3ad391d9-4f42-4252-b17d-55db2e864d06"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.184976 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/245f4876-8b58-42ff-864d-a9bda0fd9c32-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "245f4876-8b58-42ff-864d-a9bda0fd9c32" (UID: "245f4876-8b58-42ff-864d-a9bda0fd9c32"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.185087 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mkt4r\" (UniqueName: \"kubernetes.io/projected/3ad391d9-4f42-4252-b17d-55db2e864d06-kube-api-access-mkt4r\") pod \"3ad391d9-4f42-4252-b17d-55db2e864d06\" (UID: \"3ad391d9-4f42-4252-b17d-55db2e864d06\") " Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.185127 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3ad391d9-4f42-4252-b17d-55db2e864d06-config-data\") pod \"3ad391d9-4f42-4252-b17d-55db2e864d06\" (UID: \"3ad391d9-4f42-4252-b17d-55db2e864d06\") " Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.185154 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"3ad391d9-4f42-4252-b17d-55db2e864d06\" (UID: \"3ad391d9-4f42-4252-b17d-55db2e864d06\") " Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.185232 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/245f4876-8b58-42ff-864d-a9bda0fd9c32-ovsdbserver-nb\") pod \"245f4876-8b58-42ff-864d-a9bda0fd9c32\" (UID: \"245f4876-8b58-42ff-864d-a9bda0fd9c32\") " Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.185362 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/3ad391d9-4f42-4252-b17d-55db2e864d06-httpd-run\") pod \"3ad391d9-4f42-4252-b17d-55db2e864d06\" (UID: \"3ad391d9-4f42-4252-b17d-55db2e864d06\") " Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.186187 5050 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cfff6fc4-80c4-47e9-8bd3-318ad452fff5-config-data\") on node \"crc\" DevicePath \"\"" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.186215 5050 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cfff6fc4-80c4-47e9-8bd3-318ad452fff5-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.186231 5050 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/cfff6fc4-80c4-47e9-8bd3-318ad452fff5-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.186266 5050 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") on node \"crc\" " Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.186280 5050 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/245f4876-8b58-42ff-864d-a9bda0fd9c32-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.186294 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-64mvb\" (UniqueName: \"kubernetes.io/projected/245f4876-8b58-42ff-864d-a9bda0fd9c32-kube-api-access-64mvb\") on node \"crc\" DevicePath \"\"" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.186309 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w5knq\" (UniqueName: 
\"kubernetes.io/projected/cfff6fc4-80c4-47e9-8bd3-318ad452fff5-kube-api-access-w5knq\") on node \"crc\" DevicePath \"\"" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.186322 5050 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/245f4876-8b58-42ff-864d-a9bda0fd9c32-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.186333 5050 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cfff6fc4-80c4-47e9-8bd3-318ad452fff5-scripts\") on node \"crc\" DevicePath \"\"" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.186345 5050 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3ad391d9-4f42-4252-b17d-55db2e864d06-logs\") on node \"crc\" DevicePath \"\"" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.186358 5050 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/245f4876-8b58-42ff-864d-a9bda0fd9c32-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.186369 5050 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cfff6fc4-80c4-47e9-8bd3-318ad452fff5-logs\") on node \"crc\" DevicePath \"\"" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.186383 5050 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/cfff6fc4-80c4-47e9-8bd3-318ad452fff5-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 23 15:00:46 crc kubenswrapper[5050]: W1123 15:00:46.187175 5050 empty_dir.go:500] Warning: Unmount skipped because path does not exist: /var/lib/kubelet/pods/245f4876-8b58-42ff-864d-a9bda0fd9c32/volumes/kubernetes.io~configmap/ovsdbserver-nb Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.187215 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/245f4876-8b58-42ff-864d-a9bda0fd9c32-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "245f4876-8b58-42ff-864d-a9bda0fd9c32" (UID: "245f4876-8b58-42ff-864d-a9bda0fd9c32"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.189437 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ad391d9-4f42-4252-b17d-55db2e864d06-scripts" (OuterVolumeSpecName: "scripts") pod "3ad391d9-4f42-4252-b17d-55db2e864d06" (UID: "3ad391d9-4f42-4252-b17d-55db2e864d06"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.189598 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3ad391d9-4f42-4252-b17d-55db2e864d06-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "3ad391d9-4f42-4252-b17d-55db2e864d06" (UID: "3ad391d9-4f42-4252-b17d-55db2e864d06"). InnerVolumeSpecName "httpd-run". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.190745 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ad391d9-4f42-4252-b17d-55db2e864d06-kube-api-access-mkt4r" (OuterVolumeSpecName: "kube-api-access-mkt4r") pod "3ad391d9-4f42-4252-b17d-55db2e864d06" (UID: "3ad391d9-4f42-4252-b17d-55db2e864d06"). InnerVolumeSpecName "kube-api-access-mkt4r". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.191163 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage11-crc" (OuterVolumeSpecName: "glance") pod "3ad391d9-4f42-4252-b17d-55db2e864d06" (UID: "3ad391d9-4f42-4252-b17d-55db2e864d06"). InnerVolumeSpecName "local-storage11-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.197229 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/245f4876-8b58-42ff-864d-a9bda0fd9c32-config" (OuterVolumeSpecName: "config") pod "245f4876-8b58-42ff-864d-a9bda0fd9c32" (UID: "245f4876-8b58-42ff-864d-a9bda0fd9c32"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.212021 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ad391d9-4f42-4252-b17d-55db2e864d06-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3ad391d9-4f42-4252-b17d-55db2e864d06" (UID: "3ad391d9-4f42-4252-b17d-55db2e864d06"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.212385 5050 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage09-crc" (UniqueName: "kubernetes.io/local-volume/local-storage09-crc") on node "crc" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.235767 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ad391d9-4f42-4252-b17d-55db2e864d06-config-data" (OuterVolumeSpecName: "config-data") pod "3ad391d9-4f42-4252-b17d-55db2e864d06" (UID: "3ad391d9-4f42-4252-b17d-55db2e864d06"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.248822 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ad391d9-4f42-4252-b17d-55db2e864d06-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "3ad391d9-4f42-4252-b17d-55db2e864d06" (UID: "3ad391d9-4f42-4252-b17d-55db2e864d06"). InnerVolumeSpecName "internal-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.288309 5050 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3ad391d9-4f42-4252-b17d-55db2e864d06-scripts\") on node \"crc\" DevicePath \"\"" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.288356 5050 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3ad391d9-4f42-4252-b17d-55db2e864d06-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.288373 5050 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/245f4876-8b58-42ff-864d-a9bda0fd9c32-config\") on node \"crc\" DevicePath \"\"" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.288386 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mkt4r\" (UniqueName: \"kubernetes.io/projected/3ad391d9-4f42-4252-b17d-55db2e864d06-kube-api-access-mkt4r\") on node \"crc\" DevicePath \"\"" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.288401 5050 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3ad391d9-4f42-4252-b17d-55db2e864d06-config-data\") on node \"crc\" DevicePath \"\"" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.288527 5050 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") on node \"crc\" " Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.288541 5050 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/245f4876-8b58-42ff-864d-a9bda0fd9c32-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.288556 5050 reconciler_common.go:293] "Volume detached for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") on node \"crc\" DevicePath \"\"" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.288567 5050 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/3ad391d9-4f42-4252-b17d-55db2e864d06-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.288580 5050 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3ad391d9-4f42-4252-b17d-55db2e864d06-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.311264 5050 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage11-crc" (UniqueName: "kubernetes.io/local-volume/local-storage11-crc") on node "crc" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.390670 5050 reconciler_common.go:293] "Volume detached for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") on node \"crc\" DevicePath \"\"" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.470932 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5f59b8f679-58mlp" event={"ID":"245f4876-8b58-42ff-864d-a9bda0fd9c32","Type":"ContainerDied","Data":"43775857791c54766f7c66c4aed8b9f2c9259bd699c3f717673ee9aa40ae0b23"} Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.470986 5050 scope.go:117] "RemoveContainer" 
containerID="e44f54d0198ef92639aceb836d4552afa065a7a78bcc0146999c46d9e307c41a" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.471083 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5f59b8f679-58mlp" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.476714 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"3ad391d9-4f42-4252-b17d-55db2e864d06","Type":"ContainerDied","Data":"2d329242fe529ebd4d99f6d436f7b088f35f092a79202c0af2aea286b0f6b865"} Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.476725 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.479659 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"cfff6fc4-80c4-47e9-8bd3-318ad452fff5","Type":"ContainerDied","Data":"8a81af822658866d1f7ccca01409f9c39283fa8f981e6cbe5fe0a43fb06cb0c8"} Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.479734 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.509123 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5f59b8f679-58mlp"] Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.522909 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5f59b8f679-58mlp"] Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.532940 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.548682 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.586774 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.595742 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.616086 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 23 15:00:46 crc kubenswrapper[5050]: E1123 15:00:46.616609 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cfff6fc4-80c4-47e9-8bd3-318ad452fff5" containerName="glance-httpd" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.616629 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="cfff6fc4-80c4-47e9-8bd3-318ad452fff5" containerName="glance-httpd" Nov 23 15:00:46 crc kubenswrapper[5050]: E1123 15:00:46.616648 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3ad391d9-4f42-4252-b17d-55db2e864d06" containerName="glance-httpd" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.616654 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="3ad391d9-4f42-4252-b17d-55db2e864d06" containerName="glance-httpd" Nov 23 15:00:46 crc kubenswrapper[5050]: E1123 15:00:46.616664 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="245f4876-8b58-42ff-864d-a9bda0fd9c32" containerName="dnsmasq-dns" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.616670 5050 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="245f4876-8b58-42ff-864d-a9bda0fd9c32" containerName="dnsmasq-dns" Nov 23 15:00:46 crc kubenswrapper[5050]: E1123 15:00:46.616681 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="245f4876-8b58-42ff-864d-a9bda0fd9c32" containerName="init" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.616707 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="245f4876-8b58-42ff-864d-a9bda0fd9c32" containerName="init" Nov 23 15:00:46 crc kubenswrapper[5050]: E1123 15:00:46.616723 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3ad391d9-4f42-4252-b17d-55db2e864d06" containerName="glance-log" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.616729 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="3ad391d9-4f42-4252-b17d-55db2e864d06" containerName="glance-log" Nov 23 15:00:46 crc kubenswrapper[5050]: E1123 15:00:46.616741 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cfff6fc4-80c4-47e9-8bd3-318ad452fff5" containerName="glance-log" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.616748 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="cfff6fc4-80c4-47e9-8bd3-318ad452fff5" containerName="glance-log" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.616942 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="cfff6fc4-80c4-47e9-8bd3-318ad452fff5" containerName="glance-httpd" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.616953 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="245f4876-8b58-42ff-864d-a9bda0fd9c32" containerName="dnsmasq-dns" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.616962 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="3ad391d9-4f42-4252-b17d-55db2e864d06" containerName="glance-httpd" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.616972 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="cfff6fc4-80c4-47e9-8bd3-318ad452fff5" containerName="glance-log" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.616982 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="3ad391d9-4f42-4252-b17d-55db2e864d06" containerName="glance-log" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.617987 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.620294 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.620600 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.620687 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.620794 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-g8gzt" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.626374 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.635251 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.636909 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.642256 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.642616 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.654030 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.697572 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-internal-api-0\" (UID: \"8d700d7b-7ad1-41bc-a0fe-a60f106ae33b\") " pod="openstack/glance-default-internal-api-0" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.697633 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8d700d7b-7ad1-41bc-a0fe-a60f106ae33b-logs\") pod \"glance-default-internal-api-0\" (UID: \"8d700d7b-7ad1-41bc-a0fe-a60f106ae33b\") " pod="openstack/glance-default-internal-api-0" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.697750 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8d700d7b-7ad1-41bc-a0fe-a60f106ae33b-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"8d700d7b-7ad1-41bc-a0fe-a60f106ae33b\") " pod="openstack/glance-default-internal-api-0" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.697782 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8d700d7b-7ad1-41bc-a0fe-a60f106ae33b-scripts\") pod \"glance-default-internal-api-0\" (UID: \"8d700d7b-7ad1-41bc-a0fe-a60f106ae33b\") " pod="openstack/glance-default-internal-api-0" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.697798 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8d700d7b-7ad1-41bc-a0fe-a60f106ae33b-config-data\") pod \"glance-default-internal-api-0\" (UID: \"8d700d7b-7ad1-41bc-a0fe-a60f106ae33b\") " pod="openstack/glance-default-internal-api-0" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.697823 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8d700d7b-7ad1-41bc-a0fe-a60f106ae33b-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"8d700d7b-7ad1-41bc-a0fe-a60f106ae33b\") " pod="openstack/glance-default-internal-api-0" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.697908 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/8d700d7b-7ad1-41bc-a0fe-a60f106ae33b-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"8d700d7b-7ad1-41bc-a0fe-a60f106ae33b\") " pod="openstack/glance-default-internal-api-0" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.697952 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"kube-api-access-h8hp4\" (UniqueName: \"kubernetes.io/projected/8d700d7b-7ad1-41bc-a0fe-a60f106ae33b-kube-api-access-h8hp4\") pod \"glance-default-internal-api-0\" (UID: \"8d700d7b-7ad1-41bc-a0fe-a60f106ae33b\") " pod="openstack/glance-default-internal-api-0" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.799600 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8d700d7b-7ad1-41bc-a0fe-a60f106ae33b-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"8d700d7b-7ad1-41bc-a0fe-a60f106ae33b\") " pod="openstack/glance-default-internal-api-0" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.799723 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/83a846d6-b03f-4bc1-ac31-cacf0ee96658-logs\") pod \"glance-default-external-api-0\" (UID: \"83a846d6-b03f-4bc1-ac31-cacf0ee96658\") " pod="openstack/glance-default-external-api-0" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.799754 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zjdj6\" (UniqueName: \"kubernetes.io/projected/83a846d6-b03f-4bc1-ac31-cacf0ee96658-kube-api-access-zjdj6\") pod \"glance-default-external-api-0\" (UID: \"83a846d6-b03f-4bc1-ac31-cacf0ee96658\") " pod="openstack/glance-default-external-api-0" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.799779 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/8d700d7b-7ad1-41bc-a0fe-a60f106ae33b-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"8d700d7b-7ad1-41bc-a0fe-a60f106ae33b\") " pod="openstack/glance-default-internal-api-0" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.799822 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h8hp4\" (UniqueName: \"kubernetes.io/projected/8d700d7b-7ad1-41bc-a0fe-a60f106ae33b-kube-api-access-h8hp4\") pod \"glance-default-internal-api-0\" (UID: \"8d700d7b-7ad1-41bc-a0fe-a60f106ae33b\") " pod="openstack/glance-default-internal-api-0" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.799848 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/83a846d6-b03f-4bc1-ac31-cacf0ee96658-config-data\") pod \"glance-default-external-api-0\" (UID: \"83a846d6-b03f-4bc1-ac31-cacf0ee96658\") " pod="openstack/glance-default-external-api-0" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.799879 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-internal-api-0\" (UID: \"8d700d7b-7ad1-41bc-a0fe-a60f106ae33b\") " pod="openstack/glance-default-internal-api-0" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.799911 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/83a846d6-b03f-4bc1-ac31-cacf0ee96658-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"83a846d6-b03f-4bc1-ac31-cacf0ee96658\") " pod="openstack/glance-default-external-api-0" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.799940 5050 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8d700d7b-7ad1-41bc-a0fe-a60f106ae33b-logs\") pod \"glance-default-internal-api-0\" (UID: \"8d700d7b-7ad1-41bc-a0fe-a60f106ae33b\") " pod="openstack/glance-default-internal-api-0" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.799970 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/83a846d6-b03f-4bc1-ac31-cacf0ee96658-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"83a846d6-b03f-4bc1-ac31-cacf0ee96658\") " pod="openstack/glance-default-external-api-0" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.800007 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/83a846d6-b03f-4bc1-ac31-cacf0ee96658-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"83a846d6-b03f-4bc1-ac31-cacf0ee96658\") " pod="openstack/glance-default-external-api-0" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.800068 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8d700d7b-7ad1-41bc-a0fe-a60f106ae33b-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"8d700d7b-7ad1-41bc-a0fe-a60f106ae33b\") " pod="openstack/glance-default-internal-api-0" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.800092 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8d700d7b-7ad1-41bc-a0fe-a60f106ae33b-scripts\") pod \"glance-default-internal-api-0\" (UID: \"8d700d7b-7ad1-41bc-a0fe-a60f106ae33b\") " pod="openstack/glance-default-internal-api-0" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.800119 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"83a846d6-b03f-4bc1-ac31-cacf0ee96658\") " pod="openstack/glance-default-external-api-0" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.800141 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/83a846d6-b03f-4bc1-ac31-cacf0ee96658-scripts\") pod \"glance-default-external-api-0\" (UID: \"83a846d6-b03f-4bc1-ac31-cacf0ee96658\") " pod="openstack/glance-default-external-api-0" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.800166 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8d700d7b-7ad1-41bc-a0fe-a60f106ae33b-config-data\") pod \"glance-default-internal-api-0\" (UID: \"8d700d7b-7ad1-41bc-a0fe-a60f106ae33b\") " pod="openstack/glance-default-internal-api-0" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.800309 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/8d700d7b-7ad1-41bc-a0fe-a60f106ae33b-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"8d700d7b-7ad1-41bc-a0fe-a60f106ae33b\") " pod="openstack/glance-default-internal-api-0" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.800473 5050 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume 
\"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-internal-api-0\" (UID: \"8d700d7b-7ad1-41bc-a0fe-a60f106ae33b\") device mount path \"/mnt/openstack/pv11\"" pod="openstack/glance-default-internal-api-0" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.801063 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8d700d7b-7ad1-41bc-a0fe-a60f106ae33b-logs\") pod \"glance-default-internal-api-0\" (UID: \"8d700d7b-7ad1-41bc-a0fe-a60f106ae33b\") " pod="openstack/glance-default-internal-api-0" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.804292 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8d700d7b-7ad1-41bc-a0fe-a60f106ae33b-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"8d700d7b-7ad1-41bc-a0fe-a60f106ae33b\") " pod="openstack/glance-default-internal-api-0" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.804835 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8d700d7b-7ad1-41bc-a0fe-a60f106ae33b-config-data\") pod \"glance-default-internal-api-0\" (UID: \"8d700d7b-7ad1-41bc-a0fe-a60f106ae33b\") " pod="openstack/glance-default-internal-api-0" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.804970 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8d700d7b-7ad1-41bc-a0fe-a60f106ae33b-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"8d700d7b-7ad1-41bc-a0fe-a60f106ae33b\") " pod="openstack/glance-default-internal-api-0" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.817179 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8d700d7b-7ad1-41bc-a0fe-a60f106ae33b-scripts\") pod \"glance-default-internal-api-0\" (UID: \"8d700d7b-7ad1-41bc-a0fe-a60f106ae33b\") " pod="openstack/glance-default-internal-api-0" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.820111 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h8hp4\" (UniqueName: \"kubernetes.io/projected/8d700d7b-7ad1-41bc-a0fe-a60f106ae33b-kube-api-access-h8hp4\") pod \"glance-default-internal-api-0\" (UID: \"8d700d7b-7ad1-41bc-a0fe-a60f106ae33b\") " pod="openstack/glance-default-internal-api-0" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.832384 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-internal-api-0\" (UID: \"8d700d7b-7ad1-41bc-a0fe-a60f106ae33b\") " pod="openstack/glance-default-internal-api-0" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.904174 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"83a846d6-b03f-4bc1-ac31-cacf0ee96658\") " pod="openstack/glance-default-external-api-0" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.904260 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/83a846d6-b03f-4bc1-ac31-cacf0ee96658-scripts\") pod \"glance-default-external-api-0\" (UID: 
\"83a846d6-b03f-4bc1-ac31-cacf0ee96658\") " pod="openstack/glance-default-external-api-0" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.904391 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/83a846d6-b03f-4bc1-ac31-cacf0ee96658-logs\") pod \"glance-default-external-api-0\" (UID: \"83a846d6-b03f-4bc1-ac31-cacf0ee96658\") " pod="openstack/glance-default-external-api-0" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.904418 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zjdj6\" (UniqueName: \"kubernetes.io/projected/83a846d6-b03f-4bc1-ac31-cacf0ee96658-kube-api-access-zjdj6\") pod \"glance-default-external-api-0\" (UID: \"83a846d6-b03f-4bc1-ac31-cacf0ee96658\") " pod="openstack/glance-default-external-api-0" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.904497 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/83a846d6-b03f-4bc1-ac31-cacf0ee96658-config-data\") pod \"glance-default-external-api-0\" (UID: \"83a846d6-b03f-4bc1-ac31-cacf0ee96658\") " pod="openstack/glance-default-external-api-0" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.904538 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/83a846d6-b03f-4bc1-ac31-cacf0ee96658-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"83a846d6-b03f-4bc1-ac31-cacf0ee96658\") " pod="openstack/glance-default-external-api-0" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.904573 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/83a846d6-b03f-4bc1-ac31-cacf0ee96658-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"83a846d6-b03f-4bc1-ac31-cacf0ee96658\") " pod="openstack/glance-default-external-api-0" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.904622 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/83a846d6-b03f-4bc1-ac31-cacf0ee96658-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"83a846d6-b03f-4bc1-ac31-cacf0ee96658\") " pod="openstack/glance-default-external-api-0" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.905490 5050 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"83a846d6-b03f-4bc1-ac31-cacf0ee96658\") device mount path \"/mnt/openstack/pv09\"" pod="openstack/glance-default-external-api-0" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.905934 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/83a846d6-b03f-4bc1-ac31-cacf0ee96658-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"83a846d6-b03f-4bc1-ac31-cacf0ee96658\") " pod="openstack/glance-default-external-api-0" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.907858 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/83a846d6-b03f-4bc1-ac31-cacf0ee96658-logs\") pod \"glance-default-external-api-0\" (UID: \"83a846d6-b03f-4bc1-ac31-cacf0ee96658\") " 
pod="openstack/glance-default-external-api-0" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.910290 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/83a846d6-b03f-4bc1-ac31-cacf0ee96658-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"83a846d6-b03f-4bc1-ac31-cacf0ee96658\") " pod="openstack/glance-default-external-api-0" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.910550 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/83a846d6-b03f-4bc1-ac31-cacf0ee96658-scripts\") pod \"glance-default-external-api-0\" (UID: \"83a846d6-b03f-4bc1-ac31-cacf0ee96658\") " pod="openstack/glance-default-external-api-0" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.911531 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/83a846d6-b03f-4bc1-ac31-cacf0ee96658-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"83a846d6-b03f-4bc1-ac31-cacf0ee96658\") " pod="openstack/glance-default-external-api-0" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.921246 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/83a846d6-b03f-4bc1-ac31-cacf0ee96658-config-data\") pod \"glance-default-external-api-0\" (UID: \"83a846d6-b03f-4bc1-ac31-cacf0ee96658\") " pod="openstack/glance-default-external-api-0" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.924301 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zjdj6\" (UniqueName: \"kubernetes.io/projected/83a846d6-b03f-4bc1-ac31-cacf0ee96658-kube-api-access-zjdj6\") pod \"glance-default-external-api-0\" (UID: \"83a846d6-b03f-4bc1-ac31-cacf0ee96658\") " pod="openstack/glance-default-external-api-0" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.947853 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"83a846d6-b03f-4bc1-ac31-cacf0ee96658\") " pod="openstack/glance-default-external-api-0" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.948671 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 23 15:00:46 crc kubenswrapper[5050]: I1123 15:00:46.961690 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 23 15:00:47 crc kubenswrapper[5050]: I1123 15:00:47.414918 5050 scope.go:117] "RemoveContainer" containerID="bd22171393bb20d0c3e16b69fe310cf436b5cd98cf076a7e65ca368fc12901b7" Nov 23 15:00:47 crc kubenswrapper[5050]: E1123 15:00:47.456720 5050 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified" Nov 23 15:00:47 crc kubenswrapper[5050]: E1123 15:00:47.456911 5050 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cinder-db-sync,Image:quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_set_configs && /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:etc-machine-id,ReadOnly:true,MountPath:/etc/machine-id,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/config-data/merged,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/cinder/cinder.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:db-sync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-mb2sl,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cinder-db-sync-82ms7_openstack(93d8639a-cdc9-4e9f-a17f-883debe12333): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 23 15:00:47 crc kubenswrapper[5050]: E1123 15:00:47.458398 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/cinder-db-sync-82ms7" 
podUID="93d8639a-cdc9-4e9f-a17f-883debe12333" Nov 23 15:00:47 crc kubenswrapper[5050]: E1123 15:00:47.500507 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified\\\"\"" pod="openstack/cinder-db-sync-82ms7" podUID="93d8639a-cdc9-4e9f-a17f-883debe12333" Nov 23 15:00:47 crc kubenswrapper[5050]: I1123 15:00:47.575540 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="245f4876-8b58-42ff-864d-a9bda0fd9c32" path="/var/lib/kubelet/pods/245f4876-8b58-42ff-864d-a9bda0fd9c32/volumes" Nov 23 15:00:47 crc kubenswrapper[5050]: I1123 15:00:47.577029 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ad391d9-4f42-4252-b17d-55db2e864d06" path="/var/lib/kubelet/pods/3ad391d9-4f42-4252-b17d-55db2e864d06/volumes" Nov 23 15:00:47 crc kubenswrapper[5050]: I1123 15:00:47.577839 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cfff6fc4-80c4-47e9-8bd3-318ad452fff5" path="/var/lib/kubelet/pods/cfff6fc4-80c4-47e9-8bd3-318ad452fff5/volumes" Nov 23 15:00:47 crc kubenswrapper[5050]: I1123 15:00:47.578227 5050 scope.go:117] "RemoveContainer" containerID="7aba863ec17643ee15f6cc0b758c8a11c0990bbcb2edcb00dc5e320abde88305" Nov 23 15:00:47 crc kubenswrapper[5050]: I1123 15:00:47.656696 5050 scope.go:117] "RemoveContainer" containerID="e0e0889e4ba6af9d7a9a2a08c49f65b1b68a38893a92ed91943dc840547247d8" Nov 23 15:00:47 crc kubenswrapper[5050]: I1123 15:00:47.725167 5050 scope.go:117] "RemoveContainer" containerID="dfa3c1520eff7561f76d667fac369eb10b90aa2651f76e0a640aeb5b68c240d5" Nov 23 15:00:47 crc kubenswrapper[5050]: I1123 15:00:47.771267 5050 scope.go:117] "RemoveContainer" containerID="30500673840a9c9b3232b4360359427b3e0012e6878149a851b8a4644671e568" Nov 23 15:00:47 crc kubenswrapper[5050]: I1123 15:00:47.934091 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-zskrr"] Nov 23 15:00:47 crc kubenswrapper[5050]: I1123 15:00:47.953052 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Nov 23 15:00:48 crc kubenswrapper[5050]: W1123 15:00:48.162184 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8d700d7b_7ad1_41bc_a0fe_a60f106ae33b.slice/crio-b8d222eba1709a414d9b5d966984f29435629b884da50f63cde094aea412d09f WatchSource:0}: Error finding container b8d222eba1709a414d9b5d966984f29435629b884da50f63cde094aea412d09f: Status 404 returned error can't find the container with id b8d222eba1709a414d9b5d966984f29435629b884da50f63cde094aea412d09f Nov 23 15:00:48 crc kubenswrapper[5050]: I1123 15:00:48.164231 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 23 15:00:48 crc kubenswrapper[5050]: I1123 15:00:48.279045 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 23 15:00:48 crc kubenswrapper[5050]: W1123 15:00:48.294931 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod83a846d6_b03f_4bc1_ac31_cacf0ee96658.slice/crio-df108e38b00fd595113f100be472d8632ad509e0b3b11d89b353d918c4ca4e8c WatchSource:0}: Error finding container df108e38b00fd595113f100be472d8632ad509e0b3b11d89b353d918c4ca4e8c: Status 404 returned error can't find the 
Nov 23 15:00:48 crc kubenswrapper[5050]: I1123 15:00:48.522996 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"8d700d7b-7ad1-41bc-a0fe-a60f106ae33b","Type":"ContainerStarted","Data":"b8d222eba1709a414d9b5d966984f29435629b884da50f63cde094aea412d09f"}
Nov 23 15:00:48 crc kubenswrapper[5050]: I1123 15:00:48.525732 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"83a846d6-b03f-4bc1-ac31-cacf0ee96658","Type":"ContainerStarted","Data":"df108e38b00fd595113f100be472d8632ad509e0b3b11d89b353d918c4ca4e8c"}
Nov 23 15:00:48 crc kubenswrapper[5050]: I1123 15:00:48.536288 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-j7p9t" event={"ID":"3291f014-f18b-4142-a770-d9c33b141d66","Type":"ContainerStarted","Data":"413a63448c5f1b6aa0ed69c820b78045e358548763dd1334c1ea783960639530"}
Nov 23 15:00:48 crc kubenswrapper[5050]: I1123 15:00:48.548661 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"940dffda-38d6-469c-8453-5acb4092ed82","Type":"ContainerStarted","Data":"60e10035b1ca3fb05a796d8061c3ae4fc86d25ebf065c1682c1ac895dd99e388"}
Nov 23 15:00:48 crc kubenswrapper[5050]: I1123 15:00:48.557101 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-zskrr" event={"ID":"889f2b40-a1f4-45e2-b8e1-0fc72a7b55ca","Type":"ContainerStarted","Data":"ca03918f91f0d24416b97c1154f1427e4787e0717a61e93ae86f59f5dae6d896"}
Nov 23 15:00:48 crc kubenswrapper[5050]: I1123 15:00:48.557184 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-zskrr" event={"ID":"889f2b40-a1f4-45e2-b8e1-0fc72a7b55ca","Type":"ContainerStarted","Data":"8efc81fbd8cf18971e8925f965a2526433911195bb92cb82d7f3a89f0d60ab0f"}
Nov 23 15:00:48 crc kubenswrapper[5050]: I1123 15:00:48.559721 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-db-sync-j7p9t" podStartSLOduration=3.336489283 podStartE2EDuration="24.559695671s" podCreationTimestamp="2025-11-23 15:00:24 +0000 UTC" firstStartedPulling="2025-11-23 15:00:26.175524292 +0000 UTC m=+1121.342520767" lastFinishedPulling="2025-11-23 15:00:47.39873067 +0000 UTC m=+1142.565727155" observedRunningTime="2025-11-23 15:00:48.553147468 +0000 UTC m=+1143.720143953" watchObservedRunningTime="2025-11-23 15:00:48.559695671 +0000 UTC m=+1143.726692156"
Nov 23 15:00:48 crc kubenswrapper[5050]: I1123 15:00:48.564238 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-ftmg2" event={"ID":"f53cab34-8b0f-45cf-8ef9-a524aa3578b4","Type":"ContainerStarted","Data":"ff68a1c25ddcf47d18f14477af4201b7b01eca17df536e55e05e2a33a3ed79c8"}
Nov 23 15:00:48 crc kubenswrapper[5050]: I1123 15:00:48.586022 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-zskrr" podStartSLOduration=11.586002582999999 podStartE2EDuration="11.586002583s" podCreationTimestamp="2025-11-23 15:00:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 15:00:48.584936733 +0000 UTC m=+1143.751933218" watchObservedRunningTime="2025-11-23 15:00:48.586002583 +0000 UTC m=+1143.752999068"
Nov 23 15:00:48 crc kubenswrapper[5050]: I1123 15:00:48.616570 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-db-sync-ftmg2" podStartSLOduration=2.88414389 podStartE2EDuration="24.616551854s" podCreationTimestamp="2025-11-23 15:00:24 +0000 UTC" firstStartedPulling="2025-11-23 15:00:25.702089232 +0000 UTC m=+1120.869085717" lastFinishedPulling="2025-11-23 15:00:47.434497196 +0000 UTC m=+1142.601493681" observedRunningTime="2025-11-23 15:00:48.605921058 +0000 UTC m=+1143.772917543" watchObservedRunningTime="2025-11-23 15:00:48.616551854 +0000 UTC m=+1143.783548339"
Nov 23 15:00:49 crc kubenswrapper[5050]: I1123 15:00:49.398800 5050 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-5f59b8f679-58mlp" podUID="245f4876-8b58-42ff-864d-a9bda0fd9c32" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.131:5353: i/o timeout"
Nov 23 15:00:49 crc kubenswrapper[5050]: I1123 15:00:49.577480 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"8d700d7b-7ad1-41bc-a0fe-a60f106ae33b","Type":"ContainerStarted","Data":"a8481807947b6125b632d234b7c09c019ca4e117d153545ff0b1bfd657c9904e"}
Nov 23 15:00:49 crc kubenswrapper[5050]: I1123 15:00:49.580838 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"83a846d6-b03f-4bc1-ac31-cacf0ee96658","Type":"ContainerStarted","Data":"4eaad0b4b2774c58b0ed715e526bba7629a4da67f0b876ef9fe855cfffd60bbf"}
Nov 23 15:00:49 crc kubenswrapper[5050]: I1123 15:00:49.582907 5050 generic.go:334] "Generic (PLEG): container finished" podID="49ce1b12-d28e-481e-ad05-68355446dd4c" containerID="096bc79c344ebb77c5036294dbf1470f2dcc544c7a30af4d159b48bf4fc5d85b" exitCode=0
Nov 23 15:00:49 crc kubenswrapper[5050]: I1123 15:00:49.583027 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-sdvpr" event={"ID":"49ce1b12-d28e-481e-ad05-68355446dd4c","Type":"ContainerDied","Data":"096bc79c344ebb77c5036294dbf1470f2dcc544c7a30af4d159b48bf4fc5d85b"}
Nov 23 15:00:50 crc kubenswrapper[5050]: I1123 15:00:50.603099 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"940dffda-38d6-469c-8453-5acb4092ed82","Type":"ContainerStarted","Data":"62fe46e75027e1094cf742edff33ace7b51cd7f03786f96706f9e8112a7a8a36"}
Nov 23 15:00:50 crc kubenswrapper[5050]: I1123 15:00:50.606354 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"8d700d7b-7ad1-41bc-a0fe-a60f106ae33b","Type":"ContainerStarted","Data":"7c37b6da7c1af7beb9189416f11bacec18531c7a243fc6250c6f74bebc17d8c8"}
Nov 23 15:00:50 crc kubenswrapper[5050]: I1123 15:00:50.658544 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=4.65851785 podStartE2EDuration="4.65851785s" podCreationTimestamp="2025-11-23 15:00:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 15:00:50.638256766 +0000 UTC m=+1145.805253261" watchObservedRunningTime="2025-11-23 15:00:50.65851785 +0000 UTC m=+1145.825514345"
Nov 23 15:00:51 crc kubenswrapper[5050]: I1123 15:00:51.005738 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-sdvpr"
Need to start a new one" pod="openstack/neutron-db-sync-sdvpr" Nov 23 15:00:51 crc kubenswrapper[5050]: I1123 15:00:51.116156 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/49ce1b12-d28e-481e-ad05-68355446dd4c-combined-ca-bundle\") pod \"49ce1b12-d28e-481e-ad05-68355446dd4c\" (UID: \"49ce1b12-d28e-481e-ad05-68355446dd4c\") " Nov 23 15:00:51 crc kubenswrapper[5050]: I1123 15:00:51.116521 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/49ce1b12-d28e-481e-ad05-68355446dd4c-config\") pod \"49ce1b12-d28e-481e-ad05-68355446dd4c\" (UID: \"49ce1b12-d28e-481e-ad05-68355446dd4c\") " Nov 23 15:00:51 crc kubenswrapper[5050]: I1123 15:00:51.117574 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pz5j9\" (UniqueName: \"kubernetes.io/projected/49ce1b12-d28e-481e-ad05-68355446dd4c-kube-api-access-pz5j9\") pod \"49ce1b12-d28e-481e-ad05-68355446dd4c\" (UID: \"49ce1b12-d28e-481e-ad05-68355446dd4c\") " Nov 23 15:00:51 crc kubenswrapper[5050]: I1123 15:00:51.124028 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49ce1b12-d28e-481e-ad05-68355446dd4c-kube-api-access-pz5j9" (OuterVolumeSpecName: "kube-api-access-pz5j9") pod "49ce1b12-d28e-481e-ad05-68355446dd4c" (UID: "49ce1b12-d28e-481e-ad05-68355446dd4c"). InnerVolumeSpecName "kube-api-access-pz5j9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:00:51 crc kubenswrapper[5050]: I1123 15:00:51.154491 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49ce1b12-d28e-481e-ad05-68355446dd4c-config" (OuterVolumeSpecName: "config") pod "49ce1b12-d28e-481e-ad05-68355446dd4c" (UID: "49ce1b12-d28e-481e-ad05-68355446dd4c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:00:51 crc kubenswrapper[5050]: I1123 15:00:51.155961 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49ce1b12-d28e-481e-ad05-68355446dd4c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "49ce1b12-d28e-481e-ad05-68355446dd4c" (UID: "49ce1b12-d28e-481e-ad05-68355446dd4c"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:00:51 crc kubenswrapper[5050]: I1123 15:00:51.220972 5050 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/49ce1b12-d28e-481e-ad05-68355446dd4c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 15:00:51 crc kubenswrapper[5050]: I1123 15:00:51.221013 5050 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/49ce1b12-d28e-481e-ad05-68355446dd4c-config\") on node \"crc\" DevicePath \"\"" Nov 23 15:00:51 crc kubenswrapper[5050]: I1123 15:00:51.221030 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pz5j9\" (UniqueName: \"kubernetes.io/projected/49ce1b12-d28e-481e-ad05-68355446dd4c-kube-api-access-pz5j9\") on node \"crc\" DevicePath \"\"" Nov 23 15:00:51 crc kubenswrapper[5050]: I1123 15:00:51.626486 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"83a846d6-b03f-4bc1-ac31-cacf0ee96658","Type":"ContainerStarted","Data":"361f0f712cdb56c1a734658f4960db8d2ba0252abc0913de0ab85035acfe8a7b"} Nov 23 15:00:51 crc kubenswrapper[5050]: I1123 15:00:51.648645 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-sdvpr" event={"ID":"49ce1b12-d28e-481e-ad05-68355446dd4c","Type":"ContainerDied","Data":"1957be852c3f2c65d6ab69178531aec36e53ec68fd2b7c2f0249fd400d3bc094"} Nov 23 15:00:51 crc kubenswrapper[5050]: I1123 15:00:51.648695 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1957be852c3f2c65d6ab69178531aec36e53ec68fd2b7c2f0249fd400d3bc094" Nov 23 15:00:51 crc kubenswrapper[5050]: I1123 15:00:51.648704 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-sdvpr" Nov 23 15:00:51 crc kubenswrapper[5050]: I1123 15:00:51.667219 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=5.667192651 podStartE2EDuration="5.667192651s" podCreationTimestamp="2025-11-23 15:00:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 15:00:51.657044108 +0000 UTC m=+1146.824040593" watchObservedRunningTime="2025-11-23 15:00:51.667192651 +0000 UTC m=+1146.834189136" Nov 23 15:00:51 crc kubenswrapper[5050]: I1123 15:00:51.853177 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6b7b667979-dcnfh"] Nov 23 15:00:51 crc kubenswrapper[5050]: E1123 15:00:51.853630 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="49ce1b12-d28e-481e-ad05-68355446dd4c" containerName="neutron-db-sync" Nov 23 15:00:51 crc kubenswrapper[5050]: I1123 15:00:51.853649 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="49ce1b12-d28e-481e-ad05-68355446dd4c" containerName="neutron-db-sync" Nov 23 15:00:51 crc kubenswrapper[5050]: I1123 15:00:51.853821 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="49ce1b12-d28e-481e-ad05-68355446dd4c" containerName="neutron-db-sync" Nov 23 15:00:51 crc kubenswrapper[5050]: I1123 15:00:51.857023 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6b7b667979-dcnfh" Nov 23 15:00:51 crc kubenswrapper[5050]: I1123 15:00:51.889606 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6b7b667979-dcnfh"] Nov 23 15:00:52 crc kubenswrapper[5050]: I1123 15:00:52.001090 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/57d4c7fd-cbe6-41c6-868f-de274707cd2d-dns-swift-storage-0\") pod \"dnsmasq-dns-6b7b667979-dcnfh\" (UID: \"57d4c7fd-cbe6-41c6-868f-de274707cd2d\") " pod="openstack/dnsmasq-dns-6b7b667979-dcnfh" Nov 23 15:00:52 crc kubenswrapper[5050]: I1123 15:00:52.001175 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/57d4c7fd-cbe6-41c6-868f-de274707cd2d-dns-svc\") pod \"dnsmasq-dns-6b7b667979-dcnfh\" (UID: \"57d4c7fd-cbe6-41c6-868f-de274707cd2d\") " pod="openstack/dnsmasq-dns-6b7b667979-dcnfh" Nov 23 15:00:52 crc kubenswrapper[5050]: I1123 15:00:52.001285 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/57d4c7fd-cbe6-41c6-868f-de274707cd2d-ovsdbserver-nb\") pod \"dnsmasq-dns-6b7b667979-dcnfh\" (UID: \"57d4c7fd-cbe6-41c6-868f-de274707cd2d\") " pod="openstack/dnsmasq-dns-6b7b667979-dcnfh" Nov 23 15:00:52 crc kubenswrapper[5050]: I1123 15:00:52.001671 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/57d4c7fd-cbe6-41c6-868f-de274707cd2d-ovsdbserver-sb\") pod \"dnsmasq-dns-6b7b667979-dcnfh\" (UID: \"57d4c7fd-cbe6-41c6-868f-de274707cd2d\") " pod="openstack/dnsmasq-dns-6b7b667979-dcnfh" Nov 23 15:00:52 crc kubenswrapper[5050]: I1123 15:00:52.001990 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6dmzd\" (UniqueName: \"kubernetes.io/projected/57d4c7fd-cbe6-41c6-868f-de274707cd2d-kube-api-access-6dmzd\") pod \"dnsmasq-dns-6b7b667979-dcnfh\" (UID: \"57d4c7fd-cbe6-41c6-868f-de274707cd2d\") " pod="openstack/dnsmasq-dns-6b7b667979-dcnfh" Nov 23 15:00:52 crc kubenswrapper[5050]: I1123 15:00:52.002080 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/57d4c7fd-cbe6-41c6-868f-de274707cd2d-config\") pod \"dnsmasq-dns-6b7b667979-dcnfh\" (UID: \"57d4c7fd-cbe6-41c6-868f-de274707cd2d\") " pod="openstack/dnsmasq-dns-6b7b667979-dcnfh" Nov 23 15:00:52 crc kubenswrapper[5050]: I1123 15:00:52.019162 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-557bdb6d7b-clvms"] Nov 23 15:00:52 crc kubenswrapper[5050]: I1123 15:00:52.027704 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-557bdb6d7b-clvms" Nov 23 15:00:52 crc kubenswrapper[5050]: I1123 15:00:52.033680 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-ovndbs" Nov 23 15:00:52 crc kubenswrapper[5050]: I1123 15:00:52.034188 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-mn2cc" Nov 23 15:00:52 crc kubenswrapper[5050]: I1123 15:00:52.034461 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Nov 23 15:00:52 crc kubenswrapper[5050]: I1123 15:00:52.034869 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Nov 23 15:00:52 crc kubenswrapper[5050]: I1123 15:00:52.046722 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-557bdb6d7b-clvms"] Nov 23 15:00:52 crc kubenswrapper[5050]: I1123 15:00:52.110706 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/97329021-2f95-4540-bb83-c816a4f471ff-ovndb-tls-certs\") pod \"neutron-557bdb6d7b-clvms\" (UID: \"97329021-2f95-4540-bb83-c816a4f471ff\") " pod="openstack/neutron-557bdb6d7b-clvms" Nov 23 15:00:52 crc kubenswrapper[5050]: I1123 15:00:52.110783 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/57d4c7fd-cbe6-41c6-868f-de274707cd2d-dns-swift-storage-0\") pod \"dnsmasq-dns-6b7b667979-dcnfh\" (UID: \"57d4c7fd-cbe6-41c6-868f-de274707cd2d\") " pod="openstack/dnsmasq-dns-6b7b667979-dcnfh" Nov 23 15:00:52 crc kubenswrapper[5050]: I1123 15:00:52.110816 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/97329021-2f95-4540-bb83-c816a4f471ff-combined-ca-bundle\") pod \"neutron-557bdb6d7b-clvms\" (UID: \"97329021-2f95-4540-bb83-c816a4f471ff\") " pod="openstack/neutron-557bdb6d7b-clvms" Nov 23 15:00:52 crc kubenswrapper[5050]: I1123 15:00:52.110858 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/57d4c7fd-cbe6-41c6-868f-de274707cd2d-dns-svc\") pod \"dnsmasq-dns-6b7b667979-dcnfh\" (UID: \"57d4c7fd-cbe6-41c6-868f-de274707cd2d\") " pod="openstack/dnsmasq-dns-6b7b667979-dcnfh" Nov 23 15:00:52 crc kubenswrapper[5050]: I1123 15:00:52.110896 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/57d4c7fd-cbe6-41c6-868f-de274707cd2d-ovsdbserver-nb\") pod \"dnsmasq-dns-6b7b667979-dcnfh\" (UID: \"57d4c7fd-cbe6-41c6-868f-de274707cd2d\") " pod="openstack/dnsmasq-dns-6b7b667979-dcnfh" Nov 23 15:00:52 crc kubenswrapper[5050]: I1123 15:00:52.110918 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/97329021-2f95-4540-bb83-c816a4f471ff-config\") pod \"neutron-557bdb6d7b-clvms\" (UID: \"97329021-2f95-4540-bb83-c816a4f471ff\") " pod="openstack/neutron-557bdb6d7b-clvms" Nov 23 15:00:52 crc kubenswrapper[5050]: I1123 15:00:52.110970 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/97329021-2f95-4540-bb83-c816a4f471ff-httpd-config\") pod \"neutron-557bdb6d7b-clvms\" (UID: 
\"97329021-2f95-4540-bb83-c816a4f471ff\") " pod="openstack/neutron-557bdb6d7b-clvms" Nov 23 15:00:52 crc kubenswrapper[5050]: I1123 15:00:52.111000 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/57d4c7fd-cbe6-41c6-868f-de274707cd2d-ovsdbserver-sb\") pod \"dnsmasq-dns-6b7b667979-dcnfh\" (UID: \"57d4c7fd-cbe6-41c6-868f-de274707cd2d\") " pod="openstack/dnsmasq-dns-6b7b667979-dcnfh" Nov 23 15:00:52 crc kubenswrapper[5050]: I1123 15:00:52.111022 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-92cjn\" (UniqueName: \"kubernetes.io/projected/97329021-2f95-4540-bb83-c816a4f471ff-kube-api-access-92cjn\") pod \"neutron-557bdb6d7b-clvms\" (UID: \"97329021-2f95-4540-bb83-c816a4f471ff\") " pod="openstack/neutron-557bdb6d7b-clvms" Nov 23 15:00:52 crc kubenswrapper[5050]: I1123 15:00:52.111057 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6dmzd\" (UniqueName: \"kubernetes.io/projected/57d4c7fd-cbe6-41c6-868f-de274707cd2d-kube-api-access-6dmzd\") pod \"dnsmasq-dns-6b7b667979-dcnfh\" (UID: \"57d4c7fd-cbe6-41c6-868f-de274707cd2d\") " pod="openstack/dnsmasq-dns-6b7b667979-dcnfh" Nov 23 15:00:52 crc kubenswrapper[5050]: I1123 15:00:52.111083 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/57d4c7fd-cbe6-41c6-868f-de274707cd2d-config\") pod \"dnsmasq-dns-6b7b667979-dcnfh\" (UID: \"57d4c7fd-cbe6-41c6-868f-de274707cd2d\") " pod="openstack/dnsmasq-dns-6b7b667979-dcnfh" Nov 23 15:00:52 crc kubenswrapper[5050]: I1123 15:00:52.112089 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/57d4c7fd-cbe6-41c6-868f-de274707cd2d-config\") pod \"dnsmasq-dns-6b7b667979-dcnfh\" (UID: \"57d4c7fd-cbe6-41c6-868f-de274707cd2d\") " pod="openstack/dnsmasq-dns-6b7b667979-dcnfh" Nov 23 15:00:52 crc kubenswrapper[5050]: I1123 15:00:52.112771 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/57d4c7fd-cbe6-41c6-868f-de274707cd2d-ovsdbserver-nb\") pod \"dnsmasq-dns-6b7b667979-dcnfh\" (UID: \"57d4c7fd-cbe6-41c6-868f-de274707cd2d\") " pod="openstack/dnsmasq-dns-6b7b667979-dcnfh" Nov 23 15:00:52 crc kubenswrapper[5050]: I1123 15:00:52.113030 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/57d4c7fd-cbe6-41c6-868f-de274707cd2d-ovsdbserver-sb\") pod \"dnsmasq-dns-6b7b667979-dcnfh\" (UID: \"57d4c7fd-cbe6-41c6-868f-de274707cd2d\") " pod="openstack/dnsmasq-dns-6b7b667979-dcnfh" Nov 23 15:00:52 crc kubenswrapper[5050]: I1123 15:00:52.113700 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/57d4c7fd-cbe6-41c6-868f-de274707cd2d-dns-svc\") pod \"dnsmasq-dns-6b7b667979-dcnfh\" (UID: \"57d4c7fd-cbe6-41c6-868f-de274707cd2d\") " pod="openstack/dnsmasq-dns-6b7b667979-dcnfh" Nov 23 15:00:52 crc kubenswrapper[5050]: I1123 15:00:52.113869 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/57d4c7fd-cbe6-41c6-868f-de274707cd2d-dns-swift-storage-0\") pod \"dnsmasq-dns-6b7b667979-dcnfh\" (UID: \"57d4c7fd-cbe6-41c6-868f-de274707cd2d\") " pod="openstack/dnsmasq-dns-6b7b667979-dcnfh" Nov 
23 15:00:52 crc kubenswrapper[5050]: I1123 15:00:52.146496 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6dmzd\" (UniqueName: \"kubernetes.io/projected/57d4c7fd-cbe6-41c6-868f-de274707cd2d-kube-api-access-6dmzd\") pod \"dnsmasq-dns-6b7b667979-dcnfh\" (UID: \"57d4c7fd-cbe6-41c6-868f-de274707cd2d\") " pod="openstack/dnsmasq-dns-6b7b667979-dcnfh" Nov 23 15:00:52 crc kubenswrapper[5050]: I1123 15:00:52.191396 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6b7b667979-dcnfh" Nov 23 15:00:52 crc kubenswrapper[5050]: I1123 15:00:52.213275 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/97329021-2f95-4540-bb83-c816a4f471ff-config\") pod \"neutron-557bdb6d7b-clvms\" (UID: \"97329021-2f95-4540-bb83-c816a4f471ff\") " pod="openstack/neutron-557bdb6d7b-clvms" Nov 23 15:00:52 crc kubenswrapper[5050]: I1123 15:00:52.213366 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/97329021-2f95-4540-bb83-c816a4f471ff-httpd-config\") pod \"neutron-557bdb6d7b-clvms\" (UID: \"97329021-2f95-4540-bb83-c816a4f471ff\") " pod="openstack/neutron-557bdb6d7b-clvms" Nov 23 15:00:52 crc kubenswrapper[5050]: I1123 15:00:52.214043 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-92cjn\" (UniqueName: \"kubernetes.io/projected/97329021-2f95-4540-bb83-c816a4f471ff-kube-api-access-92cjn\") pod \"neutron-557bdb6d7b-clvms\" (UID: \"97329021-2f95-4540-bb83-c816a4f471ff\") " pod="openstack/neutron-557bdb6d7b-clvms" Nov 23 15:00:52 crc kubenswrapper[5050]: I1123 15:00:52.214110 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/97329021-2f95-4540-bb83-c816a4f471ff-ovndb-tls-certs\") pod \"neutron-557bdb6d7b-clvms\" (UID: \"97329021-2f95-4540-bb83-c816a4f471ff\") " pod="openstack/neutron-557bdb6d7b-clvms" Nov 23 15:00:52 crc kubenswrapper[5050]: I1123 15:00:52.214149 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/97329021-2f95-4540-bb83-c816a4f471ff-combined-ca-bundle\") pod \"neutron-557bdb6d7b-clvms\" (UID: \"97329021-2f95-4540-bb83-c816a4f471ff\") " pod="openstack/neutron-557bdb6d7b-clvms" Nov 23 15:00:52 crc kubenswrapper[5050]: I1123 15:00:52.218636 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/97329021-2f95-4540-bb83-c816a4f471ff-config\") pod \"neutron-557bdb6d7b-clvms\" (UID: \"97329021-2f95-4540-bb83-c816a4f471ff\") " pod="openstack/neutron-557bdb6d7b-clvms" Nov 23 15:00:52 crc kubenswrapper[5050]: I1123 15:00:52.219403 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/97329021-2f95-4540-bb83-c816a4f471ff-httpd-config\") pod \"neutron-557bdb6d7b-clvms\" (UID: \"97329021-2f95-4540-bb83-c816a4f471ff\") " pod="openstack/neutron-557bdb6d7b-clvms" Nov 23 15:00:52 crc kubenswrapper[5050]: I1123 15:00:52.219911 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/97329021-2f95-4540-bb83-c816a4f471ff-ovndb-tls-certs\") pod \"neutron-557bdb6d7b-clvms\" (UID: \"97329021-2f95-4540-bb83-c816a4f471ff\") " 
pod="openstack/neutron-557bdb6d7b-clvms" Nov 23 15:00:52 crc kubenswrapper[5050]: I1123 15:00:52.225293 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/97329021-2f95-4540-bb83-c816a4f471ff-combined-ca-bundle\") pod \"neutron-557bdb6d7b-clvms\" (UID: \"97329021-2f95-4540-bb83-c816a4f471ff\") " pod="openstack/neutron-557bdb6d7b-clvms" Nov 23 15:00:52 crc kubenswrapper[5050]: I1123 15:00:52.240363 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-92cjn\" (UniqueName: \"kubernetes.io/projected/97329021-2f95-4540-bb83-c816a4f471ff-kube-api-access-92cjn\") pod \"neutron-557bdb6d7b-clvms\" (UID: \"97329021-2f95-4540-bb83-c816a4f471ff\") " pod="openstack/neutron-557bdb6d7b-clvms" Nov 23 15:00:52 crc kubenswrapper[5050]: I1123 15:00:52.363757 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-557bdb6d7b-clvms" Nov 23 15:00:52 crc kubenswrapper[5050]: I1123 15:00:52.670491 5050 generic.go:334] "Generic (PLEG): container finished" podID="3291f014-f18b-4142-a770-d9c33b141d66" containerID="413a63448c5f1b6aa0ed69c820b78045e358548763dd1334c1ea783960639530" exitCode=0 Nov 23 15:00:52 crc kubenswrapper[5050]: I1123 15:00:52.670555 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-j7p9t" event={"ID":"3291f014-f18b-4142-a770-d9c33b141d66","Type":"ContainerDied","Data":"413a63448c5f1b6aa0ed69c820b78045e358548763dd1334c1ea783960639530"} Nov 23 15:00:52 crc kubenswrapper[5050]: I1123 15:00:52.676521 5050 generic.go:334] "Generic (PLEG): container finished" podID="889f2b40-a1f4-45e2-b8e1-0fc72a7b55ca" containerID="ca03918f91f0d24416b97c1154f1427e4787e0717a61e93ae86f59f5dae6d896" exitCode=0 Nov 23 15:00:52 crc kubenswrapper[5050]: I1123 15:00:52.676613 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-zskrr" event={"ID":"889f2b40-a1f4-45e2-b8e1-0fc72a7b55ca","Type":"ContainerDied","Data":"ca03918f91f0d24416b97c1154f1427e4787e0717a61e93ae86f59f5dae6d896"} Nov 23 15:00:52 crc kubenswrapper[5050]: I1123 15:00:52.702480 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6b7b667979-dcnfh"] Nov 23 15:00:53 crc kubenswrapper[5050]: I1123 15:00:53.066483 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-557bdb6d7b-clvms"] Nov 23 15:00:53 crc kubenswrapper[5050]: I1123 15:00:53.688342 5050 generic.go:334] "Generic (PLEG): container finished" podID="f53cab34-8b0f-45cf-8ef9-a524aa3578b4" containerID="ff68a1c25ddcf47d18f14477af4201b7b01eca17df536e55e05e2a33a3ed79c8" exitCode=0 Nov 23 15:00:53 crc kubenswrapper[5050]: I1123 15:00:53.688413 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-ftmg2" event={"ID":"f53cab34-8b0f-45cf-8ef9-a524aa3578b4","Type":"ContainerDied","Data":"ff68a1c25ddcf47d18f14477af4201b7b01eca17df536e55e05e2a33a3ed79c8"} Nov 23 15:00:54 crc kubenswrapper[5050]: I1123 15:00:54.944929 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-7cb7995f89-k8h9t"] Nov 23 15:00:54 crc kubenswrapper[5050]: I1123 15:00:54.949097 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-7cb7995f89-k8h9t" Nov 23 15:00:54 crc kubenswrapper[5050]: I1123 15:00:54.951700 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-public-svc" Nov 23 15:00:54 crc kubenswrapper[5050]: I1123 15:00:54.952856 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-internal-svc" Nov 23 15:00:54 crc kubenswrapper[5050]: I1123 15:00:54.959054 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-7cb7995f89-k8h9t"] Nov 23 15:00:55 crc kubenswrapper[5050]: I1123 15:00:55.003012 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/1b3a1560-8762-47b9-8d1c-fe94eb46bec2-ovndb-tls-certs\") pod \"neutron-7cb7995f89-k8h9t\" (UID: \"1b3a1560-8762-47b9-8d1c-fe94eb46bec2\") " pod="openstack/neutron-7cb7995f89-k8h9t" Nov 23 15:00:55 crc kubenswrapper[5050]: I1123 15:00:55.003143 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2q76j\" (UniqueName: \"kubernetes.io/projected/1b3a1560-8762-47b9-8d1c-fe94eb46bec2-kube-api-access-2q76j\") pod \"neutron-7cb7995f89-k8h9t\" (UID: \"1b3a1560-8762-47b9-8d1c-fe94eb46bec2\") " pod="openstack/neutron-7cb7995f89-k8h9t" Nov 23 15:00:55 crc kubenswrapper[5050]: I1123 15:00:55.003225 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/1b3a1560-8762-47b9-8d1c-fe94eb46bec2-internal-tls-certs\") pod \"neutron-7cb7995f89-k8h9t\" (UID: \"1b3a1560-8762-47b9-8d1c-fe94eb46bec2\") " pod="openstack/neutron-7cb7995f89-k8h9t" Nov 23 15:00:55 crc kubenswrapper[5050]: I1123 15:00:55.003314 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/1b3a1560-8762-47b9-8d1c-fe94eb46bec2-public-tls-certs\") pod \"neutron-7cb7995f89-k8h9t\" (UID: \"1b3a1560-8762-47b9-8d1c-fe94eb46bec2\") " pod="openstack/neutron-7cb7995f89-k8h9t" Nov 23 15:00:55 crc kubenswrapper[5050]: I1123 15:00:55.003361 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/1b3a1560-8762-47b9-8d1c-fe94eb46bec2-config\") pod \"neutron-7cb7995f89-k8h9t\" (UID: \"1b3a1560-8762-47b9-8d1c-fe94eb46bec2\") " pod="openstack/neutron-7cb7995f89-k8h9t" Nov 23 15:00:55 crc kubenswrapper[5050]: I1123 15:00:55.003408 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/1b3a1560-8762-47b9-8d1c-fe94eb46bec2-httpd-config\") pod \"neutron-7cb7995f89-k8h9t\" (UID: \"1b3a1560-8762-47b9-8d1c-fe94eb46bec2\") " pod="openstack/neutron-7cb7995f89-k8h9t" Nov 23 15:00:55 crc kubenswrapper[5050]: I1123 15:00:55.003437 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1b3a1560-8762-47b9-8d1c-fe94eb46bec2-combined-ca-bundle\") pod \"neutron-7cb7995f89-k8h9t\" (UID: \"1b3a1560-8762-47b9-8d1c-fe94eb46bec2\") " pod="openstack/neutron-7cb7995f89-k8h9t" Nov 23 15:00:55 crc kubenswrapper[5050]: I1123 15:00:55.106052 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: 
\"kubernetes.io/secret/1b3a1560-8762-47b9-8d1c-fe94eb46bec2-httpd-config\") pod \"neutron-7cb7995f89-k8h9t\" (UID: \"1b3a1560-8762-47b9-8d1c-fe94eb46bec2\") " pod="openstack/neutron-7cb7995f89-k8h9t" Nov 23 15:00:55 crc kubenswrapper[5050]: I1123 15:00:55.106110 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1b3a1560-8762-47b9-8d1c-fe94eb46bec2-combined-ca-bundle\") pod \"neutron-7cb7995f89-k8h9t\" (UID: \"1b3a1560-8762-47b9-8d1c-fe94eb46bec2\") " pod="openstack/neutron-7cb7995f89-k8h9t" Nov 23 15:00:55 crc kubenswrapper[5050]: I1123 15:00:55.106166 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/1b3a1560-8762-47b9-8d1c-fe94eb46bec2-ovndb-tls-certs\") pod \"neutron-7cb7995f89-k8h9t\" (UID: \"1b3a1560-8762-47b9-8d1c-fe94eb46bec2\") " pod="openstack/neutron-7cb7995f89-k8h9t" Nov 23 15:00:55 crc kubenswrapper[5050]: I1123 15:00:55.106223 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2q76j\" (UniqueName: \"kubernetes.io/projected/1b3a1560-8762-47b9-8d1c-fe94eb46bec2-kube-api-access-2q76j\") pod \"neutron-7cb7995f89-k8h9t\" (UID: \"1b3a1560-8762-47b9-8d1c-fe94eb46bec2\") " pod="openstack/neutron-7cb7995f89-k8h9t" Nov 23 15:00:55 crc kubenswrapper[5050]: I1123 15:00:55.106268 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/1b3a1560-8762-47b9-8d1c-fe94eb46bec2-internal-tls-certs\") pod \"neutron-7cb7995f89-k8h9t\" (UID: \"1b3a1560-8762-47b9-8d1c-fe94eb46bec2\") " pod="openstack/neutron-7cb7995f89-k8h9t" Nov 23 15:00:55 crc kubenswrapper[5050]: I1123 15:00:55.106315 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/1b3a1560-8762-47b9-8d1c-fe94eb46bec2-public-tls-certs\") pod \"neutron-7cb7995f89-k8h9t\" (UID: \"1b3a1560-8762-47b9-8d1c-fe94eb46bec2\") " pod="openstack/neutron-7cb7995f89-k8h9t" Nov 23 15:00:55 crc kubenswrapper[5050]: I1123 15:00:55.106347 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/1b3a1560-8762-47b9-8d1c-fe94eb46bec2-config\") pod \"neutron-7cb7995f89-k8h9t\" (UID: \"1b3a1560-8762-47b9-8d1c-fe94eb46bec2\") " pod="openstack/neutron-7cb7995f89-k8h9t" Nov 23 15:00:55 crc kubenswrapper[5050]: I1123 15:00:55.114722 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/1b3a1560-8762-47b9-8d1c-fe94eb46bec2-httpd-config\") pod \"neutron-7cb7995f89-k8h9t\" (UID: \"1b3a1560-8762-47b9-8d1c-fe94eb46bec2\") " pod="openstack/neutron-7cb7995f89-k8h9t" Nov 23 15:00:55 crc kubenswrapper[5050]: I1123 15:00:55.114788 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1b3a1560-8762-47b9-8d1c-fe94eb46bec2-combined-ca-bundle\") pod \"neutron-7cb7995f89-k8h9t\" (UID: \"1b3a1560-8762-47b9-8d1c-fe94eb46bec2\") " pod="openstack/neutron-7cb7995f89-k8h9t" Nov 23 15:00:55 crc kubenswrapper[5050]: I1123 15:00:55.115831 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/1b3a1560-8762-47b9-8d1c-fe94eb46bec2-internal-tls-certs\") pod \"neutron-7cb7995f89-k8h9t\" (UID: 
\"1b3a1560-8762-47b9-8d1c-fe94eb46bec2\") " pod="openstack/neutron-7cb7995f89-k8h9t" Nov 23 15:00:55 crc kubenswrapper[5050]: I1123 15:00:55.116150 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/1b3a1560-8762-47b9-8d1c-fe94eb46bec2-public-tls-certs\") pod \"neutron-7cb7995f89-k8h9t\" (UID: \"1b3a1560-8762-47b9-8d1c-fe94eb46bec2\") " pod="openstack/neutron-7cb7995f89-k8h9t" Nov 23 15:00:55 crc kubenswrapper[5050]: I1123 15:00:55.118797 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/1b3a1560-8762-47b9-8d1c-fe94eb46bec2-ovndb-tls-certs\") pod \"neutron-7cb7995f89-k8h9t\" (UID: \"1b3a1560-8762-47b9-8d1c-fe94eb46bec2\") " pod="openstack/neutron-7cb7995f89-k8h9t" Nov 23 15:00:55 crc kubenswrapper[5050]: I1123 15:00:55.123718 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/1b3a1560-8762-47b9-8d1c-fe94eb46bec2-config\") pod \"neutron-7cb7995f89-k8h9t\" (UID: \"1b3a1560-8762-47b9-8d1c-fe94eb46bec2\") " pod="openstack/neutron-7cb7995f89-k8h9t" Nov 23 15:00:55 crc kubenswrapper[5050]: I1123 15:00:55.130356 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2q76j\" (UniqueName: \"kubernetes.io/projected/1b3a1560-8762-47b9-8d1c-fe94eb46bec2-kube-api-access-2q76j\") pod \"neutron-7cb7995f89-k8h9t\" (UID: \"1b3a1560-8762-47b9-8d1c-fe94eb46bec2\") " pod="openstack/neutron-7cb7995f89-k8h9t" Nov 23 15:00:55 crc kubenswrapper[5050]: I1123 15:00:55.273362 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-7cb7995f89-k8h9t" Nov 23 15:00:56 crc kubenswrapper[5050]: I1123 15:00:56.950742 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 23 15:00:56 crc kubenswrapper[5050]: I1123 15:00:56.952645 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 23 15:00:56 crc kubenswrapper[5050]: I1123 15:00:56.961959 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 23 15:00:56 crc kubenswrapper[5050]: I1123 15:00:56.962057 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 23 15:00:57 crc kubenswrapper[5050]: I1123 15:00:57.002467 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 23 15:00:57 crc kubenswrapper[5050]: I1123 15:00:57.017269 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 23 15:00:57 crc kubenswrapper[5050]: I1123 15:00:57.020428 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 23 15:00:57 crc kubenswrapper[5050]: I1123 15:00:57.035239 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 23 15:00:57 crc kubenswrapper[5050]: I1123 15:00:57.745509 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 23 15:00:57 crc kubenswrapper[5050]: I1123 15:00:57.745654 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openstack/glance-default-external-api-0" Nov 23 15:00:57 crc kubenswrapper[5050]: I1123 15:00:57.745874 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 23 15:00:57 crc kubenswrapper[5050]: I1123 15:00:57.745903 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 23 15:00:59 crc kubenswrapper[5050]: I1123 15:00:59.690805 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-j7p9t" Nov 23 15:00:59 crc kubenswrapper[5050]: I1123 15:00:59.779145 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6b7b667979-dcnfh" event={"ID":"57d4c7fd-cbe6-41c6-868f-de274707cd2d","Type":"ContainerStarted","Data":"0b0b1173994ba7c92a71d968fcccd41575744712d76743f8c76f9f27d38bce27"} Nov 23 15:00:59 crc kubenswrapper[5050]: I1123 15:00:59.783738 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-557bdb6d7b-clvms" event={"ID":"97329021-2f95-4540-bb83-c816a4f471ff","Type":"ContainerStarted","Data":"f25b57c626b90edfafa28139b38a3e1769e9234306bb7404152116f754ad2822"} Nov 23 15:00:59 crc kubenswrapper[5050]: I1123 15:00:59.785470 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-zskrr" Nov 23 15:00:59 crc kubenswrapper[5050]: I1123 15:00:59.797295 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-j7p9t" Nov 23 15:00:59 crc kubenswrapper[5050]: I1123 15:00:59.797893 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-j7p9t" event={"ID":"3291f014-f18b-4142-a770-d9c33b141d66","Type":"ContainerDied","Data":"0cc63ef349d09c6556e87b3ace86a609349c743b2081912477b2a5dc52b2b180"} Nov 23 15:00:59 crc kubenswrapper[5050]: I1123 15:00:59.797922 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0cc63ef349d09c6556e87b3ace86a609349c743b2081912477b2a5dc52b2b180" Nov 23 15:00:59 crc kubenswrapper[5050]: I1123 15:00:59.814979 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-zskrr" event={"ID":"889f2b40-a1f4-45e2-b8e1-0fc72a7b55ca","Type":"ContainerDied","Data":"8efc81fbd8cf18971e8925f965a2526433911195bb92cb82d7f3a89f0d60ab0f"} Nov 23 15:00:59 crc kubenswrapper[5050]: I1123 15:00:59.815085 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8efc81fbd8cf18971e8925f965a2526433911195bb92cb82d7f3a89f0d60ab0f" Nov 23 15:00:59 crc kubenswrapper[5050]: I1123 15:00:59.815196 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-zskrr" Nov 23 15:00:59 crc kubenswrapper[5050]: I1123 15:00:59.818581 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-ftmg2" event={"ID":"f53cab34-8b0f-45cf-8ef9-a524aa3578b4","Type":"ContainerDied","Data":"2e3293f7d092705d6004268dbed3216e6a4c27d39c7fcfe40dc811ba5ff5da2e"} Nov 23 15:00:59 crc kubenswrapper[5050]: I1123 15:00:59.818601 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2e3293f7d092705d6004268dbed3216e6a4c27d39c7fcfe40dc811ba5ff5da2e" Nov 23 15:00:59 crc kubenswrapper[5050]: I1123 15:00:59.824353 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3291f014-f18b-4142-a770-d9c33b141d66-combined-ca-bundle\") pod \"3291f014-f18b-4142-a770-d9c33b141d66\" (UID: \"3291f014-f18b-4142-a770-d9c33b141d66\") " Nov 23 15:00:59 crc kubenswrapper[5050]: I1123 15:00:59.824431 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3291f014-f18b-4142-a770-d9c33b141d66-config-data\") pod \"3291f014-f18b-4142-a770-d9c33b141d66\" (UID: \"3291f014-f18b-4142-a770-d9c33b141d66\") " Nov 23 15:00:59 crc kubenswrapper[5050]: I1123 15:00:59.824474 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3291f014-f18b-4142-a770-d9c33b141d66-scripts\") pod \"3291f014-f18b-4142-a770-d9c33b141d66\" (UID: \"3291f014-f18b-4142-a770-d9c33b141d66\") " Nov 23 15:00:59 crc kubenswrapper[5050]: I1123 15:00:59.824625 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2bvdb\" (UniqueName: \"kubernetes.io/projected/3291f014-f18b-4142-a770-d9c33b141d66-kube-api-access-2bvdb\") pod \"3291f014-f18b-4142-a770-d9c33b141d66\" (UID: \"3291f014-f18b-4142-a770-d9c33b141d66\") " Nov 23 15:00:59 crc kubenswrapper[5050]: I1123 15:00:59.824747 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3291f014-f18b-4142-a770-d9c33b141d66-logs\") pod \"3291f014-f18b-4142-a770-d9c33b141d66\" (UID: \"3291f014-f18b-4142-a770-d9c33b141d66\") " Nov 23 15:00:59 crc kubenswrapper[5050]: I1123 15:00:59.825737 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3291f014-f18b-4142-a770-d9c33b141d66-logs" (OuterVolumeSpecName: "logs") pod "3291f014-f18b-4142-a770-d9c33b141d66" (UID: "3291f014-f18b-4142-a770-d9c33b141d66"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 15:00:59 crc kubenswrapper[5050]: I1123 15:00:59.845120 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3291f014-f18b-4142-a770-d9c33b141d66-scripts" (OuterVolumeSpecName: "scripts") pod "3291f014-f18b-4142-a770-d9c33b141d66" (UID: "3291f014-f18b-4142-a770-d9c33b141d66"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:00:59 crc kubenswrapper[5050]: I1123 15:00:59.846646 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3291f014-f18b-4142-a770-d9c33b141d66-kube-api-access-2bvdb" (OuterVolumeSpecName: "kube-api-access-2bvdb") pod "3291f014-f18b-4142-a770-d9c33b141d66" (UID: "3291f014-f18b-4142-a770-d9c33b141d66"). 
InnerVolumeSpecName "kube-api-access-2bvdb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:00:59 crc kubenswrapper[5050]: I1123 15:00:59.854577 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-ftmg2" Nov 23 15:00:59 crc kubenswrapper[5050]: I1123 15:00:59.886886 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3291f014-f18b-4142-a770-d9c33b141d66-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3291f014-f18b-4142-a770-d9c33b141d66" (UID: "3291f014-f18b-4142-a770-d9c33b141d66"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:00:59 crc kubenswrapper[5050]: I1123 15:00:59.926162 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/889f2b40-a1f4-45e2-b8e1-0fc72a7b55ca-credential-keys\") pod \"889f2b40-a1f4-45e2-b8e1-0fc72a7b55ca\" (UID: \"889f2b40-a1f4-45e2-b8e1-0fc72a7b55ca\") " Nov 23 15:00:59 crc kubenswrapper[5050]: I1123 15:00:59.926592 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f53cab34-8b0f-45cf-8ef9-a524aa3578b4-combined-ca-bundle\") pod \"f53cab34-8b0f-45cf-8ef9-a524aa3578b4\" (UID: \"f53cab34-8b0f-45cf-8ef9-a524aa3578b4\") " Nov 23 15:00:59 crc kubenswrapper[5050]: I1123 15:00:59.926699 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gn47q\" (UniqueName: \"kubernetes.io/projected/889f2b40-a1f4-45e2-b8e1-0fc72a7b55ca-kube-api-access-gn47q\") pod \"889f2b40-a1f4-45e2-b8e1-0fc72a7b55ca\" (UID: \"889f2b40-a1f4-45e2-b8e1-0fc72a7b55ca\") " Nov 23 15:00:59 crc kubenswrapper[5050]: I1123 15:00:59.926767 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/889f2b40-a1f4-45e2-b8e1-0fc72a7b55ca-scripts\") pod \"889f2b40-a1f4-45e2-b8e1-0fc72a7b55ca\" (UID: \"889f2b40-a1f4-45e2-b8e1-0fc72a7b55ca\") " Nov 23 15:00:59 crc kubenswrapper[5050]: I1123 15:00:59.926788 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j4g9r\" (UniqueName: \"kubernetes.io/projected/f53cab34-8b0f-45cf-8ef9-a524aa3578b4-kube-api-access-j4g9r\") pod \"f53cab34-8b0f-45cf-8ef9-a524aa3578b4\" (UID: \"f53cab34-8b0f-45cf-8ef9-a524aa3578b4\") " Nov 23 15:00:59 crc kubenswrapper[5050]: I1123 15:00:59.926878 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/889f2b40-a1f4-45e2-b8e1-0fc72a7b55ca-combined-ca-bundle\") pod \"889f2b40-a1f4-45e2-b8e1-0fc72a7b55ca\" (UID: \"889f2b40-a1f4-45e2-b8e1-0fc72a7b55ca\") " Nov 23 15:00:59 crc kubenswrapper[5050]: I1123 15:00:59.926926 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/889f2b40-a1f4-45e2-b8e1-0fc72a7b55ca-config-data\") pod \"889f2b40-a1f4-45e2-b8e1-0fc72a7b55ca\" (UID: \"889f2b40-a1f4-45e2-b8e1-0fc72a7b55ca\") " Nov 23 15:00:59 crc kubenswrapper[5050]: I1123 15:00:59.927022 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/889f2b40-a1f4-45e2-b8e1-0fc72a7b55ca-fernet-keys\") pod \"889f2b40-a1f4-45e2-b8e1-0fc72a7b55ca\" (UID: 
\"889f2b40-a1f4-45e2-b8e1-0fc72a7b55ca\") " Nov 23 15:00:59 crc kubenswrapper[5050]: I1123 15:00:59.927068 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/f53cab34-8b0f-45cf-8ef9-a524aa3578b4-db-sync-config-data\") pod \"f53cab34-8b0f-45cf-8ef9-a524aa3578b4\" (UID: \"f53cab34-8b0f-45cf-8ef9-a524aa3578b4\") " Nov 23 15:00:59 crc kubenswrapper[5050]: I1123 15:00:59.927470 5050 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3291f014-f18b-4142-a770-d9c33b141d66-logs\") on node \"crc\" DevicePath \"\"" Nov 23 15:00:59 crc kubenswrapper[5050]: I1123 15:00:59.927493 5050 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3291f014-f18b-4142-a770-d9c33b141d66-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 15:00:59 crc kubenswrapper[5050]: I1123 15:00:59.927507 5050 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3291f014-f18b-4142-a770-d9c33b141d66-scripts\") on node \"crc\" DevicePath \"\"" Nov 23 15:00:59 crc kubenswrapper[5050]: I1123 15:00:59.927517 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2bvdb\" (UniqueName: \"kubernetes.io/projected/3291f014-f18b-4142-a770-d9c33b141d66-kube-api-access-2bvdb\") on node \"crc\" DevicePath \"\"" Nov 23 15:00:59 crc kubenswrapper[5050]: I1123 15:00:59.936913 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/889f2b40-a1f4-45e2-b8e1-0fc72a7b55ca-scripts" (OuterVolumeSpecName: "scripts") pod "889f2b40-a1f4-45e2-b8e1-0fc72a7b55ca" (UID: "889f2b40-a1f4-45e2-b8e1-0fc72a7b55ca"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:00:59 crc kubenswrapper[5050]: I1123 15:00:59.953782 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3291f014-f18b-4142-a770-d9c33b141d66-config-data" (OuterVolumeSpecName: "config-data") pod "3291f014-f18b-4142-a770-d9c33b141d66" (UID: "3291f014-f18b-4142-a770-d9c33b141d66"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:00:59 crc kubenswrapper[5050]: I1123 15:00:59.953879 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f53cab34-8b0f-45cf-8ef9-a524aa3578b4-kube-api-access-j4g9r" (OuterVolumeSpecName: "kube-api-access-j4g9r") pod "f53cab34-8b0f-45cf-8ef9-a524aa3578b4" (UID: "f53cab34-8b0f-45cf-8ef9-a524aa3578b4"). InnerVolumeSpecName "kube-api-access-j4g9r". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:00:59 crc kubenswrapper[5050]: I1123 15:00:59.954361 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/889f2b40-a1f4-45e2-b8e1-0fc72a7b55ca-kube-api-access-gn47q" (OuterVolumeSpecName: "kube-api-access-gn47q") pod "889f2b40-a1f4-45e2-b8e1-0fc72a7b55ca" (UID: "889f2b40-a1f4-45e2-b8e1-0fc72a7b55ca"). InnerVolumeSpecName "kube-api-access-gn47q". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:00:59 crc kubenswrapper[5050]: I1123 15:00:59.955380 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/889f2b40-a1f4-45e2-b8e1-0fc72a7b55ca-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "889f2b40-a1f4-45e2-b8e1-0fc72a7b55ca" (UID: "889f2b40-a1f4-45e2-b8e1-0fc72a7b55ca"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:00:59 crc kubenswrapper[5050]: I1123 15:00:59.961818 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f53cab34-8b0f-45cf-8ef9-a524aa3578b4-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "f53cab34-8b0f-45cf-8ef9-a524aa3578b4" (UID: "f53cab34-8b0f-45cf-8ef9-a524aa3578b4"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:00:59 crc kubenswrapper[5050]: I1123 15:00:59.961944 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/889f2b40-a1f4-45e2-b8e1-0fc72a7b55ca-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "889f2b40-a1f4-45e2-b8e1-0fc72a7b55ca" (UID: "889f2b40-a1f4-45e2-b8e1-0fc72a7b55ca"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:00:59 crc kubenswrapper[5050]: I1123 15:00:59.991623 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/889f2b40-a1f4-45e2-b8e1-0fc72a7b55ca-config-data" (OuterVolumeSpecName: "config-data") pod "889f2b40-a1f4-45e2-b8e1-0fc72a7b55ca" (UID: "889f2b40-a1f4-45e2-b8e1-0fc72a7b55ca"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:01:00 crc kubenswrapper[5050]: I1123 15:01:00.000077 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/889f2b40-a1f4-45e2-b8e1-0fc72a7b55ca-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "889f2b40-a1f4-45e2-b8e1-0fc72a7b55ca" (UID: "889f2b40-a1f4-45e2-b8e1-0fc72a7b55ca"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:01:00 crc kubenswrapper[5050]: I1123 15:01:00.003076 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f53cab34-8b0f-45cf-8ef9-a524aa3578b4-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f53cab34-8b0f-45cf-8ef9-a524aa3578b4" (UID: "f53cab34-8b0f-45cf-8ef9-a524aa3578b4"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:01:00 crc kubenswrapper[5050]: I1123 15:01:00.028883 5050 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/889f2b40-a1f4-45e2-b8e1-0fc72a7b55ca-config-data\") on node \"crc\" DevicePath \"\"" Nov 23 15:01:00 crc kubenswrapper[5050]: I1123 15:01:00.028911 5050 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/889f2b40-a1f4-45e2-b8e1-0fc72a7b55ca-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 23 15:01:00 crc kubenswrapper[5050]: I1123 15:01:00.028921 5050 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/f53cab34-8b0f-45cf-8ef9-a524aa3578b4-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 23 15:01:00 crc kubenswrapper[5050]: I1123 15:01:00.028934 5050 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/889f2b40-a1f4-45e2-b8e1-0fc72a7b55ca-credential-keys\") on node \"crc\" DevicePath \"\"" Nov 23 15:01:00 crc kubenswrapper[5050]: I1123 15:01:00.028943 5050 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f53cab34-8b0f-45cf-8ef9-a524aa3578b4-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 15:01:00 crc kubenswrapper[5050]: I1123 15:01:00.028965 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gn47q\" (UniqueName: \"kubernetes.io/projected/889f2b40-a1f4-45e2-b8e1-0fc72a7b55ca-kube-api-access-gn47q\") on node \"crc\" DevicePath \"\"" Nov 23 15:01:00 crc kubenswrapper[5050]: I1123 15:01:00.028977 5050 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/889f2b40-a1f4-45e2-b8e1-0fc72a7b55ca-scripts\") on node \"crc\" DevicePath \"\"" Nov 23 15:01:00 crc kubenswrapper[5050]: I1123 15:01:00.028986 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j4g9r\" (UniqueName: \"kubernetes.io/projected/f53cab34-8b0f-45cf-8ef9-a524aa3578b4-kube-api-access-j4g9r\") on node \"crc\" DevicePath \"\"" Nov 23 15:01:00 crc kubenswrapper[5050]: I1123 15:01:00.029025 5050 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3291f014-f18b-4142-a770-d9c33b141d66-config-data\") on node \"crc\" DevicePath \"\"" Nov 23 15:01:00 crc kubenswrapper[5050]: I1123 15:01:00.029035 5050 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/889f2b40-a1f4-45e2-b8e1-0fc72a7b55ca-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 15:01:00 crc kubenswrapper[5050]: I1123 15:01:00.041499 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 23 15:01:00 crc kubenswrapper[5050]: I1123 15:01:00.041605 5050 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 23 15:01:00 crc kubenswrapper[5050]: I1123 15:01:00.048928 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 23 15:01:00 crc kubenswrapper[5050]: I1123 15:01:00.133959 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 23 15:01:00 crc kubenswrapper[5050]: I1123 15:01:00.134048 5050 prober_manager.go:312] "Failed to trigger a manual run" 
probe="Readiness" Nov 23 15:01:00 crc kubenswrapper[5050]: I1123 15:01:00.249473 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 23 15:01:00 crc kubenswrapper[5050]: I1123 15:01:00.264475 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-7cb7995f89-k8h9t"] Nov 23 15:01:00 crc kubenswrapper[5050]: I1123 15:01:00.816333 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-748fb7ccf6-s9qnq"] Nov 23 15:01:00 crc kubenswrapper[5050]: E1123 15:01:00.816864 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3291f014-f18b-4142-a770-d9c33b141d66" containerName="placement-db-sync" Nov 23 15:01:00 crc kubenswrapper[5050]: I1123 15:01:00.816880 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="3291f014-f18b-4142-a770-d9c33b141d66" containerName="placement-db-sync" Nov 23 15:01:00 crc kubenswrapper[5050]: E1123 15:01:00.816906 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f53cab34-8b0f-45cf-8ef9-a524aa3578b4" containerName="barbican-db-sync" Nov 23 15:01:00 crc kubenswrapper[5050]: I1123 15:01:00.816912 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="f53cab34-8b0f-45cf-8ef9-a524aa3578b4" containerName="barbican-db-sync" Nov 23 15:01:00 crc kubenswrapper[5050]: E1123 15:01:00.816926 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="889f2b40-a1f4-45e2-b8e1-0fc72a7b55ca" containerName="keystone-bootstrap" Nov 23 15:01:00 crc kubenswrapper[5050]: I1123 15:01:00.816934 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="889f2b40-a1f4-45e2-b8e1-0fc72a7b55ca" containerName="keystone-bootstrap" Nov 23 15:01:00 crc kubenswrapper[5050]: I1123 15:01:00.823108 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="3291f014-f18b-4142-a770-d9c33b141d66" containerName="placement-db-sync" Nov 23 15:01:00 crc kubenswrapper[5050]: I1123 15:01:00.823179 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="889f2b40-a1f4-45e2-b8e1-0fc72a7b55ca" containerName="keystone-bootstrap" Nov 23 15:01:00 crc kubenswrapper[5050]: I1123 15:01:00.823196 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="f53cab34-8b0f-45cf-8ef9-a524aa3578b4" containerName="barbican-db-sync" Nov 23 15:01:00 crc kubenswrapper[5050]: I1123 15:01:00.824591 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-748fb7ccf6-s9qnq" Nov 23 15:01:00 crc kubenswrapper[5050]: I1123 15:01:00.831088 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Nov 23 15:01:00 crc kubenswrapper[5050]: I1123 15:01:00.831406 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-internal-svc" Nov 23 15:01:00 crc kubenswrapper[5050]: I1123 15:01:00.831592 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-k8v8m" Nov 23 15:01:00 crc kubenswrapper[5050]: I1123 15:01:00.831709 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Nov 23 15:01:00 crc kubenswrapper[5050]: I1123 15:01:00.831909 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-748fb7ccf6-s9qnq"] Nov 23 15:01:00 crc kubenswrapper[5050]: I1123 15:01:00.843393 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-public-svc" Nov 23 15:01:00 crc kubenswrapper[5050]: I1123 15:01:00.872582 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7cb7995f89-k8h9t" event={"ID":"1b3a1560-8762-47b9-8d1c-fe94eb46bec2","Type":"ContainerStarted","Data":"c8744bf42aa8122e20bb79dbbcfbe437abb2154864233d844f2967643789a84f"} Nov 23 15:01:00 crc kubenswrapper[5050]: I1123 15:01:00.872631 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7cb7995f89-k8h9t" event={"ID":"1b3a1560-8762-47b9-8d1c-fe94eb46bec2","Type":"ContainerStarted","Data":"382a3ec48599cb331a7724a9e7f942e6c4119e74d5e1b0252f6209ec9449b088"} Nov 23 15:01:00 crc kubenswrapper[5050]: I1123 15:01:00.877234 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-557bdb6d7b-clvms" event={"ID":"97329021-2f95-4540-bb83-c816a4f471ff","Type":"ContainerStarted","Data":"f2861d7bc955e3ef8bd52b3ed3c192dfa3077057bcd368c4878b1882a30e8b48"} Nov 23 15:01:00 crc kubenswrapper[5050]: I1123 15:01:00.877369 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-557bdb6d7b-clvms" event={"ID":"97329021-2f95-4540-bb83-c816a4f471ff","Type":"ContainerStarted","Data":"cbd55eeb58dd0470a584ce250f5cc0e0004303738408fea9238e70cbbbe00b8d"} Nov 23 15:01:00 crc kubenswrapper[5050]: I1123 15:01:00.878592 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-557bdb6d7b-clvms" Nov 23 15:01:00 crc kubenswrapper[5050]: I1123 15:01:00.885840 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"940dffda-38d6-469c-8453-5acb4092ed82","Type":"ContainerStarted","Data":"2924621750b1bc5af5085e463c9c282ad0f1c14251a8c0075f4ceadef71b73a4"} Nov 23 15:01:00 crc kubenswrapper[5050]: I1123 15:01:00.887267 5050 generic.go:334] "Generic (PLEG): container finished" podID="57d4c7fd-cbe6-41c6-868f-de274707cd2d" containerID="fba6eff7da7c8f9e02497e5406a09a149bf0183ad444a9f1ab67be209f5ce877" exitCode=0 Nov 23 15:01:00 crc kubenswrapper[5050]: I1123 15:01:00.888706 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6b7b667979-dcnfh" event={"ID":"57d4c7fd-cbe6-41c6-868f-de274707cd2d","Type":"ContainerDied","Data":"fba6eff7da7c8f9e02497e5406a09a149bf0183ad444a9f1ab67be209f5ce877"} Nov 23 15:01:00 crc kubenswrapper[5050]: I1123 15:01:00.889216 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-ftmg2" Nov 23 15:01:00 crc kubenswrapper[5050]: I1123 15:01:00.950071 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/add2a2e2-5553-4e43-8ddd-b8719949d531-public-tls-certs\") pod \"placement-748fb7ccf6-s9qnq\" (UID: \"add2a2e2-5553-4e43-8ddd-b8719949d531\") " pod="openstack/placement-748fb7ccf6-s9qnq" Nov 23 15:01:00 crc kubenswrapper[5050]: I1123 15:01:00.950315 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-srcwm\" (UniqueName: \"kubernetes.io/projected/add2a2e2-5553-4e43-8ddd-b8719949d531-kube-api-access-srcwm\") pod \"placement-748fb7ccf6-s9qnq\" (UID: \"add2a2e2-5553-4e43-8ddd-b8719949d531\") " pod="openstack/placement-748fb7ccf6-s9qnq" Nov 23 15:01:00 crc kubenswrapper[5050]: I1123 15:01:00.950342 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/add2a2e2-5553-4e43-8ddd-b8719949d531-combined-ca-bundle\") pod \"placement-748fb7ccf6-s9qnq\" (UID: \"add2a2e2-5553-4e43-8ddd-b8719949d531\") " pod="openstack/placement-748fb7ccf6-s9qnq" Nov 23 15:01:00 crc kubenswrapper[5050]: I1123 15:01:00.950362 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/add2a2e2-5553-4e43-8ddd-b8719949d531-internal-tls-certs\") pod \"placement-748fb7ccf6-s9qnq\" (UID: \"add2a2e2-5553-4e43-8ddd-b8719949d531\") " pod="openstack/placement-748fb7ccf6-s9qnq" Nov 23 15:01:00 crc kubenswrapper[5050]: I1123 15:01:00.950407 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/add2a2e2-5553-4e43-8ddd-b8719949d531-scripts\") pod \"placement-748fb7ccf6-s9qnq\" (UID: \"add2a2e2-5553-4e43-8ddd-b8719949d531\") " pod="openstack/placement-748fb7ccf6-s9qnq" Nov 23 15:01:00 crc kubenswrapper[5050]: I1123 15:01:00.950427 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/add2a2e2-5553-4e43-8ddd-b8719949d531-logs\") pod \"placement-748fb7ccf6-s9qnq\" (UID: \"add2a2e2-5553-4e43-8ddd-b8719949d531\") " pod="openstack/placement-748fb7ccf6-s9qnq" Nov 23 15:01:00 crc kubenswrapper[5050]: I1123 15:01:00.950477 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/add2a2e2-5553-4e43-8ddd-b8719949d531-config-data\") pod \"placement-748fb7ccf6-s9qnq\" (UID: \"add2a2e2-5553-4e43-8ddd-b8719949d531\") " pod="openstack/placement-748fb7ccf6-s9qnq" Nov 23 15:01:00 crc kubenswrapper[5050]: I1123 15:01:00.951968 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-ffbf89f58-t9kvk"] Nov 23 15:01:00 crc kubenswrapper[5050]: I1123 15:01:00.958123 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-ffbf89f58-t9kvk" Nov 23 15:01:00 crc kubenswrapper[5050]: I1123 15:01:00.964161 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 23 15:01:00 crc kubenswrapper[5050]: I1123 15:01:00.964493 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-hkfk8" Nov 23 15:01:00 crc kubenswrapper[5050]: I1123 15:01:00.964654 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-public-svc" Nov 23 15:01:00 crc kubenswrapper[5050]: I1123 15:01:00.964682 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-internal-svc" Nov 23 15:01:00 crc kubenswrapper[5050]: I1123 15:01:00.964771 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 23 15:01:00 crc kubenswrapper[5050]: I1123 15:01:00.964965 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 23 15:01:00 crc kubenswrapper[5050]: I1123 15:01:00.988914 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-557bdb6d7b-clvms" podStartSLOduration=9.988886729 podStartE2EDuration="9.988886729s" podCreationTimestamp="2025-11-23 15:00:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 15:01:00.917844601 +0000 UTC m=+1156.084841086" watchObservedRunningTime="2025-11-23 15:01:00.988886729 +0000 UTC m=+1156.155883214" Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.007736 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-ffbf89f58-t9kvk"] Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.056686 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-srcwm\" (UniqueName: \"kubernetes.io/projected/add2a2e2-5553-4e43-8ddd-b8719949d531-kube-api-access-srcwm\") pod \"placement-748fb7ccf6-s9qnq\" (UID: \"add2a2e2-5553-4e43-8ddd-b8719949d531\") " pod="openstack/placement-748fb7ccf6-s9qnq" Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.056736 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/add2a2e2-5553-4e43-8ddd-b8719949d531-combined-ca-bundle\") pod \"placement-748fb7ccf6-s9qnq\" (UID: \"add2a2e2-5553-4e43-8ddd-b8719949d531\") " pod="openstack/placement-748fb7ccf6-s9qnq" Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.056764 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/add2a2e2-5553-4e43-8ddd-b8719949d531-internal-tls-certs\") pod \"placement-748fb7ccf6-s9qnq\" (UID: \"add2a2e2-5553-4e43-8ddd-b8719949d531\") " pod="openstack/placement-748fb7ccf6-s9qnq" Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.058580 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/add2a2e2-5553-4e43-8ddd-b8719949d531-scripts\") pod \"placement-748fb7ccf6-s9qnq\" (UID: \"add2a2e2-5553-4e43-8ddd-b8719949d531\") " pod="openstack/placement-748fb7ccf6-s9qnq" Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.058676 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: 
\"kubernetes.io/empty-dir/add2a2e2-5553-4e43-8ddd-b8719949d531-logs\") pod \"placement-748fb7ccf6-s9qnq\" (UID: \"add2a2e2-5553-4e43-8ddd-b8719949d531\") " pod="openstack/placement-748fb7ccf6-s9qnq" Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.058741 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/add2a2e2-5553-4e43-8ddd-b8719949d531-config-data\") pod \"placement-748fb7ccf6-s9qnq\" (UID: \"add2a2e2-5553-4e43-8ddd-b8719949d531\") " pod="openstack/placement-748fb7ccf6-s9qnq" Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.058872 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/add2a2e2-5553-4e43-8ddd-b8719949d531-public-tls-certs\") pod \"placement-748fb7ccf6-s9qnq\" (UID: \"add2a2e2-5553-4e43-8ddd-b8719949d531\") " pod="openstack/placement-748fb7ccf6-s9qnq" Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.066558 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/add2a2e2-5553-4e43-8ddd-b8719949d531-logs\") pod \"placement-748fb7ccf6-s9qnq\" (UID: \"add2a2e2-5553-4e43-8ddd-b8719949d531\") " pod="openstack/placement-748fb7ccf6-s9qnq" Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.078315 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/add2a2e2-5553-4e43-8ddd-b8719949d531-internal-tls-certs\") pod \"placement-748fb7ccf6-s9qnq\" (UID: \"add2a2e2-5553-4e43-8ddd-b8719949d531\") " pod="openstack/placement-748fb7ccf6-s9qnq" Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.080324 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/add2a2e2-5553-4e43-8ddd-b8719949d531-scripts\") pod \"placement-748fb7ccf6-s9qnq\" (UID: \"add2a2e2-5553-4e43-8ddd-b8719949d531\") " pod="openstack/placement-748fb7ccf6-s9qnq" Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.082888 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/add2a2e2-5553-4e43-8ddd-b8719949d531-config-data\") pod \"placement-748fb7ccf6-s9qnq\" (UID: \"add2a2e2-5553-4e43-8ddd-b8719949d531\") " pod="openstack/placement-748fb7ccf6-s9qnq" Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.109987 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/add2a2e2-5553-4e43-8ddd-b8719949d531-combined-ca-bundle\") pod \"placement-748fb7ccf6-s9qnq\" (UID: \"add2a2e2-5553-4e43-8ddd-b8719949d531\") " pod="openstack/placement-748fb7ccf6-s9qnq" Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.129176 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/add2a2e2-5553-4e43-8ddd-b8719949d531-public-tls-certs\") pod \"placement-748fb7ccf6-s9qnq\" (UID: \"add2a2e2-5553-4e43-8ddd-b8719949d531\") " pod="openstack/placement-748fb7ccf6-s9qnq" Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.140385 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-keystone-listener-586bfdfdd8-99vrd"] Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.142482 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-keystone-listener-586bfdfdd8-99vrd" Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.147486 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-hljng" Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.148776 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.148902 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-keystone-listener-config-data" Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.159536 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-srcwm\" (UniqueName: \"kubernetes.io/projected/add2a2e2-5553-4e43-8ddd-b8719949d531-kube-api-access-srcwm\") pod \"placement-748fb7ccf6-s9qnq\" (UID: \"add2a2e2-5553-4e43-8ddd-b8719949d531\") " pod="openstack/placement-748fb7ccf6-s9qnq" Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.160349 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e3fbc320-b4b0-47f2-9fc6-a6efc63a3ee9-scripts\") pod \"keystone-ffbf89f58-t9kvk\" (UID: \"e3fbc320-b4b0-47f2-9fc6-a6efc63a3ee9\") " pod="openstack/keystone-ffbf89f58-t9kvk" Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.160376 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e3fbc320-b4b0-47f2-9fc6-a6efc63a3ee9-public-tls-certs\") pod \"keystone-ffbf89f58-t9kvk\" (UID: \"e3fbc320-b4b0-47f2-9fc6-a6efc63a3ee9\") " pod="openstack/keystone-ffbf89f58-t9kvk" Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.160404 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e3fbc320-b4b0-47f2-9fc6-a6efc63a3ee9-combined-ca-bundle\") pod \"keystone-ffbf89f58-t9kvk\" (UID: \"e3fbc320-b4b0-47f2-9fc6-a6efc63a3ee9\") " pod="openstack/keystone-ffbf89f58-t9kvk" Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.160429 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e3fbc320-b4b0-47f2-9fc6-a6efc63a3ee9-internal-tls-certs\") pod \"keystone-ffbf89f58-t9kvk\" (UID: \"e3fbc320-b4b0-47f2-9fc6-a6efc63a3ee9\") " pod="openstack/keystone-ffbf89f58-t9kvk" Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.160485 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/e3fbc320-b4b0-47f2-9fc6-a6efc63a3ee9-fernet-keys\") pod \"keystone-ffbf89f58-t9kvk\" (UID: \"e3fbc320-b4b0-47f2-9fc6-a6efc63a3ee9\") " pod="openstack/keystone-ffbf89f58-t9kvk" Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.160550 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/e3fbc320-b4b0-47f2-9fc6-a6efc63a3ee9-credential-keys\") pod \"keystone-ffbf89f58-t9kvk\" (UID: \"e3fbc320-b4b0-47f2-9fc6-a6efc63a3ee9\") " pod="openstack/keystone-ffbf89f58-t9kvk" Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.160577 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"kube-api-access-p4p5t\" (UniqueName: \"kubernetes.io/projected/e3fbc320-b4b0-47f2-9fc6-a6efc63a3ee9-kube-api-access-p4p5t\") pod \"keystone-ffbf89f58-t9kvk\" (UID: \"e3fbc320-b4b0-47f2-9fc6-a6efc63a3ee9\") " pod="openstack/keystone-ffbf89f58-t9kvk" Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.160599 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e3fbc320-b4b0-47f2-9fc6-a6efc63a3ee9-config-data\") pod \"keystone-ffbf89f58-t9kvk\" (UID: \"e3fbc320-b4b0-47f2-9fc6-a6efc63a3ee9\") " pod="openstack/keystone-ffbf89f58-t9kvk" Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.173323 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-worker-864fb4756c-xzxnp"] Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.175142 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-864fb4756c-xzxnp" Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.185939 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-worker-config-data" Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.223990 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-586bfdfdd8-99vrd"] Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.264875 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-864fb4756c-xzxnp"] Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.269244 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/e3fbc320-b4b0-47f2-9fc6-a6efc63a3ee9-fernet-keys\") pod \"keystone-ffbf89f58-t9kvk\" (UID: \"e3fbc320-b4b0-47f2-9fc6-a6efc63a3ee9\") " pod="openstack/keystone-ffbf89f58-t9kvk" Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.269501 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-87nhx\" (UniqueName: \"kubernetes.io/projected/9d53c4b5-d0a1-4191-9239-8c7b6806f860-kube-api-access-87nhx\") pod \"barbican-keystone-listener-586bfdfdd8-99vrd\" (UID: \"9d53c4b5-d0a1-4191-9239-8c7b6806f860\") " pod="openstack/barbican-keystone-listener-586bfdfdd8-99vrd" Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.269630 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9d53c4b5-d0a1-4191-9239-8c7b6806f860-config-data\") pod \"barbican-keystone-listener-586bfdfdd8-99vrd\" (UID: \"9d53c4b5-d0a1-4191-9239-8c7b6806f860\") " pod="openstack/barbican-keystone-listener-586bfdfdd8-99vrd" Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.269779 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9d53c4b5-d0a1-4191-9239-8c7b6806f860-config-data-custom\") pod \"barbican-keystone-listener-586bfdfdd8-99vrd\" (UID: \"9d53c4b5-d0a1-4191-9239-8c7b6806f860\") " pod="openstack/barbican-keystone-listener-586bfdfdd8-99vrd" Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.269841 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9d53c4b5-d0a1-4191-9239-8c7b6806f860-logs\") pod \"barbican-keystone-listener-586bfdfdd8-99vrd\" (UID: 
\"9d53c4b5-d0a1-4191-9239-8c7b6806f860\") " pod="openstack/barbican-keystone-listener-586bfdfdd8-99vrd" Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.269933 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/e3fbc320-b4b0-47f2-9fc6-a6efc63a3ee9-credential-keys\") pod \"keystone-ffbf89f58-t9kvk\" (UID: \"e3fbc320-b4b0-47f2-9fc6-a6efc63a3ee9\") " pod="openstack/keystone-ffbf89f58-t9kvk" Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.270828 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p4p5t\" (UniqueName: \"kubernetes.io/projected/e3fbc320-b4b0-47f2-9fc6-a6efc63a3ee9-kube-api-access-p4p5t\") pod \"keystone-ffbf89f58-t9kvk\" (UID: \"e3fbc320-b4b0-47f2-9fc6-a6efc63a3ee9\") " pod="openstack/keystone-ffbf89f58-t9kvk" Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.270941 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e3fbc320-b4b0-47f2-9fc6-a6efc63a3ee9-config-data\") pod \"keystone-ffbf89f58-t9kvk\" (UID: \"e3fbc320-b4b0-47f2-9fc6-a6efc63a3ee9\") " pod="openstack/keystone-ffbf89f58-t9kvk" Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.271080 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e3fbc320-b4b0-47f2-9fc6-a6efc63a3ee9-scripts\") pod \"keystone-ffbf89f58-t9kvk\" (UID: \"e3fbc320-b4b0-47f2-9fc6-a6efc63a3ee9\") " pod="openstack/keystone-ffbf89f58-t9kvk" Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.271148 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e3fbc320-b4b0-47f2-9fc6-a6efc63a3ee9-public-tls-certs\") pod \"keystone-ffbf89f58-t9kvk\" (UID: \"e3fbc320-b4b0-47f2-9fc6-a6efc63a3ee9\") " pod="openstack/keystone-ffbf89f58-t9kvk" Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.271219 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e3fbc320-b4b0-47f2-9fc6-a6efc63a3ee9-combined-ca-bundle\") pod \"keystone-ffbf89f58-t9kvk\" (UID: \"e3fbc320-b4b0-47f2-9fc6-a6efc63a3ee9\") " pod="openstack/keystone-ffbf89f58-t9kvk" Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.271301 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e3fbc320-b4b0-47f2-9fc6-a6efc63a3ee9-internal-tls-certs\") pod \"keystone-ffbf89f58-t9kvk\" (UID: \"e3fbc320-b4b0-47f2-9fc6-a6efc63a3ee9\") " pod="openstack/keystone-ffbf89f58-t9kvk" Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.271378 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d53c4b5-d0a1-4191-9239-8c7b6806f860-combined-ca-bundle\") pod \"barbican-keystone-listener-586bfdfdd8-99vrd\" (UID: \"9d53c4b5-d0a1-4191-9239-8c7b6806f860\") " pod="openstack/barbican-keystone-listener-586bfdfdd8-99vrd" Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.289889 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/e3fbc320-b4b0-47f2-9fc6-a6efc63a3ee9-credential-keys\") pod \"keystone-ffbf89f58-t9kvk\" (UID: \"e3fbc320-b4b0-47f2-9fc6-a6efc63a3ee9\") " 
pod="openstack/keystone-ffbf89f58-t9kvk" Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.295340 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e3fbc320-b4b0-47f2-9fc6-a6efc63a3ee9-scripts\") pod \"keystone-ffbf89f58-t9kvk\" (UID: \"e3fbc320-b4b0-47f2-9fc6-a6efc63a3ee9\") " pod="openstack/keystone-ffbf89f58-t9kvk" Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.295438 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/e3fbc320-b4b0-47f2-9fc6-a6efc63a3ee9-fernet-keys\") pod \"keystone-ffbf89f58-t9kvk\" (UID: \"e3fbc320-b4b0-47f2-9fc6-a6efc63a3ee9\") " pod="openstack/keystone-ffbf89f58-t9kvk" Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.296727 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e3fbc320-b4b0-47f2-9fc6-a6efc63a3ee9-internal-tls-certs\") pod \"keystone-ffbf89f58-t9kvk\" (UID: \"e3fbc320-b4b0-47f2-9fc6-a6efc63a3ee9\") " pod="openstack/keystone-ffbf89f58-t9kvk" Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.300663 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p4p5t\" (UniqueName: \"kubernetes.io/projected/e3fbc320-b4b0-47f2-9fc6-a6efc63a3ee9-kube-api-access-p4p5t\") pod \"keystone-ffbf89f58-t9kvk\" (UID: \"e3fbc320-b4b0-47f2-9fc6-a6efc63a3ee9\") " pod="openstack/keystone-ffbf89f58-t9kvk" Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.305136 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e3fbc320-b4b0-47f2-9fc6-a6efc63a3ee9-combined-ca-bundle\") pod \"keystone-ffbf89f58-t9kvk\" (UID: \"e3fbc320-b4b0-47f2-9fc6-a6efc63a3ee9\") " pod="openstack/keystone-ffbf89f58-t9kvk" Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.307750 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e3fbc320-b4b0-47f2-9fc6-a6efc63a3ee9-config-data\") pod \"keystone-ffbf89f58-t9kvk\" (UID: \"e3fbc320-b4b0-47f2-9fc6-a6efc63a3ee9\") " pod="openstack/keystone-ffbf89f58-t9kvk" Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.308173 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e3fbc320-b4b0-47f2-9fc6-a6efc63a3ee9-public-tls-certs\") pod \"keystone-ffbf89f58-t9kvk\" (UID: \"e3fbc320-b4b0-47f2-9fc6-a6efc63a3ee9\") " pod="openstack/keystone-ffbf89f58-t9kvk" Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.347636 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6b7b667979-dcnfh"] Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.354731 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-848cf88cfc-jd96g"] Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.359593 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-848cf88cfc-jd96g" Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.361206 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-ffbf89f58-t9kvk" Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.363596 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-848cf88cfc-jd96g"] Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.373486 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/042c506a-b65f-4cbd-9ca7-3df0ec55ffa0-logs\") pod \"barbican-worker-864fb4756c-xzxnp\" (UID: \"042c506a-b65f-4cbd-9ca7-3df0ec55ffa0\") " pod="openstack/barbican-worker-864fb4756c-xzxnp" Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.373574 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/042c506a-b65f-4cbd-9ca7-3df0ec55ffa0-config-data-custom\") pod \"barbican-worker-864fb4756c-xzxnp\" (UID: \"042c506a-b65f-4cbd-9ca7-3df0ec55ffa0\") " pod="openstack/barbican-worker-864fb4756c-xzxnp" Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.373619 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d53c4b5-d0a1-4191-9239-8c7b6806f860-combined-ca-bundle\") pod \"barbican-keystone-listener-586bfdfdd8-99vrd\" (UID: \"9d53c4b5-d0a1-4191-9239-8c7b6806f860\") " pod="openstack/barbican-keystone-listener-586bfdfdd8-99vrd" Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.373642 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/042c506a-b65f-4cbd-9ca7-3df0ec55ffa0-config-data\") pod \"barbican-worker-864fb4756c-xzxnp\" (UID: \"042c506a-b65f-4cbd-9ca7-3df0ec55ffa0\") " pod="openstack/barbican-worker-864fb4756c-xzxnp" Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.373684 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-87nhx\" (UniqueName: \"kubernetes.io/projected/9d53c4b5-d0a1-4191-9239-8c7b6806f860-kube-api-access-87nhx\") pod \"barbican-keystone-listener-586bfdfdd8-99vrd\" (UID: \"9d53c4b5-d0a1-4191-9239-8c7b6806f860\") " pod="openstack/barbican-keystone-listener-586bfdfdd8-99vrd" Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.373710 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9d53c4b5-d0a1-4191-9239-8c7b6806f860-config-data\") pod \"barbican-keystone-listener-586bfdfdd8-99vrd\" (UID: \"9d53c4b5-d0a1-4191-9239-8c7b6806f860\") " pod="openstack/barbican-keystone-listener-586bfdfdd8-99vrd" Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.373748 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/042c506a-b65f-4cbd-9ca7-3df0ec55ffa0-combined-ca-bundle\") pod \"barbican-worker-864fb4756c-xzxnp\" (UID: \"042c506a-b65f-4cbd-9ca7-3df0ec55ffa0\") " pod="openstack/barbican-worker-864fb4756c-xzxnp" Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.373767 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4q4gk\" (UniqueName: \"kubernetes.io/projected/042c506a-b65f-4cbd-9ca7-3df0ec55ffa0-kube-api-access-4q4gk\") pod \"barbican-worker-864fb4756c-xzxnp\" (UID: \"042c506a-b65f-4cbd-9ca7-3df0ec55ffa0\") " 
pod="openstack/barbican-worker-864fb4756c-xzxnp" Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.373802 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9d53c4b5-d0a1-4191-9239-8c7b6806f860-config-data-custom\") pod \"barbican-keystone-listener-586bfdfdd8-99vrd\" (UID: \"9d53c4b5-d0a1-4191-9239-8c7b6806f860\") " pod="openstack/barbican-keystone-listener-586bfdfdd8-99vrd" Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.373822 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9d53c4b5-d0a1-4191-9239-8c7b6806f860-logs\") pod \"barbican-keystone-listener-586bfdfdd8-99vrd\" (UID: \"9d53c4b5-d0a1-4191-9239-8c7b6806f860\") " pod="openstack/barbican-keystone-listener-586bfdfdd8-99vrd" Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.374558 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9d53c4b5-d0a1-4191-9239-8c7b6806f860-logs\") pod \"barbican-keystone-listener-586bfdfdd8-99vrd\" (UID: \"9d53c4b5-d0a1-4191-9239-8c7b6806f860\") " pod="openstack/barbican-keystone-listener-586bfdfdd8-99vrd" Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.379753 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-d7556d5c6-lqb4v"] Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.382131 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-d7556d5c6-lqb4v"] Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.382250 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-d7556d5c6-lqb4v" Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.390572 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-api-config-data" Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.395115 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9d53c4b5-d0a1-4191-9239-8c7b6806f860-config-data-custom\") pod \"barbican-keystone-listener-586bfdfdd8-99vrd\" (UID: \"9d53c4b5-d0a1-4191-9239-8c7b6806f860\") " pod="openstack/barbican-keystone-listener-586bfdfdd8-99vrd" Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.399940 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d53c4b5-d0a1-4191-9239-8c7b6806f860-combined-ca-bundle\") pod \"barbican-keystone-listener-586bfdfdd8-99vrd\" (UID: \"9d53c4b5-d0a1-4191-9239-8c7b6806f860\") " pod="openstack/barbican-keystone-listener-586bfdfdd8-99vrd" Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.407791 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9d53c4b5-d0a1-4191-9239-8c7b6806f860-config-data\") pod \"barbican-keystone-listener-586bfdfdd8-99vrd\" (UID: \"9d53c4b5-d0a1-4191-9239-8c7b6806f860\") " pod="openstack/barbican-keystone-listener-586bfdfdd8-99vrd" Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.419785 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-87nhx\" (UniqueName: \"kubernetes.io/projected/9d53c4b5-d0a1-4191-9239-8c7b6806f860-kube-api-access-87nhx\") pod \"barbican-keystone-listener-586bfdfdd8-99vrd\" (UID: \"9d53c4b5-d0a1-4191-9239-8c7b6806f860\") " 
pod="openstack/barbican-keystone-listener-586bfdfdd8-99vrd" Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.458617 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-748fb7ccf6-s9qnq" Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.475757 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0123488a-af6b-42f3-a828-58b8ee0f4639-combined-ca-bundle\") pod \"barbican-api-d7556d5c6-lqb4v\" (UID: \"0123488a-af6b-42f3-a828-58b8ee0f4639\") " pod="openstack/barbican-api-d7556d5c6-lqb4v" Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.475833 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/fa26df5c-4d96-49d9-8e38-434ca7b03a84-dns-swift-storage-0\") pod \"dnsmasq-dns-848cf88cfc-jd96g\" (UID: \"fa26df5c-4d96-49d9-8e38-434ca7b03a84\") " pod="openstack/dnsmasq-dns-848cf88cfc-jd96g" Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.475851 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/fa26df5c-4d96-49d9-8e38-434ca7b03a84-ovsdbserver-sb\") pod \"dnsmasq-dns-848cf88cfc-jd96g\" (UID: \"fa26df5c-4d96-49d9-8e38-434ca7b03a84\") " pod="openstack/dnsmasq-dns-848cf88cfc-jd96g" Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.475877 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0123488a-af6b-42f3-a828-58b8ee0f4639-config-data\") pod \"barbican-api-d7556d5c6-lqb4v\" (UID: \"0123488a-af6b-42f3-a828-58b8ee0f4639\") " pod="openstack/barbican-api-d7556d5c6-lqb4v" Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.475919 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/042c506a-b65f-4cbd-9ca7-3df0ec55ffa0-config-data-custom\") pod \"barbican-worker-864fb4756c-xzxnp\" (UID: \"042c506a-b65f-4cbd-9ca7-3df0ec55ffa0\") " pod="openstack/barbican-worker-864fb4756c-xzxnp" Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.475951 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0123488a-af6b-42f3-a828-58b8ee0f4639-logs\") pod \"barbican-api-d7556d5c6-lqb4v\" (UID: \"0123488a-af6b-42f3-a828-58b8ee0f4639\") " pod="openstack/barbican-api-d7556d5c6-lqb4v" Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.475980 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/042c506a-b65f-4cbd-9ca7-3df0ec55ffa0-config-data\") pod \"barbican-worker-864fb4756c-xzxnp\" (UID: \"042c506a-b65f-4cbd-9ca7-3df0ec55ffa0\") " pod="openstack/barbican-worker-864fb4756c-xzxnp" Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.476013 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/fa26df5c-4d96-49d9-8e38-434ca7b03a84-dns-svc\") pod \"dnsmasq-dns-848cf88cfc-jd96g\" (UID: \"fa26df5c-4d96-49d9-8e38-434ca7b03a84\") " pod="openstack/dnsmasq-dns-848cf88cfc-jd96g" Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.476053 5050 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/042c506a-b65f-4cbd-9ca7-3df0ec55ffa0-combined-ca-bundle\") pod \"barbican-worker-864fb4756c-xzxnp\" (UID: \"042c506a-b65f-4cbd-9ca7-3df0ec55ffa0\") " pod="openstack/barbican-worker-864fb4756c-xzxnp" Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.476070 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4q4gk\" (UniqueName: \"kubernetes.io/projected/042c506a-b65f-4cbd-9ca7-3df0ec55ffa0-kube-api-access-4q4gk\") pod \"barbican-worker-864fb4756c-xzxnp\" (UID: \"042c506a-b65f-4cbd-9ca7-3df0ec55ffa0\") " pod="openstack/barbican-worker-864fb4756c-xzxnp" Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.476114 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fa26df5c-4d96-49d9-8e38-434ca7b03a84-config\") pod \"dnsmasq-dns-848cf88cfc-jd96g\" (UID: \"fa26df5c-4d96-49d9-8e38-434ca7b03a84\") " pod="openstack/dnsmasq-dns-848cf88cfc-jd96g" Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.476138 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0123488a-af6b-42f3-a828-58b8ee0f4639-config-data-custom\") pod \"barbican-api-d7556d5c6-lqb4v\" (UID: \"0123488a-af6b-42f3-a828-58b8ee0f4639\") " pod="openstack/barbican-api-d7556d5c6-lqb4v" Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.476157 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dpsn6\" (UniqueName: \"kubernetes.io/projected/0123488a-af6b-42f3-a828-58b8ee0f4639-kube-api-access-dpsn6\") pod \"barbican-api-d7556d5c6-lqb4v\" (UID: \"0123488a-af6b-42f3-a828-58b8ee0f4639\") " pod="openstack/barbican-api-d7556d5c6-lqb4v" Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.476179 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/fa26df5c-4d96-49d9-8e38-434ca7b03a84-ovsdbserver-nb\") pod \"dnsmasq-dns-848cf88cfc-jd96g\" (UID: \"fa26df5c-4d96-49d9-8e38-434ca7b03a84\") " pod="openstack/dnsmasq-dns-848cf88cfc-jd96g" Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.476204 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/042c506a-b65f-4cbd-9ca7-3df0ec55ffa0-logs\") pod \"barbican-worker-864fb4756c-xzxnp\" (UID: \"042c506a-b65f-4cbd-9ca7-3df0ec55ffa0\") " pod="openstack/barbican-worker-864fb4756c-xzxnp" Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.476226 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t2cgm\" (UniqueName: \"kubernetes.io/projected/fa26df5c-4d96-49d9-8e38-434ca7b03a84-kube-api-access-t2cgm\") pod \"dnsmasq-dns-848cf88cfc-jd96g\" (UID: \"fa26df5c-4d96-49d9-8e38-434ca7b03a84\") " pod="openstack/dnsmasq-dns-848cf88cfc-jd96g" Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.477680 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/042c506a-b65f-4cbd-9ca7-3df0ec55ffa0-logs\") pod \"barbican-worker-864fb4756c-xzxnp\" (UID: \"042c506a-b65f-4cbd-9ca7-3df0ec55ffa0\") " pod="openstack/barbican-worker-864fb4756c-xzxnp" Nov 23 15:01:01 crc 
kubenswrapper[5050]: I1123 15:01:01.492778 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/042c506a-b65f-4cbd-9ca7-3df0ec55ffa0-combined-ca-bundle\") pod \"barbican-worker-864fb4756c-xzxnp\" (UID: \"042c506a-b65f-4cbd-9ca7-3df0ec55ffa0\") " pod="openstack/barbican-worker-864fb4756c-xzxnp" Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.497244 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/042c506a-b65f-4cbd-9ca7-3df0ec55ffa0-config-data-custom\") pod \"barbican-worker-864fb4756c-xzxnp\" (UID: \"042c506a-b65f-4cbd-9ca7-3df0ec55ffa0\") " pod="openstack/barbican-worker-864fb4756c-xzxnp" Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.498505 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/042c506a-b65f-4cbd-9ca7-3df0ec55ffa0-config-data\") pod \"barbican-worker-864fb4756c-xzxnp\" (UID: \"042c506a-b65f-4cbd-9ca7-3df0ec55ffa0\") " pod="openstack/barbican-worker-864fb4756c-xzxnp" Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.501683 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4q4gk\" (UniqueName: \"kubernetes.io/projected/042c506a-b65f-4cbd-9ca7-3df0ec55ffa0-kube-api-access-4q4gk\") pod \"barbican-worker-864fb4756c-xzxnp\" (UID: \"042c506a-b65f-4cbd-9ca7-3df0ec55ffa0\") " pod="openstack/barbican-worker-864fb4756c-xzxnp" Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.521058 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-586bfdfdd8-99vrd" Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.587231 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fa26df5c-4d96-49d9-8e38-434ca7b03a84-config\") pod \"dnsmasq-dns-848cf88cfc-jd96g\" (UID: \"fa26df5c-4d96-49d9-8e38-434ca7b03a84\") " pod="openstack/dnsmasq-dns-848cf88cfc-jd96g" Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.587300 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0123488a-af6b-42f3-a828-58b8ee0f4639-config-data-custom\") pod \"barbican-api-d7556d5c6-lqb4v\" (UID: \"0123488a-af6b-42f3-a828-58b8ee0f4639\") " pod="openstack/barbican-api-d7556d5c6-lqb4v" Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.587338 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dpsn6\" (UniqueName: \"kubernetes.io/projected/0123488a-af6b-42f3-a828-58b8ee0f4639-kube-api-access-dpsn6\") pod \"barbican-api-d7556d5c6-lqb4v\" (UID: \"0123488a-af6b-42f3-a828-58b8ee0f4639\") " pod="openstack/barbican-api-d7556d5c6-lqb4v" Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.587367 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/fa26df5c-4d96-49d9-8e38-434ca7b03a84-ovsdbserver-nb\") pod \"dnsmasq-dns-848cf88cfc-jd96g\" (UID: \"fa26df5c-4d96-49d9-8e38-434ca7b03a84\") " pod="openstack/dnsmasq-dns-848cf88cfc-jd96g" Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.587423 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t2cgm\" (UniqueName: 
\"kubernetes.io/projected/fa26df5c-4d96-49d9-8e38-434ca7b03a84-kube-api-access-t2cgm\") pod \"dnsmasq-dns-848cf88cfc-jd96g\" (UID: \"fa26df5c-4d96-49d9-8e38-434ca7b03a84\") " pod="openstack/dnsmasq-dns-848cf88cfc-jd96g" Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.587466 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0123488a-af6b-42f3-a828-58b8ee0f4639-combined-ca-bundle\") pod \"barbican-api-d7556d5c6-lqb4v\" (UID: \"0123488a-af6b-42f3-a828-58b8ee0f4639\") " pod="openstack/barbican-api-d7556d5c6-lqb4v" Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.587512 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/fa26df5c-4d96-49d9-8e38-434ca7b03a84-dns-swift-storage-0\") pod \"dnsmasq-dns-848cf88cfc-jd96g\" (UID: \"fa26df5c-4d96-49d9-8e38-434ca7b03a84\") " pod="openstack/dnsmasq-dns-848cf88cfc-jd96g" Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.587565 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/fa26df5c-4d96-49d9-8e38-434ca7b03a84-ovsdbserver-sb\") pod \"dnsmasq-dns-848cf88cfc-jd96g\" (UID: \"fa26df5c-4d96-49d9-8e38-434ca7b03a84\") " pod="openstack/dnsmasq-dns-848cf88cfc-jd96g" Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.587596 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0123488a-af6b-42f3-a828-58b8ee0f4639-config-data\") pod \"barbican-api-d7556d5c6-lqb4v\" (UID: \"0123488a-af6b-42f3-a828-58b8ee0f4639\") " pod="openstack/barbican-api-d7556d5c6-lqb4v" Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.587706 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0123488a-af6b-42f3-a828-58b8ee0f4639-logs\") pod \"barbican-api-d7556d5c6-lqb4v\" (UID: \"0123488a-af6b-42f3-a828-58b8ee0f4639\") " pod="openstack/barbican-api-d7556d5c6-lqb4v" Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.587792 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/fa26df5c-4d96-49d9-8e38-434ca7b03a84-dns-svc\") pod \"dnsmasq-dns-848cf88cfc-jd96g\" (UID: \"fa26df5c-4d96-49d9-8e38-434ca7b03a84\") " pod="openstack/dnsmasq-dns-848cf88cfc-jd96g" Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.591069 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/fa26df5c-4d96-49d9-8e38-434ca7b03a84-dns-svc\") pod \"dnsmasq-dns-848cf88cfc-jd96g\" (UID: \"fa26df5c-4d96-49d9-8e38-434ca7b03a84\") " pod="openstack/dnsmasq-dns-848cf88cfc-jd96g" Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.593110 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fa26df5c-4d96-49d9-8e38-434ca7b03a84-config\") pod \"dnsmasq-dns-848cf88cfc-jd96g\" (UID: \"fa26df5c-4d96-49d9-8e38-434ca7b03a84\") " pod="openstack/dnsmasq-dns-848cf88cfc-jd96g" Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.593720 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/fa26df5c-4d96-49d9-8e38-434ca7b03a84-ovsdbserver-nb\") pod \"dnsmasq-dns-848cf88cfc-jd96g\" (UID: 
\"fa26df5c-4d96-49d9-8e38-434ca7b03a84\") " pod="openstack/dnsmasq-dns-848cf88cfc-jd96g" Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.595219 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/fa26df5c-4d96-49d9-8e38-434ca7b03a84-ovsdbserver-sb\") pod \"dnsmasq-dns-848cf88cfc-jd96g\" (UID: \"fa26df5c-4d96-49d9-8e38-434ca7b03a84\") " pod="openstack/dnsmasq-dns-848cf88cfc-jd96g" Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.601241 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/fa26df5c-4d96-49d9-8e38-434ca7b03a84-dns-swift-storage-0\") pod \"dnsmasq-dns-848cf88cfc-jd96g\" (UID: \"fa26df5c-4d96-49d9-8e38-434ca7b03a84\") " pod="openstack/dnsmasq-dns-848cf88cfc-jd96g" Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.607404 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0123488a-af6b-42f3-a828-58b8ee0f4639-logs\") pod \"barbican-api-d7556d5c6-lqb4v\" (UID: \"0123488a-af6b-42f3-a828-58b8ee0f4639\") " pod="openstack/barbican-api-d7556d5c6-lqb4v" Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.607642 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0123488a-af6b-42f3-a828-58b8ee0f4639-combined-ca-bundle\") pod \"barbican-api-d7556d5c6-lqb4v\" (UID: \"0123488a-af6b-42f3-a828-58b8ee0f4639\") " pod="openstack/barbican-api-d7556d5c6-lqb4v" Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.611230 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-864fb4756c-xzxnp" Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.619452 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0123488a-af6b-42f3-a828-58b8ee0f4639-config-data-custom\") pod \"barbican-api-d7556d5c6-lqb4v\" (UID: \"0123488a-af6b-42f3-a828-58b8ee0f4639\") " pod="openstack/barbican-api-d7556d5c6-lqb4v" Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.651175 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dpsn6\" (UniqueName: \"kubernetes.io/projected/0123488a-af6b-42f3-a828-58b8ee0f4639-kube-api-access-dpsn6\") pod \"barbican-api-d7556d5c6-lqb4v\" (UID: \"0123488a-af6b-42f3-a828-58b8ee0f4639\") " pod="openstack/barbican-api-d7556d5c6-lqb4v" Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.652822 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t2cgm\" (UniqueName: \"kubernetes.io/projected/fa26df5c-4d96-49d9-8e38-434ca7b03a84-kube-api-access-t2cgm\") pod \"dnsmasq-dns-848cf88cfc-jd96g\" (UID: \"fa26df5c-4d96-49d9-8e38-434ca7b03a84\") " pod="openstack/dnsmasq-dns-848cf88cfc-jd96g" Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.701644 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-848cf88cfc-jd96g" Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.753455 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0123488a-af6b-42f3-a828-58b8ee0f4639-config-data\") pod \"barbican-api-d7556d5c6-lqb4v\" (UID: \"0123488a-af6b-42f3-a828-58b8ee0f4639\") " pod="openstack/barbican-api-d7556d5c6-lqb4v" Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.968845 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7cb7995f89-k8h9t" event={"ID":"1b3a1560-8762-47b9-8d1c-fe94eb46bec2","Type":"ContainerStarted","Data":"8b5d3ae6f2b8800aa1cfd0b5a953b873c14fdd59195485538c97159fa050f837"} Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.969500 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-7cb7995f89-k8h9t" Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.983971 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-ffbf89f58-t9kvk"] Nov 23 15:01:01 crc kubenswrapper[5050]: W1123 15:01:01.992095 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode3fbc320_b4b0_47f2_9fc6_a6efc63a3ee9.slice/crio-eb57e605f9b80957edce5518a5e62e697e480f0cb1c6f297b41656e2d021e40c WatchSource:0}: Error finding container eb57e605f9b80957edce5518a5e62e697e480f0cb1c6f297b41656e2d021e40c: Status 404 returned error can't find the container with id eb57e605f9b80957edce5518a5e62e697e480f0cb1c6f297b41656e2d021e40c Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.998753 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6b7b667979-dcnfh" event={"ID":"57d4c7fd-cbe6-41c6-868f-de274707cd2d","Type":"ContainerStarted","Data":"bb05ef0c741a1856d9cdb8e1e79fe69e2d55bdc55693ac1c61f9d56c1a906cdd"} Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.999031 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6b7b667979-dcnfh" podUID="57d4c7fd-cbe6-41c6-868f-de274707cd2d" containerName="dnsmasq-dns" containerID="cri-o://bb05ef0c741a1856d9cdb8e1e79fe69e2d55bdc55693ac1c61f9d56c1a906cdd" gracePeriod=10 Nov 23 15:01:01 crc kubenswrapper[5050]: I1123 15:01:01.999101 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6b7b667979-dcnfh" Nov 23 15:01:02 crc kubenswrapper[5050]: I1123 15:01:02.022029 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-82ms7" event={"ID":"93d8639a-cdc9-4e9f-a17f-883debe12333","Type":"ContainerStarted","Data":"93834389fba7affdd694cfab3a6d64c84af6adddd324ea40539aa89d0238f84f"} Nov 23 15:01:02 crc kubenswrapper[5050]: I1123 15:01:02.023474 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-d7556d5c6-lqb4v" Nov 23 15:01:02 crc kubenswrapper[5050]: I1123 15:01:02.046398 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-7cb7995f89-k8h9t" podStartSLOduration=8.046377329 podStartE2EDuration="8.046377329s" podCreationTimestamp="2025-11-23 15:00:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 15:01:02.0061973 +0000 UTC m=+1157.173193785" watchObservedRunningTime="2025-11-23 15:01:02.046377329 +0000 UTC m=+1157.213373814" Nov 23 15:01:02 crc kubenswrapper[5050]: I1123 15:01:02.063638 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6b7b667979-dcnfh" podStartSLOduration=11.063618679 podStartE2EDuration="11.063618679s" podCreationTimestamp="2025-11-23 15:00:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 15:01:02.041363989 +0000 UTC m=+1157.208360474" watchObservedRunningTime="2025-11-23 15:01:02.063618679 +0000 UTC m=+1157.230615164" Nov 23 15:01:02 crc kubenswrapper[5050]: I1123 15:01:02.072415 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-sync-82ms7" podStartSLOduration=3.8517431479999997 podStartE2EDuration="38.072397853s" podCreationTimestamp="2025-11-23 15:00:24 +0000 UTC" firstStartedPulling="2025-11-23 15:00:25.90387373 +0000 UTC m=+1121.070870215" lastFinishedPulling="2025-11-23 15:01:00.124528435 +0000 UTC m=+1155.291524920" observedRunningTime="2025-11-23 15:01:02.059993148 +0000 UTC m=+1157.226989633" watchObservedRunningTime="2025-11-23 15:01:02.072397853 +0000 UTC m=+1157.239394338" Nov 23 15:01:02 crc kubenswrapper[5050]: I1123 15:01:02.144098 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-748fb7ccf6-s9qnq"] Nov 23 15:01:02 crc kubenswrapper[5050]: I1123 15:01:02.472641 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-864fb4756c-xzxnp"] Nov 23 15:01:02 crc kubenswrapper[5050]: I1123 15:01:02.488706 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-586bfdfdd8-99vrd"] Nov 23 15:01:02 crc kubenswrapper[5050]: W1123 15:01:02.507660 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9d53c4b5_d0a1_4191_9239_8c7b6806f860.slice/crio-04599c1ee457198a051927d61d801c2c251b476f97e5c92bf4c2314b51b24b38 WatchSource:0}: Error finding container 04599c1ee457198a051927d61d801c2c251b476f97e5c92bf4c2314b51b24b38: Status 404 returned error can't find the container with id 04599c1ee457198a051927d61d801c2c251b476f97e5c92bf4c2314b51b24b38 Nov 23 15:01:02 crc kubenswrapper[5050]: W1123 15:01:02.536866 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod042c506a_b65f_4cbd_9ca7_3df0ec55ffa0.slice/crio-f28cd14e6a182d41c41c1de00a64ae8d66357f165c8c6bc2270a9f9a8b59604b WatchSource:0}: Error finding container f28cd14e6a182d41c41c1de00a64ae8d66357f165c8c6bc2270a9f9a8b59604b: Status 404 returned error can't find the container with id f28cd14e6a182d41c41c1de00a64ae8d66357f165c8c6bc2270a9f9a8b59604b Nov 23 15:01:02 crc kubenswrapper[5050]: I1123 15:01:02.629005 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openstack/dnsmasq-dns-848cf88cfc-jd96g"] Nov 23 15:01:02 crc kubenswrapper[5050]: I1123 15:01:02.817054 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6b7b667979-dcnfh" Nov 23 15:01:02 crc kubenswrapper[5050]: I1123 15:01:02.929729 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-d7556d5c6-lqb4v"] Nov 23 15:01:02 crc kubenswrapper[5050]: I1123 15:01:02.949578 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/57d4c7fd-cbe6-41c6-868f-de274707cd2d-dns-svc\") pod \"57d4c7fd-cbe6-41c6-868f-de274707cd2d\" (UID: \"57d4c7fd-cbe6-41c6-868f-de274707cd2d\") " Nov 23 15:01:02 crc kubenswrapper[5050]: I1123 15:01:02.949677 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/57d4c7fd-cbe6-41c6-868f-de274707cd2d-config\") pod \"57d4c7fd-cbe6-41c6-868f-de274707cd2d\" (UID: \"57d4c7fd-cbe6-41c6-868f-de274707cd2d\") " Nov 23 15:01:02 crc kubenswrapper[5050]: I1123 15:01:02.949717 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/57d4c7fd-cbe6-41c6-868f-de274707cd2d-dns-swift-storage-0\") pod \"57d4c7fd-cbe6-41c6-868f-de274707cd2d\" (UID: \"57d4c7fd-cbe6-41c6-868f-de274707cd2d\") " Nov 23 15:01:02 crc kubenswrapper[5050]: I1123 15:01:02.949747 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6dmzd\" (UniqueName: \"kubernetes.io/projected/57d4c7fd-cbe6-41c6-868f-de274707cd2d-kube-api-access-6dmzd\") pod \"57d4c7fd-cbe6-41c6-868f-de274707cd2d\" (UID: \"57d4c7fd-cbe6-41c6-868f-de274707cd2d\") " Nov 23 15:01:02 crc kubenswrapper[5050]: I1123 15:01:02.949850 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/57d4c7fd-cbe6-41c6-868f-de274707cd2d-ovsdbserver-sb\") pod \"57d4c7fd-cbe6-41c6-868f-de274707cd2d\" (UID: \"57d4c7fd-cbe6-41c6-868f-de274707cd2d\") " Nov 23 15:01:02 crc kubenswrapper[5050]: I1123 15:01:02.950044 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/57d4c7fd-cbe6-41c6-868f-de274707cd2d-ovsdbserver-nb\") pod \"57d4c7fd-cbe6-41c6-868f-de274707cd2d\" (UID: \"57d4c7fd-cbe6-41c6-868f-de274707cd2d\") " Nov 23 15:01:02 crc kubenswrapper[5050]: I1123 15:01:02.972417 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57d4c7fd-cbe6-41c6-868f-de274707cd2d-kube-api-access-6dmzd" (OuterVolumeSpecName: "kube-api-access-6dmzd") pod "57d4c7fd-cbe6-41c6-868f-de274707cd2d" (UID: "57d4c7fd-cbe6-41c6-868f-de274707cd2d"). InnerVolumeSpecName "kube-api-access-6dmzd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:01:03 crc kubenswrapper[5050]: I1123 15:01:03.005791 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/57d4c7fd-cbe6-41c6-868f-de274707cd2d-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "57d4c7fd-cbe6-41c6-868f-de274707cd2d" (UID: "57d4c7fd-cbe6-41c6-868f-de274707cd2d"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 15:01:03 crc kubenswrapper[5050]: I1123 15:01:03.019457 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/57d4c7fd-cbe6-41c6-868f-de274707cd2d-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "57d4c7fd-cbe6-41c6-868f-de274707cd2d" (UID: "57d4c7fd-cbe6-41c6-868f-de274707cd2d"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 15:01:03 crc kubenswrapper[5050]: I1123 15:01:03.032547 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-d7556d5c6-lqb4v" event={"ID":"0123488a-af6b-42f3-a828-58b8ee0f4639","Type":"ContainerStarted","Data":"98c91cc4d876e763221b031aa7dd6b2f1bf0f4433a26a0d633d6a8ad901a7136"} Nov 23 15:01:03 crc kubenswrapper[5050]: I1123 15:01:03.033913 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-848cf88cfc-jd96g" event={"ID":"fa26df5c-4d96-49d9-8e38-434ca7b03a84","Type":"ContainerStarted","Data":"582d8b167761e7c3319f44f3d1b918c96b5740a1bc1caed5393c047af2063b3c"} Nov 23 15:01:03 crc kubenswrapper[5050]: I1123 15:01:03.042064 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-748fb7ccf6-s9qnq" event={"ID":"add2a2e2-5553-4e43-8ddd-b8719949d531","Type":"ContainerStarted","Data":"e90aeb6bca5150b94a5a513a2cc4e2b3291efa5c98df658f3c65b5db5139a3c0"} Nov 23 15:01:03 crc kubenswrapper[5050]: I1123 15:01:03.044013 5050 generic.go:334] "Generic (PLEG): container finished" podID="57d4c7fd-cbe6-41c6-868f-de274707cd2d" containerID="bb05ef0c741a1856d9cdb8e1e79fe69e2d55bdc55693ac1c61f9d56c1a906cdd" exitCode=0 Nov 23 15:01:03 crc kubenswrapper[5050]: I1123 15:01:03.044082 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6b7b667979-dcnfh" event={"ID":"57d4c7fd-cbe6-41c6-868f-de274707cd2d","Type":"ContainerDied","Data":"bb05ef0c741a1856d9cdb8e1e79fe69e2d55bdc55693ac1c61f9d56c1a906cdd"} Nov 23 15:01:03 crc kubenswrapper[5050]: I1123 15:01:03.044104 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6b7b667979-dcnfh" event={"ID":"57d4c7fd-cbe6-41c6-868f-de274707cd2d","Type":"ContainerDied","Data":"0b0b1173994ba7c92a71d968fcccd41575744712d76743f8c76f9f27d38bce27"} Nov 23 15:01:03 crc kubenswrapper[5050]: I1123 15:01:03.044123 5050 scope.go:117] "RemoveContainer" containerID="bb05ef0c741a1856d9cdb8e1e79fe69e2d55bdc55693ac1c61f9d56c1a906cdd" Nov 23 15:01:03 crc kubenswrapper[5050]: I1123 15:01:03.044323 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6b7b667979-dcnfh" Nov 23 15:01:03 crc kubenswrapper[5050]: I1123 15:01:03.047123 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/57d4c7fd-cbe6-41c6-868f-de274707cd2d-config" (OuterVolumeSpecName: "config") pod "57d4c7fd-cbe6-41c6-868f-de274707cd2d" (UID: "57d4c7fd-cbe6-41c6-868f-de274707cd2d"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 15:01:03 crc kubenswrapper[5050]: I1123 15:01:03.048821 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-864fb4756c-xzxnp" event={"ID":"042c506a-b65f-4cbd-9ca7-3df0ec55ffa0","Type":"ContainerStarted","Data":"f28cd14e6a182d41c41c1de00a64ae8d66357f165c8c6bc2270a9f9a8b59604b"} Nov 23 15:01:03 crc kubenswrapper[5050]: I1123 15:01:03.052207 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/57d4c7fd-cbe6-41c6-868f-de274707cd2d-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "57d4c7fd-cbe6-41c6-868f-de274707cd2d" (UID: "57d4c7fd-cbe6-41c6-868f-de274707cd2d"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 15:01:03 crc kubenswrapper[5050]: I1123 15:01:03.053764 5050 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/57d4c7fd-cbe6-41c6-868f-de274707cd2d-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 23 15:01:03 crc kubenswrapper[5050]: I1123 15:01:03.053783 5050 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/57d4c7fd-cbe6-41c6-868f-de274707cd2d-config\") on node \"crc\" DevicePath \"\"" Nov 23 15:01:03 crc kubenswrapper[5050]: I1123 15:01:03.053794 5050 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/57d4c7fd-cbe6-41c6-868f-de274707cd2d-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 23 15:01:03 crc kubenswrapper[5050]: I1123 15:01:03.053807 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6dmzd\" (UniqueName: \"kubernetes.io/projected/57d4c7fd-cbe6-41c6-868f-de274707cd2d-kube-api-access-6dmzd\") on node \"crc\" DevicePath \"\"" Nov 23 15:01:03 crc kubenswrapper[5050]: I1123 15:01:03.053816 5050 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/57d4c7fd-cbe6-41c6-868f-de274707cd2d-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 23 15:01:03 crc kubenswrapper[5050]: I1123 15:01:03.056529 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-586bfdfdd8-99vrd" event={"ID":"9d53c4b5-d0a1-4191-9239-8c7b6806f860","Type":"ContainerStarted","Data":"04599c1ee457198a051927d61d801c2c251b476f97e5c92bf4c2314b51b24b38"} Nov 23 15:01:03 crc kubenswrapper[5050]: I1123 15:01:03.074335 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/57d4c7fd-cbe6-41c6-868f-de274707cd2d-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "57d4c7fd-cbe6-41c6-868f-de274707cd2d" (UID: "57d4c7fd-cbe6-41c6-868f-de274707cd2d"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 15:01:03 crc kubenswrapper[5050]: I1123 15:01:03.074415 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-ffbf89f58-t9kvk" event={"ID":"e3fbc320-b4b0-47f2-9fc6-a6efc63a3ee9","Type":"ContainerStarted","Data":"510cddb97927101d8a412932a0b83e3c82d63904833e599f58c30bcc8dd0541d"} Nov 23 15:01:03 crc kubenswrapper[5050]: I1123 15:01:03.074473 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-ffbf89f58-t9kvk" event={"ID":"e3fbc320-b4b0-47f2-9fc6-a6efc63a3ee9","Type":"ContainerStarted","Data":"eb57e605f9b80957edce5518a5e62e697e480f0cb1c6f297b41656e2d021e40c"} Nov 23 15:01:03 crc kubenswrapper[5050]: I1123 15:01:03.074791 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/keystone-ffbf89f58-t9kvk" Nov 23 15:01:03 crc kubenswrapper[5050]: I1123 15:01:03.090098 5050 scope.go:117] "RemoveContainer" containerID="fba6eff7da7c8f9e02497e5406a09a149bf0183ad444a9f1ab67be209f5ce877" Nov 23 15:01:03 crc kubenswrapper[5050]: I1123 15:01:03.097143 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-ffbf89f58-t9kvk" podStartSLOduration=3.097121071 podStartE2EDuration="3.097121071s" podCreationTimestamp="2025-11-23 15:01:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 15:01:03.096582966 +0000 UTC m=+1158.263579451" watchObservedRunningTime="2025-11-23 15:01:03.097121071 +0000 UTC m=+1158.264117556" Nov 23 15:01:03 crc kubenswrapper[5050]: I1123 15:01:03.155796 5050 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/57d4c7fd-cbe6-41c6-868f-de274707cd2d-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 23 15:01:03 crc kubenswrapper[5050]: I1123 15:01:03.204035 5050 scope.go:117] "RemoveContainer" containerID="bb05ef0c741a1856d9cdb8e1e79fe69e2d55bdc55693ac1c61f9d56c1a906cdd" Nov 23 15:01:03 crc kubenswrapper[5050]: E1123 15:01:03.205125 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bb05ef0c741a1856d9cdb8e1e79fe69e2d55bdc55693ac1c61f9d56c1a906cdd\": container with ID starting with bb05ef0c741a1856d9cdb8e1e79fe69e2d55bdc55693ac1c61f9d56c1a906cdd not found: ID does not exist" containerID="bb05ef0c741a1856d9cdb8e1e79fe69e2d55bdc55693ac1c61f9d56c1a906cdd" Nov 23 15:01:03 crc kubenswrapper[5050]: I1123 15:01:03.205159 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bb05ef0c741a1856d9cdb8e1e79fe69e2d55bdc55693ac1c61f9d56c1a906cdd"} err="failed to get container status \"bb05ef0c741a1856d9cdb8e1e79fe69e2d55bdc55693ac1c61f9d56c1a906cdd\": rpc error: code = NotFound desc = could not find container \"bb05ef0c741a1856d9cdb8e1e79fe69e2d55bdc55693ac1c61f9d56c1a906cdd\": container with ID starting with bb05ef0c741a1856d9cdb8e1e79fe69e2d55bdc55693ac1c61f9d56c1a906cdd not found: ID does not exist" Nov 23 15:01:03 crc kubenswrapper[5050]: I1123 15:01:03.205185 5050 scope.go:117] "RemoveContainer" containerID="fba6eff7da7c8f9e02497e5406a09a149bf0183ad444a9f1ab67be209f5ce877" Nov 23 15:01:03 crc kubenswrapper[5050]: E1123 15:01:03.206661 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fba6eff7da7c8f9e02497e5406a09a149bf0183ad444a9f1ab67be209f5ce877\": container with 
ID starting with fba6eff7da7c8f9e02497e5406a09a149bf0183ad444a9f1ab67be209f5ce877 not found: ID does not exist" containerID="fba6eff7da7c8f9e02497e5406a09a149bf0183ad444a9f1ab67be209f5ce877" Nov 23 15:01:03 crc kubenswrapper[5050]: I1123 15:01:03.206688 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fba6eff7da7c8f9e02497e5406a09a149bf0183ad444a9f1ab67be209f5ce877"} err="failed to get container status \"fba6eff7da7c8f9e02497e5406a09a149bf0183ad444a9f1ab67be209f5ce877\": rpc error: code = NotFound desc = could not find container \"fba6eff7da7c8f9e02497e5406a09a149bf0183ad444a9f1ab67be209f5ce877\": container with ID starting with fba6eff7da7c8f9e02497e5406a09a149bf0183ad444a9f1ab67be209f5ce877 not found: ID does not exist" Nov 23 15:01:04 crc kubenswrapper[5050]: I1123 15:01:04.091794 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-748fb7ccf6-s9qnq" event={"ID":"add2a2e2-5553-4e43-8ddd-b8719949d531","Type":"ContainerStarted","Data":"ce2fe500a1bdad923f79c0d2d37ae5d7c546350b70f8148bfd61c5c68475e3f7"} Nov 23 15:01:04 crc kubenswrapper[5050]: I1123 15:01:04.092592 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-748fb7ccf6-s9qnq" Nov 23 15:01:04 crc kubenswrapper[5050]: I1123 15:01:04.092608 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-748fb7ccf6-s9qnq" event={"ID":"add2a2e2-5553-4e43-8ddd-b8719949d531","Type":"ContainerStarted","Data":"e29903a818a042f29b3e4e52f5b9a3d3f2db969791fe5adce7bb96fbe4dca897"} Nov 23 15:01:04 crc kubenswrapper[5050]: I1123 15:01:04.092621 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-748fb7ccf6-s9qnq" Nov 23 15:01:04 crc kubenswrapper[5050]: I1123 15:01:04.109698 5050 generic.go:334] "Generic (PLEG): container finished" podID="fa26df5c-4d96-49d9-8e38-434ca7b03a84" containerID="2849baeded4e502bf2e384360155c347b93dd1486d16d29497386423b90182a5" exitCode=0 Nov 23 15:01:04 crc kubenswrapper[5050]: I1123 15:01:04.110262 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-848cf88cfc-jd96g" event={"ID":"fa26df5c-4d96-49d9-8e38-434ca7b03a84","Type":"ContainerDied","Data":"2849baeded4e502bf2e384360155c347b93dd1486d16d29497386423b90182a5"} Nov 23 15:01:04 crc kubenswrapper[5050]: I1123 15:01:04.145184 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-d7556d5c6-lqb4v" event={"ID":"0123488a-af6b-42f3-a828-58b8ee0f4639","Type":"ContainerStarted","Data":"6a21f9dddd87d645724f8021cd315b1c701c97263acf7edfe1af443b79bc46ad"} Nov 23 15:01:04 crc kubenswrapper[5050]: I1123 15:01:04.145271 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-d7556d5c6-lqb4v" event={"ID":"0123488a-af6b-42f3-a828-58b8ee0f4639","Type":"ContainerStarted","Data":"4782398b4faea9b16534db3a12e7092435fec0ad49f6e1920316c1aec76cb7b4"} Nov 23 15:01:04 crc kubenswrapper[5050]: I1123 15:01:04.145312 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-d7556d5c6-lqb4v" Nov 23 15:01:04 crc kubenswrapper[5050]: I1123 15:01:04.145419 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-d7556d5c6-lqb4v" Nov 23 15:01:04 crc kubenswrapper[5050]: I1123 15:01:04.159379 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-748fb7ccf6-s9qnq" podStartSLOduration=4.159355162 
podStartE2EDuration="4.159355162s" podCreationTimestamp="2025-11-23 15:01:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 15:01:04.134260203 +0000 UTC m=+1159.301256688" watchObservedRunningTime="2025-11-23 15:01:04.159355162 +0000 UTC m=+1159.326351647" Nov 23 15:01:04 crc kubenswrapper[5050]: I1123 15:01:04.408166 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-d7556d5c6-lqb4v" podStartSLOduration=3.408138778 podStartE2EDuration="3.408138778s" podCreationTimestamp="2025-11-23 15:01:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 15:01:04.240971274 +0000 UTC m=+1159.407967759" watchObservedRunningTime="2025-11-23 15:01:04.408138778 +0000 UTC m=+1159.575135263" Nov 23 15:01:04 crc kubenswrapper[5050]: I1123 15:01:04.444383 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-77dc855c68-z488p"] Nov 23 15:01:04 crc kubenswrapper[5050]: E1123 15:01:04.445240 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="57d4c7fd-cbe6-41c6-868f-de274707cd2d" containerName="dnsmasq-dns" Nov 23 15:01:04 crc kubenswrapper[5050]: I1123 15:01:04.445322 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="57d4c7fd-cbe6-41c6-868f-de274707cd2d" containerName="dnsmasq-dns" Nov 23 15:01:04 crc kubenswrapper[5050]: E1123 15:01:04.445429 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="57d4c7fd-cbe6-41c6-868f-de274707cd2d" containerName="init" Nov 23 15:01:04 crc kubenswrapper[5050]: I1123 15:01:04.445526 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="57d4c7fd-cbe6-41c6-868f-de274707cd2d" containerName="init" Nov 23 15:01:04 crc kubenswrapper[5050]: I1123 15:01:04.445795 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="57d4c7fd-cbe6-41c6-868f-de274707cd2d" containerName="dnsmasq-dns" Nov 23 15:01:04 crc kubenswrapper[5050]: I1123 15:01:04.447141 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-77dc855c68-z488p" Nov 23 15:01:04 crc kubenswrapper[5050]: I1123 15:01:04.463171 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-internal-svc" Nov 23 15:01:04 crc kubenswrapper[5050]: I1123 15:01:04.465016 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-public-svc" Nov 23 15:01:04 crc kubenswrapper[5050]: I1123 15:01:04.493285 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-77dc855c68-z488p"] Nov 23 15:01:04 crc kubenswrapper[5050]: I1123 15:01:04.534261 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f85034c8-6b79-42e1-849d-646a2ead1a93-logs\") pod \"barbican-api-77dc855c68-z488p\" (UID: \"f85034c8-6b79-42e1-849d-646a2ead1a93\") " pod="openstack/barbican-api-77dc855c68-z488p" Nov 23 15:01:04 crc kubenswrapper[5050]: I1123 15:01:04.537111 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f85034c8-6b79-42e1-849d-646a2ead1a93-config-data\") pod \"barbican-api-77dc855c68-z488p\" (UID: \"f85034c8-6b79-42e1-849d-646a2ead1a93\") " pod="openstack/barbican-api-77dc855c68-z488p" Nov 23 15:01:04 crc kubenswrapper[5050]: I1123 15:01:04.537352 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/f85034c8-6b79-42e1-849d-646a2ead1a93-public-tls-certs\") pod \"barbican-api-77dc855c68-z488p\" (UID: \"f85034c8-6b79-42e1-849d-646a2ead1a93\") " pod="openstack/barbican-api-77dc855c68-z488p" Nov 23 15:01:04 crc kubenswrapper[5050]: I1123 15:01:04.537498 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6v4d8\" (UniqueName: \"kubernetes.io/projected/f85034c8-6b79-42e1-849d-646a2ead1a93-kube-api-access-6v4d8\") pod \"barbican-api-77dc855c68-z488p\" (UID: \"f85034c8-6b79-42e1-849d-646a2ead1a93\") " pod="openstack/barbican-api-77dc855c68-z488p" Nov 23 15:01:04 crc kubenswrapper[5050]: I1123 15:01:04.537712 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f85034c8-6b79-42e1-849d-646a2ead1a93-internal-tls-certs\") pod \"barbican-api-77dc855c68-z488p\" (UID: \"f85034c8-6b79-42e1-849d-646a2ead1a93\") " pod="openstack/barbican-api-77dc855c68-z488p" Nov 23 15:01:04 crc kubenswrapper[5050]: I1123 15:01:04.538012 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f85034c8-6b79-42e1-849d-646a2ead1a93-combined-ca-bundle\") pod \"barbican-api-77dc855c68-z488p\" (UID: \"f85034c8-6b79-42e1-849d-646a2ead1a93\") " pod="openstack/barbican-api-77dc855c68-z488p" Nov 23 15:01:04 crc kubenswrapper[5050]: I1123 15:01:04.538233 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f85034c8-6b79-42e1-849d-646a2ead1a93-config-data-custom\") pod \"barbican-api-77dc855c68-z488p\" (UID: \"f85034c8-6b79-42e1-849d-646a2ead1a93\") " pod="openstack/barbican-api-77dc855c68-z488p" Nov 23 15:01:04 crc kubenswrapper[5050]: I1123 15:01:04.642021 5050 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f85034c8-6b79-42e1-849d-646a2ead1a93-config-data\") pod \"barbican-api-77dc855c68-z488p\" (UID: \"f85034c8-6b79-42e1-849d-646a2ead1a93\") " pod="openstack/barbican-api-77dc855c68-z488p" Nov 23 15:01:04 crc kubenswrapper[5050]: I1123 15:01:04.642132 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/f85034c8-6b79-42e1-849d-646a2ead1a93-public-tls-certs\") pod \"barbican-api-77dc855c68-z488p\" (UID: \"f85034c8-6b79-42e1-849d-646a2ead1a93\") " pod="openstack/barbican-api-77dc855c68-z488p" Nov 23 15:01:04 crc kubenswrapper[5050]: I1123 15:01:04.642173 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6v4d8\" (UniqueName: \"kubernetes.io/projected/f85034c8-6b79-42e1-849d-646a2ead1a93-kube-api-access-6v4d8\") pod \"barbican-api-77dc855c68-z488p\" (UID: \"f85034c8-6b79-42e1-849d-646a2ead1a93\") " pod="openstack/barbican-api-77dc855c68-z488p" Nov 23 15:01:04 crc kubenswrapper[5050]: I1123 15:01:04.642234 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f85034c8-6b79-42e1-849d-646a2ead1a93-internal-tls-certs\") pod \"barbican-api-77dc855c68-z488p\" (UID: \"f85034c8-6b79-42e1-849d-646a2ead1a93\") " pod="openstack/barbican-api-77dc855c68-z488p" Nov 23 15:01:04 crc kubenswrapper[5050]: I1123 15:01:04.642257 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f85034c8-6b79-42e1-849d-646a2ead1a93-combined-ca-bundle\") pod \"barbican-api-77dc855c68-z488p\" (UID: \"f85034c8-6b79-42e1-849d-646a2ead1a93\") " pod="openstack/barbican-api-77dc855c68-z488p" Nov 23 15:01:04 crc kubenswrapper[5050]: I1123 15:01:04.642274 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f85034c8-6b79-42e1-849d-646a2ead1a93-config-data-custom\") pod \"barbican-api-77dc855c68-z488p\" (UID: \"f85034c8-6b79-42e1-849d-646a2ead1a93\") " pod="openstack/barbican-api-77dc855c68-z488p" Nov 23 15:01:04 crc kubenswrapper[5050]: I1123 15:01:04.642426 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f85034c8-6b79-42e1-849d-646a2ead1a93-logs\") pod \"barbican-api-77dc855c68-z488p\" (UID: \"f85034c8-6b79-42e1-849d-646a2ead1a93\") " pod="openstack/barbican-api-77dc855c68-z488p" Nov 23 15:01:04 crc kubenswrapper[5050]: I1123 15:01:04.643661 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f85034c8-6b79-42e1-849d-646a2ead1a93-logs\") pod \"barbican-api-77dc855c68-z488p\" (UID: \"f85034c8-6b79-42e1-849d-646a2ead1a93\") " pod="openstack/barbican-api-77dc855c68-z488p" Nov 23 15:01:04 crc kubenswrapper[5050]: I1123 15:01:04.650965 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f85034c8-6b79-42e1-849d-646a2ead1a93-config-data-custom\") pod \"barbican-api-77dc855c68-z488p\" (UID: \"f85034c8-6b79-42e1-849d-646a2ead1a93\") " pod="openstack/barbican-api-77dc855c68-z488p" Nov 23 15:01:04 crc kubenswrapper[5050]: I1123 15:01:04.650986 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" 
(UniqueName: \"kubernetes.io/secret/f85034c8-6b79-42e1-849d-646a2ead1a93-config-data\") pod \"barbican-api-77dc855c68-z488p\" (UID: \"f85034c8-6b79-42e1-849d-646a2ead1a93\") " pod="openstack/barbican-api-77dc855c68-z488p" Nov 23 15:01:04 crc kubenswrapper[5050]: I1123 15:01:04.652681 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f85034c8-6b79-42e1-849d-646a2ead1a93-internal-tls-certs\") pod \"barbican-api-77dc855c68-z488p\" (UID: \"f85034c8-6b79-42e1-849d-646a2ead1a93\") " pod="openstack/barbican-api-77dc855c68-z488p" Nov 23 15:01:04 crc kubenswrapper[5050]: I1123 15:01:04.653329 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f85034c8-6b79-42e1-849d-646a2ead1a93-combined-ca-bundle\") pod \"barbican-api-77dc855c68-z488p\" (UID: \"f85034c8-6b79-42e1-849d-646a2ead1a93\") " pod="openstack/barbican-api-77dc855c68-z488p" Nov 23 15:01:04 crc kubenswrapper[5050]: I1123 15:01:04.664068 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/f85034c8-6b79-42e1-849d-646a2ead1a93-public-tls-certs\") pod \"barbican-api-77dc855c68-z488p\" (UID: \"f85034c8-6b79-42e1-849d-646a2ead1a93\") " pod="openstack/barbican-api-77dc855c68-z488p" Nov 23 15:01:04 crc kubenswrapper[5050]: I1123 15:01:04.664748 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6v4d8\" (UniqueName: \"kubernetes.io/projected/f85034c8-6b79-42e1-849d-646a2ead1a93-kube-api-access-6v4d8\") pod \"barbican-api-77dc855c68-z488p\" (UID: \"f85034c8-6b79-42e1-849d-646a2ead1a93\") " pod="openstack/barbican-api-77dc855c68-z488p" Nov 23 15:01:04 crc kubenswrapper[5050]: I1123 15:01:04.838941 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-77dc855c68-z488p" Nov 23 15:01:05 crc kubenswrapper[5050]: I1123 15:01:05.165679 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-848cf88cfc-jd96g" event={"ID":"fa26df5c-4d96-49d9-8e38-434ca7b03a84","Type":"ContainerStarted","Data":"bd61e5d6f4fffaaacecb1155c22df9c3b1792f33900fceb21258173175465943"} Nov 23 15:01:05 crc kubenswrapper[5050]: I1123 15:01:05.166240 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-848cf88cfc-jd96g" Nov 23 15:01:05 crc kubenswrapper[5050]: I1123 15:01:05.196765 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-848cf88cfc-jd96g" podStartSLOduration=4.196746182 podStartE2EDuration="4.196746182s" podCreationTimestamp="2025-11-23 15:01:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 15:01:05.187098414 +0000 UTC m=+1160.354094899" watchObservedRunningTime="2025-11-23 15:01:05.196746182 +0000 UTC m=+1160.363742667" Nov 23 15:01:06 crc kubenswrapper[5050]: I1123 15:01:06.182344 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-586bfdfdd8-99vrd" event={"ID":"9d53c4b5-d0a1-4191-9239-8c7b6806f860","Type":"ContainerStarted","Data":"973cb826038668efa7e6c38aa0a38cd28308df3422e590a77aa204d04d4cf352"} Nov 23 15:01:06 crc kubenswrapper[5050]: I1123 15:01:06.335539 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-77dc855c68-z488p"] Nov 23 15:01:06 crc kubenswrapper[5050]: W1123 15:01:06.394339 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf85034c8_6b79_42e1_849d_646a2ead1a93.slice/crio-6a9518a45edf6d685eb5bb4729345f700a5d088b5886213d0be380bab0e90058 WatchSource:0}: Error finding container 6a9518a45edf6d685eb5bb4729345f700a5d088b5886213d0be380bab0e90058: Status 404 returned error can't find the container with id 6a9518a45edf6d685eb5bb4729345f700a5d088b5886213d0be380bab0e90058 Nov 23 15:01:07 crc kubenswrapper[5050]: I1123 15:01:07.194226 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-77dc855c68-z488p" event={"ID":"f85034c8-6b79-42e1-849d-646a2ead1a93","Type":"ContainerStarted","Data":"6a9518a45edf6d685eb5bb4729345f700a5d088b5886213d0be380bab0e90058"} Nov 23 15:01:08 crc kubenswrapper[5050]: I1123 15:01:08.213388 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-77dc855c68-z488p" event={"ID":"f85034c8-6b79-42e1-849d-646a2ead1a93","Type":"ContainerStarted","Data":"ec06abb2a42fb30f410f9e5ebcf42f255dbea4392fc4eb791ab3e885ee8c6c15"} Nov 23 15:01:08 crc kubenswrapper[5050]: I1123 15:01:08.220967 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-586bfdfdd8-99vrd" event={"ID":"9d53c4b5-d0a1-4191-9239-8c7b6806f860","Type":"ContainerStarted","Data":"561dbaa7317e13693d05c4f79f3b0d5138d3e140a5300d28891e26017d5bd81a"} Nov 23 15:01:08 crc kubenswrapper[5050]: I1123 15:01:08.224781 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-864fb4756c-xzxnp" event={"ID":"042c506a-b65f-4cbd-9ca7-3df0ec55ffa0","Type":"ContainerStarted","Data":"4cbde859414d5230c51f3a896d2c7f2eb514382d4930ae12ebe388430ccf7b95"} Nov 23 15:01:08 crc kubenswrapper[5050]: I1123 15:01:08.252816 5050 pod_startup_latency_tracker.go:104] 
"Observed pod startup duration" pod="openstack/barbican-keystone-listener-586bfdfdd8-99vrd" podStartSLOduration=3.993576386 podStartE2EDuration="7.25240857s" podCreationTimestamp="2025-11-23 15:01:01 +0000 UTC" firstStartedPulling="2025-11-23 15:01:02.516200568 +0000 UTC m=+1157.683197053" lastFinishedPulling="2025-11-23 15:01:05.775032752 +0000 UTC m=+1160.942029237" observedRunningTime="2025-11-23 15:01:08.239082529 +0000 UTC m=+1163.406079014" watchObservedRunningTime="2025-11-23 15:01:08.25240857 +0000 UTC m=+1163.419405065" Nov 23 15:01:09 crc kubenswrapper[5050]: I1123 15:01:09.242039 5050 generic.go:334] "Generic (PLEG): container finished" podID="93d8639a-cdc9-4e9f-a17f-883debe12333" containerID="93834389fba7affdd694cfab3a6d64c84af6adddd324ea40539aa89d0238f84f" exitCode=0 Nov 23 15:01:09 crc kubenswrapper[5050]: I1123 15:01:09.242175 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-82ms7" event={"ID":"93d8639a-cdc9-4e9f-a17f-883debe12333","Type":"ContainerDied","Data":"93834389fba7affdd694cfab3a6d64c84af6adddd324ea40539aa89d0238f84f"} Nov 23 15:01:10 crc kubenswrapper[5050]: I1123 15:01:10.861533 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-82ms7" Nov 23 15:01:10 crc kubenswrapper[5050]: I1123 15:01:10.994193 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mb2sl\" (UniqueName: \"kubernetes.io/projected/93d8639a-cdc9-4e9f-a17f-883debe12333-kube-api-access-mb2sl\") pod \"93d8639a-cdc9-4e9f-a17f-883debe12333\" (UID: \"93d8639a-cdc9-4e9f-a17f-883debe12333\") " Nov 23 15:01:10 crc kubenswrapper[5050]: I1123 15:01:10.994247 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/93d8639a-cdc9-4e9f-a17f-883debe12333-scripts\") pod \"93d8639a-cdc9-4e9f-a17f-883debe12333\" (UID: \"93d8639a-cdc9-4e9f-a17f-883debe12333\") " Nov 23 15:01:10 crc kubenswrapper[5050]: I1123 15:01:10.994341 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/93d8639a-cdc9-4e9f-a17f-883debe12333-etc-machine-id\") pod \"93d8639a-cdc9-4e9f-a17f-883debe12333\" (UID: \"93d8639a-cdc9-4e9f-a17f-883debe12333\") " Nov 23 15:01:10 crc kubenswrapper[5050]: I1123 15:01:10.994366 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/93d8639a-cdc9-4e9f-a17f-883debe12333-config-data\") pod \"93d8639a-cdc9-4e9f-a17f-883debe12333\" (UID: \"93d8639a-cdc9-4e9f-a17f-883debe12333\") " Nov 23 15:01:10 crc kubenswrapper[5050]: I1123 15:01:10.994430 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/93d8639a-cdc9-4e9f-a17f-883debe12333-combined-ca-bundle\") pod \"93d8639a-cdc9-4e9f-a17f-883debe12333\" (UID: \"93d8639a-cdc9-4e9f-a17f-883debe12333\") " Nov 23 15:01:10 crc kubenswrapper[5050]: I1123 15:01:10.994483 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/93d8639a-cdc9-4e9f-a17f-883debe12333-db-sync-config-data\") pod \"93d8639a-cdc9-4e9f-a17f-883debe12333\" (UID: \"93d8639a-cdc9-4e9f-a17f-883debe12333\") " Nov 23 15:01:10 crc kubenswrapper[5050]: I1123 15:01:10.995676 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/host-path/93d8639a-cdc9-4e9f-a17f-883debe12333-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "93d8639a-cdc9-4e9f-a17f-883debe12333" (UID: "93d8639a-cdc9-4e9f-a17f-883debe12333"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 23 15:01:11 crc kubenswrapper[5050]: I1123 15:01:11.001659 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/93d8639a-cdc9-4e9f-a17f-883debe12333-kube-api-access-mb2sl" (OuterVolumeSpecName: "kube-api-access-mb2sl") pod "93d8639a-cdc9-4e9f-a17f-883debe12333" (UID: "93d8639a-cdc9-4e9f-a17f-883debe12333"). InnerVolumeSpecName "kube-api-access-mb2sl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:01:11 crc kubenswrapper[5050]: I1123 15:01:11.001578 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/93d8639a-cdc9-4e9f-a17f-883debe12333-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "93d8639a-cdc9-4e9f-a17f-883debe12333" (UID: "93d8639a-cdc9-4e9f-a17f-883debe12333"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:01:11 crc kubenswrapper[5050]: I1123 15:01:11.002187 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/93d8639a-cdc9-4e9f-a17f-883debe12333-scripts" (OuterVolumeSpecName: "scripts") pod "93d8639a-cdc9-4e9f-a17f-883debe12333" (UID: "93d8639a-cdc9-4e9f-a17f-883debe12333"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:01:11 crc kubenswrapper[5050]: I1123 15:01:11.044600 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/93d8639a-cdc9-4e9f-a17f-883debe12333-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "93d8639a-cdc9-4e9f-a17f-883debe12333" (UID: "93d8639a-cdc9-4e9f-a17f-883debe12333"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:01:11 crc kubenswrapper[5050]: I1123 15:01:11.058729 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/93d8639a-cdc9-4e9f-a17f-883debe12333-config-data" (OuterVolumeSpecName: "config-data") pod "93d8639a-cdc9-4e9f-a17f-883debe12333" (UID: "93d8639a-cdc9-4e9f-a17f-883debe12333"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:01:11 crc kubenswrapper[5050]: I1123 15:01:11.097927 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mb2sl\" (UniqueName: \"kubernetes.io/projected/93d8639a-cdc9-4e9f-a17f-883debe12333-kube-api-access-mb2sl\") on node \"crc\" DevicePath \"\"" Nov 23 15:01:11 crc kubenswrapper[5050]: I1123 15:01:11.098138 5050 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/93d8639a-cdc9-4e9f-a17f-883debe12333-scripts\") on node \"crc\" DevicePath \"\"" Nov 23 15:01:11 crc kubenswrapper[5050]: I1123 15:01:11.098159 5050 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/93d8639a-cdc9-4e9f-a17f-883debe12333-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 23 15:01:11 crc kubenswrapper[5050]: I1123 15:01:11.098383 5050 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/93d8639a-cdc9-4e9f-a17f-883debe12333-config-data\") on node \"crc\" DevicePath \"\"" Nov 23 15:01:11 crc kubenswrapper[5050]: I1123 15:01:11.098399 5050 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/93d8639a-cdc9-4e9f-a17f-883debe12333-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 15:01:11 crc kubenswrapper[5050]: I1123 15:01:11.098408 5050 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/93d8639a-cdc9-4e9f-a17f-883debe12333-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 23 15:01:11 crc kubenswrapper[5050]: I1123 15:01:11.514789 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-82ms7" event={"ID":"93d8639a-cdc9-4e9f-a17f-883debe12333","Type":"ContainerDied","Data":"fe23d5b7c8411b4828f1b618c7eaec1afd44a5d92788f3f12fd2d7721734c7f7"} Nov 23 15:01:11 crc kubenswrapper[5050]: I1123 15:01:11.514854 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fe23d5b7c8411b4828f1b618c7eaec1afd44a5d92788f3f12fd2d7721734c7f7" Nov 23 15:01:11 crc kubenswrapper[5050]: I1123 15:01:11.514954 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-82ms7" Nov 23 15:01:11 crc kubenswrapper[5050]: I1123 15:01:11.708092 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-848cf88cfc-jd96g" Nov 23 15:01:11 crc kubenswrapper[5050]: I1123 15:01:11.781849 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-56df8fb6b7-rl78g"] Nov 23 15:01:11 crc kubenswrapper[5050]: I1123 15:01:11.782123 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-56df8fb6b7-rl78g" podUID="28a444a8-06a7-48ed-8345-f9347485ebfa" containerName="dnsmasq-dns" containerID="cri-o://47d6afd8f6513ed2566179d5d2eda050443088c0ead87dd9a060c6ce15b54b2b" gracePeriod=10 Nov 23 15:01:11 crc kubenswrapper[5050]: I1123 15:01:11.850639 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6578955fd5-fsg9f"] Nov 23 15:01:11 crc kubenswrapper[5050]: E1123 15:01:11.851137 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="93d8639a-cdc9-4e9f-a17f-883debe12333" containerName="cinder-db-sync" Nov 23 15:01:11 crc kubenswrapper[5050]: I1123 15:01:11.851154 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="93d8639a-cdc9-4e9f-a17f-883debe12333" containerName="cinder-db-sync" Nov 23 15:01:11 crc kubenswrapper[5050]: I1123 15:01:11.851337 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="93d8639a-cdc9-4e9f-a17f-883debe12333" containerName="cinder-db-sync" Nov 23 15:01:11 crc kubenswrapper[5050]: I1123 15:01:11.852392 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6578955fd5-fsg9f" Nov 23 15:01:11 crc kubenswrapper[5050]: I1123 15:01:11.863685 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6578955fd5-fsg9f"] Nov 23 15:01:11 crc kubenswrapper[5050]: I1123 15:01:11.885657 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Nov 23 15:01:11 crc kubenswrapper[5050]: I1123 15:01:11.887827 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 23 15:01:11 crc kubenswrapper[5050]: I1123 15:01:11.892335 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Nov 23 15:01:11 crc kubenswrapper[5050]: I1123 15:01:11.892981 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Nov 23 15:01:11 crc kubenswrapper[5050]: I1123 15:01:11.893245 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-7sr8w" Nov 23 15:01:11 crc kubenswrapper[5050]: I1123 15:01:11.898141 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Nov 23 15:01:11 crc kubenswrapper[5050]: I1123 15:01:11.902485 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dzxhn\" (UniqueName: \"kubernetes.io/projected/e7c35996-d4df-4886-afb0-bb77ddcce582-kube-api-access-dzxhn\") pod \"dnsmasq-dns-6578955fd5-fsg9f\" (UID: \"e7c35996-d4df-4886-afb0-bb77ddcce582\") " pod="openstack/dnsmasq-dns-6578955fd5-fsg9f" Nov 23 15:01:11 crc kubenswrapper[5050]: I1123 15:01:11.902561 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e7c35996-d4df-4886-afb0-bb77ddcce582-ovsdbserver-nb\") pod \"dnsmasq-dns-6578955fd5-fsg9f\" (UID: \"e7c35996-d4df-4886-afb0-bb77ddcce582\") " pod="openstack/dnsmasq-dns-6578955fd5-fsg9f" Nov 23 15:01:11 crc kubenswrapper[5050]: I1123 15:01:11.903456 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e7c35996-d4df-4886-afb0-bb77ddcce582-dns-svc\") pod \"dnsmasq-dns-6578955fd5-fsg9f\" (UID: \"e7c35996-d4df-4886-afb0-bb77ddcce582\") " pod="openstack/dnsmasq-dns-6578955fd5-fsg9f" Nov 23 15:01:11 crc kubenswrapper[5050]: I1123 15:01:11.903498 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e7c35996-d4df-4886-afb0-bb77ddcce582-dns-swift-storage-0\") pod \"dnsmasq-dns-6578955fd5-fsg9f\" (UID: \"e7c35996-d4df-4886-afb0-bb77ddcce582\") " pod="openstack/dnsmasq-dns-6578955fd5-fsg9f" Nov 23 15:01:11 crc kubenswrapper[5050]: I1123 15:01:11.906688 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e7c35996-d4df-4886-afb0-bb77ddcce582-ovsdbserver-sb\") pod \"dnsmasq-dns-6578955fd5-fsg9f\" (UID: \"e7c35996-d4df-4886-afb0-bb77ddcce582\") " pod="openstack/dnsmasq-dns-6578955fd5-fsg9f" Nov 23 15:01:11 crc kubenswrapper[5050]: I1123 15:01:11.906737 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7c35996-d4df-4886-afb0-bb77ddcce582-config\") pod \"dnsmasq-dns-6578955fd5-fsg9f\" (UID: \"e7c35996-d4df-4886-afb0-bb77ddcce582\") " pod="openstack/dnsmasq-dns-6578955fd5-fsg9f" Nov 23 15:01:11 crc kubenswrapper[5050]: I1123 15:01:11.926069 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 23 15:01:12 crc kubenswrapper[5050]: I1123 15:01:12.020214 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: 
\"kubernetes.io/configmap/e7c35996-d4df-4886-afb0-bb77ddcce582-dns-svc\") pod \"dnsmasq-dns-6578955fd5-fsg9f\" (UID: \"e7c35996-d4df-4886-afb0-bb77ddcce582\") " pod="openstack/dnsmasq-dns-6578955fd5-fsg9f" Nov 23 15:01:12 crc kubenswrapper[5050]: I1123 15:01:12.020273 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e7c35996-d4df-4886-afb0-bb77ddcce582-dns-swift-storage-0\") pod \"dnsmasq-dns-6578955fd5-fsg9f\" (UID: \"e7c35996-d4df-4886-afb0-bb77ddcce582\") " pod="openstack/dnsmasq-dns-6578955fd5-fsg9f" Nov 23 15:01:12 crc kubenswrapper[5050]: I1123 15:01:12.020345 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e7c35996-d4df-4886-afb0-bb77ddcce582-ovsdbserver-sb\") pod \"dnsmasq-dns-6578955fd5-fsg9f\" (UID: \"e7c35996-d4df-4886-afb0-bb77ddcce582\") " pod="openstack/dnsmasq-dns-6578955fd5-fsg9f" Nov 23 15:01:12 crc kubenswrapper[5050]: I1123 15:01:12.020402 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7c35996-d4df-4886-afb0-bb77ddcce582-config\") pod \"dnsmasq-dns-6578955fd5-fsg9f\" (UID: \"e7c35996-d4df-4886-afb0-bb77ddcce582\") " pod="openstack/dnsmasq-dns-6578955fd5-fsg9f" Nov 23 15:01:12 crc kubenswrapper[5050]: I1123 15:01:12.020455 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/17cf7e41-00d7-40a7-b1ec-514f69143dc1-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"17cf7e41-00d7-40a7-b1ec-514f69143dc1\") " pod="openstack/cinder-scheduler-0" Nov 23 15:01:12 crc kubenswrapper[5050]: I1123 15:01:12.021551 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e7c35996-d4df-4886-afb0-bb77ddcce582-dns-swift-storage-0\") pod \"dnsmasq-dns-6578955fd5-fsg9f\" (UID: \"e7c35996-d4df-4886-afb0-bb77ddcce582\") " pod="openstack/dnsmasq-dns-6578955fd5-fsg9f" Nov 23 15:01:12 crc kubenswrapper[5050]: I1123 15:01:12.023407 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e7c35996-d4df-4886-afb0-bb77ddcce582-dns-svc\") pod \"dnsmasq-dns-6578955fd5-fsg9f\" (UID: \"e7c35996-d4df-4886-afb0-bb77ddcce582\") " pod="openstack/dnsmasq-dns-6578955fd5-fsg9f" Nov 23 15:01:12 crc kubenswrapper[5050]: I1123 15:01:12.023775 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e7c35996-d4df-4886-afb0-bb77ddcce582-ovsdbserver-sb\") pod \"dnsmasq-dns-6578955fd5-fsg9f\" (UID: \"e7c35996-d4df-4886-afb0-bb77ddcce582\") " pod="openstack/dnsmasq-dns-6578955fd5-fsg9f" Nov 23 15:01:12 crc kubenswrapper[5050]: I1123 15:01:12.024118 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7c35996-d4df-4886-afb0-bb77ddcce582-config\") pod \"dnsmasq-dns-6578955fd5-fsg9f\" (UID: \"e7c35996-d4df-4886-afb0-bb77ddcce582\") " pod="openstack/dnsmasq-dns-6578955fd5-fsg9f" Nov 23 15:01:12 crc kubenswrapper[5050]: I1123 15:01:12.026500 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/17cf7e41-00d7-40a7-b1ec-514f69143dc1-combined-ca-bundle\") pod 
\"cinder-scheduler-0\" (UID: \"17cf7e41-00d7-40a7-b1ec-514f69143dc1\") " pod="openstack/cinder-scheduler-0" Nov 23 15:01:12 crc kubenswrapper[5050]: I1123 15:01:12.026651 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/17cf7e41-00d7-40a7-b1ec-514f69143dc1-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"17cf7e41-00d7-40a7-b1ec-514f69143dc1\") " pod="openstack/cinder-scheduler-0" Nov 23 15:01:12 crc kubenswrapper[5050]: I1123 15:01:12.026923 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/17cf7e41-00d7-40a7-b1ec-514f69143dc1-config-data\") pod \"cinder-scheduler-0\" (UID: \"17cf7e41-00d7-40a7-b1ec-514f69143dc1\") " pod="openstack/cinder-scheduler-0" Nov 23 15:01:12 crc kubenswrapper[5050]: I1123 15:01:12.027053 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dzxhn\" (UniqueName: \"kubernetes.io/projected/e7c35996-d4df-4886-afb0-bb77ddcce582-kube-api-access-dzxhn\") pod \"dnsmasq-dns-6578955fd5-fsg9f\" (UID: \"e7c35996-d4df-4886-afb0-bb77ddcce582\") " pod="openstack/dnsmasq-dns-6578955fd5-fsg9f" Nov 23 15:01:12 crc kubenswrapper[5050]: I1123 15:01:12.027130 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e7c35996-d4df-4886-afb0-bb77ddcce582-ovsdbserver-nb\") pod \"dnsmasq-dns-6578955fd5-fsg9f\" (UID: \"e7c35996-d4df-4886-afb0-bb77ddcce582\") " pod="openstack/dnsmasq-dns-6578955fd5-fsg9f" Nov 23 15:01:12 crc kubenswrapper[5050]: I1123 15:01:12.028485 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pnz7v\" (UniqueName: \"kubernetes.io/projected/17cf7e41-00d7-40a7-b1ec-514f69143dc1-kube-api-access-pnz7v\") pod \"cinder-scheduler-0\" (UID: \"17cf7e41-00d7-40a7-b1ec-514f69143dc1\") " pod="openstack/cinder-scheduler-0" Nov 23 15:01:12 crc kubenswrapper[5050]: I1123 15:01:12.028537 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/17cf7e41-00d7-40a7-b1ec-514f69143dc1-scripts\") pod \"cinder-scheduler-0\" (UID: \"17cf7e41-00d7-40a7-b1ec-514f69143dc1\") " pod="openstack/cinder-scheduler-0" Nov 23 15:01:12 crc kubenswrapper[5050]: I1123 15:01:12.029582 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e7c35996-d4df-4886-afb0-bb77ddcce582-ovsdbserver-nb\") pod \"dnsmasq-dns-6578955fd5-fsg9f\" (UID: \"e7c35996-d4df-4886-afb0-bb77ddcce582\") " pod="openstack/dnsmasq-dns-6578955fd5-fsg9f" Nov 23 15:01:12 crc kubenswrapper[5050]: I1123 15:01:12.056698 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dzxhn\" (UniqueName: \"kubernetes.io/projected/e7c35996-d4df-4886-afb0-bb77ddcce582-kube-api-access-dzxhn\") pod \"dnsmasq-dns-6578955fd5-fsg9f\" (UID: \"e7c35996-d4df-4886-afb0-bb77ddcce582\") " pod="openstack/dnsmasq-dns-6578955fd5-fsg9f" Nov 23 15:01:12 crc kubenswrapper[5050]: I1123 15:01:12.120559 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Nov 23 15:01:12 crc kubenswrapper[5050]: I1123 15:01:12.124021 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Nov 23 15:01:12 crc kubenswrapper[5050]: I1123 15:01:12.131068 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Nov 23 15:01:12 crc kubenswrapper[5050]: I1123 15:01:12.132431 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/17cf7e41-00d7-40a7-b1ec-514f69143dc1-config-data\") pod \"cinder-scheduler-0\" (UID: \"17cf7e41-00d7-40a7-b1ec-514f69143dc1\") " pod="openstack/cinder-scheduler-0" Nov 23 15:01:12 crc kubenswrapper[5050]: I1123 15:01:12.132645 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pnz7v\" (UniqueName: \"kubernetes.io/projected/17cf7e41-00d7-40a7-b1ec-514f69143dc1-kube-api-access-pnz7v\") pod \"cinder-scheduler-0\" (UID: \"17cf7e41-00d7-40a7-b1ec-514f69143dc1\") " pod="openstack/cinder-scheduler-0" Nov 23 15:01:12 crc kubenswrapper[5050]: I1123 15:01:12.132667 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/17cf7e41-00d7-40a7-b1ec-514f69143dc1-scripts\") pod \"cinder-scheduler-0\" (UID: \"17cf7e41-00d7-40a7-b1ec-514f69143dc1\") " pod="openstack/cinder-scheduler-0" Nov 23 15:01:12 crc kubenswrapper[5050]: I1123 15:01:12.132719 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/17cf7e41-00d7-40a7-b1ec-514f69143dc1-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"17cf7e41-00d7-40a7-b1ec-514f69143dc1\") " pod="openstack/cinder-scheduler-0" Nov 23 15:01:12 crc kubenswrapper[5050]: I1123 15:01:12.132755 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/17cf7e41-00d7-40a7-b1ec-514f69143dc1-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"17cf7e41-00d7-40a7-b1ec-514f69143dc1\") " pod="openstack/cinder-scheduler-0" Nov 23 15:01:12 crc kubenswrapper[5050]: I1123 15:01:12.132772 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/17cf7e41-00d7-40a7-b1ec-514f69143dc1-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"17cf7e41-00d7-40a7-b1ec-514f69143dc1\") " pod="openstack/cinder-scheduler-0" Nov 23 15:01:12 crc kubenswrapper[5050]: I1123 15:01:12.134699 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/17cf7e41-00d7-40a7-b1ec-514f69143dc1-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"17cf7e41-00d7-40a7-b1ec-514f69143dc1\") " pod="openstack/cinder-scheduler-0" Nov 23 15:01:12 crc kubenswrapper[5050]: I1123 15:01:12.143569 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/17cf7e41-00d7-40a7-b1ec-514f69143dc1-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"17cf7e41-00d7-40a7-b1ec-514f69143dc1\") " pod="openstack/cinder-scheduler-0" Nov 23 15:01:12 crc kubenswrapper[5050]: I1123 15:01:12.143710 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/17cf7e41-00d7-40a7-b1ec-514f69143dc1-config-data\") pod \"cinder-scheduler-0\" (UID: \"17cf7e41-00d7-40a7-b1ec-514f69143dc1\") " pod="openstack/cinder-scheduler-0" Nov 23 15:01:12 crc kubenswrapper[5050]: 
I1123 15:01:12.144029 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/17cf7e41-00d7-40a7-b1ec-514f69143dc1-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"17cf7e41-00d7-40a7-b1ec-514f69143dc1\") " pod="openstack/cinder-scheduler-0" Nov 23 15:01:12 crc kubenswrapper[5050]: I1123 15:01:12.151779 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/17cf7e41-00d7-40a7-b1ec-514f69143dc1-scripts\") pod \"cinder-scheduler-0\" (UID: \"17cf7e41-00d7-40a7-b1ec-514f69143dc1\") " pod="openstack/cinder-scheduler-0" Nov 23 15:01:12 crc kubenswrapper[5050]: I1123 15:01:12.161216 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 23 15:01:12 crc kubenswrapper[5050]: I1123 15:01:12.179201 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pnz7v\" (UniqueName: \"kubernetes.io/projected/17cf7e41-00d7-40a7-b1ec-514f69143dc1-kube-api-access-pnz7v\") pod \"cinder-scheduler-0\" (UID: \"17cf7e41-00d7-40a7-b1ec-514f69143dc1\") " pod="openstack/cinder-scheduler-0" Nov 23 15:01:12 crc kubenswrapper[5050]: I1123 15:01:12.200741 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6578955fd5-fsg9f" Nov 23 15:01:12 crc kubenswrapper[5050]: I1123 15:01:12.217028 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 23 15:01:12 crc kubenswrapper[5050]: I1123 15:01:12.238690 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/005d3430-65f9-48c5-aeed-afa7fea464cd-scripts\") pod \"cinder-api-0\" (UID: \"005d3430-65f9-48c5-aeed-afa7fea464cd\") " pod="openstack/cinder-api-0" Nov 23 15:01:12 crc kubenswrapper[5050]: I1123 15:01:12.239112 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/005d3430-65f9-48c5-aeed-afa7fea464cd-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"005d3430-65f9-48c5-aeed-afa7fea464cd\") " pod="openstack/cinder-api-0" Nov 23 15:01:12 crc kubenswrapper[5050]: I1123 15:01:12.239208 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/005d3430-65f9-48c5-aeed-afa7fea464cd-etc-machine-id\") pod \"cinder-api-0\" (UID: \"005d3430-65f9-48c5-aeed-afa7fea464cd\") " pod="openstack/cinder-api-0" Nov 23 15:01:12 crc kubenswrapper[5050]: I1123 15:01:12.239278 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/005d3430-65f9-48c5-aeed-afa7fea464cd-logs\") pod \"cinder-api-0\" (UID: \"005d3430-65f9-48c5-aeed-afa7fea464cd\") " pod="openstack/cinder-api-0" Nov 23 15:01:12 crc kubenswrapper[5050]: I1123 15:01:12.239316 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/005d3430-65f9-48c5-aeed-afa7fea464cd-config-data\") pod \"cinder-api-0\" (UID: \"005d3430-65f9-48c5-aeed-afa7fea464cd\") " pod="openstack/cinder-api-0" Nov 23 15:01:12 crc kubenswrapper[5050]: I1123 15:01:12.239397 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"config-data-custom\" (UniqueName: \"kubernetes.io/secret/005d3430-65f9-48c5-aeed-afa7fea464cd-config-data-custom\") pod \"cinder-api-0\" (UID: \"005d3430-65f9-48c5-aeed-afa7fea464cd\") " pod="openstack/cinder-api-0" Nov 23 15:01:12 crc kubenswrapper[5050]: I1123 15:01:12.244650 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qmppb\" (UniqueName: \"kubernetes.io/projected/005d3430-65f9-48c5-aeed-afa7fea464cd-kube-api-access-qmppb\") pod \"cinder-api-0\" (UID: \"005d3430-65f9-48c5-aeed-afa7fea464cd\") " pod="openstack/cinder-api-0" Nov 23 15:01:12 crc kubenswrapper[5050]: I1123 15:01:12.347610 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/005d3430-65f9-48c5-aeed-afa7fea464cd-scripts\") pod \"cinder-api-0\" (UID: \"005d3430-65f9-48c5-aeed-afa7fea464cd\") " pod="openstack/cinder-api-0" Nov 23 15:01:12 crc kubenswrapper[5050]: I1123 15:01:12.347758 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/005d3430-65f9-48c5-aeed-afa7fea464cd-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"005d3430-65f9-48c5-aeed-afa7fea464cd\") " pod="openstack/cinder-api-0" Nov 23 15:01:12 crc kubenswrapper[5050]: I1123 15:01:12.347797 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/005d3430-65f9-48c5-aeed-afa7fea464cd-etc-machine-id\") pod \"cinder-api-0\" (UID: \"005d3430-65f9-48c5-aeed-afa7fea464cd\") " pod="openstack/cinder-api-0" Nov 23 15:01:12 crc kubenswrapper[5050]: I1123 15:01:12.347834 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/005d3430-65f9-48c5-aeed-afa7fea464cd-logs\") pod \"cinder-api-0\" (UID: \"005d3430-65f9-48c5-aeed-afa7fea464cd\") " pod="openstack/cinder-api-0" Nov 23 15:01:12 crc kubenswrapper[5050]: I1123 15:01:12.347880 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/005d3430-65f9-48c5-aeed-afa7fea464cd-config-data\") pod \"cinder-api-0\" (UID: \"005d3430-65f9-48c5-aeed-afa7fea464cd\") " pod="openstack/cinder-api-0" Nov 23 15:01:12 crc kubenswrapper[5050]: I1123 15:01:12.347920 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/005d3430-65f9-48c5-aeed-afa7fea464cd-config-data-custom\") pod \"cinder-api-0\" (UID: \"005d3430-65f9-48c5-aeed-afa7fea464cd\") " pod="openstack/cinder-api-0" Nov 23 15:01:12 crc kubenswrapper[5050]: I1123 15:01:12.347961 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qmppb\" (UniqueName: \"kubernetes.io/projected/005d3430-65f9-48c5-aeed-afa7fea464cd-kube-api-access-qmppb\") pod \"cinder-api-0\" (UID: \"005d3430-65f9-48c5-aeed-afa7fea464cd\") " pod="openstack/cinder-api-0" Nov 23 15:01:12 crc kubenswrapper[5050]: I1123 15:01:12.348271 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/005d3430-65f9-48c5-aeed-afa7fea464cd-etc-machine-id\") pod \"cinder-api-0\" (UID: \"005d3430-65f9-48c5-aeed-afa7fea464cd\") " pod="openstack/cinder-api-0" Nov 23 15:01:12 crc kubenswrapper[5050]: I1123 15:01:12.348657 5050 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/005d3430-65f9-48c5-aeed-afa7fea464cd-logs\") pod \"cinder-api-0\" (UID: \"005d3430-65f9-48c5-aeed-afa7fea464cd\") " pod="openstack/cinder-api-0" Nov 23 15:01:12 crc kubenswrapper[5050]: I1123 15:01:12.354811 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/005d3430-65f9-48c5-aeed-afa7fea464cd-scripts\") pod \"cinder-api-0\" (UID: \"005d3430-65f9-48c5-aeed-afa7fea464cd\") " pod="openstack/cinder-api-0" Nov 23 15:01:12 crc kubenswrapper[5050]: I1123 15:01:12.355842 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/005d3430-65f9-48c5-aeed-afa7fea464cd-config-data-custom\") pod \"cinder-api-0\" (UID: \"005d3430-65f9-48c5-aeed-afa7fea464cd\") " pod="openstack/cinder-api-0" Nov 23 15:01:12 crc kubenswrapper[5050]: I1123 15:01:12.356146 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/005d3430-65f9-48c5-aeed-afa7fea464cd-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"005d3430-65f9-48c5-aeed-afa7fea464cd\") " pod="openstack/cinder-api-0" Nov 23 15:01:12 crc kubenswrapper[5050]: I1123 15:01:12.360586 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/005d3430-65f9-48c5-aeed-afa7fea464cd-config-data\") pod \"cinder-api-0\" (UID: \"005d3430-65f9-48c5-aeed-afa7fea464cd\") " pod="openstack/cinder-api-0" Nov 23 15:01:12 crc kubenswrapper[5050]: I1123 15:01:12.365546 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qmppb\" (UniqueName: \"kubernetes.io/projected/005d3430-65f9-48c5-aeed-afa7fea464cd-kube-api-access-qmppb\") pod \"cinder-api-0\" (UID: \"005d3430-65f9-48c5-aeed-afa7fea464cd\") " pod="openstack/cinder-api-0" Nov 23 15:01:12 crc kubenswrapper[5050]: I1123 15:01:12.537116 5050 generic.go:334] "Generic (PLEG): container finished" podID="28a444a8-06a7-48ed-8345-f9347485ebfa" containerID="47d6afd8f6513ed2566179d5d2eda050443088c0ead87dd9a060c6ce15b54b2b" exitCode=0 Nov 23 15:01:12 crc kubenswrapper[5050]: I1123 15:01:12.537172 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-56df8fb6b7-rl78g" event={"ID":"28a444a8-06a7-48ed-8345-f9347485ebfa","Type":"ContainerDied","Data":"47d6afd8f6513ed2566179d5d2eda050443088c0ead87dd9a060c6ce15b54b2b"} Nov 23 15:01:12 crc kubenswrapper[5050]: I1123 15:01:12.554696 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 23 15:01:13 crc kubenswrapper[5050]: I1123 15:01:13.696050 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-d7556d5c6-lqb4v" Nov 23 15:01:13 crc kubenswrapper[5050]: I1123 15:01:13.719062 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-56df8fb6b7-rl78g"
Nov 23 15:01:13 crc kubenswrapper[5050]: I1123 15:01:13.791231 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/28a444a8-06a7-48ed-8345-f9347485ebfa-dns-svc\") pod \"28a444a8-06a7-48ed-8345-f9347485ebfa\" (UID: \"28a444a8-06a7-48ed-8345-f9347485ebfa\") "
Nov 23 15:01:13 crc kubenswrapper[5050]: I1123 15:01:13.791303 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-thvfg\" (UniqueName: \"kubernetes.io/projected/28a444a8-06a7-48ed-8345-f9347485ebfa-kube-api-access-thvfg\") pod \"28a444a8-06a7-48ed-8345-f9347485ebfa\" (UID: \"28a444a8-06a7-48ed-8345-f9347485ebfa\") "
Nov 23 15:01:13 crc kubenswrapper[5050]: I1123 15:01:13.791429 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/28a444a8-06a7-48ed-8345-f9347485ebfa-dns-swift-storage-0\") pod \"28a444a8-06a7-48ed-8345-f9347485ebfa\" (UID: \"28a444a8-06a7-48ed-8345-f9347485ebfa\") "
Nov 23 15:01:13 crc kubenswrapper[5050]: I1123 15:01:13.791479 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/28a444a8-06a7-48ed-8345-f9347485ebfa-ovsdbserver-sb\") pod \"28a444a8-06a7-48ed-8345-f9347485ebfa\" (UID: \"28a444a8-06a7-48ed-8345-f9347485ebfa\") "
Nov 23 15:01:13 crc kubenswrapper[5050]: I1123 15:01:13.791577 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/28a444a8-06a7-48ed-8345-f9347485ebfa-config\") pod \"28a444a8-06a7-48ed-8345-f9347485ebfa\" (UID: \"28a444a8-06a7-48ed-8345-f9347485ebfa\") "
Nov 23 15:01:13 crc kubenswrapper[5050]: I1123 15:01:13.791658 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/28a444a8-06a7-48ed-8345-f9347485ebfa-ovsdbserver-nb\") pod \"28a444a8-06a7-48ed-8345-f9347485ebfa\" (UID: \"28a444a8-06a7-48ed-8345-f9347485ebfa\") "
Nov 23 15:01:13 crc kubenswrapper[5050]: I1123 15:01:13.816986 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/28a444a8-06a7-48ed-8345-f9347485ebfa-kube-api-access-thvfg" (OuterVolumeSpecName: "kube-api-access-thvfg") pod "28a444a8-06a7-48ed-8345-f9347485ebfa" (UID: "28a444a8-06a7-48ed-8345-f9347485ebfa"). InnerVolumeSpecName "kube-api-access-thvfg". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 23 15:01:13 crc kubenswrapper[5050]: I1123 15:01:13.893542 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-thvfg\" (UniqueName: \"kubernetes.io/projected/28a444a8-06a7-48ed-8345-f9347485ebfa-kube-api-access-thvfg\") on node \"crc\" DevicePath \"\""
Nov 23 15:01:13 crc kubenswrapper[5050]: I1123 15:01:13.915258 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/28a444a8-06a7-48ed-8345-f9347485ebfa-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "28a444a8-06a7-48ed-8345-f9347485ebfa" (UID: "28a444a8-06a7-48ed-8345-f9347485ebfa"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 23 15:01:13 crc kubenswrapper[5050]: I1123 15:01:13.928939 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/28a444a8-06a7-48ed-8345-f9347485ebfa-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "28a444a8-06a7-48ed-8345-f9347485ebfa" (UID: "28a444a8-06a7-48ed-8345-f9347485ebfa"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 23 15:01:13 crc kubenswrapper[5050]: I1123 15:01:13.929877 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/28a444a8-06a7-48ed-8345-f9347485ebfa-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "28a444a8-06a7-48ed-8345-f9347485ebfa" (UID: "28a444a8-06a7-48ed-8345-f9347485ebfa"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 23 15:01:13 crc kubenswrapper[5050]: I1123 15:01:13.967234 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/28a444a8-06a7-48ed-8345-f9347485ebfa-config" (OuterVolumeSpecName: "config") pod "28a444a8-06a7-48ed-8345-f9347485ebfa" (UID: "28a444a8-06a7-48ed-8345-f9347485ebfa"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 23 15:01:13 crc kubenswrapper[5050]: I1123 15:01:13.986040 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/28a444a8-06a7-48ed-8345-f9347485ebfa-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "28a444a8-06a7-48ed-8345-f9347485ebfa" (UID: "28a444a8-06a7-48ed-8345-f9347485ebfa"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 23 15:01:13 crc kubenswrapper[5050]: I1123 15:01:13.993944 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-d7556d5c6-lqb4v"
Nov 23 15:01:13 crc kubenswrapper[5050]: I1123 15:01:13.995749 5050 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/28a444a8-06a7-48ed-8345-f9347485ebfa-config\") on node \"crc\" DevicePath \"\""
Nov 23 15:01:13 crc kubenswrapper[5050]: I1123 15:01:13.995774 5050 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/28a444a8-06a7-48ed-8345-f9347485ebfa-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Nov 23 15:01:13 crc kubenswrapper[5050]: I1123 15:01:13.995785 5050 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/28a444a8-06a7-48ed-8345-f9347485ebfa-dns-svc\") on node \"crc\" DevicePath \"\""
Nov 23 15:01:13 crc kubenswrapper[5050]: I1123 15:01:13.995794 5050 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/28a444a8-06a7-48ed-8345-f9347485ebfa-dns-swift-storage-0\") on node \"crc\" DevicePath \"\""
Nov 23 15:01:13 crc kubenswrapper[5050]: I1123 15:01:13.995805 5050 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/28a444a8-06a7-48ed-8345-f9347485ebfa-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Nov 23 15:01:14 crc kubenswrapper[5050]: I1123 15:01:14.344557 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"]
Nov 23 15:01:14 crc kubenswrapper[5050]: W1123 15:01:14.392856 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod17cf7e41_00d7_40a7_b1ec_514f69143dc1.slice/crio-0c26bc795b908166c29fe32dd881854e5832b9993f06f826a9ba3ab8388a6d3f WatchSource:0}: Error finding container 0c26bc795b908166c29fe32dd881854e5832b9993f06f826a9ba3ab8388a6d3f: Status 404 returned error can't find the container with id 0c26bc795b908166c29fe32dd881854e5832b9993f06f826a9ba3ab8388a6d3f
Nov 23 15:01:14 crc kubenswrapper[5050]: I1123 15:01:14.438914 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"]
Nov 23 15:01:14 crc kubenswrapper[5050]: I1123 15:01:14.581550 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6578955fd5-fsg9f"]
Nov 23 15:01:14 crc kubenswrapper[5050]: I1123 15:01:14.626168 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"]
Nov 23 15:01:14 crc kubenswrapper[5050]: I1123 15:01:14.640106 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-864fb4756c-xzxnp" event={"ID":"042c506a-b65f-4cbd-9ca7-3df0ec55ffa0","Type":"ContainerStarted","Data":"085d654f5685c959e6314e3ffe048bec6afd8a89e45c228a241cbd713de9226c"}
Nov 23 15:01:14 crc kubenswrapper[5050]: I1123 15:01:14.677851 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-worker-864fb4756c-xzxnp" podStartSLOduration=10.439831295 podStartE2EDuration="13.677835128s" podCreationTimestamp="2025-11-23 15:01:01 +0000 UTC" firstStartedPulling="2025-11-23 15:01:02.538668744 +0000 UTC m=+1157.705665229" lastFinishedPulling="2025-11-23 15:01:05.776672587 +0000 UTC m=+1160.943669062" observedRunningTime="2025-11-23 15:01:14.677807397 +0000 UTC m=+1169.844803882" watchObservedRunningTime="2025-11-23 15:01:14.677835128 +0000 UTC m=+1169.844831613"
Nov 23 15:01:14 crc kubenswrapper[5050]: I1123 15:01:14.688251 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-77dc855c68-z488p" event={"ID":"f85034c8-6b79-42e1-849d-646a2ead1a93","Type":"ContainerStarted","Data":"ba962ea3ca9a2e41f992ef21c4cd5215480c219a40218ff8e9e9408b4eeec97b"}
Nov 23 15:01:14 crc kubenswrapper[5050]: I1123 15:01:14.689921 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-77dc855c68-z488p"
Nov 23 15:01:14 crc kubenswrapper[5050]: I1123 15:01:14.690371 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-77dc855c68-z488p"
Nov 23 15:01:14 crc kubenswrapper[5050]: I1123 15:01:14.738515 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-56df8fb6b7-rl78g" event={"ID":"28a444a8-06a7-48ed-8345-f9347485ebfa","Type":"ContainerDied","Data":"c0b47f1f58e85cee2fd4d1974dda4e74f5ad23dabd704f8af41eb7b29eb0d40b"}
Nov 23 15:01:14 crc kubenswrapper[5050]: I1123 15:01:14.738596 5050 scope.go:117] "RemoveContainer" containerID="47d6afd8f6513ed2566179d5d2eda050443088c0ead87dd9a060c6ce15b54b2b"
Nov 23 15:01:14 crc kubenswrapper[5050]: I1123 15:01:14.738798 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-56df8fb6b7-rl78g"
Nov 23 15:01:14 crc kubenswrapper[5050]: I1123 15:01:14.799384 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-77dc855c68-z488p" podStartSLOduration=10.799356251 podStartE2EDuration="10.799356251s" podCreationTimestamp="2025-11-23 15:01:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 15:01:14.737848519 +0000 UTC m=+1169.904845004" watchObservedRunningTime="2025-11-23 15:01:14.799356251 +0000 UTC m=+1169.966352746"
Nov 23 15:01:14 crc kubenswrapper[5050]: I1123 15:01:14.839857 5050 scope.go:117] "RemoveContainer" containerID="2aade2d185488851e452454b8878ea31301b4c4305f66363f75542431a99143e"
Nov 23 15:01:14 crc kubenswrapper[5050]: I1123 15:01:14.847303 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"940dffda-38d6-469c-8453-5acb4092ed82","Type":"ContainerStarted","Data":"f783c7444ce664052bb1dbb4b04100c95a5cafd41194f0e0bd9cc45550efb6de"}
Nov 23 15:01:14 crc kubenswrapper[5050]: I1123 15:01:14.847587 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="940dffda-38d6-469c-8453-5acb4092ed82" containerName="ceilometer-central-agent" containerID="cri-o://60e10035b1ca3fb05a796d8061c3ae4fc86d25ebf065c1682c1ac895dd99e388" gracePeriod=30
Nov 23 15:01:14 crc kubenswrapper[5050]: I1123 15:01:14.847877 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0"
Nov 23 15:01:14 crc kubenswrapper[5050]: I1123 15:01:14.847950 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="940dffda-38d6-469c-8453-5acb4092ed82" containerName="proxy-httpd" containerID="cri-o://f783c7444ce664052bb1dbb4b04100c95a5cafd41194f0e0bd9cc45550efb6de" gracePeriod=30
Nov 23 15:01:14 crc kubenswrapper[5050]: I1123 15:01:14.847991 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="940dffda-38d6-469c-8453-5acb4092ed82" containerName="ceilometer-notification-agent" containerID="cri-o://62fe46e75027e1094cf742edff33ace7b51cd7f03786f96706f9e8112a7a8a36" gracePeriod=30
Nov 23 15:01:14 crc kubenswrapper[5050]: I1123 15:01:14.848040 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="940dffda-38d6-469c-8453-5acb4092ed82" containerName="sg-core" containerID="cri-o://2924621750b1bc5af5085e463c9c282ad0f1c14251a8c0075f4ceadef71b73a4" gracePeriod=30
Nov 23 15:01:14 crc kubenswrapper[5050]: I1123 15:01:14.869150 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"17cf7e41-00d7-40a7-b1ec-514f69143dc1","Type":"ContainerStarted","Data":"0c26bc795b908166c29fe32dd881854e5832b9993f06f826a9ba3ab8388a6d3f"}
Nov 23 15:01:14 crc kubenswrapper[5050]: I1123 15:01:14.907522 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-56df8fb6b7-rl78g"]
Nov 23 15:01:14 crc kubenswrapper[5050]: I1123 15:01:14.949025 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-56df8fb6b7-rl78g"]
Nov 23 15:01:14 crc kubenswrapper[5050]: I1123 15:01:14.954995 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.864490612 podStartE2EDuration="50.954974883s" podCreationTimestamp="2025-11-23 15:00:24 +0000 UTC" firstStartedPulling="2025-11-23 15:00:25.704364326 +0000 UTC m=+1120.871360811" lastFinishedPulling="2025-11-23 15:01:13.794848597 +0000 UTC m=+1168.961845082" observedRunningTime="2025-11-23 15:01:14.928603029 +0000 UTC m=+1170.095599514" watchObservedRunningTime="2025-11-23 15:01:14.954974883 +0000 UTC m=+1170.121971368"
Nov 23 15:01:15 crc kubenswrapper[5050]: E1123 15:01:15.136661 5050 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod940dffda_38d6_469c_8453_5acb4092ed82.slice/crio-2924621750b1bc5af5085e463c9c282ad0f1c14251a8c0075f4ceadef71b73a4.scope\": RecentStats: unable to find data in memory cache]"
Nov 23 15:01:15 crc kubenswrapper[5050]: I1123 15:01:15.565407 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="28a444a8-06a7-48ed-8345-f9347485ebfa" path="/var/lib/kubelet/pods/28a444a8-06a7-48ed-8345-f9347485ebfa/volumes"
Nov 23 15:01:15 crc kubenswrapper[5050]: I1123 15:01:15.879051 5050 generic.go:334] "Generic (PLEG): container finished" podID="e7c35996-d4df-4886-afb0-bb77ddcce582" containerID="63f3f91319babce14e1ed652d7d8092e6fc2e31738c0165be8ce19658f3b5175" exitCode=0
Nov 23 15:01:15 crc kubenswrapper[5050]: I1123 15:01:15.879120 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6578955fd5-fsg9f" event={"ID":"e7c35996-d4df-4886-afb0-bb77ddcce582","Type":"ContainerDied","Data":"63f3f91319babce14e1ed652d7d8092e6fc2e31738c0165be8ce19658f3b5175"}
Nov 23 15:01:15 crc kubenswrapper[5050]: I1123 15:01:15.879152 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6578955fd5-fsg9f" event={"ID":"e7c35996-d4df-4886-afb0-bb77ddcce582","Type":"ContainerStarted","Data":"b17e940bb4e5cd3e205a69c18c23bb1cbbbe240c7731622c47327906c38db1f2"}
Nov 23 15:01:15 crc kubenswrapper[5050]: I1123 15:01:15.885702 5050 generic.go:334] "Generic (PLEG): container finished" podID="940dffda-38d6-469c-8453-5acb4092ed82" containerID="f783c7444ce664052bb1dbb4b04100c95a5cafd41194f0e0bd9cc45550efb6de" exitCode=0
Nov 23 15:01:15 crc kubenswrapper[5050]: I1123 15:01:15.885742 5050 generic.go:334] "Generic (PLEG): container finished" podID="940dffda-38d6-469c-8453-5acb4092ed82" containerID="2924621750b1bc5af5085e463c9c282ad0f1c14251a8c0075f4ceadef71b73a4" exitCode=2
Nov 23 15:01:15 crc kubenswrapper[5050]: I1123 15:01:15.885752 5050 generic.go:334] "Generic (PLEG): container finished" podID="940dffda-38d6-469c-8453-5acb4092ed82" containerID="60e10035b1ca3fb05a796d8061c3ae4fc86d25ebf065c1682c1ac895dd99e388" exitCode=0
Nov 23 15:01:15 crc kubenswrapper[5050]: I1123 15:01:15.885789 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"940dffda-38d6-469c-8453-5acb4092ed82","Type":"ContainerDied","Data":"f783c7444ce664052bb1dbb4b04100c95a5cafd41194f0e0bd9cc45550efb6de"}
Nov 23 15:01:15 crc kubenswrapper[5050]: I1123 15:01:15.885818 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"940dffda-38d6-469c-8453-5acb4092ed82","Type":"ContainerDied","Data":"2924621750b1bc5af5085e463c9c282ad0f1c14251a8c0075f4ceadef71b73a4"}
Nov 23 15:01:15 crc kubenswrapper[5050]: I1123 15:01:15.885829 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"940dffda-38d6-469c-8453-5acb4092ed82","Type":"ContainerDied","Data":"60e10035b1ca3fb05a796d8061c3ae4fc86d25ebf065c1682c1ac895dd99e388"}
Nov 23 15:01:15 crc kubenswrapper[5050]: I1123 15:01:15.889178 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"005d3430-65f9-48c5-aeed-afa7fea464cd","Type":"ContainerStarted","Data":"10a59a2d5f9c0d1c7ec2c0f35d755f4ddae5f08b2af203fbb5ad059fabc1dd3f"}
Nov 23 15:01:15 crc kubenswrapper[5050]: I1123 15:01:15.890045 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"005d3430-65f9-48c5-aeed-afa7fea464cd","Type":"ContainerStarted","Data":"78518298a60b5b0336a98eaa21c55f8f8e70788f4b020938a44954ebaee79b9c"}
Nov 23 15:01:16 crc kubenswrapper[5050]: I1123 15:01:16.666380 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-77dc855c68-z488p"
Nov 23 15:01:16 crc kubenswrapper[5050]: I1123 15:01:16.916133 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6578955fd5-fsg9f" event={"ID":"e7c35996-d4df-4886-afb0-bb77ddcce582","Type":"ContainerStarted","Data":"2af7e607e49b66afd56010e6ff512ce5c8e5aa25061c01f470156f5bd99de973"}
Nov 23 15:01:16 crc kubenswrapper[5050]: I1123 15:01:16.916515 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6578955fd5-fsg9f"
Nov 23 15:01:16 crc kubenswrapper[5050]: I1123 15:01:16.929152 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"17cf7e41-00d7-40a7-b1ec-514f69143dc1","Type":"ContainerStarted","Data":"9a74fcd5cb3e71e3458c5d1c7eeef810707c6829e349ee1f44f266b37b9a2055"}
Nov 23 15:01:16 crc kubenswrapper[5050]: I1123 15:01:16.929221 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"17cf7e41-00d7-40a7-b1ec-514f69143dc1","Type":"ContainerStarted","Data":"7968f6a282ac76de9ce7a682968a7c5cb97295d69dd1c15203e12d614301595c"}
Nov 23 15:01:16 crc kubenswrapper[5050]: I1123 15:01:16.934646 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"005d3430-65f9-48c5-aeed-afa7fea464cd","Type":"ContainerStarted","Data":"41ef32220934050618d75d02f8e7db52366fe81c166841e1f0aff8237798f3dd"}
Nov 23 15:01:16 crc kubenswrapper[5050]: I1123 15:01:16.935189 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0"
Nov 23 15:01:16 crc kubenswrapper[5050]: I1123 15:01:16.935222 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="005d3430-65f9-48c5-aeed-afa7fea464cd" containerName="cinder-api" containerID="cri-o://41ef32220934050618d75d02f8e7db52366fe81c166841e1f0aff8237798f3dd" gracePeriod=30
Nov 23 15:01:16 crc kubenswrapper[5050]: I1123 15:01:16.934954 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="005d3430-65f9-48c5-aeed-afa7fea464cd" containerName="cinder-api-log" containerID="cri-o://10a59a2d5f9c0d1c7ec2c0f35d755f4ddae5f08b2af203fbb5ad059fabc1dd3f" gracePeriod=30
Nov 23 15:01:16 crc kubenswrapper[5050]: I1123 15:01:16.952864 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6578955fd5-fsg9f" podStartSLOduration=5.952843683 podStartE2EDuration="5.952843683s" podCreationTimestamp="2025-11-23 15:01:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 15:01:16.936367694 +0000 UTC m=+1172.103364179" watchObservedRunningTime="2025-11-23 15:01:16.952843683 +0000 UTC m=+1172.119840158"
Nov 23 15:01:16 crc kubenswrapper[5050]: I1123 15:01:16.972305 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=4.972277304 podStartE2EDuration="4.972277304s" podCreationTimestamp="2025-11-23 15:01:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 15:01:16.963742076 +0000 UTC m=+1172.130738571" watchObservedRunningTime="2025-11-23 15:01:16.972277304 +0000 UTC m=+1172.139273789"
Nov 23 15:01:17 crc kubenswrapper[5050]: I1123 15:01:17.589930 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0"
Nov 23 15:01:17 crc kubenswrapper[5050]: I1123 15:01:17.716111 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 23 15:01:17 crc kubenswrapper[5050]: I1123 15:01:17.779748 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/005d3430-65f9-48c5-aeed-afa7fea464cd-config-data-custom\") pod \"005d3430-65f9-48c5-aeed-afa7fea464cd\" (UID: \"005d3430-65f9-48c5-aeed-afa7fea464cd\") "
Nov 23 15:01:17 crc kubenswrapper[5050]: I1123 15:01:17.779831 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/005d3430-65f9-48c5-aeed-afa7fea464cd-scripts\") pod \"005d3430-65f9-48c5-aeed-afa7fea464cd\" (UID: \"005d3430-65f9-48c5-aeed-afa7fea464cd\") "
Nov 23 15:01:17 crc kubenswrapper[5050]: I1123 15:01:17.779867 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/005d3430-65f9-48c5-aeed-afa7fea464cd-config-data\") pod \"005d3430-65f9-48c5-aeed-afa7fea464cd\" (UID: \"005d3430-65f9-48c5-aeed-afa7fea464cd\") "
Nov 23 15:01:17 crc kubenswrapper[5050]: I1123 15:01:17.779956 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/005d3430-65f9-48c5-aeed-afa7fea464cd-logs\") pod \"005d3430-65f9-48c5-aeed-afa7fea464cd\" (UID: \"005d3430-65f9-48c5-aeed-afa7fea464cd\") "
Nov 23 15:01:17 crc kubenswrapper[5050]: I1123 15:01:17.780029 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/005d3430-65f9-48c5-aeed-afa7fea464cd-etc-machine-id\") pod \"005d3430-65f9-48c5-aeed-afa7fea464cd\" (UID: \"005d3430-65f9-48c5-aeed-afa7fea464cd\") "
Nov 23 15:01:17 crc kubenswrapper[5050]: I1123 15:01:17.780100 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qmppb\" (UniqueName: \"kubernetes.io/projected/005d3430-65f9-48c5-aeed-afa7fea464cd-kube-api-access-qmppb\") pod \"005d3430-65f9-48c5-aeed-afa7fea464cd\" (UID: \"005d3430-65f9-48c5-aeed-afa7fea464cd\") "
Nov 23 15:01:17 crc kubenswrapper[5050]: I1123 15:01:17.780181 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/005d3430-65f9-48c5-aeed-afa7fea464cd-combined-ca-bundle\") pod \"005d3430-65f9-48c5-aeed-afa7fea464cd\" (UID: \"005d3430-65f9-48c5-aeed-afa7fea464cd\") "
Nov 23 15:01:17 crc kubenswrapper[5050]: I1123 15:01:17.780767 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/005d3430-65f9-48c5-aeed-afa7fea464cd-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "005d3430-65f9-48c5-aeed-afa7fea464cd" (UID: "005d3430-65f9-48c5-aeed-afa7fea464cd"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 23 15:01:17 crc kubenswrapper[5050]: I1123 15:01:17.781017 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/005d3430-65f9-48c5-aeed-afa7fea464cd-logs" (OuterVolumeSpecName: "logs") pod "005d3430-65f9-48c5-aeed-afa7fea464cd" (UID: "005d3430-65f9-48c5-aeed-afa7fea464cd"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 23 15:01:17 crc kubenswrapper[5050]: I1123 15:01:17.791604 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/005d3430-65f9-48c5-aeed-afa7fea464cd-scripts" (OuterVolumeSpecName: "scripts") pod "005d3430-65f9-48c5-aeed-afa7fea464cd" (UID: "005d3430-65f9-48c5-aeed-afa7fea464cd"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 23 15:01:17 crc kubenswrapper[5050]: I1123 15:01:17.791660 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/005d3430-65f9-48c5-aeed-afa7fea464cd-kube-api-access-qmppb" (OuterVolumeSpecName: "kube-api-access-qmppb") pod "005d3430-65f9-48c5-aeed-afa7fea464cd" (UID: "005d3430-65f9-48c5-aeed-afa7fea464cd"). InnerVolumeSpecName "kube-api-access-qmppb". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 23 15:01:17 crc kubenswrapper[5050]: I1123 15:01:17.793214 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/005d3430-65f9-48c5-aeed-afa7fea464cd-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "005d3430-65f9-48c5-aeed-afa7fea464cd" (UID: "005d3430-65f9-48c5-aeed-afa7fea464cd"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 23 15:01:17 crc kubenswrapper[5050]: I1123 15:01:17.831252 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/005d3430-65f9-48c5-aeed-afa7fea464cd-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "005d3430-65f9-48c5-aeed-afa7fea464cd" (UID: "005d3430-65f9-48c5-aeed-afa7fea464cd"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 23 15:01:17 crc kubenswrapper[5050]: I1123 15:01:17.861194 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/005d3430-65f9-48c5-aeed-afa7fea464cd-config-data" (OuterVolumeSpecName: "config-data") pod "005d3430-65f9-48c5-aeed-afa7fea464cd" (UID: "005d3430-65f9-48c5-aeed-afa7fea464cd"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 23 15:01:17 crc kubenswrapper[5050]: I1123 15:01:17.882134 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/940dffda-38d6-469c-8453-5acb4092ed82-config-data\") pod \"940dffda-38d6-469c-8453-5acb4092ed82\" (UID: \"940dffda-38d6-469c-8453-5acb4092ed82\") "
Nov 23 15:01:17 crc kubenswrapper[5050]: I1123 15:01:17.882212 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/940dffda-38d6-469c-8453-5acb4092ed82-combined-ca-bundle\") pod \"940dffda-38d6-469c-8453-5acb4092ed82\" (UID: \"940dffda-38d6-469c-8453-5acb4092ed82\") "
Nov 23 15:01:17 crc kubenswrapper[5050]: I1123 15:01:17.882282 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/940dffda-38d6-469c-8453-5acb4092ed82-run-httpd\") pod \"940dffda-38d6-469c-8453-5acb4092ed82\" (UID: \"940dffda-38d6-469c-8453-5acb4092ed82\") "
Nov 23 15:01:17 crc kubenswrapper[5050]: I1123 15:01:17.882637 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nx4hd\" (UniqueName: \"kubernetes.io/projected/940dffda-38d6-469c-8453-5acb4092ed82-kube-api-access-nx4hd\") pod \"940dffda-38d6-469c-8453-5acb4092ed82\" (UID: \"940dffda-38d6-469c-8453-5acb4092ed82\") "
Nov 23 15:01:17 crc kubenswrapper[5050]: I1123 15:01:17.882696 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/940dffda-38d6-469c-8453-5acb4092ed82-scripts\") pod \"940dffda-38d6-469c-8453-5acb4092ed82\" (UID: \"940dffda-38d6-469c-8453-5acb4092ed82\") "
Nov 23 15:01:17 crc kubenswrapper[5050]: I1123 15:01:17.882813 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/940dffda-38d6-469c-8453-5acb4092ed82-sg-core-conf-yaml\") pod \"940dffda-38d6-469c-8453-5acb4092ed82\" (UID: \"940dffda-38d6-469c-8453-5acb4092ed82\") "
Nov 23 15:01:17 crc kubenswrapper[5050]: I1123 15:01:17.882864 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/940dffda-38d6-469c-8453-5acb4092ed82-log-httpd\") pod \"940dffda-38d6-469c-8453-5acb4092ed82\" (UID: \"940dffda-38d6-469c-8453-5acb4092ed82\") "
Nov 23 15:01:17 crc kubenswrapper[5050]: I1123 15:01:17.883400 5050 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/005d3430-65f9-48c5-aeed-afa7fea464cd-config-data-custom\") on node \"crc\" DevicePath \"\""
Nov 23 15:01:17 crc kubenswrapper[5050]: I1123 15:01:17.883429 5050 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/005d3430-65f9-48c5-aeed-afa7fea464cd-scripts\") on node \"crc\" DevicePath \"\""
Nov 23 15:01:17 crc kubenswrapper[5050]: I1123 15:01:17.883458 5050 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/005d3430-65f9-48c5-aeed-afa7fea464cd-config-data\") on node \"crc\" DevicePath \"\""
Nov 23 15:01:17 crc kubenswrapper[5050]: I1123 15:01:17.883472 5050 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/005d3430-65f9-48c5-aeed-afa7fea464cd-logs\") on node \"crc\" DevicePath \"\""
Nov 23 15:01:17 crc kubenswrapper[5050]: I1123 15:01:17.883488 5050 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/005d3430-65f9-48c5-aeed-afa7fea464cd-etc-machine-id\") on node \"crc\" DevicePath \"\""
Nov 23 15:01:17 crc kubenswrapper[5050]: I1123 15:01:17.883500 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qmppb\" (UniqueName: \"kubernetes.io/projected/005d3430-65f9-48c5-aeed-afa7fea464cd-kube-api-access-qmppb\") on node \"crc\" DevicePath \"\""
Nov 23 15:01:17 crc kubenswrapper[5050]: I1123 15:01:17.883513 5050 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/005d3430-65f9-48c5-aeed-afa7fea464cd-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 23 15:01:17 crc kubenswrapper[5050]: I1123 15:01:17.883645 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/940dffda-38d6-469c-8453-5acb4092ed82-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "940dffda-38d6-469c-8453-5acb4092ed82" (UID: "940dffda-38d6-469c-8453-5acb4092ed82"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 23 15:01:17 crc kubenswrapper[5050]: I1123 15:01:17.883794 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/940dffda-38d6-469c-8453-5acb4092ed82-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "940dffda-38d6-469c-8453-5acb4092ed82" (UID: "940dffda-38d6-469c-8453-5acb4092ed82"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 23 15:01:17 crc kubenswrapper[5050]: I1123 15:01:17.892229 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/940dffda-38d6-469c-8453-5acb4092ed82-scripts" (OuterVolumeSpecName: "scripts") pod "940dffda-38d6-469c-8453-5acb4092ed82" (UID: "940dffda-38d6-469c-8453-5acb4092ed82"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 23 15:01:17 crc kubenswrapper[5050]: I1123 15:01:17.892361 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/940dffda-38d6-469c-8453-5acb4092ed82-kube-api-access-nx4hd" (OuterVolumeSpecName: "kube-api-access-nx4hd") pod "940dffda-38d6-469c-8453-5acb4092ed82" (UID: "940dffda-38d6-469c-8453-5acb4092ed82"). InnerVolumeSpecName "kube-api-access-nx4hd". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 23 15:01:17 crc kubenswrapper[5050]: I1123 15:01:17.920274 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/940dffda-38d6-469c-8453-5acb4092ed82-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "940dffda-38d6-469c-8453-5acb4092ed82" (UID: "940dffda-38d6-469c-8453-5acb4092ed82"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 23 15:01:17 crc kubenswrapper[5050]: I1123 15:01:17.947990 5050 generic.go:334] "Generic (PLEG): container finished" podID="940dffda-38d6-469c-8453-5acb4092ed82" containerID="62fe46e75027e1094cf742edff33ace7b51cd7f03786f96706f9e8112a7a8a36" exitCode=0
Nov 23 15:01:17 crc kubenswrapper[5050]: I1123 15:01:17.948066 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 23 15:01:17 crc kubenswrapper[5050]: I1123 15:01:17.948097 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"940dffda-38d6-469c-8453-5acb4092ed82","Type":"ContainerDied","Data":"62fe46e75027e1094cf742edff33ace7b51cd7f03786f96706f9e8112a7a8a36"}
Nov 23 15:01:17 crc kubenswrapper[5050]: I1123 15:01:17.948141 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"940dffda-38d6-469c-8453-5acb4092ed82","Type":"ContainerDied","Data":"ad251e1cf70114e00b1f72f6e61734fb9a4d76fe4079c19a50d8a805e9205c40"}
Nov 23 15:01:17 crc kubenswrapper[5050]: I1123 15:01:17.948165 5050 scope.go:117] "RemoveContainer" containerID="f783c7444ce664052bb1dbb4b04100c95a5cafd41194f0e0bd9cc45550efb6de"
Nov 23 15:01:17 crc kubenswrapper[5050]: I1123 15:01:17.956384 5050 generic.go:334] "Generic (PLEG): container finished" podID="005d3430-65f9-48c5-aeed-afa7fea464cd" containerID="41ef32220934050618d75d02f8e7db52366fe81c166841e1f0aff8237798f3dd" exitCode=0
Nov 23 15:01:17 crc kubenswrapper[5050]: I1123 15:01:17.956593 5050 generic.go:334] "Generic (PLEG): container finished" podID="005d3430-65f9-48c5-aeed-afa7fea464cd" containerID="10a59a2d5f9c0d1c7ec2c0f35d755f4ddae5f08b2af203fbb5ad059fabc1dd3f" exitCode=143
Nov 23 15:01:17 crc kubenswrapper[5050]: I1123 15:01:17.956632 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0"
Nov 23 15:01:17 crc kubenswrapper[5050]: I1123 15:01:17.956543 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"005d3430-65f9-48c5-aeed-afa7fea464cd","Type":"ContainerDied","Data":"41ef32220934050618d75d02f8e7db52366fe81c166841e1f0aff8237798f3dd"}
Nov 23 15:01:17 crc kubenswrapper[5050]: I1123 15:01:17.959770 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"005d3430-65f9-48c5-aeed-afa7fea464cd","Type":"ContainerDied","Data":"10a59a2d5f9c0d1c7ec2c0f35d755f4ddae5f08b2af203fbb5ad059fabc1dd3f"}
Nov 23 15:01:17 crc kubenswrapper[5050]: I1123 15:01:17.959987 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"005d3430-65f9-48c5-aeed-afa7fea464cd","Type":"ContainerDied","Data":"78518298a60b5b0336a98eaa21c55f8f8e70788f4b020938a44954ebaee79b9c"}
Nov 23 15:01:17 crc kubenswrapper[5050]: I1123 15:01:17.987198 5050 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/940dffda-38d6-469c-8453-5acb4092ed82-run-httpd\") on node \"crc\" DevicePath \"\""
Nov 23 15:01:17 crc kubenswrapper[5050]: I1123 15:01:17.987233 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nx4hd\" (UniqueName: \"kubernetes.io/projected/940dffda-38d6-469c-8453-5acb4092ed82-kube-api-access-nx4hd\") on node \"crc\" DevicePath \"\""
Nov 23 15:01:17 crc kubenswrapper[5050]: I1123 15:01:17.987243 5050 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/940dffda-38d6-469c-8453-5acb4092ed82-scripts\") on node \"crc\" DevicePath \"\""
Nov 23 15:01:17 crc kubenswrapper[5050]: I1123 15:01:17.987254 5050 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/940dffda-38d6-469c-8453-5acb4092ed82-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\""
Nov 23 15:01:17 crc kubenswrapper[5050]: I1123 15:01:17.987263 5050 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/940dffda-38d6-469c-8453-5acb4092ed82-log-httpd\") on node \"crc\" DevicePath \"\""
Nov 23 15:01:18 crc kubenswrapper[5050]: I1123 15:01:18.001296 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=6.039337039 podStartE2EDuration="7.001272159s" podCreationTimestamp="2025-11-23 15:01:11 +0000 UTC" firstStartedPulling="2025-11-23 15:01:14.405930638 +0000 UTC m=+1169.572927123" lastFinishedPulling="2025-11-23 15:01:15.367865758 +0000 UTC m=+1170.534862243" observedRunningTime="2025-11-23 15:01:17.979888394 +0000 UTC m=+1173.146884879" watchObservedRunningTime="2025-11-23 15:01:18.001272159 +0000 UTC m=+1173.168268644"
Nov 23 15:01:18 crc kubenswrapper[5050]: I1123 15:01:18.023014 5050 scope.go:117] "RemoveContainer" containerID="2924621750b1bc5af5085e463c9c282ad0f1c14251a8c0075f4ceadef71b73a4"
Nov 23 15:01:18 crc kubenswrapper[5050]: I1123 15:01:18.029775 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/940dffda-38d6-469c-8453-5acb4092ed82-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "940dffda-38d6-469c-8453-5acb4092ed82" (UID: "940dffda-38d6-469c-8453-5acb4092ed82"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 23 15:01:18 crc kubenswrapper[5050]: I1123 15:01:18.030738 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/940dffda-38d6-469c-8453-5acb4092ed82-config-data" (OuterVolumeSpecName: "config-data") pod "940dffda-38d6-469c-8453-5acb4092ed82" (UID: "940dffda-38d6-469c-8453-5acb4092ed82"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 23 15:01:18 crc kubenswrapper[5050]: I1123 15:01:18.036915 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"]
Nov 23 15:01:18 crc kubenswrapper[5050]: I1123 15:01:18.048226 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-api-0"]
Nov 23 15:01:18 crc kubenswrapper[5050]: I1123 15:01:18.056989 5050 scope.go:117] "RemoveContainer" containerID="62fe46e75027e1094cf742edff33ace7b51cd7f03786f96706f9e8112a7a8a36"
Nov 23 15:01:18 crc kubenswrapper[5050]: I1123 15:01:18.071667 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"]
Nov 23 15:01:18 crc kubenswrapper[5050]: E1123 15:01:18.072213 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="940dffda-38d6-469c-8453-5acb4092ed82" containerName="sg-core"
Nov 23 15:01:18 crc kubenswrapper[5050]: I1123 15:01:18.072236 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="940dffda-38d6-469c-8453-5acb4092ed82" containerName="sg-core"
Nov 23 15:01:18 crc kubenswrapper[5050]: E1123 15:01:18.072272 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="940dffda-38d6-469c-8453-5acb4092ed82" containerName="proxy-httpd"
Nov 23 15:01:18 crc kubenswrapper[5050]: I1123 15:01:18.072281 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="940dffda-38d6-469c-8453-5acb4092ed82" containerName="proxy-httpd"
Nov 23 15:01:18 crc kubenswrapper[5050]: E1123 15:01:18.072292 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="940dffda-38d6-469c-8453-5acb4092ed82" containerName="ceilometer-notification-agent"
Nov 23 15:01:18 crc kubenswrapper[5050]: I1123 15:01:18.072302 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="940dffda-38d6-469c-8453-5acb4092ed82" containerName="ceilometer-notification-agent"
Nov 23 15:01:18 crc kubenswrapper[5050]: E1123 15:01:18.072316 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="28a444a8-06a7-48ed-8345-f9347485ebfa" containerName="dnsmasq-dns"
Nov 23 15:01:18 crc kubenswrapper[5050]: I1123 15:01:18.072322 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="28a444a8-06a7-48ed-8345-f9347485ebfa" containerName="dnsmasq-dns"
Nov 23 15:01:18 crc kubenswrapper[5050]: E1123 15:01:18.072336 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="28a444a8-06a7-48ed-8345-f9347485ebfa" containerName="init"
Nov 23 15:01:18 crc kubenswrapper[5050]: I1123 15:01:18.072343 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="28a444a8-06a7-48ed-8345-f9347485ebfa" containerName="init"
Nov 23 15:01:18 crc kubenswrapper[5050]: E1123 15:01:18.072360 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="005d3430-65f9-48c5-aeed-afa7fea464cd" containerName="cinder-api-log"
Nov 23 15:01:18 crc kubenswrapper[5050]: I1123 15:01:18.072367 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="005d3430-65f9-48c5-aeed-afa7fea464cd" containerName="cinder-api-log"
Nov 23 15:01:18 crc kubenswrapper[5050]: E1123 15:01:18.072382 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="940dffda-38d6-469c-8453-5acb4092ed82" containerName="ceilometer-central-agent"
Nov 23 15:01:18 crc kubenswrapper[5050]: I1123 15:01:18.072389 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="940dffda-38d6-469c-8453-5acb4092ed82" containerName="ceilometer-central-agent"
Nov 23 15:01:18 crc kubenswrapper[5050]: E1123 15:01:18.072405 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="005d3430-65f9-48c5-aeed-afa7fea464cd" containerName="cinder-api"
Nov 23 15:01:18 crc kubenswrapper[5050]: I1123 15:01:18.072412 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="005d3430-65f9-48c5-aeed-afa7fea464cd" containerName="cinder-api"
Nov 23 15:01:18 crc kubenswrapper[5050]: I1123 15:01:18.072672 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="940dffda-38d6-469c-8453-5acb4092ed82" containerName="ceilometer-notification-agent"
Nov 23 15:01:18 crc kubenswrapper[5050]: I1123 15:01:18.072688 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="940dffda-38d6-469c-8453-5acb4092ed82" containerName="sg-core"
Nov 23 15:01:18 crc kubenswrapper[5050]: I1123 15:01:18.072694 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="005d3430-65f9-48c5-aeed-afa7fea464cd" containerName="cinder-api"
Nov 23 15:01:18 crc kubenswrapper[5050]: I1123 15:01:18.072713 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="940dffda-38d6-469c-8453-5acb4092ed82" containerName="ceilometer-central-agent"
Nov 23 15:01:18 crc kubenswrapper[5050]: I1123 15:01:18.072730 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="005d3430-65f9-48c5-aeed-afa7fea464cd" containerName="cinder-api-log"
Nov 23 15:01:18 crc kubenswrapper[5050]: I1123 15:01:18.072741 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="940dffda-38d6-469c-8453-5acb4092ed82" containerName="proxy-httpd"
Nov 23 15:01:18 crc kubenswrapper[5050]: I1123 15:01:18.072752 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="28a444a8-06a7-48ed-8345-f9347485ebfa" containerName="dnsmasq-dns"
Nov 23 15:01:18 crc kubenswrapper[5050]: I1123 15:01:18.073820 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0"
Nov 23 15:01:18 crc kubenswrapper[5050]: I1123 15:01:18.080041 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-public-svc"
Nov 23 15:01:18 crc kubenswrapper[5050]: I1123 15:01:18.080145 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data"
Nov 23 15:01:18 crc kubenswrapper[5050]: I1123 15:01:18.080373 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-internal-svc"
Nov 23 15:01:18 crc kubenswrapper[5050]: I1123 15:01:18.089637 5050 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/940dffda-38d6-469c-8453-5acb4092ed82-config-data\") on node \"crc\" DevicePath \"\""
Nov 23 15:01:18 crc kubenswrapper[5050]: I1123 15:01:18.089680 5050 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/940dffda-38d6-469c-8453-5acb4092ed82-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 23 15:01:18 crc kubenswrapper[5050]: I1123 15:01:18.119646 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"]
Nov 23 15:01:18 crc kubenswrapper[5050]: I1123 15:01:18.126770 5050 scope.go:117] "RemoveContainer" containerID="60e10035b1ca3fb05a796d8061c3ae4fc86d25ebf065c1682c1ac895dd99e388"
Nov 23 15:01:18 crc kubenswrapper[5050]: I1123 15:01:18.185045 5050 scope.go:117] "RemoveContainer" containerID="f783c7444ce664052bb1dbb4b04100c95a5cafd41194f0e0bd9cc45550efb6de"
Nov 23 15:01:18 crc kubenswrapper[5050]: E1123 15:01:18.188555 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f783c7444ce664052bb1dbb4b04100c95a5cafd41194f0e0bd9cc45550efb6de\": container with ID starting with f783c7444ce664052bb1dbb4b04100c95a5cafd41194f0e0bd9cc45550efb6de not found: ID does not exist" containerID="f783c7444ce664052bb1dbb4b04100c95a5cafd41194f0e0bd9cc45550efb6de"
Nov 23 15:01:18 crc kubenswrapper[5050]: I1123 15:01:18.188619 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f783c7444ce664052bb1dbb4b04100c95a5cafd41194f0e0bd9cc45550efb6de"} err="failed to get container status \"f783c7444ce664052bb1dbb4b04100c95a5cafd41194f0e0bd9cc45550efb6de\": rpc error: code = NotFound desc = could not find container \"f783c7444ce664052bb1dbb4b04100c95a5cafd41194f0e0bd9cc45550efb6de\": container with ID starting with f783c7444ce664052bb1dbb4b04100c95a5cafd41194f0e0bd9cc45550efb6de not found: ID does not exist"
Nov 23 15:01:18 crc kubenswrapper[5050]: I1123 15:01:18.188650 5050 scope.go:117] "RemoveContainer" containerID="2924621750b1bc5af5085e463c9c282ad0f1c14251a8c0075f4ceadef71b73a4"
Nov 23 15:01:18 crc kubenswrapper[5050]: E1123 15:01:18.190200 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2924621750b1bc5af5085e463c9c282ad0f1c14251a8c0075f4ceadef71b73a4\": container with ID starting with 2924621750b1bc5af5085e463c9c282ad0f1c14251a8c0075f4ceadef71b73a4 not found: ID does not exist" containerID="2924621750b1bc5af5085e463c9c282ad0f1c14251a8c0075f4ceadef71b73a4"
Nov 23 15:01:18 crc kubenswrapper[5050]: I1123 15:01:18.190269 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2924621750b1bc5af5085e463c9c282ad0f1c14251a8c0075f4ceadef71b73a4"} err="failed to get container status \"2924621750b1bc5af5085e463c9c282ad0f1c14251a8c0075f4ceadef71b73a4\": rpc error: code = NotFound desc = could not find container \"2924621750b1bc5af5085e463c9c282ad0f1c14251a8c0075f4ceadef71b73a4\": container with ID starting with 2924621750b1bc5af5085e463c9c282ad0f1c14251a8c0075f4ceadef71b73a4 not found: ID does not exist"
Nov 23 15:01:18 crc kubenswrapper[5050]: I1123 15:01:18.190324 5050 scope.go:117] "RemoveContainer" containerID="62fe46e75027e1094cf742edff33ace7b51cd7f03786f96706f9e8112a7a8a36"
Nov 23 15:01:18 crc kubenswrapper[5050]: I1123 15:01:18.190998 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ba1bb9f6-5ca2-4986-9473-62d50d9bebf0-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"ba1bb9f6-5ca2-4986-9473-62d50d9bebf0\") " pod="openstack/cinder-api-0"
Nov 23 15:01:18 crc kubenswrapper[5050]: I1123 15:01:18.191037 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ba1bb9f6-5ca2-4986-9473-62d50d9bebf0-logs\") pod \"cinder-api-0\" (UID: \"ba1bb9f6-5ca2-4986-9473-62d50d9bebf0\") " pod="openstack/cinder-api-0"
Nov 23 15:01:18 crc kubenswrapper[5050]: I1123 15:01:18.191055 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ba1bb9f6-5ca2-4986-9473-62d50d9bebf0-scripts\") pod \"cinder-api-0\" (UID: \"ba1bb9f6-5ca2-4986-9473-62d50d9bebf0\") " pod="openstack/cinder-api-0"
Nov 23 15:01:18 crc kubenswrapper[5050]: I1123 15:01:18.191078 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fr79l\" (UniqueName: \"kubernetes.io/projected/ba1bb9f6-5ca2-4986-9473-62d50d9bebf0-kube-api-access-fr79l\") pod \"cinder-api-0\" (UID: \"ba1bb9f6-5ca2-4986-9473-62d50d9bebf0\") " pod="openstack/cinder-api-0"
Nov 23 15:01:18 crc kubenswrapper[5050]: I1123 15:01:18.191102 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ba1bb9f6-5ca2-4986-9473-62d50d9bebf0-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"ba1bb9f6-5ca2-4986-9473-62d50d9bebf0\") " pod="openstack/cinder-api-0"
Nov 23 15:01:18 crc kubenswrapper[5050]: E1123 15:01:18.191248 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"62fe46e75027e1094cf742edff33ace7b51cd7f03786f96706f9e8112a7a8a36\": container with ID starting with 62fe46e75027e1094cf742edff33ace7b51cd7f03786f96706f9e8112a7a8a36 not found: ID does not exist" containerID="62fe46e75027e1094cf742edff33ace7b51cd7f03786f96706f9e8112a7a8a36"
Nov 23 15:01:18 crc kubenswrapper[5050]: I1123 15:01:18.191298 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"62fe46e75027e1094cf742edff33ace7b51cd7f03786f96706f9e8112a7a8a36"} err="failed to get container status \"62fe46e75027e1094cf742edff33ace7b51cd7f03786f96706f9e8112a7a8a36\": rpc error: code = NotFound desc = could not find container \"62fe46e75027e1094cf742edff33ace7b51cd7f03786f96706f9e8112a7a8a36\": container with ID starting with 62fe46e75027e1094cf742edff33ace7b51cd7f03786f96706f9e8112a7a8a36 not found: ID does not exist"
Nov 23 15:01:18 crc kubenswrapper[5050]: I1123 15:01:18.191344 5050 scope.go:117] "RemoveContainer" containerID="60e10035b1ca3fb05a796d8061c3ae4fc86d25ebf065c1682c1ac895dd99e388"
Nov 23 15:01:18 crc kubenswrapper[5050]: I1123 15:01:18.191410 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ba1bb9f6-5ca2-4986-9473-62d50d9bebf0-config-data\") pod \"cinder-api-0\" (UID: \"ba1bb9f6-5ca2-4986-9473-62d50d9bebf0\") " pod="openstack/cinder-api-0"
Nov 23 15:01:18 crc kubenswrapper[5050]: I1123 15:01:18.191632 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ba1bb9f6-5ca2-4986-9473-62d50d9bebf0-config-data-custom\") pod \"cinder-api-0\" (UID: \"ba1bb9f6-5ca2-4986-9473-62d50d9bebf0\") " pod="openstack/cinder-api-0"
Nov 23 15:01:18 crc kubenswrapper[5050]: I1123 15:01:18.191820 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/ba1bb9f6-5ca2-4986-9473-62d50d9bebf0-etc-machine-id\") pod \"cinder-api-0\" (UID: \"ba1bb9f6-5ca2-4986-9473-62d50d9bebf0\") " pod="openstack/cinder-api-0"
Nov 23 15:01:18 crc kubenswrapper[5050]: I1123 15:01:18.191913 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ba1bb9f6-5ca2-4986-9473-62d50d9bebf0-public-tls-certs\") pod \"cinder-api-0\" (UID: \"ba1bb9f6-5ca2-4986-9473-62d50d9bebf0\") " pod="openstack/cinder-api-0"
Nov 23 15:01:18 crc kubenswrapper[5050]: E1123 15:01:18.193091 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"60e10035b1ca3fb05a796d8061c3ae4fc86d25ebf065c1682c1ac895dd99e388\": container with ID starting with 60e10035b1ca3fb05a796d8061c3ae4fc86d25ebf065c1682c1ac895dd99e388 not found: ID does not exist" containerID="60e10035b1ca3fb05a796d8061c3ae4fc86d25ebf065c1682c1ac895dd99e388"
Nov 23 15:01:18 crc kubenswrapper[5050]: I1123 15:01:18.193149 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"60e10035b1ca3fb05a796d8061c3ae4fc86d25ebf065c1682c1ac895dd99e388"} err="failed to get container status \"60e10035b1ca3fb05a796d8061c3ae4fc86d25ebf065c1682c1ac895dd99e388\": rpc error: code = NotFound desc = could not find container \"60e10035b1ca3fb05a796d8061c3ae4fc86d25ebf065c1682c1ac895dd99e388\": container with ID starting with 60e10035b1ca3fb05a796d8061c3ae4fc86d25ebf065c1682c1ac895dd99e388 not found: ID does not exist"
Nov 23 15:01:18 crc kubenswrapper[5050]: I1123 15:01:18.193178 5050 scope.go:117] "RemoveContainer" containerID="41ef32220934050618d75d02f8e7db52366fe81c166841e1f0aff8237798f3dd"
Nov 23 15:01:18 crc kubenswrapper[5050]: I1123 15:01:18.272141 5050 scope.go:117] "RemoveContainer" containerID="10a59a2d5f9c0d1c7ec2c0f35d755f4ddae5f08b2af203fbb5ad059fabc1dd3f"
Nov 23 15:01:18 crc kubenswrapper[5050]: I1123 15:01:18.296676 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/ba1bb9f6-5ca2-4986-9473-62d50d9bebf0-etc-machine-id\") pod \"cinder-api-0\" (UID: \"ba1bb9f6-5ca2-4986-9473-62d50d9bebf0\") " pod="openstack/cinder-api-0"
Nov 23 15:01:18 crc kubenswrapper[5050]: I1123 15:01:18.296740 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ba1bb9f6-5ca2-4986-9473-62d50d9bebf0-public-tls-certs\") pod \"cinder-api-0\" (UID: \"ba1bb9f6-5ca2-4986-9473-62d50d9bebf0\") " pod="openstack/cinder-api-0"
Nov 23 15:01:18 crc kubenswrapper[5050]: I1123 15:01:18.296799 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ba1bb9f6-5ca2-4986-9473-62d50d9bebf0-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"ba1bb9f6-5ca2-4986-9473-62d50d9bebf0\") " pod="openstack/cinder-api-0"
Nov 23 15:01:18 crc kubenswrapper[5050]: I1123 15:01:18.296836 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ba1bb9f6-5ca2-4986-9473-62d50d9bebf0-logs\") pod \"cinder-api-0\" (UID: \"ba1bb9f6-5ca2-4986-9473-62d50d9bebf0\") " pod="openstack/cinder-api-0"
Nov 23 15:01:18 crc kubenswrapper[5050]: I1123 15:01:18.296858 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ba1bb9f6-5ca2-4986-9473-62d50d9bebf0-scripts\") pod \"cinder-api-0\" (UID: \"ba1bb9f6-5ca2-4986-9473-62d50d9bebf0\") " pod="openstack/cinder-api-0"
Nov 23 15:01:18 crc kubenswrapper[5050]: I1123 15:01:18.296884 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fr79l\" (UniqueName: \"kubernetes.io/projected/ba1bb9f6-5ca2-4986-9473-62d50d9bebf0-kube-api-access-fr79l\") pod \"cinder-api-0\" (UID: \"ba1bb9f6-5ca2-4986-9473-62d50d9bebf0\") " pod="openstack/cinder-api-0"
Nov 23 15:01:18 crc kubenswrapper[5050]: I1123 15:01:18.296913 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ba1bb9f6-5ca2-4986-9473-62d50d9bebf0-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"ba1bb9f6-5ca2-4986-9473-62d50d9bebf0\") " pod="openstack/cinder-api-0"
Nov 23 15:01:18 crc kubenswrapper[5050]: I1123 15:01:18.296989 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ba1bb9f6-5ca2-4986-9473-62d50d9bebf0-config-data\") pod \"cinder-api-0\" (UID: \"ba1bb9f6-5ca2-4986-9473-62d50d9bebf0\") " pod="openstack/cinder-api-0"
Nov 23 15:01:18 crc kubenswrapper[5050]: I1123 15:01:18.297034 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ba1bb9f6-5ca2-4986-9473-62d50d9bebf0-config-data-custom\") pod \"cinder-api-0\" (UID: \"ba1bb9f6-5ca2-4986-9473-62d50d9bebf0\") " pod="openstack/cinder-api-0"
Nov 23 15:01:18 crc kubenswrapper[5050]: I1123 15:01:18.297632 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/ba1bb9f6-5ca2-4986-9473-62d50d9bebf0-etc-machine-id\") pod \"cinder-api-0\" (UID: \"ba1bb9f6-5ca2-4986-9473-62d50d9bebf0\") " pod="openstack/cinder-api-0"
Nov 23 15:01:18 crc kubenswrapper[5050]: I1123 15:01:18.298104 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ba1bb9f6-5ca2-4986-9473-62d50d9bebf0-logs\") pod \"cinder-api-0\" (UID: \"ba1bb9f6-5ca2-4986-9473-62d50d9bebf0\") " pod="openstack/cinder-api-0"
Nov 23 15:01:18 crc kubenswrapper[5050]: I1123 15:01:18.301190 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Nov 23 15:01:18 crc kubenswrapper[5050]: I1123 15:01:18.309820 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ba1bb9f6-5ca2-4986-9473-62d50d9bebf0-public-tls-certs\") pod \"cinder-api-0\" (UID: \"ba1bb9f6-5ca2-4986-9473-62d50d9bebf0\") " pod="openstack/cinder-api-0"
Nov 23 15:01:18 crc kubenswrapper[5050]: I1123 15:01:18.321632 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"]
Nov 23 15:01:18 crc kubenswrapper[5050]: I1123 15:01:18.322288 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ba1bb9f6-5ca2-4986-9473-62d50d9bebf0-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"ba1bb9f6-5ca2-4986-9473-62d50d9bebf0\") " pod="openstack/cinder-api-0"
Nov 23 15:01:18 crc kubenswrapper[5050]: I1123 15:01:18.325400 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ba1bb9f6-5ca2-4986-9473-62d50d9bebf0-scripts\") pod \"cinder-api-0\" (UID: \"ba1bb9f6-5ca2-4986-9473-62d50d9bebf0\") " pod="openstack/cinder-api-0"
Nov 23 15:01:18 crc kubenswrapper[5050]: I1123 15:01:18.330647 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ba1bb9f6-5ca2-4986-9473-62d50d9bebf0-config-data\") pod \"cinder-api-0\" (UID: \"ba1bb9f6-5ca2-4986-9473-62d50d9bebf0\") " pod="openstack/cinder-api-0"
Nov 23 15:01:18 crc kubenswrapper[5050]: I1123 15:01:18.335635 5050 scope.go:117] "RemoveContainer" containerID="41ef32220934050618d75d02f8e7db52366fe81c166841e1f0aff8237798f3dd"
Nov 23 15:01:18 crc kubenswrapper[5050]: E1123 15:01:18.337145 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"41ef32220934050618d75d02f8e7db52366fe81c166841e1f0aff8237798f3dd\": container with ID starting with 41ef32220934050618d75d02f8e7db52366fe81c166841e1f0aff8237798f3dd not found: ID does not exist" containerID="41ef32220934050618d75d02f8e7db52366fe81c166841e1f0aff8237798f3dd"
Nov 23 15:01:18 crc kubenswrapper[5050]: I1123 15:01:18.337191 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"41ef32220934050618d75d02f8e7db52366fe81c166841e1f0aff8237798f3dd"} err="failed to get container status \"41ef32220934050618d75d02f8e7db52366fe81c166841e1f0aff8237798f3dd\": rpc error: code = NotFound desc = could not find container \"41ef32220934050618d75d02f8e7db52366fe81c166841e1f0aff8237798f3dd\": container with ID starting with 41ef32220934050618d75d02f8e7db52366fe81c166841e1f0aff8237798f3dd not found: ID does not exist"
Nov 23 15:01:18 crc kubenswrapper[5050]: I1123 15:01:18.337216 5050 scope.go:117] "RemoveContainer" containerID="10a59a2d5f9c0d1c7ec2c0f35d755f4ddae5f08b2af203fbb5ad059fabc1dd3f"
Nov 23 15:01:18 crc kubenswrapper[5050]: I1123 15:01:18.342228 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ba1bb9f6-5ca2-4986-9473-62d50d9bebf0-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"ba1bb9f6-5ca2-4986-9473-62d50d9bebf0\") " pod="openstack/cinder-api-0"
Nov 23 15:01:18 crc kubenswrapper[5050]: E1123 15:01:18.341633 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"10a59a2d5f9c0d1c7ec2c0f35d755f4ddae5f08b2af203fbb5ad059fabc1dd3f\": container with ID starting with 10a59a2d5f9c0d1c7ec2c0f35d755f4ddae5f08b2af203fbb5ad059fabc1dd3f not found: ID does not exist" containerID="10a59a2d5f9c0d1c7ec2c0f35d755f4ddae5f08b2af203fbb5ad059fabc1dd3f"
Nov 23 15:01:18 crc kubenswrapper[5050]: I1123 15:01:18.342499 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"10a59a2d5f9c0d1c7ec2c0f35d755f4ddae5f08b2af203fbb5ad059fabc1dd3f"} err="failed to get container status \"10a59a2d5f9c0d1c7ec2c0f35d755f4ddae5f08b2af203fbb5ad059fabc1dd3f\": rpc error: code = NotFound desc = could not find container \"10a59a2d5f9c0d1c7ec2c0f35d755f4ddae5f08b2af203fbb5ad059fabc1dd3f\": container with ID starting with 10a59a2d5f9c0d1c7ec2c0f35d755f4ddae5f08b2af203fbb5ad059fabc1dd3f not found: ID does not exist"
Nov 23 15:01:18 crc kubenswrapper[5050]: I1123 15:01:18.342815 5050 scope.go:117] "RemoveContainer" containerID="41ef32220934050618d75d02f8e7db52366fe81c166841e1f0aff8237798f3dd"
Nov 23 15:01:18 crc kubenswrapper[5050]: I1123 15:01:18.346363 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"]
Nov 23 15:01:18 crc kubenswrapper[5050]: I1123 15:01:18.349503 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 23 15:01:18 crc kubenswrapper[5050]: I1123 15:01:18.351041 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"41ef32220934050618d75d02f8e7db52366fe81c166841e1f0aff8237798f3dd"} err="failed to get container status \"41ef32220934050618d75d02f8e7db52366fe81c166841e1f0aff8237798f3dd\": rpc error: code = NotFound desc = could not find container \"41ef32220934050618d75d02f8e7db52366fe81c166841e1f0aff8237798f3dd\": container with ID starting with 41ef32220934050618d75d02f8e7db52366fe81c166841e1f0aff8237798f3dd not found: ID does not exist"
Nov 23 15:01:18 crc kubenswrapper[5050]: I1123 15:01:18.351232 5050 scope.go:117] "RemoveContainer" containerID="10a59a2d5f9c0d1c7ec2c0f35d755f4ddae5f08b2af203fbb5ad059fabc1dd3f"
Nov 23 15:01:18 crc kubenswrapper[5050]: I1123 15:01:18.353874 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts"
Nov 23 15:01:18 crc kubenswrapper[5050]: I1123 15:01:18.354020 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Nov 23 15:01:18 crc kubenswrapper[5050]: I1123 15:01:18.367056 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"10a59a2d5f9c0d1c7ec2c0f35d755f4ddae5f08b2af203fbb5ad059fabc1dd3f"} err="failed to get container status \"10a59a2d5f9c0d1c7ec2c0f35d755f4ddae5f08b2af203fbb5ad059fabc1dd3f\": rpc error: code = NotFound desc = could not find container \"10a59a2d5f9c0d1c7ec2c0f35d755f4ddae5f08b2af203fbb5ad059fabc1dd3f\": container with ID starting with 10a59a2d5f9c0d1c7ec2c0f35d755f4ddae5f08b2af203fbb5ad059fabc1dd3f not found: ID does not exist"
Nov 23 15:01:18 crc kubenswrapper[5050]: I1123 15:01:18.377324 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ba1bb9f6-5ca2-4986-9473-62d50d9bebf0-config-data-custom\") pod \"cinder-api-0\" (UID: \"ba1bb9f6-5ca2-4986-9473-62d50d9bebf0\") " pod="openstack/cinder-api-0"
Nov 23 15:01:18 crc kubenswrapper[5050]: I1123 15:01:18.384940 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fr79l\" (UniqueName: \"kubernetes.io/projected/ba1bb9f6-5ca2-4986-9473-62d50d9bebf0-kube-api-access-fr79l\") pod \"cinder-api-0\" (UID: \"ba1bb9f6-5ca2-4986-9473-62d50d9bebf0\") " pod="openstack/cinder-api-0"
Nov 23 15:01:18 crc kubenswrapper[5050]: I1123 15:01:18.428990 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0"
Nov 23 15:01:18 crc kubenswrapper[5050]: I1123 15:01:18.487391 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Nov 23 15:01:18 crc kubenswrapper[5050]: I1123 15:01:18.533930 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/153c15c7-1c66-41bd-b749-c4ffcf36aded-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"153c15c7-1c66-41bd-b749-c4ffcf36aded\") " pod="openstack/ceilometer-0"
Nov 23 15:01:18 crc kubenswrapper[5050]: I1123 15:01:18.534347 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/153c15c7-1c66-41bd-b749-c4ffcf36aded-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"153c15c7-1c66-41bd-b749-c4ffcf36aded\") " pod="openstack/ceilometer-0"
Nov 23 15:01:18 crc kubenswrapper[5050]: I1123 15:01:18.534385 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/153c15c7-1c66-41bd-b749-c4ffcf36aded-scripts\") pod \"ceilometer-0\" (UID: \"153c15c7-1c66-41bd-b749-c4ffcf36aded\") " pod="openstack/ceilometer-0"
Nov 23 15:01:18 crc kubenswrapper[5050]: I1123 15:01:18.534434 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/153c15c7-1c66-41bd-b749-c4ffcf36aded-run-httpd\") pod \"ceilometer-0\" (UID: \"153c15c7-1c66-41bd-b749-c4ffcf36aded\") " pod="openstack/ceilometer-0"
Nov 23 15:01:18 crc kubenswrapper[5050]: I1123 15:01:18.534557 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/153c15c7-1c66-41bd-b749-c4ffcf36aded-config-data\") pod \"ceilometer-0\" (UID: \"153c15c7-1c66-41bd-b749-c4ffcf36aded\") " pod="openstack/ceilometer-0"
Nov 23 15:01:18 crc kubenswrapper[5050]: I1123 15:01:18.534601 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/153c15c7-1c66-41bd-b749-c4ffcf36aded-log-httpd\") pod \"ceilometer-0\" (UID: \"153c15c7-1c66-41bd-b749-c4ffcf36aded\") " pod="openstack/ceilometer-0"
Nov 23 15:01:18 crc kubenswrapper[5050]: I1123 15:01:18.534681 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5zphb\" (UniqueName: \"kubernetes.io/projected/153c15c7-1c66-41bd-b749-c4ffcf36aded-kube-api-access-5zphb\") pod \"ceilometer-0\" (UID: \"153c15c7-1c66-41bd-b749-c4ffcf36aded\") " pod="openstack/ceilometer-0"
Nov 23 15:01:18 crc kubenswrapper[5050]: I1123 15:01:18.645645 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/153c15c7-1c66-41bd-b749-c4ffcf36aded-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"153c15c7-1c66-41bd-b749-c4ffcf36aded\") " pod="openstack/ceilometer-0"
Nov 23 15:01:18 crc kubenswrapper[5050]: I1123 15:01:18.645714 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/153c15c7-1c66-41bd-b749-c4ffcf36aded-scripts\") pod \"ceilometer-0\" (UID: \"153c15c7-1c66-41bd-b749-c4ffcf36aded\") " pod="openstack/ceilometer-0"
Nov 23 15:01:18 crc kubenswrapper[5050]: I1123 15:01:18.645772 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/153c15c7-1c66-41bd-b749-c4ffcf36aded-run-httpd\") pod \"ceilometer-0\" (UID: \"153c15c7-1c66-41bd-b749-c4ffcf36aded\") " pod="openstack/ceilometer-0"
Nov 23 15:01:18 crc kubenswrapper[5050]: I1123 15:01:18.645909 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/153c15c7-1c66-41bd-b749-c4ffcf36aded-config-data\") pod \"ceilometer-0\" (UID: \"153c15c7-1c66-41bd-b749-c4ffcf36aded\") " pod="openstack/ceilometer-0"
Nov 23 15:01:18 crc kubenswrapper[5050]: I1123 15:01:18.645942 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/153c15c7-1c66-41bd-b749-c4ffcf36aded-log-httpd\") pod \"ceilometer-0\" (UID: \"153c15c7-1c66-41bd-b749-c4ffcf36aded\") " pod="openstack/ceilometer-0"
Nov 23 15:01:18 crc kubenswrapper[5050]: I1123 15:01:18.646026 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5zphb\" (UniqueName: \"kubernetes.io/projected/153c15c7-1c66-41bd-b749-c4ffcf36aded-kube-api-access-5zphb\") pod \"ceilometer-0\" (UID: \"153c15c7-1c66-41bd-b749-c4ffcf36aded\") " pod="openstack/ceilometer-0"
Nov 23 15:01:18 crc kubenswrapper[5050]: I1123 15:01:18.646153 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/153c15c7-1c66-41bd-b749-c4ffcf36aded-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"153c15c7-1c66-41bd-b749-c4ffcf36aded\") " pod="openstack/ceilometer-0"
Nov 23 15:01:18 crc kubenswrapper[5050]: I1123 15:01:18.648812 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/153c15c7-1c66-41bd-b749-c4ffcf36aded-log-httpd\") pod \"ceilometer-0\" (UID: \"153c15c7-1c66-41bd-b749-c4ffcf36aded\") " pod="openstack/ceilometer-0"
Nov 23 15:01:18 crc kubenswrapper[5050]: I1123 15:01:18.654326 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/153c15c7-1c66-41bd-b749-c4ffcf36aded-config-data\") pod \"ceilometer-0\" (UID: \"153c15c7-1c66-41bd-b749-c4ffcf36aded\") " pod="openstack/ceilometer-0"
Nov 23 15:01:18 crc kubenswrapper[5050]: I1123 15:01:18.654600 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/153c15c7-1c66-41bd-b749-c4ffcf36aded-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"153c15c7-1c66-41bd-b749-c4ffcf36aded\") " pod="openstack/ceilometer-0"
Nov 23 15:01:18 crc kubenswrapper[5050]: I1123 15:01:18.654861 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/153c15c7-1c66-41bd-b749-c4ffcf36aded-run-httpd\") pod \"ceilometer-0\" (UID: \"153c15c7-1c66-41bd-b749-c4ffcf36aded\") " pod="openstack/ceilometer-0"
Nov 23 15:01:18 crc kubenswrapper[5050]: I1123 15:01:18.660211 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume
\"scripts\" (UniqueName: \"kubernetes.io/secret/153c15c7-1c66-41bd-b749-c4ffcf36aded-scripts\") pod \"ceilometer-0\" (UID: \"153c15c7-1c66-41bd-b749-c4ffcf36aded\") " pod="openstack/ceilometer-0" Nov 23 15:01:18 crc kubenswrapper[5050]: I1123 15:01:18.663124 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/153c15c7-1c66-41bd-b749-c4ffcf36aded-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"153c15c7-1c66-41bd-b749-c4ffcf36aded\") " pod="openstack/ceilometer-0" Nov 23 15:01:18 crc kubenswrapper[5050]: I1123 15:01:18.667364 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5zphb\" (UniqueName: \"kubernetes.io/projected/153c15c7-1c66-41bd-b749-c4ffcf36aded-kube-api-access-5zphb\") pod \"ceilometer-0\" (UID: \"153c15c7-1c66-41bd-b749-c4ffcf36aded\") " pod="openstack/ceilometer-0" Nov 23 15:01:18 crc kubenswrapper[5050]: I1123 15:01:18.806253 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 23 15:01:19 crc kubenswrapper[5050]: I1123 15:01:19.011080 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 23 15:01:19 crc kubenswrapper[5050]: I1123 15:01:19.332951 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 23 15:01:19 crc kubenswrapper[5050]: I1123 15:01:19.575774 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="005d3430-65f9-48c5-aeed-afa7fea464cd" path="/var/lib/kubelet/pods/005d3430-65f9-48c5-aeed-afa7fea464cd/volumes" Nov 23 15:01:19 crc kubenswrapper[5050]: I1123 15:01:19.576670 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="940dffda-38d6-469c-8453-5acb4092ed82" path="/var/lib/kubelet/pods/940dffda-38d6-469c-8453-5acb4092ed82/volumes" Nov 23 15:01:20 crc kubenswrapper[5050]: I1123 15:01:20.070542 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"ba1bb9f6-5ca2-4986-9473-62d50d9bebf0","Type":"ContainerStarted","Data":"878b763faeb721f2171524629a6e423a6579824e5ba28a08437820030804f60f"} Nov 23 15:01:20 crc kubenswrapper[5050]: I1123 15:01:20.071092 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"ba1bb9f6-5ca2-4986-9473-62d50d9bebf0","Type":"ContainerStarted","Data":"995a0dabf2d59b6a031197958997eafb205618dab2b1d1f6ccf0bf53bf2ddab8"} Nov 23 15:01:20 crc kubenswrapper[5050]: I1123 15:01:20.072732 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"153c15c7-1c66-41bd-b749-c4ffcf36aded","Type":"ContainerStarted","Data":"1104e32ee47767230a27c6054bba23d9158819355e4bdf202d0faf3993f4b90e"} Nov 23 15:01:21 crc kubenswrapper[5050]: I1123 15:01:21.085081 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"153c15c7-1c66-41bd-b749-c4ffcf36aded","Type":"ContainerStarted","Data":"f178f819f8702c2cb4266228044156442a5e47d4e9f9be48f59a3d7f1087fb03"} Nov 23 15:01:21 crc kubenswrapper[5050]: I1123 15:01:21.085583 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"153c15c7-1c66-41bd-b749-c4ffcf36aded","Type":"ContainerStarted","Data":"15f3969a8a7a909d23a8390a0a67bf1b44c8cba8d87ddc6f58c877a840e235a5"} Nov 23 15:01:21 crc kubenswrapper[5050]: I1123 15:01:21.088149 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" 
event={"ID":"ba1bb9f6-5ca2-4986-9473-62d50d9bebf0","Type":"ContainerStarted","Data":"08fc1f6beb7a82ec24f421d1df48b2d8f792f3248ae507344624f3d012bb743a"} Nov 23 15:01:21 crc kubenswrapper[5050]: I1123 15:01:21.088321 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Nov 23 15:01:21 crc kubenswrapper[5050]: I1123 15:01:21.115615 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=3.11558516 podStartE2EDuration="3.11558516s" podCreationTimestamp="2025-11-23 15:01:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 15:01:21.110619912 +0000 UTC m=+1176.277616397" watchObservedRunningTime="2025-11-23 15:01:21.11558516 +0000 UTC m=+1176.282581645" Nov 23 15:01:21 crc kubenswrapper[5050]: I1123 15:01:21.226821 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-77dc855c68-z488p" Nov 23 15:01:21 crc kubenswrapper[5050]: I1123 15:01:21.306881 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-d7556d5c6-lqb4v"] Nov 23 15:01:21 crc kubenswrapper[5050]: I1123 15:01:21.307222 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-d7556d5c6-lqb4v" podUID="0123488a-af6b-42f3-a828-58b8ee0f4639" containerName="barbican-api-log" containerID="cri-o://4782398b4faea9b16534db3a12e7092435fec0ad49f6e1920316c1aec76cb7b4" gracePeriod=30 Nov 23 15:01:21 crc kubenswrapper[5050]: I1123 15:01:21.307426 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-d7556d5c6-lqb4v" podUID="0123488a-af6b-42f3-a828-58b8ee0f4639" containerName="barbican-api" containerID="cri-o://6a21f9dddd87d645724f8021cd315b1c701c97263acf7edfe1af443b79bc46ad" gracePeriod=30 Nov 23 15:01:22 crc kubenswrapper[5050]: I1123 15:01:22.099880 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"153c15c7-1c66-41bd-b749-c4ffcf36aded","Type":"ContainerStarted","Data":"19f1b5194eedef22dced68a1910ec80c397d353e5869d0b9b6cbc5b129b09d78"} Nov 23 15:01:22 crc kubenswrapper[5050]: I1123 15:01:22.102685 5050 generic.go:334] "Generic (PLEG): container finished" podID="0123488a-af6b-42f3-a828-58b8ee0f4639" containerID="4782398b4faea9b16534db3a12e7092435fec0ad49f6e1920316c1aec76cb7b4" exitCode=143 Nov 23 15:01:22 crc kubenswrapper[5050]: I1123 15:01:22.103583 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-d7556d5c6-lqb4v" event={"ID":"0123488a-af6b-42f3-a828-58b8ee0f4639","Type":"ContainerDied","Data":"4782398b4faea9b16534db3a12e7092435fec0ad49f6e1920316c1aec76cb7b4"} Nov 23 15:01:22 crc kubenswrapper[5050]: I1123 15:01:22.202634 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6578955fd5-fsg9f" Nov 23 15:01:22 crc kubenswrapper[5050]: I1123 15:01:22.217865 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Nov 23 15:01:22 crc kubenswrapper[5050]: I1123 15:01:22.287808 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-848cf88cfc-jd96g"] Nov 23 15:01:22 crc kubenswrapper[5050]: I1123 15:01:22.288156 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-848cf88cfc-jd96g" podUID="fa26df5c-4d96-49d9-8e38-434ca7b03a84" 
containerName="dnsmasq-dns" containerID="cri-o://bd61e5d6f4fffaaacecb1155c22df9c3b1792f33900fceb21258173175465943" gracePeriod=10 Nov 23 15:01:22 crc kubenswrapper[5050]: I1123 15:01:22.378409 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-557bdb6d7b-clvms" Nov 23 15:01:22 crc kubenswrapper[5050]: I1123 15:01:22.576223 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Nov 23 15:01:22 crc kubenswrapper[5050]: I1123 15:01:22.799618 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-848cf88cfc-jd96g" Nov 23 15:01:22 crc kubenswrapper[5050]: I1123 15:01:22.867160 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t2cgm\" (UniqueName: \"kubernetes.io/projected/fa26df5c-4d96-49d9-8e38-434ca7b03a84-kube-api-access-t2cgm\") pod \"fa26df5c-4d96-49d9-8e38-434ca7b03a84\" (UID: \"fa26df5c-4d96-49d9-8e38-434ca7b03a84\") " Nov 23 15:01:22 crc kubenswrapper[5050]: I1123 15:01:22.867309 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/fa26df5c-4d96-49d9-8e38-434ca7b03a84-ovsdbserver-nb\") pod \"fa26df5c-4d96-49d9-8e38-434ca7b03a84\" (UID: \"fa26df5c-4d96-49d9-8e38-434ca7b03a84\") " Nov 23 15:01:22 crc kubenswrapper[5050]: I1123 15:01:22.867347 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fa26df5c-4d96-49d9-8e38-434ca7b03a84-config\") pod \"fa26df5c-4d96-49d9-8e38-434ca7b03a84\" (UID: \"fa26df5c-4d96-49d9-8e38-434ca7b03a84\") " Nov 23 15:01:22 crc kubenswrapper[5050]: I1123 15:01:22.867420 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/fa26df5c-4d96-49d9-8e38-434ca7b03a84-ovsdbserver-sb\") pod \"fa26df5c-4d96-49d9-8e38-434ca7b03a84\" (UID: \"fa26df5c-4d96-49d9-8e38-434ca7b03a84\") " Nov 23 15:01:22 crc kubenswrapper[5050]: I1123 15:01:22.867542 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/fa26df5c-4d96-49d9-8e38-434ca7b03a84-dns-swift-storage-0\") pod \"fa26df5c-4d96-49d9-8e38-434ca7b03a84\" (UID: \"fa26df5c-4d96-49d9-8e38-434ca7b03a84\") " Nov 23 15:01:22 crc kubenswrapper[5050]: I1123 15:01:22.867590 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/fa26df5c-4d96-49d9-8e38-434ca7b03a84-dns-svc\") pod \"fa26df5c-4d96-49d9-8e38-434ca7b03a84\" (UID: \"fa26df5c-4d96-49d9-8e38-434ca7b03a84\") " Nov 23 15:01:22 crc kubenswrapper[5050]: I1123 15:01:22.880701 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fa26df5c-4d96-49d9-8e38-434ca7b03a84-kube-api-access-t2cgm" (OuterVolumeSpecName: "kube-api-access-t2cgm") pod "fa26df5c-4d96-49d9-8e38-434ca7b03a84" (UID: "fa26df5c-4d96-49d9-8e38-434ca7b03a84"). InnerVolumeSpecName "kube-api-access-t2cgm". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:01:22 crc kubenswrapper[5050]: I1123 15:01:22.954861 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fa26df5c-4d96-49d9-8e38-434ca7b03a84-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "fa26df5c-4d96-49d9-8e38-434ca7b03a84" (UID: "fa26df5c-4d96-49d9-8e38-434ca7b03a84"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 15:01:22 crc kubenswrapper[5050]: I1123 15:01:22.955267 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fa26df5c-4d96-49d9-8e38-434ca7b03a84-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "fa26df5c-4d96-49d9-8e38-434ca7b03a84" (UID: "fa26df5c-4d96-49d9-8e38-434ca7b03a84"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 15:01:22 crc kubenswrapper[5050]: I1123 15:01:22.955274 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fa26df5c-4d96-49d9-8e38-434ca7b03a84-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "fa26df5c-4d96-49d9-8e38-434ca7b03a84" (UID: "fa26df5c-4d96-49d9-8e38-434ca7b03a84"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 15:01:22 crc kubenswrapper[5050]: I1123 15:01:22.962059 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fa26df5c-4d96-49d9-8e38-434ca7b03a84-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "fa26df5c-4d96-49d9-8e38-434ca7b03a84" (UID: "fa26df5c-4d96-49d9-8e38-434ca7b03a84"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 15:01:22 crc kubenswrapper[5050]: I1123 15:01:22.970998 5050 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/fa26df5c-4d96-49d9-8e38-434ca7b03a84-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 23 15:01:22 crc kubenswrapper[5050]: I1123 15:01:22.971104 5050 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/fa26df5c-4d96-49d9-8e38-434ca7b03a84-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 23 15:01:22 crc kubenswrapper[5050]: I1123 15:01:22.971193 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t2cgm\" (UniqueName: \"kubernetes.io/projected/fa26df5c-4d96-49d9-8e38-434ca7b03a84-kube-api-access-t2cgm\") on node \"crc\" DevicePath \"\"" Nov 23 15:01:22 crc kubenswrapper[5050]: I1123 15:01:22.971304 5050 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/fa26df5c-4d96-49d9-8e38-434ca7b03a84-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 23 15:01:22 crc kubenswrapper[5050]: I1123 15:01:22.971385 5050 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/fa26df5c-4d96-49d9-8e38-434ca7b03a84-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 23 15:01:22 crc kubenswrapper[5050]: I1123 15:01:22.987990 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fa26df5c-4d96-49d9-8e38-434ca7b03a84-config" (OuterVolumeSpecName: "config") pod "fa26df5c-4d96-49d9-8e38-434ca7b03a84" (UID: "fa26df5c-4d96-49d9-8e38-434ca7b03a84"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 15:01:23 crc kubenswrapper[5050]: I1123 15:01:23.072971 5050 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fa26df5c-4d96-49d9-8e38-434ca7b03a84-config\") on node \"crc\" DevicePath \"\"" Nov 23 15:01:23 crc kubenswrapper[5050]: I1123 15:01:23.117621 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"153c15c7-1c66-41bd-b749-c4ffcf36aded","Type":"ContainerStarted","Data":"d8eeba7955eccbe32932e5a02fbc4e826466b88d94a60d1ec3fbe69cac5eef3a"} Nov 23 15:01:23 crc kubenswrapper[5050]: I1123 15:01:23.119015 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 23 15:01:23 crc kubenswrapper[5050]: I1123 15:01:23.120625 5050 generic.go:334] "Generic (PLEG): container finished" podID="fa26df5c-4d96-49d9-8e38-434ca7b03a84" containerID="bd61e5d6f4fffaaacecb1155c22df9c3b1792f33900fceb21258173175465943" exitCode=0 Nov 23 15:01:23 crc kubenswrapper[5050]: I1123 15:01:23.120736 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-848cf88cfc-jd96g" event={"ID":"fa26df5c-4d96-49d9-8e38-434ca7b03a84","Type":"ContainerDied","Data":"bd61e5d6f4fffaaacecb1155c22df9c3b1792f33900fceb21258173175465943"} Nov 23 15:01:23 crc kubenswrapper[5050]: I1123 15:01:23.120801 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-848cf88cfc-jd96g" Nov 23 15:01:23 crc kubenswrapper[5050]: I1123 15:01:23.120827 5050 scope.go:117] "RemoveContainer" containerID="bd61e5d6f4fffaaacecb1155c22df9c3b1792f33900fceb21258173175465943" Nov 23 15:01:23 crc kubenswrapper[5050]: I1123 15:01:23.120810 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-848cf88cfc-jd96g" event={"ID":"fa26df5c-4d96-49d9-8e38-434ca7b03a84","Type":"ContainerDied","Data":"582d8b167761e7c3319f44f3d1b918c96b5740a1bc1caed5393c047af2063b3c"} Nov 23 15:01:23 crc kubenswrapper[5050]: I1123 15:01:23.153429 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.695129445 podStartE2EDuration="5.15340008s" podCreationTimestamp="2025-11-23 15:01:18 +0000 UTC" firstStartedPulling="2025-11-23 15:01:19.34723518 +0000 UTC m=+1174.514231665" lastFinishedPulling="2025-11-23 15:01:22.805505815 +0000 UTC m=+1177.972502300" observedRunningTime="2025-11-23 15:01:23.148464533 +0000 UTC m=+1178.315461038" watchObservedRunningTime="2025-11-23 15:01:23.15340008 +0000 UTC m=+1178.320396565" Nov 23 15:01:23 crc kubenswrapper[5050]: I1123 15:01:23.179521 5050 scope.go:117] "RemoveContainer" containerID="2849baeded4e502bf2e384360155c347b93dd1486d16d29497386423b90182a5" Nov 23 15:01:23 crc kubenswrapper[5050]: I1123 15:01:23.197406 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 23 15:01:23 crc kubenswrapper[5050]: I1123 15:01:23.209614 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-848cf88cfc-jd96g"] Nov 23 15:01:23 crc kubenswrapper[5050]: I1123 15:01:23.225540 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-848cf88cfc-jd96g"] Nov 23 15:01:23 crc kubenswrapper[5050]: I1123 15:01:23.239591 5050 scope.go:117] "RemoveContainer" containerID="bd61e5d6f4fffaaacecb1155c22df9c3b1792f33900fceb21258173175465943" Nov 23 15:01:23 crc kubenswrapper[5050]: E1123 15:01:23.240334 5050 log.go:32] 
"ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bd61e5d6f4fffaaacecb1155c22df9c3b1792f33900fceb21258173175465943\": container with ID starting with bd61e5d6f4fffaaacecb1155c22df9c3b1792f33900fceb21258173175465943 not found: ID does not exist" containerID="bd61e5d6f4fffaaacecb1155c22df9c3b1792f33900fceb21258173175465943" Nov 23 15:01:23 crc kubenswrapper[5050]: I1123 15:01:23.240388 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bd61e5d6f4fffaaacecb1155c22df9c3b1792f33900fceb21258173175465943"} err="failed to get container status \"bd61e5d6f4fffaaacecb1155c22df9c3b1792f33900fceb21258173175465943\": rpc error: code = NotFound desc = could not find container \"bd61e5d6f4fffaaacecb1155c22df9c3b1792f33900fceb21258173175465943\": container with ID starting with bd61e5d6f4fffaaacecb1155c22df9c3b1792f33900fceb21258173175465943 not found: ID does not exist" Nov 23 15:01:23 crc kubenswrapper[5050]: I1123 15:01:23.240425 5050 scope.go:117] "RemoveContainer" containerID="2849baeded4e502bf2e384360155c347b93dd1486d16d29497386423b90182a5" Nov 23 15:01:23 crc kubenswrapper[5050]: E1123 15:01:23.241087 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2849baeded4e502bf2e384360155c347b93dd1486d16d29497386423b90182a5\": container with ID starting with 2849baeded4e502bf2e384360155c347b93dd1486d16d29497386423b90182a5 not found: ID does not exist" containerID="2849baeded4e502bf2e384360155c347b93dd1486d16d29497386423b90182a5" Nov 23 15:01:23 crc kubenswrapper[5050]: I1123 15:01:23.241133 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2849baeded4e502bf2e384360155c347b93dd1486d16d29497386423b90182a5"} err="failed to get container status \"2849baeded4e502bf2e384360155c347b93dd1486d16d29497386423b90182a5\": rpc error: code = NotFound desc = could not find container \"2849baeded4e502bf2e384360155c347b93dd1486d16d29497386423b90182a5\": container with ID starting with 2849baeded4e502bf2e384360155c347b93dd1486d16d29497386423b90182a5 not found: ID does not exist" Nov 23 15:01:23 crc kubenswrapper[5050]: I1123 15:01:23.561832 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fa26df5c-4d96-49d9-8e38-434ca7b03a84" path="/var/lib/kubelet/pods/fa26df5c-4d96-49d9-8e38-434ca7b03a84/volumes" Nov 23 15:01:24 crc kubenswrapper[5050]: I1123 15:01:24.136669 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="17cf7e41-00d7-40a7-b1ec-514f69143dc1" containerName="cinder-scheduler" containerID="cri-o://7968f6a282ac76de9ce7a682968a7c5cb97295d69dd1c15203e12d614301595c" gracePeriod=30 Nov 23 15:01:24 crc kubenswrapper[5050]: I1123 15:01:24.138406 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="17cf7e41-00d7-40a7-b1ec-514f69143dc1" containerName="probe" containerID="cri-o://9a74fcd5cb3e71e3458c5d1c7eeef810707c6829e349ee1f44f266b37b9a2055" gracePeriod=30 Nov 23 15:01:24 crc kubenswrapper[5050]: I1123 15:01:24.554312 5050 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-d7556d5c6-lqb4v" podUID="0123488a-af6b-42f3-a828-58b8ee0f4639" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.160:9311/healthcheck\": read tcp 10.217.0.2:42166->10.217.0.160:9311: read: connection reset by 
peer" Nov 23 15:01:24 crc kubenswrapper[5050]: I1123 15:01:24.555481 5050 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-d7556d5c6-lqb4v" podUID="0123488a-af6b-42f3-a828-58b8ee0f4639" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.160:9311/healthcheck\": read tcp 10.217.0.2:42180->10.217.0.160:9311: read: connection reset by peer" Nov 23 15:01:25 crc kubenswrapper[5050]: I1123 15:01:25.115608 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-d7556d5c6-lqb4v" Nov 23 15:01:25 crc kubenswrapper[5050]: I1123 15:01:25.154825 5050 generic.go:334] "Generic (PLEG): container finished" podID="0123488a-af6b-42f3-a828-58b8ee0f4639" containerID="6a21f9dddd87d645724f8021cd315b1c701c97263acf7edfe1af443b79bc46ad" exitCode=0 Nov 23 15:01:25 crc kubenswrapper[5050]: I1123 15:01:25.154902 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-d7556d5c6-lqb4v" event={"ID":"0123488a-af6b-42f3-a828-58b8ee0f4639","Type":"ContainerDied","Data":"6a21f9dddd87d645724f8021cd315b1c701c97263acf7edfe1af443b79bc46ad"} Nov 23 15:01:25 crc kubenswrapper[5050]: I1123 15:01:25.154935 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-d7556d5c6-lqb4v" event={"ID":"0123488a-af6b-42f3-a828-58b8ee0f4639","Type":"ContainerDied","Data":"98c91cc4d876e763221b031aa7dd6b2f1bf0f4433a26a0d633d6a8ad901a7136"} Nov 23 15:01:25 crc kubenswrapper[5050]: I1123 15:01:25.154954 5050 scope.go:117] "RemoveContainer" containerID="6a21f9dddd87d645724f8021cd315b1c701c97263acf7edfe1af443b79bc46ad" Nov 23 15:01:25 crc kubenswrapper[5050]: I1123 15:01:25.155061 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-d7556d5c6-lqb4v" Nov 23 15:01:25 crc kubenswrapper[5050]: I1123 15:01:25.166724 5050 generic.go:334] "Generic (PLEG): container finished" podID="17cf7e41-00d7-40a7-b1ec-514f69143dc1" containerID="9a74fcd5cb3e71e3458c5d1c7eeef810707c6829e349ee1f44f266b37b9a2055" exitCode=0 Nov 23 15:01:25 crc kubenswrapper[5050]: I1123 15:01:25.166925 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"17cf7e41-00d7-40a7-b1ec-514f69143dc1","Type":"ContainerDied","Data":"9a74fcd5cb3e71e3458c5d1c7eeef810707c6829e349ee1f44f266b37b9a2055"} Nov 23 15:01:25 crc kubenswrapper[5050]: I1123 15:01:25.186580 5050 scope.go:117] "RemoveContainer" containerID="4782398b4faea9b16534db3a12e7092435fec0ad49f6e1920316c1aec76cb7b4" Nov 23 15:01:25 crc kubenswrapper[5050]: I1123 15:01:25.211846 5050 scope.go:117] "RemoveContainer" containerID="6a21f9dddd87d645724f8021cd315b1c701c97263acf7edfe1af443b79bc46ad" Nov 23 15:01:25 crc kubenswrapper[5050]: E1123 15:01:25.212242 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6a21f9dddd87d645724f8021cd315b1c701c97263acf7edfe1af443b79bc46ad\": container with ID starting with 6a21f9dddd87d645724f8021cd315b1c701c97263acf7edfe1af443b79bc46ad not found: ID does not exist" containerID="6a21f9dddd87d645724f8021cd315b1c701c97263acf7edfe1af443b79bc46ad" Nov 23 15:01:25 crc kubenswrapper[5050]: I1123 15:01:25.212289 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6a21f9dddd87d645724f8021cd315b1c701c97263acf7edfe1af443b79bc46ad"} err="failed to get container status 
\"6a21f9dddd87d645724f8021cd315b1c701c97263acf7edfe1af443b79bc46ad\": rpc error: code = NotFound desc = could not find container \"6a21f9dddd87d645724f8021cd315b1c701c97263acf7edfe1af443b79bc46ad\": container with ID starting with 6a21f9dddd87d645724f8021cd315b1c701c97263acf7edfe1af443b79bc46ad not found: ID does not exist" Nov 23 15:01:25 crc kubenswrapper[5050]: I1123 15:01:25.212312 5050 scope.go:117] "RemoveContainer" containerID="4782398b4faea9b16534db3a12e7092435fec0ad49f6e1920316c1aec76cb7b4" Nov 23 15:01:25 crc kubenswrapper[5050]: E1123 15:01:25.212718 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4782398b4faea9b16534db3a12e7092435fec0ad49f6e1920316c1aec76cb7b4\": container with ID starting with 4782398b4faea9b16534db3a12e7092435fec0ad49f6e1920316c1aec76cb7b4 not found: ID does not exist" containerID="4782398b4faea9b16534db3a12e7092435fec0ad49f6e1920316c1aec76cb7b4" Nov 23 15:01:25 crc kubenswrapper[5050]: I1123 15:01:25.212788 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4782398b4faea9b16534db3a12e7092435fec0ad49f6e1920316c1aec76cb7b4"} err="failed to get container status \"4782398b4faea9b16534db3a12e7092435fec0ad49f6e1920316c1aec76cb7b4\": rpc error: code = NotFound desc = could not find container \"4782398b4faea9b16534db3a12e7092435fec0ad49f6e1920316c1aec76cb7b4\": container with ID starting with 4782398b4faea9b16534db3a12e7092435fec0ad49f6e1920316c1aec76cb7b4 not found: ID does not exist" Nov 23 15:01:25 crc kubenswrapper[5050]: I1123 15:01:25.220401 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0123488a-af6b-42f3-a828-58b8ee0f4639-combined-ca-bundle\") pod \"0123488a-af6b-42f3-a828-58b8ee0f4639\" (UID: \"0123488a-af6b-42f3-a828-58b8ee0f4639\") " Nov 23 15:01:25 crc kubenswrapper[5050]: I1123 15:01:25.220496 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0123488a-af6b-42f3-a828-58b8ee0f4639-config-data\") pod \"0123488a-af6b-42f3-a828-58b8ee0f4639\" (UID: \"0123488a-af6b-42f3-a828-58b8ee0f4639\") " Nov 23 15:01:25 crc kubenswrapper[5050]: I1123 15:01:25.220599 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dpsn6\" (UniqueName: \"kubernetes.io/projected/0123488a-af6b-42f3-a828-58b8ee0f4639-kube-api-access-dpsn6\") pod \"0123488a-af6b-42f3-a828-58b8ee0f4639\" (UID: \"0123488a-af6b-42f3-a828-58b8ee0f4639\") " Nov 23 15:01:25 crc kubenswrapper[5050]: I1123 15:01:25.222378 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0123488a-af6b-42f3-a828-58b8ee0f4639-logs\") pod \"0123488a-af6b-42f3-a828-58b8ee0f4639\" (UID: \"0123488a-af6b-42f3-a828-58b8ee0f4639\") " Nov 23 15:01:25 crc kubenswrapper[5050]: I1123 15:01:25.222977 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0123488a-af6b-42f3-a828-58b8ee0f4639-logs" (OuterVolumeSpecName: "logs") pod "0123488a-af6b-42f3-a828-58b8ee0f4639" (UID: "0123488a-af6b-42f3-a828-58b8ee0f4639"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 15:01:25 crc kubenswrapper[5050]: I1123 15:01:25.235973 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0123488a-af6b-42f3-a828-58b8ee0f4639-config-data-custom\") pod \"0123488a-af6b-42f3-a828-58b8ee0f4639\" (UID: \"0123488a-af6b-42f3-a828-58b8ee0f4639\") " Nov 23 15:01:25 crc kubenswrapper[5050]: I1123 15:01:25.237725 5050 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0123488a-af6b-42f3-a828-58b8ee0f4639-logs\") on node \"crc\" DevicePath \"\"" Nov 23 15:01:25 crc kubenswrapper[5050]: I1123 15:01:25.239700 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0123488a-af6b-42f3-a828-58b8ee0f4639-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "0123488a-af6b-42f3-a828-58b8ee0f4639" (UID: "0123488a-af6b-42f3-a828-58b8ee0f4639"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:01:25 crc kubenswrapper[5050]: I1123 15:01:25.240611 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0123488a-af6b-42f3-a828-58b8ee0f4639-kube-api-access-dpsn6" (OuterVolumeSpecName: "kube-api-access-dpsn6") pod "0123488a-af6b-42f3-a828-58b8ee0f4639" (UID: "0123488a-af6b-42f3-a828-58b8ee0f4639"). InnerVolumeSpecName "kube-api-access-dpsn6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:01:25 crc kubenswrapper[5050]: I1123 15:01:25.263550 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0123488a-af6b-42f3-a828-58b8ee0f4639-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0123488a-af6b-42f3-a828-58b8ee0f4639" (UID: "0123488a-af6b-42f3-a828-58b8ee0f4639"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:01:25 crc kubenswrapper[5050]: I1123 15:01:25.292775 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0123488a-af6b-42f3-a828-58b8ee0f4639-config-data" (OuterVolumeSpecName: "config-data") pod "0123488a-af6b-42f3-a828-58b8ee0f4639" (UID: "0123488a-af6b-42f3-a828-58b8ee0f4639"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:01:25 crc kubenswrapper[5050]: I1123 15:01:25.299713 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-7cb7995f89-k8h9t" Nov 23 15:01:25 crc kubenswrapper[5050]: I1123 15:01:25.340101 5050 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0123488a-af6b-42f3-a828-58b8ee0f4639-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 23 15:01:25 crc kubenswrapper[5050]: I1123 15:01:25.340152 5050 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0123488a-af6b-42f3-a828-58b8ee0f4639-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 15:01:25 crc kubenswrapper[5050]: I1123 15:01:25.340165 5050 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0123488a-af6b-42f3-a828-58b8ee0f4639-config-data\") on node \"crc\" DevicePath \"\"" Nov 23 15:01:25 crc kubenswrapper[5050]: I1123 15:01:25.340176 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dpsn6\" (UniqueName: \"kubernetes.io/projected/0123488a-af6b-42f3-a828-58b8ee0f4639-kube-api-access-dpsn6\") on node \"crc\" DevicePath \"\"" Nov 23 15:01:25 crc kubenswrapper[5050]: I1123 15:01:25.383302 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-557bdb6d7b-clvms"] Nov 23 15:01:25 crc kubenswrapper[5050]: I1123 15:01:25.384018 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-557bdb6d7b-clvms" podUID="97329021-2f95-4540-bb83-c816a4f471ff" containerName="neutron-httpd" containerID="cri-o://f2861d7bc955e3ef8bd52b3ed3c192dfa3077057bcd368c4878b1882a30e8b48" gracePeriod=30 Nov 23 15:01:25 crc kubenswrapper[5050]: I1123 15:01:25.384725 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-557bdb6d7b-clvms" podUID="97329021-2f95-4540-bb83-c816a4f471ff" containerName="neutron-api" containerID="cri-o://cbd55eeb58dd0470a584ce250f5cc0e0004303738408fea9238e70cbbbe00b8d" gracePeriod=30 Nov 23 15:01:25 crc kubenswrapper[5050]: I1123 15:01:25.499534 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-d7556d5c6-lqb4v"] Nov 23 15:01:25 crc kubenswrapper[5050]: I1123 15:01:25.509124 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-api-d7556d5c6-lqb4v"] Nov 23 15:01:25 crc kubenswrapper[5050]: I1123 15:01:25.565785 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0123488a-af6b-42f3-a828-58b8ee0f4639" path="/var/lib/kubelet/pods/0123488a-af6b-42f3-a828-58b8ee0f4639/volumes" Nov 23 15:01:26 crc kubenswrapper[5050]: I1123 15:01:26.181912 5050 generic.go:334] "Generic (PLEG): container finished" podID="97329021-2f95-4540-bb83-c816a4f471ff" containerID="f2861d7bc955e3ef8bd52b3ed3c192dfa3077057bcd368c4878b1882a30e8b48" exitCode=0 Nov 23 15:01:26 crc kubenswrapper[5050]: I1123 15:01:26.182016 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-557bdb6d7b-clvms" event={"ID":"97329021-2f95-4540-bb83-c816a4f471ff","Type":"ContainerDied","Data":"f2861d7bc955e3ef8bd52b3ed3c192dfa3077057bcd368c4878b1882a30e8b48"} Nov 23 15:01:27 crc kubenswrapper[5050]: I1123 15:01:27.200326 5050 generic.go:334] "Generic (PLEG): container finished" podID="97329021-2f95-4540-bb83-c816a4f471ff" 
containerID="cbd55eeb58dd0470a584ce250f5cc0e0004303738408fea9238e70cbbbe00b8d" exitCode=0 Nov 23 15:01:27 crc kubenswrapper[5050]: I1123 15:01:27.200402 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-557bdb6d7b-clvms" event={"ID":"97329021-2f95-4540-bb83-c816a4f471ff","Type":"ContainerDied","Data":"cbd55eeb58dd0470a584ce250f5cc0e0004303738408fea9238e70cbbbe00b8d"} Nov 23 15:01:27 crc kubenswrapper[5050]: I1123 15:01:27.457168 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-557bdb6d7b-clvms" Nov 23 15:01:27 crc kubenswrapper[5050]: I1123 15:01:27.587698 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-92cjn\" (UniqueName: \"kubernetes.io/projected/97329021-2f95-4540-bb83-c816a4f471ff-kube-api-access-92cjn\") pod \"97329021-2f95-4540-bb83-c816a4f471ff\" (UID: \"97329021-2f95-4540-bb83-c816a4f471ff\") " Nov 23 15:01:27 crc kubenswrapper[5050]: I1123 15:01:27.587764 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/97329021-2f95-4540-bb83-c816a4f471ff-config\") pod \"97329021-2f95-4540-bb83-c816a4f471ff\" (UID: \"97329021-2f95-4540-bb83-c816a4f471ff\") " Nov 23 15:01:27 crc kubenswrapper[5050]: I1123 15:01:27.587850 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/97329021-2f95-4540-bb83-c816a4f471ff-httpd-config\") pod \"97329021-2f95-4540-bb83-c816a4f471ff\" (UID: \"97329021-2f95-4540-bb83-c816a4f471ff\") " Nov 23 15:01:27 crc kubenswrapper[5050]: I1123 15:01:27.587900 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/97329021-2f95-4540-bb83-c816a4f471ff-ovndb-tls-certs\") pod \"97329021-2f95-4540-bb83-c816a4f471ff\" (UID: \"97329021-2f95-4540-bb83-c816a4f471ff\") " Nov 23 15:01:27 crc kubenswrapper[5050]: I1123 15:01:27.587929 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/97329021-2f95-4540-bb83-c816a4f471ff-combined-ca-bundle\") pod \"97329021-2f95-4540-bb83-c816a4f471ff\" (UID: \"97329021-2f95-4540-bb83-c816a4f471ff\") " Nov 23 15:01:27 crc kubenswrapper[5050]: I1123 15:01:27.597845 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/97329021-2f95-4540-bb83-c816a4f471ff-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "97329021-2f95-4540-bb83-c816a4f471ff" (UID: "97329021-2f95-4540-bb83-c816a4f471ff"). InnerVolumeSpecName "httpd-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:01:27 crc kubenswrapper[5050]: I1123 15:01:27.598738 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/97329021-2f95-4540-bb83-c816a4f471ff-kube-api-access-92cjn" (OuterVolumeSpecName: "kube-api-access-92cjn") pod "97329021-2f95-4540-bb83-c816a4f471ff" (UID: "97329021-2f95-4540-bb83-c816a4f471ff"). InnerVolumeSpecName "kube-api-access-92cjn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:01:27 crc kubenswrapper[5050]: I1123 15:01:27.664385 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/97329021-2f95-4540-bb83-c816a4f471ff-config" (OuterVolumeSpecName: "config") pod "97329021-2f95-4540-bb83-c816a4f471ff" (UID: "97329021-2f95-4540-bb83-c816a4f471ff"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:01:27 crc kubenswrapper[5050]: I1123 15:01:27.669216 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/97329021-2f95-4540-bb83-c816a4f471ff-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "97329021-2f95-4540-bb83-c816a4f471ff" (UID: "97329021-2f95-4540-bb83-c816a4f471ff"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:01:27 crc kubenswrapper[5050]: I1123 15:01:27.696534 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-92cjn\" (UniqueName: \"kubernetes.io/projected/97329021-2f95-4540-bb83-c816a4f471ff-kube-api-access-92cjn\") on node \"crc\" DevicePath \"\"" Nov 23 15:01:27 crc kubenswrapper[5050]: I1123 15:01:27.696574 5050 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/97329021-2f95-4540-bb83-c816a4f471ff-config\") on node \"crc\" DevicePath \"\"" Nov 23 15:01:27 crc kubenswrapper[5050]: I1123 15:01:27.696585 5050 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/97329021-2f95-4540-bb83-c816a4f471ff-httpd-config\") on node \"crc\" DevicePath \"\"" Nov 23 15:01:27 crc kubenswrapper[5050]: I1123 15:01:27.696594 5050 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/97329021-2f95-4540-bb83-c816a4f471ff-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 15:01:27 crc kubenswrapper[5050]: I1123 15:01:27.702647 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/97329021-2f95-4540-bb83-c816a4f471ff-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "97329021-2f95-4540-bb83-c816a4f471ff" (UID: "97329021-2f95-4540-bb83-c816a4f471ff"). InnerVolumeSpecName "ovndb-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:01:27 crc kubenswrapper[5050]: I1123 15:01:27.799920 5050 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/97329021-2f95-4540-bb83-c816a4f471ff-ovndb-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 23 15:01:27 crc kubenswrapper[5050]: I1123 15:01:27.906340 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 23 15:01:28 crc kubenswrapper[5050]: I1123 15:01:28.105616 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/17cf7e41-00d7-40a7-b1ec-514f69143dc1-config-data\") pod \"17cf7e41-00d7-40a7-b1ec-514f69143dc1\" (UID: \"17cf7e41-00d7-40a7-b1ec-514f69143dc1\") " Nov 23 15:01:28 crc kubenswrapper[5050]: I1123 15:01:28.105713 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/17cf7e41-00d7-40a7-b1ec-514f69143dc1-combined-ca-bundle\") pod \"17cf7e41-00d7-40a7-b1ec-514f69143dc1\" (UID: \"17cf7e41-00d7-40a7-b1ec-514f69143dc1\") " Nov 23 15:01:28 crc kubenswrapper[5050]: I1123 15:01:28.105805 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pnz7v\" (UniqueName: \"kubernetes.io/projected/17cf7e41-00d7-40a7-b1ec-514f69143dc1-kube-api-access-pnz7v\") pod \"17cf7e41-00d7-40a7-b1ec-514f69143dc1\" (UID: \"17cf7e41-00d7-40a7-b1ec-514f69143dc1\") " Nov 23 15:01:28 crc kubenswrapper[5050]: I1123 15:01:28.105962 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/17cf7e41-00d7-40a7-b1ec-514f69143dc1-config-data-custom\") pod \"17cf7e41-00d7-40a7-b1ec-514f69143dc1\" (UID: \"17cf7e41-00d7-40a7-b1ec-514f69143dc1\") " Nov 23 15:01:28 crc kubenswrapper[5050]: I1123 15:01:28.106050 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/17cf7e41-00d7-40a7-b1ec-514f69143dc1-scripts\") pod \"17cf7e41-00d7-40a7-b1ec-514f69143dc1\" (UID: \"17cf7e41-00d7-40a7-b1ec-514f69143dc1\") " Nov 23 15:01:28 crc kubenswrapper[5050]: I1123 15:01:28.106079 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/17cf7e41-00d7-40a7-b1ec-514f69143dc1-etc-machine-id\") pod \"17cf7e41-00d7-40a7-b1ec-514f69143dc1\" (UID: \"17cf7e41-00d7-40a7-b1ec-514f69143dc1\") " Nov 23 15:01:28 crc kubenswrapper[5050]: I1123 15:01:28.106342 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/17cf7e41-00d7-40a7-b1ec-514f69143dc1-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "17cf7e41-00d7-40a7-b1ec-514f69143dc1" (UID: "17cf7e41-00d7-40a7-b1ec-514f69143dc1"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 23 15:01:28 crc kubenswrapper[5050]: I1123 15:01:28.107024 5050 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/17cf7e41-00d7-40a7-b1ec-514f69143dc1-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 23 15:01:28 crc kubenswrapper[5050]: I1123 15:01:28.111290 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/17cf7e41-00d7-40a7-b1ec-514f69143dc1-kube-api-access-pnz7v" (OuterVolumeSpecName: "kube-api-access-pnz7v") pod "17cf7e41-00d7-40a7-b1ec-514f69143dc1" (UID: "17cf7e41-00d7-40a7-b1ec-514f69143dc1"). InnerVolumeSpecName "kube-api-access-pnz7v". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:01:28 crc kubenswrapper[5050]: I1123 15:01:28.112302 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/17cf7e41-00d7-40a7-b1ec-514f69143dc1-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "17cf7e41-00d7-40a7-b1ec-514f69143dc1" (UID: "17cf7e41-00d7-40a7-b1ec-514f69143dc1"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:01:28 crc kubenswrapper[5050]: I1123 15:01:28.113625 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/17cf7e41-00d7-40a7-b1ec-514f69143dc1-scripts" (OuterVolumeSpecName: "scripts") pod "17cf7e41-00d7-40a7-b1ec-514f69143dc1" (UID: "17cf7e41-00d7-40a7-b1ec-514f69143dc1"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:01:28 crc kubenswrapper[5050]: I1123 15:01:28.178015 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/17cf7e41-00d7-40a7-b1ec-514f69143dc1-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "17cf7e41-00d7-40a7-b1ec-514f69143dc1" (UID: "17cf7e41-00d7-40a7-b1ec-514f69143dc1"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:01:28 crc kubenswrapper[5050]: I1123 15:01:28.210434 5050 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/17cf7e41-00d7-40a7-b1ec-514f69143dc1-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 23 15:01:28 crc kubenswrapper[5050]: I1123 15:01:28.210487 5050 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/17cf7e41-00d7-40a7-b1ec-514f69143dc1-scripts\") on node \"crc\" DevicePath \"\"" Nov 23 15:01:28 crc kubenswrapper[5050]: I1123 15:01:28.210498 5050 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/17cf7e41-00d7-40a7-b1ec-514f69143dc1-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 15:01:28 crc kubenswrapper[5050]: I1123 15:01:28.210508 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pnz7v\" (UniqueName: \"kubernetes.io/projected/17cf7e41-00d7-40a7-b1ec-514f69143dc1-kube-api-access-pnz7v\") on node \"crc\" DevicePath \"\"" Nov 23 15:01:28 crc kubenswrapper[5050]: I1123 15:01:28.223024 5050 generic.go:334] "Generic (PLEG): container finished" podID="17cf7e41-00d7-40a7-b1ec-514f69143dc1" containerID="7968f6a282ac76de9ce7a682968a7c5cb97295d69dd1c15203e12d614301595c" exitCode=0 Nov 23 15:01:28 crc kubenswrapper[5050]: I1123 15:01:28.223075 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 23 15:01:28 crc kubenswrapper[5050]: I1123 15:01:28.223098 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"17cf7e41-00d7-40a7-b1ec-514f69143dc1","Type":"ContainerDied","Data":"7968f6a282ac76de9ce7a682968a7c5cb97295d69dd1c15203e12d614301595c"} Nov 23 15:01:28 crc kubenswrapper[5050]: I1123 15:01:28.223168 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"17cf7e41-00d7-40a7-b1ec-514f69143dc1","Type":"ContainerDied","Data":"0c26bc795b908166c29fe32dd881854e5832b9993f06f826a9ba3ab8388a6d3f"} Nov 23 15:01:28 crc kubenswrapper[5050]: I1123 15:01:28.223191 5050 scope.go:117] "RemoveContainer" containerID="9a74fcd5cb3e71e3458c5d1c7eeef810707c6829e349ee1f44f266b37b9a2055" Nov 23 15:01:28 crc kubenswrapper[5050]: I1123 15:01:28.229140 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-557bdb6d7b-clvms" event={"ID":"97329021-2f95-4540-bb83-c816a4f471ff","Type":"ContainerDied","Data":"f25b57c626b90edfafa28139b38a3e1769e9234306bb7404152116f754ad2822"} Nov 23 15:01:28 crc kubenswrapper[5050]: I1123 15:01:28.230976 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-557bdb6d7b-clvms" Nov 23 15:01:28 crc kubenswrapper[5050]: I1123 15:01:28.272946 5050 scope.go:117] "RemoveContainer" containerID="7968f6a282ac76de9ce7a682968a7c5cb97295d69dd1c15203e12d614301595c" Nov 23 15:01:28 crc kubenswrapper[5050]: I1123 15:01:28.283056 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-557bdb6d7b-clvms"] Nov 23 15:01:28 crc kubenswrapper[5050]: I1123 15:01:28.284670 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/17cf7e41-00d7-40a7-b1ec-514f69143dc1-config-data" (OuterVolumeSpecName: "config-data") pod "17cf7e41-00d7-40a7-b1ec-514f69143dc1" (UID: "17cf7e41-00d7-40a7-b1ec-514f69143dc1"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:01:28 crc kubenswrapper[5050]: I1123 15:01:28.294563 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-557bdb6d7b-clvms"] Nov 23 15:01:28 crc kubenswrapper[5050]: I1123 15:01:28.300562 5050 scope.go:117] "RemoveContainer" containerID="9a74fcd5cb3e71e3458c5d1c7eeef810707c6829e349ee1f44f266b37b9a2055" Nov 23 15:01:28 crc kubenswrapper[5050]: E1123 15:01:28.301158 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9a74fcd5cb3e71e3458c5d1c7eeef810707c6829e349ee1f44f266b37b9a2055\": container with ID starting with 9a74fcd5cb3e71e3458c5d1c7eeef810707c6829e349ee1f44f266b37b9a2055 not found: ID does not exist" containerID="9a74fcd5cb3e71e3458c5d1c7eeef810707c6829e349ee1f44f266b37b9a2055" Nov 23 15:01:28 crc kubenswrapper[5050]: I1123 15:01:28.301199 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9a74fcd5cb3e71e3458c5d1c7eeef810707c6829e349ee1f44f266b37b9a2055"} err="failed to get container status \"9a74fcd5cb3e71e3458c5d1c7eeef810707c6829e349ee1f44f266b37b9a2055\": rpc error: code = NotFound desc = could not find container \"9a74fcd5cb3e71e3458c5d1c7eeef810707c6829e349ee1f44f266b37b9a2055\": container with ID starting with 9a74fcd5cb3e71e3458c5d1c7eeef810707c6829e349ee1f44f266b37b9a2055 not found: ID does not exist" Nov 23 15:01:28 crc kubenswrapper[5050]: I1123 15:01:28.301231 5050 scope.go:117] "RemoveContainer" containerID="7968f6a282ac76de9ce7a682968a7c5cb97295d69dd1c15203e12d614301595c" Nov 23 15:01:28 crc kubenswrapper[5050]: E1123 15:01:28.302385 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7968f6a282ac76de9ce7a682968a7c5cb97295d69dd1c15203e12d614301595c\": container with ID starting with 7968f6a282ac76de9ce7a682968a7c5cb97295d69dd1c15203e12d614301595c not found: ID does not exist" containerID="7968f6a282ac76de9ce7a682968a7c5cb97295d69dd1c15203e12d614301595c" Nov 23 15:01:28 crc kubenswrapper[5050]: I1123 15:01:28.302411 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7968f6a282ac76de9ce7a682968a7c5cb97295d69dd1c15203e12d614301595c"} err="failed to get container status \"7968f6a282ac76de9ce7a682968a7c5cb97295d69dd1c15203e12d614301595c\": rpc error: code = NotFound desc = could not find container \"7968f6a282ac76de9ce7a682968a7c5cb97295d69dd1c15203e12d614301595c\": container with ID starting with 7968f6a282ac76de9ce7a682968a7c5cb97295d69dd1c15203e12d614301595c not found: ID does not exist" Nov 23 15:01:28 crc kubenswrapper[5050]: I1123 15:01:28.302431 5050 scope.go:117] "RemoveContainer" containerID="f2861d7bc955e3ef8bd52b3ed3c192dfa3077057bcd368c4878b1882a30e8b48" Nov 23 15:01:28 crc kubenswrapper[5050]: I1123 15:01:28.312114 5050 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/17cf7e41-00d7-40a7-b1ec-514f69143dc1-config-data\") on node \"crc\" DevicePath \"\"" Nov 23 15:01:28 crc kubenswrapper[5050]: I1123 15:01:28.337171 5050 scope.go:117] "RemoveContainer" containerID="cbd55eeb58dd0470a584ce250f5cc0e0004303738408fea9238e70cbbbe00b8d" Nov 23 15:01:28 crc kubenswrapper[5050]: I1123 15:01:28.556509 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 23 15:01:28 crc kubenswrapper[5050]: I1123 15:01:28.564253 5050 kubelet.go:2431] "SyncLoop 
REMOVE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 23 15:01:28 crc kubenswrapper[5050]: I1123 15:01:28.592193 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Nov 23 15:01:28 crc kubenswrapper[5050]: E1123 15:01:28.592624 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0123488a-af6b-42f3-a828-58b8ee0f4639" containerName="barbican-api-log" Nov 23 15:01:28 crc kubenswrapper[5050]: I1123 15:01:28.592644 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="0123488a-af6b-42f3-a828-58b8ee0f4639" containerName="barbican-api-log" Nov 23 15:01:28 crc kubenswrapper[5050]: E1123 15:01:28.592664 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="97329021-2f95-4540-bb83-c816a4f471ff" containerName="neutron-httpd" Nov 23 15:01:28 crc kubenswrapper[5050]: I1123 15:01:28.592671 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="97329021-2f95-4540-bb83-c816a4f471ff" containerName="neutron-httpd" Nov 23 15:01:28 crc kubenswrapper[5050]: E1123 15:01:28.592685 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="17cf7e41-00d7-40a7-b1ec-514f69143dc1" containerName="probe" Nov 23 15:01:28 crc kubenswrapper[5050]: I1123 15:01:28.592691 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="17cf7e41-00d7-40a7-b1ec-514f69143dc1" containerName="probe" Nov 23 15:01:28 crc kubenswrapper[5050]: E1123 15:01:28.592709 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0123488a-af6b-42f3-a828-58b8ee0f4639" containerName="barbican-api" Nov 23 15:01:28 crc kubenswrapper[5050]: I1123 15:01:28.592714 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="0123488a-af6b-42f3-a828-58b8ee0f4639" containerName="barbican-api" Nov 23 15:01:28 crc kubenswrapper[5050]: E1123 15:01:28.592724 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fa26df5c-4d96-49d9-8e38-434ca7b03a84" containerName="init" Nov 23 15:01:28 crc kubenswrapper[5050]: I1123 15:01:28.592730 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="fa26df5c-4d96-49d9-8e38-434ca7b03a84" containerName="init" Nov 23 15:01:28 crc kubenswrapper[5050]: E1123 15:01:28.592739 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fa26df5c-4d96-49d9-8e38-434ca7b03a84" containerName="dnsmasq-dns" Nov 23 15:01:28 crc kubenswrapper[5050]: I1123 15:01:28.592745 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="fa26df5c-4d96-49d9-8e38-434ca7b03a84" containerName="dnsmasq-dns" Nov 23 15:01:28 crc kubenswrapper[5050]: E1123 15:01:28.592761 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="97329021-2f95-4540-bb83-c816a4f471ff" containerName="neutron-api" Nov 23 15:01:28 crc kubenswrapper[5050]: I1123 15:01:28.592767 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="97329021-2f95-4540-bb83-c816a4f471ff" containerName="neutron-api" Nov 23 15:01:28 crc kubenswrapper[5050]: E1123 15:01:28.592778 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="17cf7e41-00d7-40a7-b1ec-514f69143dc1" containerName="cinder-scheduler" Nov 23 15:01:28 crc kubenswrapper[5050]: I1123 15:01:28.592784 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="17cf7e41-00d7-40a7-b1ec-514f69143dc1" containerName="cinder-scheduler" Nov 23 15:01:28 crc kubenswrapper[5050]: I1123 15:01:28.592949 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="17cf7e41-00d7-40a7-b1ec-514f69143dc1" containerName="cinder-scheduler" Nov 23 15:01:28 crc 
kubenswrapper[5050]: I1123 15:01:28.592968 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="97329021-2f95-4540-bb83-c816a4f471ff" containerName="neutron-api" Nov 23 15:01:28 crc kubenswrapper[5050]: I1123 15:01:28.592981 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="0123488a-af6b-42f3-a828-58b8ee0f4639" containerName="barbican-api" Nov 23 15:01:28 crc kubenswrapper[5050]: I1123 15:01:28.592992 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="97329021-2f95-4540-bb83-c816a4f471ff" containerName="neutron-httpd" Nov 23 15:01:28 crc kubenswrapper[5050]: I1123 15:01:28.593001 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="0123488a-af6b-42f3-a828-58b8ee0f4639" containerName="barbican-api-log" Nov 23 15:01:28 crc kubenswrapper[5050]: I1123 15:01:28.593016 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="fa26df5c-4d96-49d9-8e38-434ca7b03a84" containerName="dnsmasq-dns" Nov 23 15:01:28 crc kubenswrapper[5050]: I1123 15:01:28.593026 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="17cf7e41-00d7-40a7-b1ec-514f69143dc1" containerName="probe" Nov 23 15:01:28 crc kubenswrapper[5050]: I1123 15:01:28.594216 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 23 15:01:28 crc kubenswrapper[5050]: I1123 15:01:28.597402 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Nov 23 15:01:28 crc kubenswrapper[5050]: I1123 15:01:28.650508 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 23 15:01:28 crc kubenswrapper[5050]: I1123 15:01:28.726346 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p5sqc\" (UniqueName: \"kubernetes.io/projected/ba4a4a57-47b0-423d-8bb1-76953fb3a37b-kube-api-access-p5sqc\") pod \"cinder-scheduler-0\" (UID: \"ba4a4a57-47b0-423d-8bb1-76953fb3a37b\") " pod="openstack/cinder-scheduler-0" Nov 23 15:01:28 crc kubenswrapper[5050]: I1123 15:01:28.726506 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ba4a4a57-47b0-423d-8bb1-76953fb3a37b-scripts\") pod \"cinder-scheduler-0\" (UID: \"ba4a4a57-47b0-423d-8bb1-76953fb3a37b\") " pod="openstack/cinder-scheduler-0" Nov 23 15:01:28 crc kubenswrapper[5050]: I1123 15:01:28.726559 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ba4a4a57-47b0-423d-8bb1-76953fb3a37b-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"ba4a4a57-47b0-423d-8bb1-76953fb3a37b\") " pod="openstack/cinder-scheduler-0" Nov 23 15:01:28 crc kubenswrapper[5050]: I1123 15:01:28.727723 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ba4a4a57-47b0-423d-8bb1-76953fb3a37b-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"ba4a4a57-47b0-423d-8bb1-76953fb3a37b\") " pod="openstack/cinder-scheduler-0" Nov 23 15:01:28 crc kubenswrapper[5050]: I1123 15:01:28.727780 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/ba4a4a57-47b0-423d-8bb1-76953fb3a37b-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: 
\"ba4a4a57-47b0-423d-8bb1-76953fb3a37b\") " pod="openstack/cinder-scheduler-0" Nov 23 15:01:28 crc kubenswrapper[5050]: I1123 15:01:28.727855 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ba4a4a57-47b0-423d-8bb1-76953fb3a37b-config-data\") pod \"cinder-scheduler-0\" (UID: \"ba4a4a57-47b0-423d-8bb1-76953fb3a37b\") " pod="openstack/cinder-scheduler-0" Nov 23 15:01:28 crc kubenswrapper[5050]: I1123 15:01:28.830175 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p5sqc\" (UniqueName: \"kubernetes.io/projected/ba4a4a57-47b0-423d-8bb1-76953fb3a37b-kube-api-access-p5sqc\") pod \"cinder-scheduler-0\" (UID: \"ba4a4a57-47b0-423d-8bb1-76953fb3a37b\") " pod="openstack/cinder-scheduler-0" Nov 23 15:01:28 crc kubenswrapper[5050]: I1123 15:01:28.830295 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ba4a4a57-47b0-423d-8bb1-76953fb3a37b-scripts\") pod \"cinder-scheduler-0\" (UID: \"ba4a4a57-47b0-423d-8bb1-76953fb3a37b\") " pod="openstack/cinder-scheduler-0" Nov 23 15:01:28 crc kubenswrapper[5050]: I1123 15:01:28.830349 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ba4a4a57-47b0-423d-8bb1-76953fb3a37b-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"ba4a4a57-47b0-423d-8bb1-76953fb3a37b\") " pod="openstack/cinder-scheduler-0" Nov 23 15:01:28 crc kubenswrapper[5050]: I1123 15:01:28.830386 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ba4a4a57-47b0-423d-8bb1-76953fb3a37b-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"ba4a4a57-47b0-423d-8bb1-76953fb3a37b\") " pod="openstack/cinder-scheduler-0" Nov 23 15:01:28 crc kubenswrapper[5050]: I1123 15:01:28.830429 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/ba4a4a57-47b0-423d-8bb1-76953fb3a37b-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"ba4a4a57-47b0-423d-8bb1-76953fb3a37b\") " pod="openstack/cinder-scheduler-0" Nov 23 15:01:28 crc kubenswrapper[5050]: I1123 15:01:28.830533 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ba4a4a57-47b0-423d-8bb1-76953fb3a37b-config-data\") pod \"cinder-scheduler-0\" (UID: \"ba4a4a57-47b0-423d-8bb1-76953fb3a37b\") " pod="openstack/cinder-scheduler-0" Nov 23 15:01:28 crc kubenswrapper[5050]: I1123 15:01:28.831868 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/ba4a4a57-47b0-423d-8bb1-76953fb3a37b-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"ba4a4a57-47b0-423d-8bb1-76953fb3a37b\") " pod="openstack/cinder-scheduler-0" Nov 23 15:01:28 crc kubenswrapper[5050]: I1123 15:01:28.836091 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ba4a4a57-47b0-423d-8bb1-76953fb3a37b-scripts\") pod \"cinder-scheduler-0\" (UID: \"ba4a4a57-47b0-423d-8bb1-76953fb3a37b\") " pod="openstack/cinder-scheduler-0" Nov 23 15:01:28 crc kubenswrapper[5050]: I1123 15:01:28.836273 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: 
\"kubernetes.io/secret/ba4a4a57-47b0-423d-8bb1-76953fb3a37b-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"ba4a4a57-47b0-423d-8bb1-76953fb3a37b\") " pod="openstack/cinder-scheduler-0" Nov 23 15:01:28 crc kubenswrapper[5050]: I1123 15:01:28.837244 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ba4a4a57-47b0-423d-8bb1-76953fb3a37b-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"ba4a4a57-47b0-423d-8bb1-76953fb3a37b\") " pod="openstack/cinder-scheduler-0" Nov 23 15:01:28 crc kubenswrapper[5050]: I1123 15:01:28.842104 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ba4a4a57-47b0-423d-8bb1-76953fb3a37b-config-data\") pod \"cinder-scheduler-0\" (UID: \"ba4a4a57-47b0-423d-8bb1-76953fb3a37b\") " pod="openstack/cinder-scheduler-0" Nov 23 15:01:28 crc kubenswrapper[5050]: I1123 15:01:28.852575 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p5sqc\" (UniqueName: \"kubernetes.io/projected/ba4a4a57-47b0-423d-8bb1-76953fb3a37b-kube-api-access-p5sqc\") pod \"cinder-scheduler-0\" (UID: \"ba4a4a57-47b0-423d-8bb1-76953fb3a37b\") " pod="openstack/cinder-scheduler-0" Nov 23 15:01:28 crc kubenswrapper[5050]: I1123 15:01:28.963253 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 23 15:01:29 crc kubenswrapper[5050]: I1123 15:01:29.284866 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 23 15:01:29 crc kubenswrapper[5050]: I1123 15:01:29.565185 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="17cf7e41-00d7-40a7-b1ec-514f69143dc1" path="/var/lib/kubelet/pods/17cf7e41-00d7-40a7-b1ec-514f69143dc1/volumes" Nov 23 15:01:29 crc kubenswrapper[5050]: I1123 15:01:29.566566 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="97329021-2f95-4540-bb83-c816a4f471ff" path="/var/lib/kubelet/pods/97329021-2f95-4540-bb83-c816a4f471ff/volumes" Nov 23 15:01:30 crc kubenswrapper[5050]: I1123 15:01:30.262134 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"ba4a4a57-47b0-423d-8bb1-76953fb3a37b","Type":"ContainerStarted","Data":"88de6b6d747657765b520632673420c57f5942e93770f67efd5b9461639f7cdd"} Nov 23 15:01:30 crc kubenswrapper[5050]: I1123 15:01:30.262706 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"ba4a4a57-47b0-423d-8bb1-76953fb3a37b","Type":"ContainerStarted","Data":"47e88a52634118d105d529a37a802d99e06eb6cbd871b2344b30d68517e46f9c"} Nov 23 15:01:30 crc kubenswrapper[5050]: I1123 15:01:30.425084 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0" Nov 23 15:01:31 crc kubenswrapper[5050]: I1123 15:01:31.277579 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"ba4a4a57-47b0-423d-8bb1-76953fb3a37b","Type":"ContainerStarted","Data":"ff0e97bc91a901d22909a988b2e36c290f89a2317abfa813abba814502aa0edc"} Nov 23 15:01:31 crc kubenswrapper[5050]: I1123 15:01:31.313618 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=3.313594214 podStartE2EDuration="3.313594214s" podCreationTimestamp="2025-11-23 15:01:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" 
lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 15:01:31.312767931 +0000 UTC m=+1186.479764426" watchObservedRunningTime="2025-11-23 15:01:31.313594214 +0000 UTC m=+1186.480590709" Nov 23 15:01:32 crc kubenswrapper[5050]: I1123 15:01:32.806275 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-748fb7ccf6-s9qnq" Nov 23 15:01:32 crc kubenswrapper[5050]: I1123 15:01:32.852514 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-748fb7ccf6-s9qnq" Nov 23 15:01:33 crc kubenswrapper[5050]: I1123 15:01:33.008634 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/keystone-ffbf89f58-t9kvk" Nov 23 15:01:33 crc kubenswrapper[5050]: I1123 15:01:33.458667 5050 pod_container_manager_linux.go:210] "Failed to delete cgroup paths" cgroupName=["kubepods","besteffort","pod57d4c7fd-cbe6-41c6-868f-de274707cd2d"] err="unable to destroy cgroup paths for cgroup [kubepods besteffort pod57d4c7fd-cbe6-41c6-868f-de274707cd2d] : Timed out while waiting for systemd to remove kubepods-besteffort-pod57d4c7fd_cbe6_41c6_868f_de274707cd2d.slice" Nov 23 15:01:33 crc kubenswrapper[5050]: E1123 15:01:33.458765 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to delete cgroup paths for [kubepods besteffort pod57d4c7fd-cbe6-41c6-868f-de274707cd2d] : unable to destroy cgroup paths for cgroup [kubepods besteffort pod57d4c7fd-cbe6-41c6-868f-de274707cd2d] : Timed out while waiting for systemd to remove kubepods-besteffort-pod57d4c7fd_cbe6_41c6_868f_de274707cd2d.slice" pod="openstack/dnsmasq-dns-6b7b667979-dcnfh" podUID="57d4c7fd-cbe6-41c6-868f-de274707cd2d" Nov 23 15:01:33 crc kubenswrapper[5050]: I1123 15:01:33.963802 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Nov 23 15:01:34 crc kubenswrapper[5050]: I1123 15:01:34.307582 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6b7b667979-dcnfh" Nov 23 15:01:34 crc kubenswrapper[5050]: I1123 15:01:34.334529 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6b7b667979-dcnfh"] Nov 23 15:01:34 crc kubenswrapper[5050]: I1123 15:01:34.347810 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6b7b667979-dcnfh"] Nov 23 15:01:35 crc kubenswrapper[5050]: I1123 15:01:35.563162 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57d4c7fd-cbe6-41c6-868f-de274707cd2d" path="/var/lib/kubelet/pods/57d4c7fd-cbe6-41c6-868f-de274707cd2d/volumes" Nov 23 15:01:35 crc kubenswrapper[5050]: I1123 15:01:35.996961 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-proxy-6b854f87-drlhc"] Nov 23 15:01:36 crc kubenswrapper[5050]: I1123 15:01:36.001800 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-proxy-6b854f87-drlhc" Nov 23 15:01:36 crc kubenswrapper[5050]: I1123 15:01:36.004478 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-internal-svc" Nov 23 15:01:36 crc kubenswrapper[5050]: I1123 15:01:36.009720 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-public-svc" Nov 23 15:01:36 crc kubenswrapper[5050]: I1123 15:01:36.010000 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Nov 23 15:01:36 crc kubenswrapper[5050]: I1123 15:01:36.015432 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-6b854f87-drlhc"] Nov 23 15:01:36 crc kubenswrapper[5050]: I1123 15:01:36.107435 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c1187c9d-5557-496e-be1d-8df301d6daa6-combined-ca-bundle\") pod \"swift-proxy-6b854f87-drlhc\" (UID: \"c1187c9d-5557-496e-be1d-8df301d6daa6\") " pod="openstack/swift-proxy-6b854f87-drlhc" Nov 23 15:01:36 crc kubenswrapper[5050]: I1123 15:01:36.107520 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c1187c9d-5557-496e-be1d-8df301d6daa6-log-httpd\") pod \"swift-proxy-6b854f87-drlhc\" (UID: \"c1187c9d-5557-496e-be1d-8df301d6daa6\") " pod="openstack/swift-proxy-6b854f87-drlhc" Nov 23 15:01:36 crc kubenswrapper[5050]: I1123 15:01:36.107586 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wx6m8\" (UniqueName: \"kubernetes.io/projected/c1187c9d-5557-496e-be1d-8df301d6daa6-kube-api-access-wx6m8\") pod \"swift-proxy-6b854f87-drlhc\" (UID: \"c1187c9d-5557-496e-be1d-8df301d6daa6\") " pod="openstack/swift-proxy-6b854f87-drlhc" Nov 23 15:01:36 crc kubenswrapper[5050]: I1123 15:01:36.107640 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c1187c9d-5557-496e-be1d-8df301d6daa6-public-tls-certs\") pod \"swift-proxy-6b854f87-drlhc\" (UID: \"c1187c9d-5557-496e-be1d-8df301d6daa6\") " pod="openstack/swift-proxy-6b854f87-drlhc" Nov 23 15:01:36 crc kubenswrapper[5050]: I1123 15:01:36.107668 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/c1187c9d-5557-496e-be1d-8df301d6daa6-etc-swift\") pod \"swift-proxy-6b854f87-drlhc\" (UID: \"c1187c9d-5557-496e-be1d-8df301d6daa6\") " pod="openstack/swift-proxy-6b854f87-drlhc" Nov 23 15:01:36 crc kubenswrapper[5050]: I1123 15:01:36.107699 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c1187c9d-5557-496e-be1d-8df301d6daa6-internal-tls-certs\") pod \"swift-proxy-6b854f87-drlhc\" (UID: \"c1187c9d-5557-496e-be1d-8df301d6daa6\") " pod="openstack/swift-proxy-6b854f87-drlhc" Nov 23 15:01:36 crc kubenswrapper[5050]: I1123 15:01:36.107882 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c1187c9d-5557-496e-be1d-8df301d6daa6-run-httpd\") pod \"swift-proxy-6b854f87-drlhc\" (UID: \"c1187c9d-5557-496e-be1d-8df301d6daa6\") " pod="openstack/swift-proxy-6b854f87-drlhc" 
Nov 23 15:01:36 crc kubenswrapper[5050]: I1123 15:01:36.107942 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c1187c9d-5557-496e-be1d-8df301d6daa6-config-data\") pod \"swift-proxy-6b854f87-drlhc\" (UID: \"c1187c9d-5557-496e-be1d-8df301d6daa6\") " pod="openstack/swift-proxy-6b854f87-drlhc" Nov 23 15:01:36 crc kubenswrapper[5050]: I1123 15:01:36.209477 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c1187c9d-5557-496e-be1d-8df301d6daa6-run-httpd\") pod \"swift-proxy-6b854f87-drlhc\" (UID: \"c1187c9d-5557-496e-be1d-8df301d6daa6\") " pod="openstack/swift-proxy-6b854f87-drlhc" Nov 23 15:01:36 crc kubenswrapper[5050]: I1123 15:01:36.209535 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c1187c9d-5557-496e-be1d-8df301d6daa6-config-data\") pod \"swift-proxy-6b854f87-drlhc\" (UID: \"c1187c9d-5557-496e-be1d-8df301d6daa6\") " pod="openstack/swift-proxy-6b854f87-drlhc" Nov 23 15:01:36 crc kubenswrapper[5050]: I1123 15:01:36.209560 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c1187c9d-5557-496e-be1d-8df301d6daa6-combined-ca-bundle\") pod \"swift-proxy-6b854f87-drlhc\" (UID: \"c1187c9d-5557-496e-be1d-8df301d6daa6\") " pod="openstack/swift-proxy-6b854f87-drlhc" Nov 23 15:01:36 crc kubenswrapper[5050]: I1123 15:01:36.209598 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c1187c9d-5557-496e-be1d-8df301d6daa6-log-httpd\") pod \"swift-proxy-6b854f87-drlhc\" (UID: \"c1187c9d-5557-496e-be1d-8df301d6daa6\") " pod="openstack/swift-proxy-6b854f87-drlhc" Nov 23 15:01:36 crc kubenswrapper[5050]: I1123 15:01:36.209651 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wx6m8\" (UniqueName: \"kubernetes.io/projected/c1187c9d-5557-496e-be1d-8df301d6daa6-kube-api-access-wx6m8\") pod \"swift-proxy-6b854f87-drlhc\" (UID: \"c1187c9d-5557-496e-be1d-8df301d6daa6\") " pod="openstack/swift-proxy-6b854f87-drlhc" Nov 23 15:01:36 crc kubenswrapper[5050]: I1123 15:01:36.209698 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c1187c9d-5557-496e-be1d-8df301d6daa6-public-tls-certs\") pod \"swift-proxy-6b854f87-drlhc\" (UID: \"c1187c9d-5557-496e-be1d-8df301d6daa6\") " pod="openstack/swift-proxy-6b854f87-drlhc" Nov 23 15:01:36 crc kubenswrapper[5050]: I1123 15:01:36.209726 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/c1187c9d-5557-496e-be1d-8df301d6daa6-etc-swift\") pod \"swift-proxy-6b854f87-drlhc\" (UID: \"c1187c9d-5557-496e-be1d-8df301d6daa6\") " pod="openstack/swift-proxy-6b854f87-drlhc" Nov 23 15:01:36 crc kubenswrapper[5050]: I1123 15:01:36.209750 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c1187c9d-5557-496e-be1d-8df301d6daa6-internal-tls-certs\") pod \"swift-proxy-6b854f87-drlhc\" (UID: \"c1187c9d-5557-496e-be1d-8df301d6daa6\") " pod="openstack/swift-proxy-6b854f87-drlhc" Nov 23 15:01:36 crc kubenswrapper[5050]: I1123 15:01:36.210041 5050 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c1187c9d-5557-496e-be1d-8df301d6daa6-run-httpd\") pod \"swift-proxy-6b854f87-drlhc\" (UID: \"c1187c9d-5557-496e-be1d-8df301d6daa6\") " pod="openstack/swift-proxy-6b854f87-drlhc" Nov 23 15:01:36 crc kubenswrapper[5050]: I1123 15:01:36.210711 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c1187c9d-5557-496e-be1d-8df301d6daa6-log-httpd\") pod \"swift-proxy-6b854f87-drlhc\" (UID: \"c1187c9d-5557-496e-be1d-8df301d6daa6\") " pod="openstack/swift-proxy-6b854f87-drlhc" Nov 23 15:01:36 crc kubenswrapper[5050]: I1123 15:01:36.217468 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c1187c9d-5557-496e-be1d-8df301d6daa6-public-tls-certs\") pod \"swift-proxy-6b854f87-drlhc\" (UID: \"c1187c9d-5557-496e-be1d-8df301d6daa6\") " pod="openstack/swift-proxy-6b854f87-drlhc" Nov 23 15:01:36 crc kubenswrapper[5050]: I1123 15:01:36.217643 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/c1187c9d-5557-496e-be1d-8df301d6daa6-etc-swift\") pod \"swift-proxy-6b854f87-drlhc\" (UID: \"c1187c9d-5557-496e-be1d-8df301d6daa6\") " pod="openstack/swift-proxy-6b854f87-drlhc" Nov 23 15:01:36 crc kubenswrapper[5050]: I1123 15:01:36.224499 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c1187c9d-5557-496e-be1d-8df301d6daa6-internal-tls-certs\") pod \"swift-proxy-6b854f87-drlhc\" (UID: \"c1187c9d-5557-496e-be1d-8df301d6daa6\") " pod="openstack/swift-proxy-6b854f87-drlhc" Nov 23 15:01:36 crc kubenswrapper[5050]: I1123 15:01:36.224785 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c1187c9d-5557-496e-be1d-8df301d6daa6-combined-ca-bundle\") pod \"swift-proxy-6b854f87-drlhc\" (UID: \"c1187c9d-5557-496e-be1d-8df301d6daa6\") " pod="openstack/swift-proxy-6b854f87-drlhc" Nov 23 15:01:36 crc kubenswrapper[5050]: I1123 15:01:36.224798 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c1187c9d-5557-496e-be1d-8df301d6daa6-config-data\") pod \"swift-proxy-6b854f87-drlhc\" (UID: \"c1187c9d-5557-496e-be1d-8df301d6daa6\") " pod="openstack/swift-proxy-6b854f87-drlhc" Nov 23 15:01:36 crc kubenswrapper[5050]: I1123 15:01:36.234701 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wx6m8\" (UniqueName: \"kubernetes.io/projected/c1187c9d-5557-496e-be1d-8df301d6daa6-kube-api-access-wx6m8\") pod \"swift-proxy-6b854f87-drlhc\" (UID: \"c1187c9d-5557-496e-be1d-8df301d6daa6\") " pod="openstack/swift-proxy-6b854f87-drlhc" Nov 23 15:01:36 crc kubenswrapper[5050]: I1123 15:01:36.321298 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-proxy-6b854f87-drlhc" Nov 23 15:01:36 crc kubenswrapper[5050]: I1123 15:01:36.943356 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-6b854f87-drlhc"] Nov 23 15:01:37 crc kubenswrapper[5050]: I1123 15:01:37.339968 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-6b854f87-drlhc" event={"ID":"c1187c9d-5557-496e-be1d-8df301d6daa6","Type":"ContainerStarted","Data":"15153a6ddd26bb52ffe675fc4511dfe431c750717734e0d53c6df64c01657ed6"} Nov 23 15:01:37 crc kubenswrapper[5050]: I1123 15:01:37.340034 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-6b854f87-drlhc" event={"ID":"c1187c9d-5557-496e-be1d-8df301d6daa6","Type":"ContainerStarted","Data":"b1c52e489d41dc167d934414599ef28dff555b6b9b1022ec9897bbd738edc1ae"} Nov 23 15:01:37 crc kubenswrapper[5050]: I1123 15:01:37.405657 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Nov 23 15:01:37 crc kubenswrapper[5050]: I1123 15:01:37.407589 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Nov 23 15:01:37 crc kubenswrapper[5050]: I1123 15:01:37.410163 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config" Nov 23 15:01:37 crc kubenswrapper[5050]: I1123 15:01:37.410415 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstackclient-openstackclient-dockercfg-2lghk" Nov 23 15:01:37 crc kubenswrapper[5050]: I1123 15:01:37.421480 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-config-secret" Nov 23 15:01:37 crc kubenswrapper[5050]: I1123 15:01:37.429887 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Nov 23 15:01:37 crc kubenswrapper[5050]: I1123 15:01:37.552676 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/ecb3607d-200e-40b2-815f-cffc65cb6d74-openstack-config-secret\") pod \"openstackclient\" (UID: \"ecb3607d-200e-40b2-815f-cffc65cb6d74\") " pod="openstack/openstackclient" Nov 23 15:01:37 crc kubenswrapper[5050]: I1123 15:01:37.553431 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ecb3607d-200e-40b2-815f-cffc65cb6d74-combined-ca-bundle\") pod \"openstackclient\" (UID: \"ecb3607d-200e-40b2-815f-cffc65cb6d74\") " pod="openstack/openstackclient" Nov 23 15:01:37 crc kubenswrapper[5050]: I1123 15:01:37.553557 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4pmkl\" (UniqueName: \"kubernetes.io/projected/ecb3607d-200e-40b2-815f-cffc65cb6d74-kube-api-access-4pmkl\") pod \"openstackclient\" (UID: \"ecb3607d-200e-40b2-815f-cffc65cb6d74\") " pod="openstack/openstackclient" Nov 23 15:01:37 crc kubenswrapper[5050]: I1123 15:01:37.553676 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/ecb3607d-200e-40b2-815f-cffc65cb6d74-openstack-config\") pod \"openstackclient\" (UID: \"ecb3607d-200e-40b2-815f-cffc65cb6d74\") " pod="openstack/openstackclient" Nov 23 15:01:37 crc kubenswrapper[5050]: I1123 15:01:37.655679 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/ecb3607d-200e-40b2-815f-cffc65cb6d74-openstack-config-secret\") pod \"openstackclient\" (UID: \"ecb3607d-200e-40b2-815f-cffc65cb6d74\") " pod="openstack/openstackclient" Nov 23 15:01:37 crc kubenswrapper[5050]: I1123 15:01:37.655746 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ecb3607d-200e-40b2-815f-cffc65cb6d74-combined-ca-bundle\") pod \"openstackclient\" (UID: \"ecb3607d-200e-40b2-815f-cffc65cb6d74\") " pod="openstack/openstackclient" Nov 23 15:01:37 crc kubenswrapper[5050]: I1123 15:01:37.655787 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4pmkl\" (UniqueName: \"kubernetes.io/projected/ecb3607d-200e-40b2-815f-cffc65cb6d74-kube-api-access-4pmkl\") pod \"openstackclient\" (UID: \"ecb3607d-200e-40b2-815f-cffc65cb6d74\") " pod="openstack/openstackclient" Nov 23 15:01:37 crc kubenswrapper[5050]: I1123 15:01:37.655849 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/ecb3607d-200e-40b2-815f-cffc65cb6d74-openstack-config\") pod \"openstackclient\" (UID: \"ecb3607d-200e-40b2-815f-cffc65cb6d74\") " pod="openstack/openstackclient" Nov 23 15:01:37 crc kubenswrapper[5050]: I1123 15:01:37.657047 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/ecb3607d-200e-40b2-815f-cffc65cb6d74-openstack-config\") pod \"openstackclient\" (UID: \"ecb3607d-200e-40b2-815f-cffc65cb6d74\") " pod="openstack/openstackclient" Nov 23 15:01:37 crc kubenswrapper[5050]: I1123 15:01:37.661293 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ecb3607d-200e-40b2-815f-cffc65cb6d74-combined-ca-bundle\") pod \"openstackclient\" (UID: \"ecb3607d-200e-40b2-815f-cffc65cb6d74\") " pod="openstack/openstackclient" Nov 23 15:01:37 crc kubenswrapper[5050]: I1123 15:01:37.661377 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/ecb3607d-200e-40b2-815f-cffc65cb6d74-openstack-config-secret\") pod \"openstackclient\" (UID: \"ecb3607d-200e-40b2-815f-cffc65cb6d74\") " pod="openstack/openstackclient" Nov 23 15:01:37 crc kubenswrapper[5050]: I1123 15:01:37.678156 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4pmkl\" (UniqueName: \"kubernetes.io/projected/ecb3607d-200e-40b2-815f-cffc65cb6d74-kube-api-access-4pmkl\") pod \"openstackclient\" (UID: \"ecb3607d-200e-40b2-815f-cffc65cb6d74\") " pod="openstack/openstackclient" Nov 23 15:01:37 crc kubenswrapper[5050]: I1123 15:01:37.736688 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Nov 23 15:01:37 crc kubenswrapper[5050]: I1123 15:01:37.846273 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstackclient"] Nov 23 15:01:37 crc kubenswrapper[5050]: I1123 15:01:37.861388 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/openstackclient"] Nov 23 15:01:37 crc kubenswrapper[5050]: I1123 15:01:37.885559 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Nov 23 15:01:37 crc kubenswrapper[5050]: I1123 15:01:37.887702 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Nov 23 15:01:37 crc kubenswrapper[5050]: I1123 15:01:37.915152 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Nov 23 15:01:37 crc kubenswrapper[5050]: E1123 15:01:37.931656 5050 log.go:32] "RunPodSandbox from runtime service failed" err=< Nov 23 15:01:37 crc kubenswrapper[5050]: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_openstackclient_openstack_ecb3607d-200e-40b2-815f-cffc65cb6d74_0(1aa0a64edcafd8c40dfb788debd99e4b61186b6ee0006c95326df2a6702f0d03): error adding pod openstack_openstackclient to CNI network "multus-cni-network": plugin type="multus-shim" name="multus-cni-network" failed (add): CmdAdd (shim): CNI request failed with status 400: 'ContainerID:"1aa0a64edcafd8c40dfb788debd99e4b61186b6ee0006c95326df2a6702f0d03" Netns:"/var/run/netns/6dafadb4-653a-4c5d-b29e-0391afed92cb" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openstack;K8S_POD_NAME=openstackclient;K8S_POD_INFRA_CONTAINER_ID=1aa0a64edcafd8c40dfb788debd99e4b61186b6ee0006c95326df2a6702f0d03;K8S_POD_UID=ecb3607d-200e-40b2-815f-cffc65cb6d74" Path:"" ERRORED: error configuring pod [openstack/openstackclient] networking: Multus: [openstack/openstackclient/ecb3607d-200e-40b2-815f-cffc65cb6d74]: expected pod UID "ecb3607d-200e-40b2-815f-cffc65cb6d74" but got "014d6c50-fe8c-4b39-bccd-531037f1ff10" from Kube API Nov 23 15:01:37 crc kubenswrapper[5050]: ': StdinData: {"binDir":"/var/lib/cni/bin","clusterNetwork":"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf","cniVersion":"0.3.1","daemonSocketDir":"/run/multus/socket","globalNamespaces":"default,openshift-multus,openshift-sriov-network-operator,openshift-cnv","logLevel":"verbose","logToStderr":true,"name":"multus-cni-network","namespaceIsolation":true,"type":"multus-shim"} Nov 23 15:01:37 crc kubenswrapper[5050]: > Nov 23 15:01:37 crc kubenswrapper[5050]: E1123 15:01:37.931745 5050 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err=< Nov 23 15:01:37 crc kubenswrapper[5050]: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_openstackclient_openstack_ecb3607d-200e-40b2-815f-cffc65cb6d74_0(1aa0a64edcafd8c40dfb788debd99e4b61186b6ee0006c95326df2a6702f0d03): error adding pod openstack_openstackclient to CNI network "multus-cni-network": plugin type="multus-shim" name="multus-cni-network" failed (add): CmdAdd (shim): CNI request failed with status 400: 'ContainerID:"1aa0a64edcafd8c40dfb788debd99e4b61186b6ee0006c95326df2a6702f0d03" Netns:"/var/run/netns/6dafadb4-653a-4c5d-b29e-0391afed92cb" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openstack;K8S_POD_NAME=openstackclient;K8S_POD_INFRA_CONTAINER_ID=1aa0a64edcafd8c40dfb788debd99e4b61186b6ee0006c95326df2a6702f0d03;K8S_POD_UID=ecb3607d-200e-40b2-815f-cffc65cb6d74" Path:"" ERRORED: error configuring pod [openstack/openstackclient] networking: Multus: [openstack/openstackclient/ecb3607d-200e-40b2-815f-cffc65cb6d74]: expected pod UID "ecb3607d-200e-40b2-815f-cffc65cb6d74" but got "014d6c50-fe8c-4b39-bccd-531037f1ff10" from Kube API Nov 23 15:01:37 crc kubenswrapper[5050]: ': StdinData: {"binDir":"/var/lib/cni/bin","clusterNetwork":"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf","cniVersion":"0.3.1","daemonSocketDir":"/run/multus/socket","globalNamespaces":"default,openshift-multus,openshift-sriov-network-operator,openshift-cnv","logLevel":"verbose","logToStderr":true,"name":"multus-cni-network","namespaceIsolation":true,"type":"multus-shim"} Nov 23 15:01:37 
crc kubenswrapper[5050]: > pod="openstack/openstackclient" Nov 23 15:01:38 crc kubenswrapper[5050]: I1123 15:01:38.071344 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-srl5p\" (UniqueName: \"kubernetes.io/projected/014d6c50-fe8c-4b39-bccd-531037f1ff10-kube-api-access-srl5p\") pod \"openstackclient\" (UID: \"014d6c50-fe8c-4b39-bccd-531037f1ff10\") " pod="openstack/openstackclient" Nov 23 15:01:38 crc kubenswrapper[5050]: I1123 15:01:38.071459 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/014d6c50-fe8c-4b39-bccd-531037f1ff10-openstack-config-secret\") pod \"openstackclient\" (UID: \"014d6c50-fe8c-4b39-bccd-531037f1ff10\") " pod="openstack/openstackclient" Nov 23 15:01:38 crc kubenswrapper[5050]: I1123 15:01:38.071520 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/014d6c50-fe8c-4b39-bccd-531037f1ff10-openstack-config\") pod \"openstackclient\" (UID: \"014d6c50-fe8c-4b39-bccd-531037f1ff10\") " pod="openstack/openstackclient" Nov 23 15:01:38 crc kubenswrapper[5050]: I1123 15:01:38.071567 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/014d6c50-fe8c-4b39-bccd-531037f1ff10-combined-ca-bundle\") pod \"openstackclient\" (UID: \"014d6c50-fe8c-4b39-bccd-531037f1ff10\") " pod="openstack/openstackclient" Nov 23 15:01:38 crc kubenswrapper[5050]: I1123 15:01:38.173961 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-srl5p\" (UniqueName: \"kubernetes.io/projected/014d6c50-fe8c-4b39-bccd-531037f1ff10-kube-api-access-srl5p\") pod \"openstackclient\" (UID: \"014d6c50-fe8c-4b39-bccd-531037f1ff10\") " pod="openstack/openstackclient" Nov 23 15:01:38 crc kubenswrapper[5050]: I1123 15:01:38.174027 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/014d6c50-fe8c-4b39-bccd-531037f1ff10-openstack-config-secret\") pod \"openstackclient\" (UID: \"014d6c50-fe8c-4b39-bccd-531037f1ff10\") " pod="openstack/openstackclient" Nov 23 15:01:38 crc kubenswrapper[5050]: I1123 15:01:38.174096 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/014d6c50-fe8c-4b39-bccd-531037f1ff10-openstack-config\") pod \"openstackclient\" (UID: \"014d6c50-fe8c-4b39-bccd-531037f1ff10\") " pod="openstack/openstackclient" Nov 23 15:01:38 crc kubenswrapper[5050]: I1123 15:01:38.175546 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/014d6c50-fe8c-4b39-bccd-531037f1ff10-combined-ca-bundle\") pod \"openstackclient\" (UID: \"014d6c50-fe8c-4b39-bccd-531037f1ff10\") " pod="openstack/openstackclient" Nov 23 15:01:38 crc kubenswrapper[5050]: I1123 15:01:38.175734 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/014d6c50-fe8c-4b39-bccd-531037f1ff10-openstack-config\") pod \"openstackclient\" (UID: \"014d6c50-fe8c-4b39-bccd-531037f1ff10\") " pod="openstack/openstackclient" Nov 23 15:01:38 crc kubenswrapper[5050]: I1123 15:01:38.184551 5050 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/014d6c50-fe8c-4b39-bccd-531037f1ff10-combined-ca-bundle\") pod \"openstackclient\" (UID: \"014d6c50-fe8c-4b39-bccd-531037f1ff10\") " pod="openstack/openstackclient" Nov 23 15:01:38 crc kubenswrapper[5050]: I1123 15:01:38.185104 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/014d6c50-fe8c-4b39-bccd-531037f1ff10-openstack-config-secret\") pod \"openstackclient\" (UID: \"014d6c50-fe8c-4b39-bccd-531037f1ff10\") " pod="openstack/openstackclient" Nov 23 15:01:38 crc kubenswrapper[5050]: I1123 15:01:38.200935 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-srl5p\" (UniqueName: \"kubernetes.io/projected/014d6c50-fe8c-4b39-bccd-531037f1ff10-kube-api-access-srl5p\") pod \"openstackclient\" (UID: \"014d6c50-fe8c-4b39-bccd-531037f1ff10\") " pod="openstack/openstackclient" Nov 23 15:01:38 crc kubenswrapper[5050]: I1123 15:01:38.237727 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Nov 23 15:01:38 crc kubenswrapper[5050]: I1123 15:01:38.370988 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Nov 23 15:01:38 crc kubenswrapper[5050]: I1123 15:01:38.373369 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-6b854f87-drlhc" event={"ID":"c1187c9d-5557-496e-be1d-8df301d6daa6","Type":"ContainerStarted","Data":"ddcc4b1b60ec1caba00533bfdcca9aabeb296bd155561a69a9313a277fd7f548"} Nov 23 15:01:38 crc kubenswrapper[5050]: I1123 15:01:38.373412 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-6b854f87-drlhc" Nov 23 15:01:38 crc kubenswrapper[5050]: I1123 15:01:38.373818 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-6b854f87-drlhc" Nov 23 15:01:38 crc kubenswrapper[5050]: I1123 15:01:38.381683 5050 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="ecb3607d-200e-40b2-815f-cffc65cb6d74" podUID="014d6c50-fe8c-4b39-bccd-531037f1ff10" Nov 23 15:01:38 crc kubenswrapper[5050]: I1123 15:01:38.401073 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Nov 23 15:01:38 crc kubenswrapper[5050]: I1123 15:01:38.445056 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-proxy-6b854f87-drlhc" podStartSLOduration=3.445027878 podStartE2EDuration="3.445027878s" podCreationTimestamp="2025-11-23 15:01:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 15:01:38.438763073 +0000 UTC m=+1193.605759558" watchObservedRunningTime="2025-11-23 15:01:38.445027878 +0000 UTC m=+1193.612024363" Nov 23 15:01:38 crc kubenswrapper[5050]: I1123 15:01:38.583000 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/ecb3607d-200e-40b2-815f-cffc65cb6d74-openstack-config-secret\") pod \"ecb3607d-200e-40b2-815f-cffc65cb6d74\" (UID: \"ecb3607d-200e-40b2-815f-cffc65cb6d74\") " Nov 23 15:01:38 crc kubenswrapper[5050]: I1123 15:01:38.583118 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/ecb3607d-200e-40b2-815f-cffc65cb6d74-openstack-config\") pod \"ecb3607d-200e-40b2-815f-cffc65cb6d74\" (UID: \"ecb3607d-200e-40b2-815f-cffc65cb6d74\") " Nov 23 15:01:38 crc kubenswrapper[5050]: I1123 15:01:38.583298 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4pmkl\" (UniqueName: \"kubernetes.io/projected/ecb3607d-200e-40b2-815f-cffc65cb6d74-kube-api-access-4pmkl\") pod \"ecb3607d-200e-40b2-815f-cffc65cb6d74\" (UID: \"ecb3607d-200e-40b2-815f-cffc65cb6d74\") " Nov 23 15:01:38 crc kubenswrapper[5050]: I1123 15:01:38.583349 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ecb3607d-200e-40b2-815f-cffc65cb6d74-combined-ca-bundle\") pod \"ecb3607d-200e-40b2-815f-cffc65cb6d74\" (UID: \"ecb3607d-200e-40b2-815f-cffc65cb6d74\") " Nov 23 15:01:38 crc kubenswrapper[5050]: I1123 15:01:38.584791 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ecb3607d-200e-40b2-815f-cffc65cb6d74-openstack-config" (OuterVolumeSpecName: "openstack-config") pod "ecb3607d-200e-40b2-815f-cffc65cb6d74" (UID: "ecb3607d-200e-40b2-815f-cffc65cb6d74"). InnerVolumeSpecName "openstack-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 15:01:38 crc kubenswrapper[5050]: I1123 15:01:38.591713 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ecb3607d-200e-40b2-815f-cffc65cb6d74-kube-api-access-4pmkl" (OuterVolumeSpecName: "kube-api-access-4pmkl") pod "ecb3607d-200e-40b2-815f-cffc65cb6d74" (UID: "ecb3607d-200e-40b2-815f-cffc65cb6d74"). InnerVolumeSpecName "kube-api-access-4pmkl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:01:38 crc kubenswrapper[5050]: I1123 15:01:38.595652 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ecb3607d-200e-40b2-815f-cffc65cb6d74-openstack-config-secret" (OuterVolumeSpecName: "openstack-config-secret") pod "ecb3607d-200e-40b2-815f-cffc65cb6d74" (UID: "ecb3607d-200e-40b2-815f-cffc65cb6d74"). InnerVolumeSpecName "openstack-config-secret". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:01:38 crc kubenswrapper[5050]: I1123 15:01:38.600982 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ecb3607d-200e-40b2-815f-cffc65cb6d74-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ecb3607d-200e-40b2-815f-cffc65cb6d74" (UID: "ecb3607d-200e-40b2-815f-cffc65cb6d74"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:01:38 crc kubenswrapper[5050]: I1123 15:01:38.685686 5050 reconciler_common.go:293] "Volume detached for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/ecb3607d-200e-40b2-815f-cffc65cb6d74-openstack-config-secret\") on node \"crc\" DevicePath \"\"" Nov 23 15:01:38 crc kubenswrapper[5050]: I1123 15:01:38.685726 5050 reconciler_common.go:293] "Volume detached for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/ecb3607d-200e-40b2-815f-cffc65cb6d74-openstack-config\") on node \"crc\" DevicePath \"\"" Nov 23 15:01:38 crc kubenswrapper[5050]: I1123 15:01:38.685736 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4pmkl\" (UniqueName: \"kubernetes.io/projected/ecb3607d-200e-40b2-815f-cffc65cb6d74-kube-api-access-4pmkl\") on node \"crc\" DevicePath \"\"" Nov 23 15:01:38 crc kubenswrapper[5050]: I1123 15:01:38.685744 5050 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ecb3607d-200e-40b2-815f-cffc65cb6d74-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 15:01:38 crc kubenswrapper[5050]: I1123 15:01:38.942044 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Nov 23 15:01:38 crc kubenswrapper[5050]: W1123 15:01:38.948503 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod014d6c50_fe8c_4b39_bccd_531037f1ff10.slice/crio-e3be63759416398693ff94c500976e898db62d35a44087363de8d1b8da2d05e0 WatchSource:0}: Error finding container e3be63759416398693ff94c500976e898db62d35a44087363de8d1b8da2d05e0: Status 404 returned error can't find the container with id e3be63759416398693ff94c500976e898db62d35a44087363de8d1b8da2d05e0 Nov 23 15:01:39 crc kubenswrapper[5050]: I1123 15:01:39.287735 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Nov 23 15:01:39 crc kubenswrapper[5050]: I1123 15:01:39.314153 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 23 15:01:39 crc kubenswrapper[5050]: I1123 15:01:39.316587 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="153c15c7-1c66-41bd-b749-c4ffcf36aded" containerName="proxy-httpd" containerID="cri-o://d8eeba7955eccbe32932e5a02fbc4e826466b88d94a60d1ec3fbe69cac5eef3a" gracePeriod=30 Nov 23 15:01:39 crc kubenswrapper[5050]: I1123 15:01:39.315199 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="153c15c7-1c66-41bd-b749-c4ffcf36aded" containerName="ceilometer-central-agent" containerID="cri-o://15f3969a8a7a909d23a8390a0a67bf1b44c8cba8d87ddc6f58c877a840e235a5" gracePeriod=30 Nov 23 15:01:39 crc kubenswrapper[5050]: I1123 15:01:39.316895 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="153c15c7-1c66-41bd-b749-c4ffcf36aded" 
containerName="ceilometer-notification-agent" containerID="cri-o://f178f819f8702c2cb4266228044156442a5e47d4e9f9be48f59a3d7f1087fb03" gracePeriod=30 Nov 23 15:01:39 crc kubenswrapper[5050]: I1123 15:01:39.317005 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="153c15c7-1c66-41bd-b749-c4ffcf36aded" containerName="sg-core" containerID="cri-o://19f1b5194eedef22dced68a1910ec80c397d353e5869d0b9b6cbc5b129b09d78" gracePeriod=30 Nov 23 15:01:39 crc kubenswrapper[5050]: I1123 15:01:39.327349 5050 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="153c15c7-1c66-41bd-b749-c4ffcf36aded" containerName="proxy-httpd" probeResult="failure" output="Get \"http://10.217.0.166:3000/\": EOF" Nov 23 15:01:39 crc kubenswrapper[5050]: I1123 15:01:39.383060 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"014d6c50-fe8c-4b39-bccd-531037f1ff10","Type":"ContainerStarted","Data":"e3be63759416398693ff94c500976e898db62d35a44087363de8d1b8da2d05e0"} Nov 23 15:01:39 crc kubenswrapper[5050]: I1123 15:01:39.383158 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Nov 23 15:01:39 crc kubenswrapper[5050]: I1123 15:01:39.387893 5050 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="ecb3607d-200e-40b2-815f-cffc65cb6d74" podUID="014d6c50-fe8c-4b39-bccd-531037f1ff10" Nov 23 15:01:39 crc kubenswrapper[5050]: I1123 15:01:39.563368 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ecb3607d-200e-40b2-815f-cffc65cb6d74" path="/var/lib/kubelet/pods/ecb3607d-200e-40b2-815f-cffc65cb6d74/volumes" Nov 23 15:01:40 crc kubenswrapper[5050]: I1123 15:01:40.411729 5050 generic.go:334] "Generic (PLEG): container finished" podID="153c15c7-1c66-41bd-b749-c4ffcf36aded" containerID="d8eeba7955eccbe32932e5a02fbc4e826466b88d94a60d1ec3fbe69cac5eef3a" exitCode=0 Nov 23 15:01:40 crc kubenswrapper[5050]: I1123 15:01:40.411774 5050 generic.go:334] "Generic (PLEG): container finished" podID="153c15c7-1c66-41bd-b749-c4ffcf36aded" containerID="19f1b5194eedef22dced68a1910ec80c397d353e5869d0b9b6cbc5b129b09d78" exitCode=2 Nov 23 15:01:40 crc kubenswrapper[5050]: I1123 15:01:40.411785 5050 generic.go:334] "Generic (PLEG): container finished" podID="153c15c7-1c66-41bd-b749-c4ffcf36aded" containerID="15f3969a8a7a909d23a8390a0a67bf1b44c8cba8d87ddc6f58c877a840e235a5" exitCode=0 Nov 23 15:01:40 crc kubenswrapper[5050]: I1123 15:01:40.412918 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"153c15c7-1c66-41bd-b749-c4ffcf36aded","Type":"ContainerDied","Data":"d8eeba7955eccbe32932e5a02fbc4e826466b88d94a60d1ec3fbe69cac5eef3a"} Nov 23 15:01:40 crc kubenswrapper[5050]: I1123 15:01:40.412992 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"153c15c7-1c66-41bd-b749-c4ffcf36aded","Type":"ContainerDied","Data":"19f1b5194eedef22dced68a1910ec80c397d353e5869d0b9b6cbc5b129b09d78"} Nov 23 15:01:40 crc kubenswrapper[5050]: I1123 15:01:40.413024 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"153c15c7-1c66-41bd-b749-c4ffcf36aded","Type":"ContainerDied","Data":"15f3969a8a7a909d23a8390a0a67bf1b44c8cba8d87ddc6f58c877a840e235a5"} Nov 23 15:01:42 crc kubenswrapper[5050]: I1123 15:01:42.441498 5050 generic.go:334] "Generic (PLEG): container 
finished" podID="153c15c7-1c66-41bd-b749-c4ffcf36aded" containerID="f178f819f8702c2cb4266228044156442a5e47d4e9f9be48f59a3d7f1087fb03" exitCode=0 Nov 23 15:01:42 crc kubenswrapper[5050]: I1123 15:01:42.441636 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"153c15c7-1c66-41bd-b749-c4ffcf36aded","Type":"ContainerDied","Data":"f178f819f8702c2cb4266228044156442a5e47d4e9f9be48f59a3d7f1087fb03"} Nov 23 15:01:42 crc kubenswrapper[5050]: I1123 15:01:42.752340 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 23 15:01:42 crc kubenswrapper[5050]: I1123 15:01:42.871558 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/153c15c7-1c66-41bd-b749-c4ffcf36aded-combined-ca-bundle\") pod \"153c15c7-1c66-41bd-b749-c4ffcf36aded\" (UID: \"153c15c7-1c66-41bd-b749-c4ffcf36aded\") " Nov 23 15:01:42 crc kubenswrapper[5050]: I1123 15:01:42.871632 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5zphb\" (UniqueName: \"kubernetes.io/projected/153c15c7-1c66-41bd-b749-c4ffcf36aded-kube-api-access-5zphb\") pod \"153c15c7-1c66-41bd-b749-c4ffcf36aded\" (UID: \"153c15c7-1c66-41bd-b749-c4ffcf36aded\") " Nov 23 15:01:42 crc kubenswrapper[5050]: I1123 15:01:42.871759 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/153c15c7-1c66-41bd-b749-c4ffcf36aded-scripts\") pod \"153c15c7-1c66-41bd-b749-c4ffcf36aded\" (UID: \"153c15c7-1c66-41bd-b749-c4ffcf36aded\") " Nov 23 15:01:42 crc kubenswrapper[5050]: I1123 15:01:42.871800 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/153c15c7-1c66-41bd-b749-c4ffcf36aded-sg-core-conf-yaml\") pod \"153c15c7-1c66-41bd-b749-c4ffcf36aded\" (UID: \"153c15c7-1c66-41bd-b749-c4ffcf36aded\") " Nov 23 15:01:42 crc kubenswrapper[5050]: I1123 15:01:42.871837 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/153c15c7-1c66-41bd-b749-c4ffcf36aded-log-httpd\") pod \"153c15c7-1c66-41bd-b749-c4ffcf36aded\" (UID: \"153c15c7-1c66-41bd-b749-c4ffcf36aded\") " Nov 23 15:01:42 crc kubenswrapper[5050]: I1123 15:01:42.871894 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/153c15c7-1c66-41bd-b749-c4ffcf36aded-run-httpd\") pod \"153c15c7-1c66-41bd-b749-c4ffcf36aded\" (UID: \"153c15c7-1c66-41bd-b749-c4ffcf36aded\") " Nov 23 15:01:42 crc kubenswrapper[5050]: I1123 15:01:42.871977 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/153c15c7-1c66-41bd-b749-c4ffcf36aded-config-data\") pod \"153c15c7-1c66-41bd-b749-c4ffcf36aded\" (UID: \"153c15c7-1c66-41bd-b749-c4ffcf36aded\") " Nov 23 15:01:42 crc kubenswrapper[5050]: I1123 15:01:42.872598 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/153c15c7-1c66-41bd-b749-c4ffcf36aded-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "153c15c7-1c66-41bd-b749-c4ffcf36aded" (UID: "153c15c7-1c66-41bd-b749-c4ffcf36aded"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 15:01:42 crc kubenswrapper[5050]: I1123 15:01:42.872857 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/153c15c7-1c66-41bd-b749-c4ffcf36aded-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "153c15c7-1c66-41bd-b749-c4ffcf36aded" (UID: "153c15c7-1c66-41bd-b749-c4ffcf36aded"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 15:01:42 crc kubenswrapper[5050]: I1123 15:01:42.883014 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/153c15c7-1c66-41bd-b749-c4ffcf36aded-kube-api-access-5zphb" (OuterVolumeSpecName: "kube-api-access-5zphb") pod "153c15c7-1c66-41bd-b749-c4ffcf36aded" (UID: "153c15c7-1c66-41bd-b749-c4ffcf36aded"). InnerVolumeSpecName "kube-api-access-5zphb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:01:42 crc kubenswrapper[5050]: I1123 15:01:42.884539 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/153c15c7-1c66-41bd-b749-c4ffcf36aded-scripts" (OuterVolumeSpecName: "scripts") pod "153c15c7-1c66-41bd-b749-c4ffcf36aded" (UID: "153c15c7-1c66-41bd-b749-c4ffcf36aded"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:01:42 crc kubenswrapper[5050]: I1123 15:01:42.904672 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/153c15c7-1c66-41bd-b749-c4ffcf36aded-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "153c15c7-1c66-41bd-b749-c4ffcf36aded" (UID: "153c15c7-1c66-41bd-b749-c4ffcf36aded"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:01:42 crc kubenswrapper[5050]: I1123 15:01:42.975008 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5zphb\" (UniqueName: \"kubernetes.io/projected/153c15c7-1c66-41bd-b749-c4ffcf36aded-kube-api-access-5zphb\") on node \"crc\" DevicePath \"\"" Nov 23 15:01:42 crc kubenswrapper[5050]: I1123 15:01:42.975045 5050 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/153c15c7-1c66-41bd-b749-c4ffcf36aded-scripts\") on node \"crc\" DevicePath \"\"" Nov 23 15:01:42 crc kubenswrapper[5050]: I1123 15:01:42.975059 5050 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/153c15c7-1c66-41bd-b749-c4ffcf36aded-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 23 15:01:42 crc kubenswrapper[5050]: I1123 15:01:42.975070 5050 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/153c15c7-1c66-41bd-b749-c4ffcf36aded-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 23 15:01:42 crc kubenswrapper[5050]: I1123 15:01:42.975082 5050 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/153c15c7-1c66-41bd-b749-c4ffcf36aded-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 23 15:01:42 crc kubenswrapper[5050]: I1123 15:01:42.975252 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/153c15c7-1c66-41bd-b749-c4ffcf36aded-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "153c15c7-1c66-41bd-b749-c4ffcf36aded" (UID: "153c15c7-1c66-41bd-b749-c4ffcf36aded"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:01:42 crc kubenswrapper[5050]: I1123 15:01:42.978158 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/153c15c7-1c66-41bd-b749-c4ffcf36aded-config-data" (OuterVolumeSpecName: "config-data") pod "153c15c7-1c66-41bd-b749-c4ffcf36aded" (UID: "153c15c7-1c66-41bd-b749-c4ffcf36aded"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:01:43 crc kubenswrapper[5050]: I1123 15:01:43.076859 5050 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/153c15c7-1c66-41bd-b749-c4ffcf36aded-config-data\") on node \"crc\" DevicePath \"\"" Nov 23 15:01:43 crc kubenswrapper[5050]: I1123 15:01:43.076904 5050 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/153c15c7-1c66-41bd-b749-c4ffcf36aded-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 15:01:43 crc kubenswrapper[5050]: I1123 15:01:43.455952 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"153c15c7-1c66-41bd-b749-c4ffcf36aded","Type":"ContainerDied","Data":"1104e32ee47767230a27c6054bba23d9158819355e4bdf202d0faf3993f4b90e"} Nov 23 15:01:43 crc kubenswrapper[5050]: I1123 15:01:43.456027 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 23 15:01:43 crc kubenswrapper[5050]: I1123 15:01:43.456535 5050 scope.go:117] "RemoveContainer" containerID="d8eeba7955eccbe32932e5a02fbc4e826466b88d94a60d1ec3fbe69cac5eef3a" Nov 23 15:01:43 crc kubenswrapper[5050]: I1123 15:01:43.494964 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 23 15:01:43 crc kubenswrapper[5050]: I1123 15:01:43.505270 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 23 15:01:43 crc kubenswrapper[5050]: I1123 15:01:43.532893 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 23 15:01:43 crc kubenswrapper[5050]: E1123 15:01:43.533404 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="153c15c7-1c66-41bd-b749-c4ffcf36aded" containerName="ceilometer-central-agent" Nov 23 15:01:43 crc kubenswrapper[5050]: I1123 15:01:43.533422 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="153c15c7-1c66-41bd-b749-c4ffcf36aded" containerName="ceilometer-central-agent" Nov 23 15:01:43 crc kubenswrapper[5050]: E1123 15:01:43.533463 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="153c15c7-1c66-41bd-b749-c4ffcf36aded" containerName="proxy-httpd" Nov 23 15:01:43 crc kubenswrapper[5050]: I1123 15:01:43.533470 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="153c15c7-1c66-41bd-b749-c4ffcf36aded" containerName="proxy-httpd" Nov 23 15:01:43 crc kubenswrapper[5050]: E1123 15:01:43.533491 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="153c15c7-1c66-41bd-b749-c4ffcf36aded" containerName="ceilometer-notification-agent" Nov 23 15:01:43 crc kubenswrapper[5050]: I1123 15:01:43.533497 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="153c15c7-1c66-41bd-b749-c4ffcf36aded" containerName="ceilometer-notification-agent" Nov 23 15:01:43 crc kubenswrapper[5050]: E1123 15:01:43.533519 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="153c15c7-1c66-41bd-b749-c4ffcf36aded" containerName="sg-core" Nov 23 15:01:43 crc 
kubenswrapper[5050]: I1123 15:01:43.533525 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="153c15c7-1c66-41bd-b749-c4ffcf36aded" containerName="sg-core" Nov 23 15:01:43 crc kubenswrapper[5050]: I1123 15:01:43.533694 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="153c15c7-1c66-41bd-b749-c4ffcf36aded" containerName="ceilometer-central-agent" Nov 23 15:01:43 crc kubenswrapper[5050]: I1123 15:01:43.533706 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="153c15c7-1c66-41bd-b749-c4ffcf36aded" containerName="proxy-httpd" Nov 23 15:01:43 crc kubenswrapper[5050]: I1123 15:01:43.533719 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="153c15c7-1c66-41bd-b749-c4ffcf36aded" containerName="sg-core" Nov 23 15:01:43 crc kubenswrapper[5050]: I1123 15:01:43.533733 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="153c15c7-1c66-41bd-b749-c4ffcf36aded" containerName="ceilometer-notification-agent" Nov 23 15:01:43 crc kubenswrapper[5050]: I1123 15:01:43.535641 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 23 15:01:43 crc kubenswrapper[5050]: I1123 15:01:43.538470 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 23 15:01:43 crc kubenswrapper[5050]: I1123 15:01:43.538761 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 23 15:01:43 crc kubenswrapper[5050]: I1123 15:01:43.543496 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 23 15:01:43 crc kubenswrapper[5050]: I1123 15:01:43.571729 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="153c15c7-1c66-41bd-b749-c4ffcf36aded" path="/var/lib/kubelet/pods/153c15c7-1c66-41bd-b749-c4ffcf36aded/volumes" Nov 23 15:01:43 crc kubenswrapper[5050]: I1123 15:01:43.689191 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bf22bd66-89a1-426e-b976-5048e892fd0a-run-httpd\") pod \"ceilometer-0\" (UID: \"bf22bd66-89a1-426e-b976-5048e892fd0a\") " pod="openstack/ceilometer-0" Nov 23 15:01:43 crc kubenswrapper[5050]: I1123 15:01:43.689251 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/bf22bd66-89a1-426e-b976-5048e892fd0a-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"bf22bd66-89a1-426e-b976-5048e892fd0a\") " pod="openstack/ceilometer-0" Nov 23 15:01:43 crc kubenswrapper[5050]: I1123 15:01:43.689351 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bf22bd66-89a1-426e-b976-5048e892fd0a-scripts\") pod \"ceilometer-0\" (UID: \"bf22bd66-89a1-426e-b976-5048e892fd0a\") " pod="openstack/ceilometer-0" Nov 23 15:01:43 crc kubenswrapper[5050]: I1123 15:01:43.689387 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2rczq\" (UniqueName: \"kubernetes.io/projected/bf22bd66-89a1-426e-b976-5048e892fd0a-kube-api-access-2rczq\") pod \"ceilometer-0\" (UID: \"bf22bd66-89a1-426e-b976-5048e892fd0a\") " pod="openstack/ceilometer-0" Nov 23 15:01:43 crc kubenswrapper[5050]: I1123 15:01:43.689412 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"config-data\" (UniqueName: \"kubernetes.io/secret/bf22bd66-89a1-426e-b976-5048e892fd0a-config-data\") pod \"ceilometer-0\" (UID: \"bf22bd66-89a1-426e-b976-5048e892fd0a\") " pod="openstack/ceilometer-0" Nov 23 15:01:43 crc kubenswrapper[5050]: I1123 15:01:43.689534 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bf22bd66-89a1-426e-b976-5048e892fd0a-log-httpd\") pod \"ceilometer-0\" (UID: \"bf22bd66-89a1-426e-b976-5048e892fd0a\") " pod="openstack/ceilometer-0" Nov 23 15:01:43 crc kubenswrapper[5050]: I1123 15:01:43.689595 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bf22bd66-89a1-426e-b976-5048e892fd0a-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"bf22bd66-89a1-426e-b976-5048e892fd0a\") " pod="openstack/ceilometer-0" Nov 23 15:01:43 crc kubenswrapper[5050]: I1123 15:01:43.791712 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bf22bd66-89a1-426e-b976-5048e892fd0a-log-httpd\") pod \"ceilometer-0\" (UID: \"bf22bd66-89a1-426e-b976-5048e892fd0a\") " pod="openstack/ceilometer-0" Nov 23 15:01:43 crc kubenswrapper[5050]: I1123 15:01:43.791812 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bf22bd66-89a1-426e-b976-5048e892fd0a-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"bf22bd66-89a1-426e-b976-5048e892fd0a\") " pod="openstack/ceilometer-0" Nov 23 15:01:43 crc kubenswrapper[5050]: I1123 15:01:43.791861 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bf22bd66-89a1-426e-b976-5048e892fd0a-run-httpd\") pod \"ceilometer-0\" (UID: \"bf22bd66-89a1-426e-b976-5048e892fd0a\") " pod="openstack/ceilometer-0" Nov 23 15:01:43 crc kubenswrapper[5050]: I1123 15:01:43.791913 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/bf22bd66-89a1-426e-b976-5048e892fd0a-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"bf22bd66-89a1-426e-b976-5048e892fd0a\") " pod="openstack/ceilometer-0" Nov 23 15:01:43 crc kubenswrapper[5050]: I1123 15:01:43.792397 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bf22bd66-89a1-426e-b976-5048e892fd0a-run-httpd\") pod \"ceilometer-0\" (UID: \"bf22bd66-89a1-426e-b976-5048e892fd0a\") " pod="openstack/ceilometer-0" Nov 23 15:01:43 crc kubenswrapper[5050]: I1123 15:01:43.792397 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bf22bd66-89a1-426e-b976-5048e892fd0a-log-httpd\") pod \"ceilometer-0\" (UID: \"bf22bd66-89a1-426e-b976-5048e892fd0a\") " pod="openstack/ceilometer-0" Nov 23 15:01:43 crc kubenswrapper[5050]: I1123 15:01:43.792547 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bf22bd66-89a1-426e-b976-5048e892fd0a-scripts\") pod \"ceilometer-0\" (UID: \"bf22bd66-89a1-426e-b976-5048e892fd0a\") " pod="openstack/ceilometer-0" Nov 23 15:01:43 crc kubenswrapper[5050]: I1123 15:01:43.792950 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2rczq\" 
(UniqueName: \"kubernetes.io/projected/bf22bd66-89a1-426e-b976-5048e892fd0a-kube-api-access-2rczq\") pod \"ceilometer-0\" (UID: \"bf22bd66-89a1-426e-b976-5048e892fd0a\") " pod="openstack/ceilometer-0" Nov 23 15:01:43 crc kubenswrapper[5050]: I1123 15:01:43.792977 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bf22bd66-89a1-426e-b976-5048e892fd0a-config-data\") pod \"ceilometer-0\" (UID: \"bf22bd66-89a1-426e-b976-5048e892fd0a\") " pod="openstack/ceilometer-0" Nov 23 15:01:43 crc kubenswrapper[5050]: I1123 15:01:43.798181 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bf22bd66-89a1-426e-b976-5048e892fd0a-config-data\") pod \"ceilometer-0\" (UID: \"bf22bd66-89a1-426e-b976-5048e892fd0a\") " pod="openstack/ceilometer-0" Nov 23 15:01:43 crc kubenswrapper[5050]: I1123 15:01:43.798349 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/bf22bd66-89a1-426e-b976-5048e892fd0a-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"bf22bd66-89a1-426e-b976-5048e892fd0a\") " pod="openstack/ceilometer-0" Nov 23 15:01:43 crc kubenswrapper[5050]: I1123 15:01:43.799082 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bf22bd66-89a1-426e-b976-5048e892fd0a-scripts\") pod \"ceilometer-0\" (UID: \"bf22bd66-89a1-426e-b976-5048e892fd0a\") " pod="openstack/ceilometer-0" Nov 23 15:01:43 crc kubenswrapper[5050]: I1123 15:01:43.800730 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bf22bd66-89a1-426e-b976-5048e892fd0a-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"bf22bd66-89a1-426e-b976-5048e892fd0a\") " pod="openstack/ceilometer-0" Nov 23 15:01:43 crc kubenswrapper[5050]: I1123 15:01:43.817806 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2rczq\" (UniqueName: \"kubernetes.io/projected/bf22bd66-89a1-426e-b976-5048e892fd0a-kube-api-access-2rczq\") pod \"ceilometer-0\" (UID: \"bf22bd66-89a1-426e-b976-5048e892fd0a\") " pod="openstack/ceilometer-0" Nov 23 15:01:43 crc kubenswrapper[5050]: I1123 15:01:43.901249 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 23 15:01:46 crc kubenswrapper[5050]: I1123 15:01:46.327799 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-6b854f87-drlhc" Nov 23 15:01:46 crc kubenswrapper[5050]: I1123 15:01:46.328658 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-6b854f87-drlhc" Nov 23 15:01:49 crc kubenswrapper[5050]: I1123 15:01:49.369165 5050 scope.go:117] "RemoveContainer" containerID="19f1b5194eedef22dced68a1910ec80c397d353e5869d0b9b6cbc5b129b09d78" Nov 23 15:01:49 crc kubenswrapper[5050]: I1123 15:01:49.465874 5050 scope.go:117] "RemoveContainer" containerID="f178f819f8702c2cb4266228044156442a5e47d4e9f9be48f59a3d7f1087fb03" Nov 23 15:01:49 crc kubenswrapper[5050]: I1123 15:01:49.520285 5050 scope.go:117] "RemoveContainer" containerID="15f3969a8a7a909d23a8390a0a67bf1b44c8cba8d87ddc6f58c877a840e235a5" Nov 23 15:01:50 crc kubenswrapper[5050]: I1123 15:01:50.045040 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 23 15:01:50 crc kubenswrapper[5050]: I1123 15:01:50.395244 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 23 15:01:50 crc kubenswrapper[5050]: I1123 15:01:50.571712 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bf22bd66-89a1-426e-b976-5048e892fd0a","Type":"ContainerStarted","Data":"346c65a9631af3081b025ae1e6d22181f7b71a8b4f1d4af1179e4b500c7e33e5"} Nov 23 15:01:50 crc kubenswrapper[5050]: I1123 15:01:50.576257 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"014d6c50-fe8c-4b39-bccd-531037f1ff10","Type":"ContainerStarted","Data":"af27c7343928c8d4c4bff7ed205f0bb16b5e7302d0aa9e3deefadfcb0edf736c"} Nov 23 15:01:50 crc kubenswrapper[5050]: I1123 15:01:50.598517 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstackclient" podStartSLOduration=3.078989745 podStartE2EDuration="13.5984916s" podCreationTimestamp="2025-11-23 15:01:37 +0000 UTC" firstStartedPulling="2025-11-23 15:01:38.954047558 +0000 UTC m=+1194.121044043" lastFinishedPulling="2025-11-23 15:01:49.473549423 +0000 UTC m=+1204.640545898" observedRunningTime="2025-11-23 15:01:50.592557925 +0000 UTC m=+1205.759554430" watchObservedRunningTime="2025-11-23 15:01:50.5984916 +0000 UTC m=+1205.765488085" Nov 23 15:01:51 crc kubenswrapper[5050]: I1123 15:01:51.433557 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-db-create-rl7fq"] Nov 23 15:01:51 crc kubenswrapper[5050]: I1123 15:01:51.435295 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-db-create-rl7fq" Nov 23 15:01:51 crc kubenswrapper[5050]: I1123 15:01:51.450757 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-rl7fq"] Nov 23 15:01:51 crc kubenswrapper[5050]: I1123 15:01:51.477507 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/35ca6cdf-0126-49e9-a731-efa32a5e7edf-operator-scripts\") pod \"nova-api-db-create-rl7fq\" (UID: \"35ca6cdf-0126-49e9-a731-efa32a5e7edf\") " pod="openstack/nova-api-db-create-rl7fq" Nov 23 15:01:51 crc kubenswrapper[5050]: I1123 15:01:51.477817 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-whqql\" (UniqueName: \"kubernetes.io/projected/35ca6cdf-0126-49e9-a731-efa32a5e7edf-kube-api-access-whqql\") pod \"nova-api-db-create-rl7fq\" (UID: \"35ca6cdf-0126-49e9-a731-efa32a5e7edf\") " pod="openstack/nova-api-db-create-rl7fq" Nov 23 15:01:51 crc kubenswrapper[5050]: I1123 15:01:51.579800 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/35ca6cdf-0126-49e9-a731-efa32a5e7edf-operator-scripts\") pod \"nova-api-db-create-rl7fq\" (UID: \"35ca6cdf-0126-49e9-a731-efa32a5e7edf\") " pod="openstack/nova-api-db-create-rl7fq" Nov 23 15:01:51 crc kubenswrapper[5050]: I1123 15:01:51.579919 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-whqql\" (UniqueName: \"kubernetes.io/projected/35ca6cdf-0126-49e9-a731-efa32a5e7edf-kube-api-access-whqql\") pod \"nova-api-db-create-rl7fq\" (UID: \"35ca6cdf-0126-49e9-a731-efa32a5e7edf\") " pod="openstack/nova-api-db-create-rl7fq" Nov 23 15:01:51 crc kubenswrapper[5050]: I1123 15:01:51.581395 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/35ca6cdf-0126-49e9-a731-efa32a5e7edf-operator-scripts\") pod \"nova-api-db-create-rl7fq\" (UID: \"35ca6cdf-0126-49e9-a731-efa32a5e7edf\") " pod="openstack/nova-api-db-create-rl7fq" Nov 23 15:01:51 crc kubenswrapper[5050]: I1123 15:01:51.591695 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-db-create-mwptv"] Nov 23 15:01:51 crc kubenswrapper[5050]: I1123 15:01:51.593191 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-mwptv"] Nov 23 15:01:51 crc kubenswrapper[5050]: I1123 15:01:51.593304 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-db-create-mwptv" Nov 23 15:01:51 crc kubenswrapper[5050]: I1123 15:01:51.639532 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-whqql\" (UniqueName: \"kubernetes.io/projected/35ca6cdf-0126-49e9-a731-efa32a5e7edf-kube-api-access-whqql\") pod \"nova-api-db-create-rl7fq\" (UID: \"35ca6cdf-0126-49e9-a731-efa32a5e7edf\") " pod="openstack/nova-api-db-create-rl7fq" Nov 23 15:01:51 crc kubenswrapper[5050]: I1123 15:01:51.651737 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bf22bd66-89a1-426e-b976-5048e892fd0a","Type":"ContainerStarted","Data":"427daadd3a86480e5477fc5370f76629ddff3edb6271bef638fd9337073aaf25"} Nov 23 15:01:51 crc kubenswrapper[5050]: I1123 15:01:51.651861 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bf22bd66-89a1-426e-b976-5048e892fd0a","Type":"ContainerStarted","Data":"6d8ca1a4fe2226502af880dfc1ca8adeeae1a4d077e080e11fbc07014425f0a5"} Nov 23 15:01:51 crc kubenswrapper[5050]: I1123 15:01:51.682615 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c6c16397-0bd7-4c64-bbd4-0a8f5ce2ef6a-operator-scripts\") pod \"nova-cell0-db-create-mwptv\" (UID: \"c6c16397-0bd7-4c64-bbd4-0a8f5ce2ef6a\") " pod="openstack/nova-cell0-db-create-mwptv" Nov 23 15:01:51 crc kubenswrapper[5050]: I1123 15:01:51.682733 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zfq9p\" (UniqueName: \"kubernetes.io/projected/c6c16397-0bd7-4c64-bbd4-0a8f5ce2ef6a-kube-api-access-zfq9p\") pod \"nova-cell0-db-create-mwptv\" (UID: \"c6c16397-0bd7-4c64-bbd4-0a8f5ce2ef6a\") " pod="openstack/nova-cell0-db-create-mwptv" Nov 23 15:01:51 crc kubenswrapper[5050]: I1123 15:01:51.697494 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-db-create-hxh4h"] Nov 23 15:01:51 crc kubenswrapper[5050]: I1123 15:01:51.698929 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-hxh4h" Nov 23 15:01:51 crc kubenswrapper[5050]: I1123 15:01:51.721083 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-f81d-account-create-5bgqw"] Nov 23 15:01:51 crc kubenswrapper[5050]: I1123 15:01:51.726430 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-f81d-account-create-5bgqw" Nov 23 15:01:51 crc kubenswrapper[5050]: I1123 15:01:51.748423 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-db-secret" Nov 23 15:01:51 crc kubenswrapper[5050]: I1123 15:01:51.748901 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-hxh4h"] Nov 23 15:01:51 crc kubenswrapper[5050]: I1123 15:01:51.760616 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-f81d-account-create-5bgqw"] Nov 23 15:01:51 crc kubenswrapper[5050]: I1123 15:01:51.761599 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-db-create-rl7fq" Nov 23 15:01:51 crc kubenswrapper[5050]: I1123 15:01:51.791386 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e8601165-154f-4167-a470-e06481c1944e-operator-scripts\") pod \"nova-cell1-db-create-hxh4h\" (UID: \"e8601165-154f-4167-a470-e06481c1944e\") " pod="openstack/nova-cell1-db-create-hxh4h" Nov 23 15:01:51 crc kubenswrapper[5050]: I1123 15:01:51.791478 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c6c16397-0bd7-4c64-bbd4-0a8f5ce2ef6a-operator-scripts\") pod \"nova-cell0-db-create-mwptv\" (UID: \"c6c16397-0bd7-4c64-bbd4-0a8f5ce2ef6a\") " pod="openstack/nova-cell0-db-create-mwptv" Nov 23 15:01:51 crc kubenswrapper[5050]: I1123 15:01:51.791516 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f4hcg\" (UniqueName: \"kubernetes.io/projected/0dc7ce44-5e6c-4971-b829-54700cb6334f-kube-api-access-f4hcg\") pod \"nova-api-f81d-account-create-5bgqw\" (UID: \"0dc7ce44-5e6c-4971-b829-54700cb6334f\") " pod="openstack/nova-api-f81d-account-create-5bgqw" Nov 23 15:01:51 crc kubenswrapper[5050]: I1123 15:01:51.791543 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0dc7ce44-5e6c-4971-b829-54700cb6334f-operator-scripts\") pod \"nova-api-f81d-account-create-5bgqw\" (UID: \"0dc7ce44-5e6c-4971-b829-54700cb6334f\") " pod="openstack/nova-api-f81d-account-create-5bgqw" Nov 23 15:01:51 crc kubenswrapper[5050]: I1123 15:01:51.791562 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pzbsx\" (UniqueName: \"kubernetes.io/projected/e8601165-154f-4167-a470-e06481c1944e-kube-api-access-pzbsx\") pod \"nova-cell1-db-create-hxh4h\" (UID: \"e8601165-154f-4167-a470-e06481c1944e\") " pod="openstack/nova-cell1-db-create-hxh4h" Nov 23 15:01:51 crc kubenswrapper[5050]: I1123 15:01:51.791591 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zfq9p\" (UniqueName: \"kubernetes.io/projected/c6c16397-0bd7-4c64-bbd4-0a8f5ce2ef6a-kube-api-access-zfq9p\") pod \"nova-cell0-db-create-mwptv\" (UID: \"c6c16397-0bd7-4c64-bbd4-0a8f5ce2ef6a\") " pod="openstack/nova-cell0-db-create-mwptv" Nov 23 15:01:51 crc kubenswrapper[5050]: I1123 15:01:51.792813 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c6c16397-0bd7-4c64-bbd4-0a8f5ce2ef6a-operator-scripts\") pod \"nova-cell0-db-create-mwptv\" (UID: \"c6c16397-0bd7-4c64-bbd4-0a8f5ce2ef6a\") " pod="openstack/nova-cell0-db-create-mwptv" Nov 23 15:01:51 crc kubenswrapper[5050]: I1123 15:01:51.831094 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zfq9p\" (UniqueName: \"kubernetes.io/projected/c6c16397-0bd7-4c64-bbd4-0a8f5ce2ef6a-kube-api-access-zfq9p\") pod \"nova-cell0-db-create-mwptv\" (UID: \"c6c16397-0bd7-4c64-bbd4-0a8f5ce2ef6a\") " pod="openstack/nova-cell0-db-create-mwptv" Nov 23 15:01:51 crc kubenswrapper[5050]: I1123 15:01:51.895015 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: 
\"kubernetes.io/configmap/e8601165-154f-4167-a470-e06481c1944e-operator-scripts\") pod \"nova-cell1-db-create-hxh4h\" (UID: \"e8601165-154f-4167-a470-e06481c1944e\") " pod="openstack/nova-cell1-db-create-hxh4h" Nov 23 15:01:51 crc kubenswrapper[5050]: I1123 15:01:51.895433 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f4hcg\" (UniqueName: \"kubernetes.io/projected/0dc7ce44-5e6c-4971-b829-54700cb6334f-kube-api-access-f4hcg\") pod \"nova-api-f81d-account-create-5bgqw\" (UID: \"0dc7ce44-5e6c-4971-b829-54700cb6334f\") " pod="openstack/nova-api-f81d-account-create-5bgqw" Nov 23 15:01:51 crc kubenswrapper[5050]: I1123 15:01:51.895484 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0dc7ce44-5e6c-4971-b829-54700cb6334f-operator-scripts\") pod \"nova-api-f81d-account-create-5bgqw\" (UID: \"0dc7ce44-5e6c-4971-b829-54700cb6334f\") " pod="openstack/nova-api-f81d-account-create-5bgqw" Nov 23 15:01:51 crc kubenswrapper[5050]: I1123 15:01:51.895509 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pzbsx\" (UniqueName: \"kubernetes.io/projected/e8601165-154f-4167-a470-e06481c1944e-kube-api-access-pzbsx\") pod \"nova-cell1-db-create-hxh4h\" (UID: \"e8601165-154f-4167-a470-e06481c1944e\") " pod="openstack/nova-cell1-db-create-hxh4h" Nov 23 15:01:51 crc kubenswrapper[5050]: I1123 15:01:51.896121 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e8601165-154f-4167-a470-e06481c1944e-operator-scripts\") pod \"nova-cell1-db-create-hxh4h\" (UID: \"e8601165-154f-4167-a470-e06481c1944e\") " pod="openstack/nova-cell1-db-create-hxh4h" Nov 23 15:01:51 crc kubenswrapper[5050]: I1123 15:01:51.896743 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0dc7ce44-5e6c-4971-b829-54700cb6334f-operator-scripts\") pod \"nova-api-f81d-account-create-5bgqw\" (UID: \"0dc7ce44-5e6c-4971-b829-54700cb6334f\") " pod="openstack/nova-api-f81d-account-create-5bgqw" Nov 23 15:01:51 crc kubenswrapper[5050]: I1123 15:01:51.925556 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-3052-account-create-ztg8b"] Nov 23 15:01:51 crc kubenswrapper[5050]: I1123 15:01:51.927088 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-3052-account-create-ztg8b" Nov 23 15:01:51 crc kubenswrapper[5050]: I1123 15:01:51.932245 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pzbsx\" (UniqueName: \"kubernetes.io/projected/e8601165-154f-4167-a470-e06481c1944e-kube-api-access-pzbsx\") pod \"nova-cell1-db-create-hxh4h\" (UID: \"e8601165-154f-4167-a470-e06481c1944e\") " pod="openstack/nova-cell1-db-create-hxh4h" Nov 23 15:01:51 crc kubenswrapper[5050]: I1123 15:01:51.932558 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-db-secret" Nov 23 15:01:51 crc kubenswrapper[5050]: I1123 15:01:51.933171 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f4hcg\" (UniqueName: \"kubernetes.io/projected/0dc7ce44-5e6c-4971-b829-54700cb6334f-kube-api-access-f4hcg\") pod \"nova-api-f81d-account-create-5bgqw\" (UID: \"0dc7ce44-5e6c-4971-b829-54700cb6334f\") " pod="openstack/nova-api-f81d-account-create-5bgqw" Nov 23 15:01:51 crc kubenswrapper[5050]: I1123 15:01:51.948899 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-f81d-account-create-5bgqw" Nov 23 15:01:51 crc kubenswrapper[5050]: I1123 15:01:51.973554 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-3052-account-create-ztg8b"] Nov 23 15:01:52 crc kubenswrapper[5050]: I1123 15:01:51.998001 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e7a21645-b633-4073-a971-c8896b6dbc06-operator-scripts\") pod \"nova-cell0-3052-account-create-ztg8b\" (UID: \"e7a21645-b633-4073-a971-c8896b6dbc06\") " pod="openstack/nova-cell0-3052-account-create-ztg8b" Nov 23 15:01:52 crc kubenswrapper[5050]: I1123 15:01:51.998049 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l745z\" (UniqueName: \"kubernetes.io/projected/e7a21645-b633-4073-a971-c8896b6dbc06-kube-api-access-l745z\") pod \"nova-cell0-3052-account-create-ztg8b\" (UID: \"e7a21645-b633-4073-a971-c8896b6dbc06\") " pod="openstack/nova-cell0-3052-account-create-ztg8b" Nov 23 15:01:52 crc kubenswrapper[5050]: I1123 15:01:52.025174 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-mwptv" Nov 23 15:01:52 crc kubenswrapper[5050]: I1123 15:01:52.065365 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-94ab-account-create-dt6k2"] Nov 23 15:01:52 crc kubenswrapper[5050]: I1123 15:01:52.075977 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-94ab-account-create-dt6k2" Nov 23 15:01:52 crc kubenswrapper[5050]: I1123 15:01:52.079570 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-db-secret" Nov 23 15:01:52 crc kubenswrapper[5050]: I1123 15:01:52.084713 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-94ab-account-create-dt6k2"] Nov 23 15:01:52 crc kubenswrapper[5050]: I1123 15:01:52.090415 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-db-create-hxh4h" Nov 23 15:01:52 crc kubenswrapper[5050]: I1123 15:01:52.100586 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e7a21645-b633-4073-a971-c8896b6dbc06-operator-scripts\") pod \"nova-cell0-3052-account-create-ztg8b\" (UID: \"e7a21645-b633-4073-a971-c8896b6dbc06\") " pod="openstack/nova-cell0-3052-account-create-ztg8b" Nov 23 15:01:52 crc kubenswrapper[5050]: I1123 15:01:52.100630 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l745z\" (UniqueName: \"kubernetes.io/projected/e7a21645-b633-4073-a971-c8896b6dbc06-kube-api-access-l745z\") pod \"nova-cell0-3052-account-create-ztg8b\" (UID: \"e7a21645-b633-4073-a971-c8896b6dbc06\") " pod="openstack/nova-cell0-3052-account-create-ztg8b" Nov 23 15:01:52 crc kubenswrapper[5050]: I1123 15:01:52.103122 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e7a21645-b633-4073-a971-c8896b6dbc06-operator-scripts\") pod \"nova-cell0-3052-account-create-ztg8b\" (UID: \"e7a21645-b633-4073-a971-c8896b6dbc06\") " pod="openstack/nova-cell0-3052-account-create-ztg8b" Nov 23 15:01:52 crc kubenswrapper[5050]: I1123 15:01:52.134583 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l745z\" (UniqueName: \"kubernetes.io/projected/e7a21645-b633-4073-a971-c8896b6dbc06-kube-api-access-l745z\") pod \"nova-cell0-3052-account-create-ztg8b\" (UID: \"e7a21645-b633-4073-a971-c8896b6dbc06\") " pod="openstack/nova-cell0-3052-account-create-ztg8b" Nov 23 15:01:52 crc kubenswrapper[5050]: I1123 15:01:52.204966 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/344e218a-8f67-40b6-90e6-6970ca14cda3-operator-scripts\") pod \"nova-cell1-94ab-account-create-dt6k2\" (UID: \"344e218a-8f67-40b6-90e6-6970ca14cda3\") " pod="openstack/nova-cell1-94ab-account-create-dt6k2" Nov 23 15:01:52 crc kubenswrapper[5050]: I1123 15:01:52.205143 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bw4nr\" (UniqueName: \"kubernetes.io/projected/344e218a-8f67-40b6-90e6-6970ca14cda3-kube-api-access-bw4nr\") pod \"nova-cell1-94ab-account-create-dt6k2\" (UID: \"344e218a-8f67-40b6-90e6-6970ca14cda3\") " pod="openstack/nova-cell1-94ab-account-create-dt6k2" Nov 23 15:01:52 crc kubenswrapper[5050]: I1123 15:01:52.279151 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-3052-account-create-ztg8b" Nov 23 15:01:52 crc kubenswrapper[5050]: I1123 15:01:52.307935 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/344e218a-8f67-40b6-90e6-6970ca14cda3-operator-scripts\") pod \"nova-cell1-94ab-account-create-dt6k2\" (UID: \"344e218a-8f67-40b6-90e6-6970ca14cda3\") " pod="openstack/nova-cell1-94ab-account-create-dt6k2" Nov 23 15:01:52 crc kubenswrapper[5050]: I1123 15:01:52.308058 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bw4nr\" (UniqueName: \"kubernetes.io/projected/344e218a-8f67-40b6-90e6-6970ca14cda3-kube-api-access-bw4nr\") pod \"nova-cell1-94ab-account-create-dt6k2\" (UID: \"344e218a-8f67-40b6-90e6-6970ca14cda3\") " pod="openstack/nova-cell1-94ab-account-create-dt6k2" Nov 23 15:01:52 crc kubenswrapper[5050]: I1123 15:01:52.310001 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/344e218a-8f67-40b6-90e6-6970ca14cda3-operator-scripts\") pod \"nova-cell1-94ab-account-create-dt6k2\" (UID: \"344e218a-8f67-40b6-90e6-6970ca14cda3\") " pod="openstack/nova-cell1-94ab-account-create-dt6k2" Nov 23 15:01:52 crc kubenswrapper[5050]: I1123 15:01:52.326368 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bw4nr\" (UniqueName: \"kubernetes.io/projected/344e218a-8f67-40b6-90e6-6970ca14cda3-kube-api-access-bw4nr\") pod \"nova-cell1-94ab-account-create-dt6k2\" (UID: \"344e218a-8f67-40b6-90e6-6970ca14cda3\") " pod="openstack/nova-cell1-94ab-account-create-dt6k2" Nov 23 15:01:52 crc kubenswrapper[5050]: I1123 15:01:52.403641 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-94ab-account-create-dt6k2" Nov 23 15:01:52 crc kubenswrapper[5050]: I1123 15:01:52.447351 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-rl7fq"] Nov 23 15:01:52 crc kubenswrapper[5050]: I1123 15:01:52.639077 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-mwptv"] Nov 23 15:01:52 crc kubenswrapper[5050]: I1123 15:01:52.696869 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-mwptv" event={"ID":"c6c16397-0bd7-4c64-bbd4-0a8f5ce2ef6a","Type":"ContainerStarted","Data":"6a374cc028001032f0d3016f7682900288a52b2d2c1cbe1667c32ec4ebaa2a40"} Nov 23 15:01:52 crc kubenswrapper[5050]: I1123 15:01:52.704957 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-rl7fq" event={"ID":"35ca6cdf-0126-49e9-a731-efa32a5e7edf","Type":"ContainerStarted","Data":"1cbc679703ac9ee301570bd1f6fd44d5dcab81094567d2548567af41d22c5cf6"} Nov 23 15:01:52 crc kubenswrapper[5050]: I1123 15:01:52.734319 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bf22bd66-89a1-426e-b976-5048e892fd0a","Type":"ContainerStarted","Data":"00bffdb05a2cea22a8013de3f7d37366b08b1085052fa214184fef486440f100"} Nov 23 15:01:52 crc kubenswrapper[5050]: I1123 15:01:52.757907 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-f81d-account-create-5bgqw"] Nov 23 15:01:52 crc kubenswrapper[5050]: I1123 15:01:52.779621 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-hxh4h"] Nov 23 15:01:52 crc kubenswrapper[5050]: I1123 15:01:52.988303 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-94ab-account-create-dt6k2"] Nov 23 15:01:52 crc kubenswrapper[5050]: W1123 15:01:52.996728 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod344e218a_8f67_40b6_90e6_6970ca14cda3.slice/crio-80f990119c44c97da89ab94bbf932898922eae45388641cded4e7533c9901af5 WatchSource:0}: Error finding container 80f990119c44c97da89ab94bbf932898922eae45388641cded4e7533c9901af5: Status 404 returned error can't find the container with id 80f990119c44c97da89ab94bbf932898922eae45388641cded4e7533c9901af5 Nov 23 15:01:53 crc kubenswrapper[5050]: I1123 15:01:53.002275 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-3052-account-create-ztg8b"] Nov 23 15:01:53 crc kubenswrapper[5050]: I1123 15:01:53.753479 5050 generic.go:334] "Generic (PLEG): container finished" podID="c6c16397-0bd7-4c64-bbd4-0a8f5ce2ef6a" containerID="67627a24454619a22e3572ee09d69aded5b815e44193c91d154fa269a5576af2" exitCode=0 Nov 23 15:01:53 crc kubenswrapper[5050]: I1123 15:01:53.753677 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-mwptv" event={"ID":"c6c16397-0bd7-4c64-bbd4-0a8f5ce2ef6a","Type":"ContainerDied","Data":"67627a24454619a22e3572ee09d69aded5b815e44193c91d154fa269a5576af2"} Nov 23 15:01:53 crc kubenswrapper[5050]: I1123 15:01:53.761812 5050 generic.go:334] "Generic (PLEG): container finished" podID="e8601165-154f-4167-a470-e06481c1944e" containerID="74740a260cb5560b82138d1b2de9a496e2eaceebcd795071134e76087256d9cf" exitCode=0 Nov 23 15:01:53 crc kubenswrapper[5050]: I1123 15:01:53.761919 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-hxh4h" 
event={"ID":"e8601165-154f-4167-a470-e06481c1944e","Type":"ContainerDied","Data":"74740a260cb5560b82138d1b2de9a496e2eaceebcd795071134e76087256d9cf"} Nov 23 15:01:53 crc kubenswrapper[5050]: I1123 15:01:53.761974 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-hxh4h" event={"ID":"e8601165-154f-4167-a470-e06481c1944e","Type":"ContainerStarted","Data":"06f4e322879604ce15fa9919d05f7977caa6bb0045cb22936d9dc4a5c4af7e30"} Nov 23 15:01:53 crc kubenswrapper[5050]: I1123 15:01:53.763881 5050 generic.go:334] "Generic (PLEG): container finished" podID="35ca6cdf-0126-49e9-a731-efa32a5e7edf" containerID="49627df0d423eed22e18e73cad1c6b2832d4b69f55e220d377e77c16051b72e0" exitCode=0 Nov 23 15:01:53 crc kubenswrapper[5050]: I1123 15:01:53.763977 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-rl7fq" event={"ID":"35ca6cdf-0126-49e9-a731-efa32a5e7edf","Type":"ContainerDied","Data":"49627df0d423eed22e18e73cad1c6b2832d4b69f55e220d377e77c16051b72e0"} Nov 23 15:01:53 crc kubenswrapper[5050]: I1123 15:01:53.773245 5050 generic.go:334] "Generic (PLEG): container finished" podID="344e218a-8f67-40b6-90e6-6970ca14cda3" containerID="0df6e87311c357f4060157ad3d323dd9559acb0de7668b9590e059d1943caa42" exitCode=0 Nov 23 15:01:53 crc kubenswrapper[5050]: I1123 15:01:53.773373 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-94ab-account-create-dt6k2" event={"ID":"344e218a-8f67-40b6-90e6-6970ca14cda3","Type":"ContainerDied","Data":"0df6e87311c357f4060157ad3d323dd9559acb0de7668b9590e059d1943caa42"} Nov 23 15:01:53 crc kubenswrapper[5050]: I1123 15:01:53.773460 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-94ab-account-create-dt6k2" event={"ID":"344e218a-8f67-40b6-90e6-6970ca14cda3","Type":"ContainerStarted","Data":"80f990119c44c97da89ab94bbf932898922eae45388641cded4e7533c9901af5"} Nov 23 15:01:53 crc kubenswrapper[5050]: I1123 15:01:53.775283 5050 generic.go:334] "Generic (PLEG): container finished" podID="e7a21645-b633-4073-a971-c8896b6dbc06" containerID="8413b1717750dc10c45317bdb032089ccf0935be49b622bea66cda6bbeae307e" exitCode=0 Nov 23 15:01:53 crc kubenswrapper[5050]: I1123 15:01:53.775349 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-3052-account-create-ztg8b" event={"ID":"e7a21645-b633-4073-a971-c8896b6dbc06","Type":"ContainerDied","Data":"8413b1717750dc10c45317bdb032089ccf0935be49b622bea66cda6bbeae307e"} Nov 23 15:01:53 crc kubenswrapper[5050]: I1123 15:01:53.775373 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-3052-account-create-ztg8b" event={"ID":"e7a21645-b633-4073-a971-c8896b6dbc06","Type":"ContainerStarted","Data":"93974c9a4982c91e0914e3f59036a097a41433a76f7b376dd6130988f6cb3d3f"} Nov 23 15:01:53 crc kubenswrapper[5050]: I1123 15:01:53.779138 5050 generic.go:334] "Generic (PLEG): container finished" podID="0dc7ce44-5e6c-4971-b829-54700cb6334f" containerID="6de079cc2bb464468ef1408abbf5b65fc5771b1f360bfbfbcac40963b5df4e6d" exitCode=0 Nov 23 15:01:53 crc kubenswrapper[5050]: I1123 15:01:53.779186 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-f81d-account-create-5bgqw" event={"ID":"0dc7ce44-5e6c-4971-b829-54700cb6334f","Type":"ContainerDied","Data":"6de079cc2bb464468ef1408abbf5b65fc5771b1f360bfbfbcac40963b5df4e6d"} Nov 23 15:01:53 crc kubenswrapper[5050]: I1123 15:01:53.779213 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/nova-api-f81d-account-create-5bgqw" event={"ID":"0dc7ce44-5e6c-4971-b829-54700cb6334f","Type":"ContainerStarted","Data":"9f0131e5ef71ccafda692581a226cd88897a69b148e79712c5ccd03fc5f6145c"} Nov 23 15:01:54 crc kubenswrapper[5050]: I1123 15:01:54.794269 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bf22bd66-89a1-426e-b976-5048e892fd0a","Type":"ContainerStarted","Data":"f80c7749236f5263982dab4bf87f3ce304251eab9531a2aa6d4a46707413ac9d"} Nov 23 15:01:54 crc kubenswrapper[5050]: I1123 15:01:54.794808 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="bf22bd66-89a1-426e-b976-5048e892fd0a" containerName="ceilometer-central-agent" containerID="cri-o://6d8ca1a4fe2226502af880dfc1ca8adeeae1a4d077e080e11fbc07014425f0a5" gracePeriod=30 Nov 23 15:01:54 crc kubenswrapper[5050]: I1123 15:01:54.795107 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="bf22bd66-89a1-426e-b976-5048e892fd0a" containerName="sg-core" containerID="cri-o://00bffdb05a2cea22a8013de3f7d37366b08b1085052fa214184fef486440f100" gracePeriod=30 Nov 23 15:01:54 crc kubenswrapper[5050]: I1123 15:01:54.795302 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="bf22bd66-89a1-426e-b976-5048e892fd0a" containerName="proxy-httpd" containerID="cri-o://f80c7749236f5263982dab4bf87f3ce304251eab9531a2aa6d4a46707413ac9d" gracePeriod=30 Nov 23 15:01:54 crc kubenswrapper[5050]: I1123 15:01:54.795390 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="bf22bd66-89a1-426e-b976-5048e892fd0a" containerName="ceilometer-notification-agent" containerID="cri-o://427daadd3a86480e5477fc5370f76629ddff3edb6271bef638fd9337073aaf25" gracePeriod=30 Nov 23 15:01:54 crc kubenswrapper[5050]: I1123 15:01:54.833942 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=7.8791527630000004 podStartE2EDuration="11.833920622s" podCreationTimestamp="2025-11-23 15:01:43 +0000 UTC" firstStartedPulling="2025-11-23 15:01:50.060894913 +0000 UTC m=+1205.227891418" lastFinishedPulling="2025-11-23 15:01:54.015662752 +0000 UTC m=+1209.182659277" observedRunningTime="2025-11-23 15:01:54.827075052 +0000 UTC m=+1209.994071537" watchObservedRunningTime="2025-11-23 15:01:54.833920622 +0000 UTC m=+1210.000917107" Nov 23 15:01:55 crc kubenswrapper[5050]: I1123 15:01:55.301246 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-db-create-hxh4h" Nov 23 15:01:55 crc kubenswrapper[5050]: I1123 15:01:55.385635 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e8601165-154f-4167-a470-e06481c1944e-operator-scripts\") pod \"e8601165-154f-4167-a470-e06481c1944e\" (UID: \"e8601165-154f-4167-a470-e06481c1944e\") " Nov 23 15:01:55 crc kubenswrapper[5050]: I1123 15:01:55.385986 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pzbsx\" (UniqueName: \"kubernetes.io/projected/e8601165-154f-4167-a470-e06481c1944e-kube-api-access-pzbsx\") pod \"e8601165-154f-4167-a470-e06481c1944e\" (UID: \"e8601165-154f-4167-a470-e06481c1944e\") " Nov 23 15:01:55 crc kubenswrapper[5050]: I1123 15:01:55.387744 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e8601165-154f-4167-a470-e06481c1944e-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "e8601165-154f-4167-a470-e06481c1944e" (UID: "e8601165-154f-4167-a470-e06481c1944e"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 15:01:55 crc kubenswrapper[5050]: I1123 15:01:55.396785 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e8601165-154f-4167-a470-e06481c1944e-kube-api-access-pzbsx" (OuterVolumeSpecName: "kube-api-access-pzbsx") pod "e8601165-154f-4167-a470-e06481c1944e" (UID: "e8601165-154f-4167-a470-e06481c1944e"). InnerVolumeSpecName "kube-api-access-pzbsx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:01:55 crc kubenswrapper[5050]: I1123 15:01:55.488649 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pzbsx\" (UniqueName: \"kubernetes.io/projected/e8601165-154f-4167-a470-e06481c1944e-kube-api-access-pzbsx\") on node \"crc\" DevicePath \"\"" Nov 23 15:01:55 crc kubenswrapper[5050]: I1123 15:01:55.489013 5050 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e8601165-154f-4167-a470-e06481c1944e-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 23 15:01:55 crc kubenswrapper[5050]: I1123 15:01:55.512893 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-3052-account-create-ztg8b" Nov 23 15:01:55 crc kubenswrapper[5050]: I1123 15:01:55.542609 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-mwptv" Nov 23 15:01:55 crc kubenswrapper[5050]: I1123 15:01:55.552163 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-f81d-account-create-5bgqw" Nov 23 15:01:55 crc kubenswrapper[5050]: I1123 15:01:55.573866 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-94ab-account-create-dt6k2" Nov 23 15:01:55 crc kubenswrapper[5050]: I1123 15:01:55.584904 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-db-create-rl7fq" Nov 23 15:01:55 crc kubenswrapper[5050]: I1123 15:01:55.589964 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e7a21645-b633-4073-a971-c8896b6dbc06-operator-scripts\") pod \"e7a21645-b633-4073-a971-c8896b6dbc06\" (UID: \"e7a21645-b633-4073-a971-c8896b6dbc06\") " Nov 23 15:01:55 crc kubenswrapper[5050]: I1123 15:01:55.590358 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l745z\" (UniqueName: \"kubernetes.io/projected/e7a21645-b633-4073-a971-c8896b6dbc06-kube-api-access-l745z\") pod \"e7a21645-b633-4073-a971-c8896b6dbc06\" (UID: \"e7a21645-b633-4073-a971-c8896b6dbc06\") " Nov 23 15:01:55 crc kubenswrapper[5050]: I1123 15:01:55.590840 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e7a21645-b633-4073-a971-c8896b6dbc06-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "e7a21645-b633-4073-a971-c8896b6dbc06" (UID: "e7a21645-b633-4073-a971-c8896b6dbc06"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 15:01:55 crc kubenswrapper[5050]: I1123 15:01:55.595642 5050 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e7a21645-b633-4073-a971-c8896b6dbc06-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 23 15:01:55 crc kubenswrapper[5050]: I1123 15:01:55.598871 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e7a21645-b633-4073-a971-c8896b6dbc06-kube-api-access-l745z" (OuterVolumeSpecName: "kube-api-access-l745z") pod "e7a21645-b633-4073-a971-c8896b6dbc06" (UID: "e7a21645-b633-4073-a971-c8896b6dbc06"). InnerVolumeSpecName "kube-api-access-l745z". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:01:55 crc kubenswrapper[5050]: I1123 15:01:55.697309 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/344e218a-8f67-40b6-90e6-6970ca14cda3-operator-scripts\") pod \"344e218a-8f67-40b6-90e6-6970ca14cda3\" (UID: \"344e218a-8f67-40b6-90e6-6970ca14cda3\") " Nov 23 15:01:55 crc kubenswrapper[5050]: I1123 15:01:55.697395 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/35ca6cdf-0126-49e9-a731-efa32a5e7edf-operator-scripts\") pod \"35ca6cdf-0126-49e9-a731-efa32a5e7edf\" (UID: \"35ca6cdf-0126-49e9-a731-efa32a5e7edf\") " Nov 23 15:01:55 crc kubenswrapper[5050]: I1123 15:01:55.697468 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bw4nr\" (UniqueName: \"kubernetes.io/projected/344e218a-8f67-40b6-90e6-6970ca14cda3-kube-api-access-bw4nr\") pod \"344e218a-8f67-40b6-90e6-6970ca14cda3\" (UID: \"344e218a-8f67-40b6-90e6-6970ca14cda3\") " Nov 23 15:01:55 crc kubenswrapper[5050]: I1123 15:01:55.697492 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-whqql\" (UniqueName: \"kubernetes.io/projected/35ca6cdf-0126-49e9-a731-efa32a5e7edf-kube-api-access-whqql\") pod \"35ca6cdf-0126-49e9-a731-efa32a5e7edf\" (UID: \"35ca6cdf-0126-49e9-a731-efa32a5e7edf\") " Nov 23 15:01:55 crc kubenswrapper[5050]: I1123 15:01:55.697665 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0dc7ce44-5e6c-4971-b829-54700cb6334f-operator-scripts\") pod \"0dc7ce44-5e6c-4971-b829-54700cb6334f\" (UID: \"0dc7ce44-5e6c-4971-b829-54700cb6334f\") " Nov 23 15:01:55 crc kubenswrapper[5050]: I1123 15:01:55.697757 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f4hcg\" (UniqueName: \"kubernetes.io/projected/0dc7ce44-5e6c-4971-b829-54700cb6334f-kube-api-access-f4hcg\") pod \"0dc7ce44-5e6c-4971-b829-54700cb6334f\" (UID: \"0dc7ce44-5e6c-4971-b829-54700cb6334f\") " Nov 23 15:01:55 crc kubenswrapper[5050]: I1123 15:01:55.697891 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zfq9p\" (UniqueName: \"kubernetes.io/projected/c6c16397-0bd7-4c64-bbd4-0a8f5ce2ef6a-kube-api-access-zfq9p\") pod \"c6c16397-0bd7-4c64-bbd4-0a8f5ce2ef6a\" (UID: \"c6c16397-0bd7-4c64-bbd4-0a8f5ce2ef6a\") " Nov 23 15:01:55 crc kubenswrapper[5050]: I1123 15:01:55.697920 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c6c16397-0bd7-4c64-bbd4-0a8f5ce2ef6a-operator-scripts\") pod \"c6c16397-0bd7-4c64-bbd4-0a8f5ce2ef6a\" (UID: \"c6c16397-0bd7-4c64-bbd4-0a8f5ce2ef6a\") " Nov 23 15:01:55 crc kubenswrapper[5050]: I1123 15:01:55.698387 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l745z\" (UniqueName: \"kubernetes.io/projected/e7a21645-b633-4073-a971-c8896b6dbc06-kube-api-access-l745z\") on node \"crc\" DevicePath \"\"" Nov 23 15:01:55 crc kubenswrapper[5050]: I1123 15:01:55.698726 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/344e218a-8f67-40b6-90e6-6970ca14cda3-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "344e218a-8f67-40b6-90e6-6970ca14cda3" (UID: 
"344e218a-8f67-40b6-90e6-6970ca14cda3"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 15:01:55 crc kubenswrapper[5050]: I1123 15:01:55.699812 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0dc7ce44-5e6c-4971-b829-54700cb6334f-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "0dc7ce44-5e6c-4971-b829-54700cb6334f" (UID: "0dc7ce44-5e6c-4971-b829-54700cb6334f"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 15:01:55 crc kubenswrapper[5050]: I1123 15:01:55.699812 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/35ca6cdf-0126-49e9-a731-efa32a5e7edf-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "35ca6cdf-0126-49e9-a731-efa32a5e7edf" (UID: "35ca6cdf-0126-49e9-a731-efa32a5e7edf"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 15:01:55 crc kubenswrapper[5050]: I1123 15:01:55.699821 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c6c16397-0bd7-4c64-bbd4-0a8f5ce2ef6a-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "c6c16397-0bd7-4c64-bbd4-0a8f5ce2ef6a" (UID: "c6c16397-0bd7-4c64-bbd4-0a8f5ce2ef6a"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 15:01:55 crc kubenswrapper[5050]: I1123 15:01:55.702917 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0dc7ce44-5e6c-4971-b829-54700cb6334f-kube-api-access-f4hcg" (OuterVolumeSpecName: "kube-api-access-f4hcg") pod "0dc7ce44-5e6c-4971-b829-54700cb6334f" (UID: "0dc7ce44-5e6c-4971-b829-54700cb6334f"). InnerVolumeSpecName "kube-api-access-f4hcg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:01:55 crc kubenswrapper[5050]: I1123 15:01:55.704006 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c6c16397-0bd7-4c64-bbd4-0a8f5ce2ef6a-kube-api-access-zfq9p" (OuterVolumeSpecName: "kube-api-access-zfq9p") pod "c6c16397-0bd7-4c64-bbd4-0a8f5ce2ef6a" (UID: "c6c16397-0bd7-4c64-bbd4-0a8f5ce2ef6a"). InnerVolumeSpecName "kube-api-access-zfq9p". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:01:55 crc kubenswrapper[5050]: I1123 15:01:55.704585 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/344e218a-8f67-40b6-90e6-6970ca14cda3-kube-api-access-bw4nr" (OuterVolumeSpecName: "kube-api-access-bw4nr") pod "344e218a-8f67-40b6-90e6-6970ca14cda3" (UID: "344e218a-8f67-40b6-90e6-6970ca14cda3"). InnerVolumeSpecName "kube-api-access-bw4nr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:01:55 crc kubenswrapper[5050]: I1123 15:01:55.704788 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/35ca6cdf-0126-49e9-a731-efa32a5e7edf-kube-api-access-whqql" (OuterVolumeSpecName: "kube-api-access-whqql") pod "35ca6cdf-0126-49e9-a731-efa32a5e7edf" (UID: "35ca6cdf-0126-49e9-a731-efa32a5e7edf"). InnerVolumeSpecName "kube-api-access-whqql". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:01:55 crc kubenswrapper[5050]: I1123 15:01:55.800739 5050 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0dc7ce44-5e6c-4971-b829-54700cb6334f-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 23 15:01:55 crc kubenswrapper[5050]: I1123 15:01:55.800790 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f4hcg\" (UniqueName: \"kubernetes.io/projected/0dc7ce44-5e6c-4971-b829-54700cb6334f-kube-api-access-f4hcg\") on node \"crc\" DevicePath \"\"" Nov 23 15:01:55 crc kubenswrapper[5050]: I1123 15:01:55.800808 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zfq9p\" (UniqueName: \"kubernetes.io/projected/c6c16397-0bd7-4c64-bbd4-0a8f5ce2ef6a-kube-api-access-zfq9p\") on node \"crc\" DevicePath \"\"" Nov 23 15:01:55 crc kubenswrapper[5050]: I1123 15:01:55.800822 5050 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c6c16397-0bd7-4c64-bbd4-0a8f5ce2ef6a-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 23 15:01:55 crc kubenswrapper[5050]: I1123 15:01:55.800837 5050 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/344e218a-8f67-40b6-90e6-6970ca14cda3-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 23 15:01:55 crc kubenswrapper[5050]: I1123 15:01:55.800846 5050 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/35ca6cdf-0126-49e9-a731-efa32a5e7edf-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 23 15:01:55 crc kubenswrapper[5050]: I1123 15:01:55.800858 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bw4nr\" (UniqueName: \"kubernetes.io/projected/344e218a-8f67-40b6-90e6-6970ca14cda3-kube-api-access-bw4nr\") on node \"crc\" DevicePath \"\"" Nov 23 15:01:55 crc kubenswrapper[5050]: I1123 15:01:55.800871 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-whqql\" (UniqueName: \"kubernetes.io/projected/35ca6cdf-0126-49e9-a731-efa32a5e7edf-kube-api-access-whqql\") on node \"crc\" DevicePath \"\"" Nov 23 15:01:55 crc kubenswrapper[5050]: I1123 15:01:55.820192 5050 generic.go:334] "Generic (PLEG): container finished" podID="bf22bd66-89a1-426e-b976-5048e892fd0a" containerID="f80c7749236f5263982dab4bf87f3ce304251eab9531a2aa6d4a46707413ac9d" exitCode=0 Nov 23 15:01:55 crc kubenswrapper[5050]: I1123 15:01:55.820232 5050 generic.go:334] "Generic (PLEG): container finished" podID="bf22bd66-89a1-426e-b976-5048e892fd0a" containerID="00bffdb05a2cea22a8013de3f7d37366b08b1085052fa214184fef486440f100" exitCode=2 Nov 23 15:01:55 crc kubenswrapper[5050]: I1123 15:01:55.820241 5050 generic.go:334] "Generic (PLEG): container finished" podID="bf22bd66-89a1-426e-b976-5048e892fd0a" containerID="427daadd3a86480e5477fc5370f76629ddff3edb6271bef638fd9337073aaf25" exitCode=0 Nov 23 15:01:55 crc kubenswrapper[5050]: I1123 15:01:55.820296 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bf22bd66-89a1-426e-b976-5048e892fd0a","Type":"ContainerDied","Data":"f80c7749236f5263982dab4bf87f3ce304251eab9531a2aa6d4a46707413ac9d"} Nov 23 15:01:55 crc kubenswrapper[5050]: I1123 15:01:55.820329 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"bf22bd66-89a1-426e-b976-5048e892fd0a","Type":"ContainerDied","Data":"00bffdb05a2cea22a8013de3f7d37366b08b1085052fa214184fef486440f100"} Nov 23 15:01:55 crc kubenswrapper[5050]: I1123 15:01:55.820344 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bf22bd66-89a1-426e-b976-5048e892fd0a","Type":"ContainerDied","Data":"427daadd3a86480e5477fc5370f76629ddff3edb6271bef638fd9337073aaf25"} Nov 23 15:01:55 crc kubenswrapper[5050]: I1123 15:01:55.822016 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-3052-account-create-ztg8b" Nov 23 15:01:55 crc kubenswrapper[5050]: I1123 15:01:55.822013 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-3052-account-create-ztg8b" event={"ID":"e7a21645-b633-4073-a971-c8896b6dbc06","Type":"ContainerDied","Data":"93974c9a4982c91e0914e3f59036a097a41433a76f7b376dd6130988f6cb3d3f"} Nov 23 15:01:55 crc kubenswrapper[5050]: I1123 15:01:55.822069 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="93974c9a4982c91e0914e3f59036a097a41433a76f7b376dd6130988f6cb3d3f" Nov 23 15:01:55 crc kubenswrapper[5050]: I1123 15:01:55.823803 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-f81d-account-create-5bgqw" event={"ID":"0dc7ce44-5e6c-4971-b829-54700cb6334f","Type":"ContainerDied","Data":"9f0131e5ef71ccafda692581a226cd88897a69b148e79712c5ccd03fc5f6145c"} Nov 23 15:01:55 crc kubenswrapper[5050]: I1123 15:01:55.823838 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9f0131e5ef71ccafda692581a226cd88897a69b148e79712c5ccd03fc5f6145c" Nov 23 15:01:55 crc kubenswrapper[5050]: I1123 15:01:55.823844 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-f81d-account-create-5bgqw" Nov 23 15:01:55 crc kubenswrapper[5050]: I1123 15:01:55.825632 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-mwptv" event={"ID":"c6c16397-0bd7-4c64-bbd4-0a8f5ce2ef6a","Type":"ContainerDied","Data":"6a374cc028001032f0d3016f7682900288a52b2d2c1cbe1667c32ec4ebaa2a40"} Nov 23 15:01:55 crc kubenswrapper[5050]: I1123 15:01:55.825749 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6a374cc028001032f0d3016f7682900288a52b2d2c1cbe1667c32ec4ebaa2a40" Nov 23 15:01:55 crc kubenswrapper[5050]: I1123 15:01:55.825916 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-mwptv" Nov 23 15:01:55 crc kubenswrapper[5050]: I1123 15:01:55.831157 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-hxh4h" Nov 23 15:01:55 crc kubenswrapper[5050]: I1123 15:01:55.831414 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-hxh4h" event={"ID":"e8601165-154f-4167-a470-e06481c1944e","Type":"ContainerDied","Data":"06f4e322879604ce15fa9919d05f7977caa6bb0045cb22936d9dc4a5c4af7e30"} Nov 23 15:01:55 crc kubenswrapper[5050]: I1123 15:01:55.831471 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="06f4e322879604ce15fa9919d05f7977caa6bb0045cb22936d9dc4a5c4af7e30" Nov 23 15:01:55 crc kubenswrapper[5050]: I1123 15:01:55.837057 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-db-create-rl7fq" Nov 23 15:01:55 crc kubenswrapper[5050]: I1123 15:01:55.837046 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-rl7fq" event={"ID":"35ca6cdf-0126-49e9-a731-efa32a5e7edf","Type":"ContainerDied","Data":"1cbc679703ac9ee301570bd1f6fd44d5dcab81094567d2548567af41d22c5cf6"} Nov 23 15:01:55 crc kubenswrapper[5050]: I1123 15:01:55.837403 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1cbc679703ac9ee301570bd1f6fd44d5dcab81094567d2548567af41d22c5cf6" Nov 23 15:01:55 crc kubenswrapper[5050]: I1123 15:01:55.839410 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-94ab-account-create-dt6k2" event={"ID":"344e218a-8f67-40b6-90e6-6970ca14cda3","Type":"ContainerDied","Data":"80f990119c44c97da89ab94bbf932898922eae45388641cded4e7533c9901af5"} Nov 23 15:01:55 crc kubenswrapper[5050]: I1123 15:01:55.839537 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="80f990119c44c97da89ab94bbf932898922eae45388641cded4e7533c9901af5" Nov 23 15:01:55 crc kubenswrapper[5050]: I1123 15:01:55.839597 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-94ab-account-create-dt6k2" Nov 23 15:01:56 crc kubenswrapper[5050]: I1123 15:01:56.182482 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 23 15:01:56 crc kubenswrapper[5050]: I1123 15:01:56.183501 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="83a846d6-b03f-4bc1-ac31-cacf0ee96658" containerName="glance-log" containerID="cri-o://4eaad0b4b2774c58b0ed715e526bba7629a4da67f0b876ef9fe855cfffd60bbf" gracePeriod=30 Nov 23 15:01:56 crc kubenswrapper[5050]: I1123 15:01:56.183663 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="83a846d6-b03f-4bc1-ac31-cacf0ee96658" containerName="glance-httpd" containerID="cri-o://361f0f712cdb56c1a734658f4960db8d2ba0252abc0913de0ab85035acfe8a7b" gracePeriod=30 Nov 23 15:01:56 crc kubenswrapper[5050]: E1123 15:01:56.317252 5050 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod83a846d6_b03f_4bc1_ac31_cacf0ee96658.slice/crio-4eaad0b4b2774c58b0ed715e526bba7629a4da67f0b876ef9fe855cfffd60bbf.scope\": RecentStats: unable to find data in memory cache]" Nov 23 15:01:56 crc kubenswrapper[5050]: I1123 15:01:56.853849 5050 generic.go:334] "Generic (PLEG): container finished" podID="83a846d6-b03f-4bc1-ac31-cacf0ee96658" containerID="4eaad0b4b2774c58b0ed715e526bba7629a4da67f0b876ef9fe855cfffd60bbf" exitCode=143 Nov 23 15:01:56 crc kubenswrapper[5050]: I1123 15:01:56.853920 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"83a846d6-b03f-4bc1-ac31-cacf0ee96658","Type":"ContainerDied","Data":"4eaad0b4b2774c58b0ed715e526bba7629a4da67f0b876ef9fe855cfffd60bbf"} Nov 23 15:01:57 crc kubenswrapper[5050]: I1123 15:01:57.074525 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-db-sync-dpm95"] Nov 23 15:01:57 crc kubenswrapper[5050]: E1123 15:01:57.075508 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0dc7ce44-5e6c-4971-b829-54700cb6334f" 
containerName="mariadb-account-create" Nov 23 15:01:57 crc kubenswrapper[5050]: I1123 15:01:57.075529 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="0dc7ce44-5e6c-4971-b829-54700cb6334f" containerName="mariadb-account-create" Nov 23 15:01:57 crc kubenswrapper[5050]: E1123 15:01:57.075544 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e8601165-154f-4167-a470-e06481c1944e" containerName="mariadb-database-create" Nov 23 15:01:57 crc kubenswrapper[5050]: I1123 15:01:57.075556 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="e8601165-154f-4167-a470-e06481c1944e" containerName="mariadb-database-create" Nov 23 15:01:57 crc kubenswrapper[5050]: E1123 15:01:57.075583 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="344e218a-8f67-40b6-90e6-6970ca14cda3" containerName="mariadb-account-create" Nov 23 15:01:57 crc kubenswrapper[5050]: I1123 15:01:57.075589 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="344e218a-8f67-40b6-90e6-6970ca14cda3" containerName="mariadb-account-create" Nov 23 15:01:57 crc kubenswrapper[5050]: E1123 15:01:57.075600 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e7a21645-b633-4073-a971-c8896b6dbc06" containerName="mariadb-account-create" Nov 23 15:01:57 crc kubenswrapper[5050]: I1123 15:01:57.075606 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="e7a21645-b633-4073-a971-c8896b6dbc06" containerName="mariadb-account-create" Nov 23 15:01:57 crc kubenswrapper[5050]: E1123 15:01:57.075628 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="35ca6cdf-0126-49e9-a731-efa32a5e7edf" containerName="mariadb-database-create" Nov 23 15:01:57 crc kubenswrapper[5050]: I1123 15:01:57.075635 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="35ca6cdf-0126-49e9-a731-efa32a5e7edf" containerName="mariadb-database-create" Nov 23 15:01:57 crc kubenswrapper[5050]: E1123 15:01:57.075654 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c6c16397-0bd7-4c64-bbd4-0a8f5ce2ef6a" containerName="mariadb-database-create" Nov 23 15:01:57 crc kubenswrapper[5050]: I1123 15:01:57.075663 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="c6c16397-0bd7-4c64-bbd4-0a8f5ce2ef6a" containerName="mariadb-database-create" Nov 23 15:01:57 crc kubenswrapper[5050]: I1123 15:01:57.075843 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="c6c16397-0bd7-4c64-bbd4-0a8f5ce2ef6a" containerName="mariadb-database-create" Nov 23 15:01:57 crc kubenswrapper[5050]: I1123 15:01:57.075868 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="35ca6cdf-0126-49e9-a731-efa32a5e7edf" containerName="mariadb-database-create" Nov 23 15:01:57 crc kubenswrapper[5050]: I1123 15:01:57.075881 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="e8601165-154f-4167-a470-e06481c1944e" containerName="mariadb-database-create" Nov 23 15:01:57 crc kubenswrapper[5050]: I1123 15:01:57.075893 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="344e218a-8f67-40b6-90e6-6970ca14cda3" containerName="mariadb-account-create" Nov 23 15:01:57 crc kubenswrapper[5050]: I1123 15:01:57.075904 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="0dc7ce44-5e6c-4971-b829-54700cb6334f" containerName="mariadb-account-create" Nov 23 15:01:57 crc kubenswrapper[5050]: I1123 15:01:57.075916 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="e7a21645-b633-4073-a971-c8896b6dbc06" 
containerName="mariadb-account-create" Nov 23 15:01:57 crc kubenswrapper[5050]: I1123 15:01:57.076702 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-dpm95" Nov 23 15:01:57 crc kubenswrapper[5050]: I1123 15:01:57.081356 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Nov 23 15:01:57 crc kubenswrapper[5050]: I1123 15:01:57.081588 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-qdpxk" Nov 23 15:01:57 crc kubenswrapper[5050]: I1123 15:01:57.081831 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-scripts" Nov 23 15:01:57 crc kubenswrapper[5050]: I1123 15:01:57.095316 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-dpm95"] Nov 23 15:01:57 crc kubenswrapper[5050]: I1123 15:01:57.125460 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8d6f5af2-c95a-47c1-b9c3-2d13eea56e9c-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-dpm95\" (UID: \"8d6f5af2-c95a-47c1-b9c3-2d13eea56e9c\") " pod="openstack/nova-cell0-conductor-db-sync-dpm95" Nov 23 15:01:57 crc kubenswrapper[5050]: I1123 15:01:57.125823 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rlc2s\" (UniqueName: \"kubernetes.io/projected/8d6f5af2-c95a-47c1-b9c3-2d13eea56e9c-kube-api-access-rlc2s\") pod \"nova-cell0-conductor-db-sync-dpm95\" (UID: \"8d6f5af2-c95a-47c1-b9c3-2d13eea56e9c\") " pod="openstack/nova-cell0-conductor-db-sync-dpm95" Nov 23 15:01:57 crc kubenswrapper[5050]: I1123 15:01:57.125945 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8d6f5af2-c95a-47c1-b9c3-2d13eea56e9c-config-data\") pod \"nova-cell0-conductor-db-sync-dpm95\" (UID: \"8d6f5af2-c95a-47c1-b9c3-2d13eea56e9c\") " pod="openstack/nova-cell0-conductor-db-sync-dpm95" Nov 23 15:01:57 crc kubenswrapper[5050]: I1123 15:01:57.126030 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8d6f5af2-c95a-47c1-b9c3-2d13eea56e9c-scripts\") pod \"nova-cell0-conductor-db-sync-dpm95\" (UID: \"8d6f5af2-c95a-47c1-b9c3-2d13eea56e9c\") " pod="openstack/nova-cell0-conductor-db-sync-dpm95" Nov 23 15:01:57 crc kubenswrapper[5050]: I1123 15:01:57.262910 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8d6f5af2-c95a-47c1-b9c3-2d13eea56e9c-scripts\") pod \"nova-cell0-conductor-db-sync-dpm95\" (UID: \"8d6f5af2-c95a-47c1-b9c3-2d13eea56e9c\") " pod="openstack/nova-cell0-conductor-db-sync-dpm95" Nov 23 15:01:57 crc kubenswrapper[5050]: I1123 15:01:57.263392 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8d6f5af2-c95a-47c1-b9c3-2d13eea56e9c-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-dpm95\" (UID: \"8d6f5af2-c95a-47c1-b9c3-2d13eea56e9c\") " pod="openstack/nova-cell0-conductor-db-sync-dpm95" Nov 23 15:01:57 crc kubenswrapper[5050]: I1123 15:01:57.263531 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rlc2s\" 
(UniqueName: \"kubernetes.io/projected/8d6f5af2-c95a-47c1-b9c3-2d13eea56e9c-kube-api-access-rlc2s\") pod \"nova-cell0-conductor-db-sync-dpm95\" (UID: \"8d6f5af2-c95a-47c1-b9c3-2d13eea56e9c\") " pod="openstack/nova-cell0-conductor-db-sync-dpm95" Nov 23 15:01:57 crc kubenswrapper[5050]: I1123 15:01:57.264662 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8d6f5af2-c95a-47c1-b9c3-2d13eea56e9c-config-data\") pod \"nova-cell0-conductor-db-sync-dpm95\" (UID: \"8d6f5af2-c95a-47c1-b9c3-2d13eea56e9c\") " pod="openstack/nova-cell0-conductor-db-sync-dpm95" Nov 23 15:01:57 crc kubenswrapper[5050]: I1123 15:01:57.276234 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8d6f5af2-c95a-47c1-b9c3-2d13eea56e9c-config-data\") pod \"nova-cell0-conductor-db-sync-dpm95\" (UID: \"8d6f5af2-c95a-47c1-b9c3-2d13eea56e9c\") " pod="openstack/nova-cell0-conductor-db-sync-dpm95" Nov 23 15:01:57 crc kubenswrapper[5050]: I1123 15:01:57.276508 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8d6f5af2-c95a-47c1-b9c3-2d13eea56e9c-scripts\") pod \"nova-cell0-conductor-db-sync-dpm95\" (UID: \"8d6f5af2-c95a-47c1-b9c3-2d13eea56e9c\") " pod="openstack/nova-cell0-conductor-db-sync-dpm95" Nov 23 15:01:57 crc kubenswrapper[5050]: I1123 15:01:57.276954 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8d6f5af2-c95a-47c1-b9c3-2d13eea56e9c-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-dpm95\" (UID: \"8d6f5af2-c95a-47c1-b9c3-2d13eea56e9c\") " pod="openstack/nova-cell0-conductor-db-sync-dpm95" Nov 23 15:01:57 crc kubenswrapper[5050]: I1123 15:01:57.283204 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rlc2s\" (UniqueName: \"kubernetes.io/projected/8d6f5af2-c95a-47c1-b9c3-2d13eea56e9c-kube-api-access-rlc2s\") pod \"nova-cell0-conductor-db-sync-dpm95\" (UID: \"8d6f5af2-c95a-47c1-b9c3-2d13eea56e9c\") " pod="openstack/nova-cell0-conductor-db-sync-dpm95" Nov 23 15:01:57 crc kubenswrapper[5050]: I1123 15:01:57.445592 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-dpm95" Nov 23 15:01:57 crc kubenswrapper[5050]: I1123 15:01:57.925686 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-dpm95"] Nov 23 15:01:57 crc kubenswrapper[5050]: W1123 15:01:57.937489 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8d6f5af2_c95a_47c1_b9c3_2d13eea56e9c.slice/crio-313decd7392548ebdbc72d5f6af7648bcbdce0351cee6f4d5dcc48bf78402d4d WatchSource:0}: Error finding container 313decd7392548ebdbc72d5f6af7648bcbdce0351cee6f4d5dcc48bf78402d4d: Status 404 returned error can't find the container with id 313decd7392548ebdbc72d5f6af7648bcbdce0351cee6f4d5dcc48bf78402d4d Nov 23 15:01:58 crc kubenswrapper[5050]: I1123 15:01:58.388952 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 23 15:01:58 crc kubenswrapper[5050]: I1123 15:01:58.389681 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="8d700d7b-7ad1-41bc-a0fe-a60f106ae33b" containerName="glance-log" containerID="cri-o://a8481807947b6125b632d234b7c09c019ca4e117d153545ff0b1bfd657c9904e" gracePeriod=30 Nov 23 15:01:58 crc kubenswrapper[5050]: I1123 15:01:58.389833 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="8d700d7b-7ad1-41bc-a0fe-a60f106ae33b" containerName="glance-httpd" containerID="cri-o://7c37b6da7c1af7beb9189416f11bacec18531c7a243fc6250c6f74bebc17d8c8" gracePeriod=30 Nov 23 15:01:58 crc kubenswrapper[5050]: I1123 15:01:58.886479 5050 generic.go:334] "Generic (PLEG): container finished" podID="8d700d7b-7ad1-41bc-a0fe-a60f106ae33b" containerID="a8481807947b6125b632d234b7c09c019ca4e117d153545ff0b1bfd657c9904e" exitCode=143 Nov 23 15:01:58 crc kubenswrapper[5050]: I1123 15:01:58.886591 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"8d700d7b-7ad1-41bc-a0fe-a60f106ae33b","Type":"ContainerDied","Data":"a8481807947b6125b632d234b7c09c019ca4e117d153545ff0b1bfd657c9904e"} Nov 23 15:01:58 crc kubenswrapper[5050]: I1123 15:01:58.890646 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-dpm95" event={"ID":"8d6f5af2-c95a-47c1-b9c3-2d13eea56e9c","Type":"ContainerStarted","Data":"313decd7392548ebdbc72d5f6af7648bcbdce0351cee6f4d5dcc48bf78402d4d"} Nov 23 15:01:59 crc kubenswrapper[5050]: I1123 15:01:59.913738 5050 generic.go:334] "Generic (PLEG): container finished" podID="83a846d6-b03f-4bc1-ac31-cacf0ee96658" containerID="361f0f712cdb56c1a734658f4960db8d2ba0252abc0913de0ab85035acfe8a7b" exitCode=0 Nov 23 15:01:59 crc kubenswrapper[5050]: I1123 15:01:59.913976 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"83a846d6-b03f-4bc1-ac31-cacf0ee96658","Type":"ContainerDied","Data":"361f0f712cdb56c1a734658f4960db8d2ba0252abc0913de0ab85035acfe8a7b"} Nov 23 15:01:59 crc kubenswrapper[5050]: I1123 15:01:59.914251 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"83a846d6-b03f-4bc1-ac31-cacf0ee96658","Type":"ContainerDied","Data":"df108e38b00fd595113f100be472d8632ad509e0b3b11d89b353d918c4ca4e8c"} Nov 23 15:01:59 crc kubenswrapper[5050]: I1123 15:01:59.914273 5050 pod_container_deletor.go:80] "Container 
not found in pod's containers" containerID="df108e38b00fd595113f100be472d8632ad509e0b3b11d89b353d918c4ca4e8c" Nov 23 15:01:59 crc kubenswrapper[5050]: I1123 15:01:59.976543 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 23 15:02:00 crc kubenswrapper[5050]: I1123 15:02:00.126526 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/83a846d6-b03f-4bc1-ac31-cacf0ee96658-combined-ca-bundle\") pod \"83a846d6-b03f-4bc1-ac31-cacf0ee96658\" (UID: \"83a846d6-b03f-4bc1-ac31-cacf0ee96658\") " Nov 23 15:02:00 crc kubenswrapper[5050]: I1123 15:02:00.126604 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/83a846d6-b03f-4bc1-ac31-cacf0ee96658-scripts\") pod \"83a846d6-b03f-4bc1-ac31-cacf0ee96658\" (UID: \"83a846d6-b03f-4bc1-ac31-cacf0ee96658\") " Nov 23 15:02:00 crc kubenswrapper[5050]: I1123 15:02:00.126649 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/83a846d6-b03f-4bc1-ac31-cacf0ee96658-httpd-run\") pod \"83a846d6-b03f-4bc1-ac31-cacf0ee96658\" (UID: \"83a846d6-b03f-4bc1-ac31-cacf0ee96658\") " Nov 23 15:02:00 crc kubenswrapper[5050]: I1123 15:02:00.126699 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/83a846d6-b03f-4bc1-ac31-cacf0ee96658-config-data\") pod \"83a846d6-b03f-4bc1-ac31-cacf0ee96658\" (UID: \"83a846d6-b03f-4bc1-ac31-cacf0ee96658\") " Nov 23 15:02:00 crc kubenswrapper[5050]: I1123 15:02:00.126742 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zjdj6\" (UniqueName: \"kubernetes.io/projected/83a846d6-b03f-4bc1-ac31-cacf0ee96658-kube-api-access-zjdj6\") pod \"83a846d6-b03f-4bc1-ac31-cacf0ee96658\" (UID: \"83a846d6-b03f-4bc1-ac31-cacf0ee96658\") " Nov 23 15:02:00 crc kubenswrapper[5050]: I1123 15:02:00.126836 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/83a846d6-b03f-4bc1-ac31-cacf0ee96658-public-tls-certs\") pod \"83a846d6-b03f-4bc1-ac31-cacf0ee96658\" (UID: \"83a846d6-b03f-4bc1-ac31-cacf0ee96658\") " Nov 23 15:02:00 crc kubenswrapper[5050]: I1123 15:02:00.126859 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"83a846d6-b03f-4bc1-ac31-cacf0ee96658\" (UID: \"83a846d6-b03f-4bc1-ac31-cacf0ee96658\") " Nov 23 15:02:00 crc kubenswrapper[5050]: I1123 15:02:00.126905 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/83a846d6-b03f-4bc1-ac31-cacf0ee96658-logs\") pod \"83a846d6-b03f-4bc1-ac31-cacf0ee96658\" (UID: \"83a846d6-b03f-4bc1-ac31-cacf0ee96658\") " Nov 23 15:02:00 crc kubenswrapper[5050]: I1123 15:02:00.128386 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/83a846d6-b03f-4bc1-ac31-cacf0ee96658-logs" (OuterVolumeSpecName: "logs") pod "83a846d6-b03f-4bc1-ac31-cacf0ee96658" (UID: "83a846d6-b03f-4bc1-ac31-cacf0ee96658"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 15:02:00 crc kubenswrapper[5050]: I1123 15:02:00.129997 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/83a846d6-b03f-4bc1-ac31-cacf0ee96658-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "83a846d6-b03f-4bc1-ac31-cacf0ee96658" (UID: "83a846d6-b03f-4bc1-ac31-cacf0ee96658"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 15:02:00 crc kubenswrapper[5050]: I1123 15:02:00.148746 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage09-crc" (OuterVolumeSpecName: "glance") pod "83a846d6-b03f-4bc1-ac31-cacf0ee96658" (UID: "83a846d6-b03f-4bc1-ac31-cacf0ee96658"). InnerVolumeSpecName "local-storage09-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 23 15:02:00 crc kubenswrapper[5050]: I1123 15:02:00.152006 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/83a846d6-b03f-4bc1-ac31-cacf0ee96658-scripts" (OuterVolumeSpecName: "scripts") pod "83a846d6-b03f-4bc1-ac31-cacf0ee96658" (UID: "83a846d6-b03f-4bc1-ac31-cacf0ee96658"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:02:00 crc kubenswrapper[5050]: I1123 15:02:00.158359 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/83a846d6-b03f-4bc1-ac31-cacf0ee96658-kube-api-access-zjdj6" (OuterVolumeSpecName: "kube-api-access-zjdj6") pod "83a846d6-b03f-4bc1-ac31-cacf0ee96658" (UID: "83a846d6-b03f-4bc1-ac31-cacf0ee96658"). InnerVolumeSpecName "kube-api-access-zjdj6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:02:00 crc kubenswrapper[5050]: I1123 15:02:00.191343 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/83a846d6-b03f-4bc1-ac31-cacf0ee96658-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "83a846d6-b03f-4bc1-ac31-cacf0ee96658" (UID: "83a846d6-b03f-4bc1-ac31-cacf0ee96658"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:02:00 crc kubenswrapper[5050]: I1123 15:02:00.222595 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/83a846d6-b03f-4bc1-ac31-cacf0ee96658-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "83a846d6-b03f-4bc1-ac31-cacf0ee96658" (UID: "83a846d6-b03f-4bc1-ac31-cacf0ee96658"). InnerVolumeSpecName "public-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:02:00 crc kubenswrapper[5050]: I1123 15:02:00.229152 5050 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/83a846d6-b03f-4bc1-ac31-cacf0ee96658-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 15:02:00 crc kubenswrapper[5050]: I1123 15:02:00.229189 5050 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/83a846d6-b03f-4bc1-ac31-cacf0ee96658-scripts\") on node \"crc\" DevicePath \"\"" Nov 23 15:02:00 crc kubenswrapper[5050]: I1123 15:02:00.229205 5050 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/83a846d6-b03f-4bc1-ac31-cacf0ee96658-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 23 15:02:00 crc kubenswrapper[5050]: I1123 15:02:00.229222 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zjdj6\" (UniqueName: \"kubernetes.io/projected/83a846d6-b03f-4bc1-ac31-cacf0ee96658-kube-api-access-zjdj6\") on node \"crc\" DevicePath \"\"" Nov 23 15:02:00 crc kubenswrapper[5050]: I1123 15:02:00.229263 5050 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") on node \"crc\" " Nov 23 15:02:00 crc kubenswrapper[5050]: I1123 15:02:00.229273 5050 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/83a846d6-b03f-4bc1-ac31-cacf0ee96658-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 23 15:02:00 crc kubenswrapper[5050]: I1123 15:02:00.229283 5050 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/83a846d6-b03f-4bc1-ac31-cacf0ee96658-logs\") on node \"crc\" DevicePath \"\"" Nov 23 15:02:00 crc kubenswrapper[5050]: I1123 15:02:00.249744 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/83a846d6-b03f-4bc1-ac31-cacf0ee96658-config-data" (OuterVolumeSpecName: "config-data") pod "83a846d6-b03f-4bc1-ac31-cacf0ee96658" (UID: "83a846d6-b03f-4bc1-ac31-cacf0ee96658"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:02:00 crc kubenswrapper[5050]: I1123 15:02:00.251960 5050 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage09-crc" (UniqueName: "kubernetes.io/local-volume/local-storage09-crc") on node "crc" Nov 23 15:02:00 crc kubenswrapper[5050]: I1123 15:02:00.330945 5050 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/83a846d6-b03f-4bc1-ac31-cacf0ee96658-config-data\") on node \"crc\" DevicePath \"\"" Nov 23 15:02:00 crc kubenswrapper[5050]: I1123 15:02:00.330974 5050 reconciler_common.go:293] "Volume detached for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") on node \"crc\" DevicePath \"\"" Nov 23 15:02:00 crc kubenswrapper[5050]: I1123 15:02:00.923190 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 23 15:02:01 crc kubenswrapper[5050]: I1123 15:02:01.001880 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 23 15:02:01 crc kubenswrapper[5050]: I1123 15:02:01.013180 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 23 15:02:01 crc kubenswrapper[5050]: I1123 15:02:01.024652 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Nov 23 15:02:01 crc kubenswrapper[5050]: E1123 15:02:01.025333 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="83a846d6-b03f-4bc1-ac31-cacf0ee96658" containerName="glance-httpd" Nov 23 15:02:01 crc kubenswrapper[5050]: I1123 15:02:01.025363 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="83a846d6-b03f-4bc1-ac31-cacf0ee96658" containerName="glance-httpd" Nov 23 15:02:01 crc kubenswrapper[5050]: E1123 15:02:01.025391 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="83a846d6-b03f-4bc1-ac31-cacf0ee96658" containerName="glance-log" Nov 23 15:02:01 crc kubenswrapper[5050]: I1123 15:02:01.025401 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="83a846d6-b03f-4bc1-ac31-cacf0ee96658" containerName="glance-log" Nov 23 15:02:01 crc kubenswrapper[5050]: I1123 15:02:01.025702 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="83a846d6-b03f-4bc1-ac31-cacf0ee96658" containerName="glance-httpd" Nov 23 15:02:01 crc kubenswrapper[5050]: I1123 15:02:01.025721 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="83a846d6-b03f-4bc1-ac31-cacf0ee96658" containerName="glance-log" Nov 23 15:02:01 crc kubenswrapper[5050]: I1123 15:02:01.027143 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 23 15:02:01 crc kubenswrapper[5050]: I1123 15:02:01.034192 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Nov 23 15:02:01 crc kubenswrapper[5050]: I1123 15:02:01.035114 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Nov 23 15:02:01 crc kubenswrapper[5050]: I1123 15:02:01.040387 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 23 15:02:01 crc kubenswrapper[5050]: I1123 15:02:01.152748 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4d3c8093-cbaa-4414-9f1d-c7ae7a2b9322-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"4d3c8093-cbaa-4414-9f1d-c7ae7a2b9322\") " pod="openstack/glance-default-external-api-0" Nov 23 15:02:01 crc kubenswrapper[5050]: I1123 15:02:01.153212 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4d3c8093-cbaa-4414-9f1d-c7ae7a2b9322-scripts\") pod \"glance-default-external-api-0\" (UID: \"4d3c8093-cbaa-4414-9f1d-c7ae7a2b9322\") " pod="openstack/glance-default-external-api-0" Nov 23 15:02:01 crc kubenswrapper[5050]: I1123 15:02:01.153306 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4d3c8093-cbaa-4414-9f1d-c7ae7a2b9322-config-data\") pod \"glance-default-external-api-0\" (UID: \"4d3c8093-cbaa-4414-9f1d-c7ae7a2b9322\") " pod="openstack/glance-default-external-api-0" Nov 23 15:02:01 crc kubenswrapper[5050]: I1123 15:02:01.153404 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4d3c8093-cbaa-4414-9f1d-c7ae7a2b9322-logs\") pod \"glance-default-external-api-0\" (UID: \"4d3c8093-cbaa-4414-9f1d-c7ae7a2b9322\") " pod="openstack/glance-default-external-api-0" Nov 23 15:02:01 crc kubenswrapper[5050]: I1123 15:02:01.153601 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/4d3c8093-cbaa-4414-9f1d-c7ae7a2b9322-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"4d3c8093-cbaa-4414-9f1d-c7ae7a2b9322\") " pod="openstack/glance-default-external-api-0" Nov 23 15:02:01 crc kubenswrapper[5050]: I1123 15:02:01.153725 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4d3c8093-cbaa-4414-9f1d-c7ae7a2b9322-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"4d3c8093-cbaa-4414-9f1d-c7ae7a2b9322\") " pod="openstack/glance-default-external-api-0" Nov 23 15:02:01 crc kubenswrapper[5050]: I1123 15:02:01.154052 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"4d3c8093-cbaa-4414-9f1d-c7ae7a2b9322\") " pod="openstack/glance-default-external-api-0" Nov 23 15:02:01 crc kubenswrapper[5050]: I1123 15:02:01.154224 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"kube-api-access-z59db\" (UniqueName: \"kubernetes.io/projected/4d3c8093-cbaa-4414-9f1d-c7ae7a2b9322-kube-api-access-z59db\") pod \"glance-default-external-api-0\" (UID: \"4d3c8093-cbaa-4414-9f1d-c7ae7a2b9322\") " pod="openstack/glance-default-external-api-0" Nov 23 15:02:01 crc kubenswrapper[5050]: I1123 15:02:01.257424 5050 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"4d3c8093-cbaa-4414-9f1d-c7ae7a2b9322\") device mount path \"/mnt/openstack/pv09\"" pod="openstack/glance-default-external-api-0" Nov 23 15:02:01 crc kubenswrapper[5050]: I1123 15:02:01.256818 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"4d3c8093-cbaa-4414-9f1d-c7ae7a2b9322\") " pod="openstack/glance-default-external-api-0" Nov 23 15:02:01 crc kubenswrapper[5050]: I1123 15:02:01.264607 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z59db\" (UniqueName: \"kubernetes.io/projected/4d3c8093-cbaa-4414-9f1d-c7ae7a2b9322-kube-api-access-z59db\") pod \"glance-default-external-api-0\" (UID: \"4d3c8093-cbaa-4414-9f1d-c7ae7a2b9322\") " pod="openstack/glance-default-external-api-0" Nov 23 15:02:01 crc kubenswrapper[5050]: I1123 15:02:01.264996 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4d3c8093-cbaa-4414-9f1d-c7ae7a2b9322-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"4d3c8093-cbaa-4414-9f1d-c7ae7a2b9322\") " pod="openstack/glance-default-external-api-0" Nov 23 15:02:01 crc kubenswrapper[5050]: I1123 15:02:01.265113 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4d3c8093-cbaa-4414-9f1d-c7ae7a2b9322-scripts\") pod \"glance-default-external-api-0\" (UID: \"4d3c8093-cbaa-4414-9f1d-c7ae7a2b9322\") " pod="openstack/glance-default-external-api-0" Nov 23 15:02:01 crc kubenswrapper[5050]: I1123 15:02:01.265135 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4d3c8093-cbaa-4414-9f1d-c7ae7a2b9322-config-data\") pod \"glance-default-external-api-0\" (UID: \"4d3c8093-cbaa-4414-9f1d-c7ae7a2b9322\") " pod="openstack/glance-default-external-api-0" Nov 23 15:02:01 crc kubenswrapper[5050]: I1123 15:02:01.265190 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4d3c8093-cbaa-4414-9f1d-c7ae7a2b9322-logs\") pod \"glance-default-external-api-0\" (UID: \"4d3c8093-cbaa-4414-9f1d-c7ae7a2b9322\") " pod="openstack/glance-default-external-api-0" Nov 23 15:02:01 crc kubenswrapper[5050]: I1123 15:02:01.265227 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/4d3c8093-cbaa-4414-9f1d-c7ae7a2b9322-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"4d3c8093-cbaa-4414-9f1d-c7ae7a2b9322\") " pod="openstack/glance-default-external-api-0" Nov 23 15:02:01 crc kubenswrapper[5050]: I1123 15:02:01.265247 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/4d3c8093-cbaa-4414-9f1d-c7ae7a2b9322-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"4d3c8093-cbaa-4414-9f1d-c7ae7a2b9322\") " pod="openstack/glance-default-external-api-0" Nov 23 15:02:01 crc kubenswrapper[5050]: I1123 15:02:01.266575 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4d3c8093-cbaa-4414-9f1d-c7ae7a2b9322-logs\") pod \"glance-default-external-api-0\" (UID: \"4d3c8093-cbaa-4414-9f1d-c7ae7a2b9322\") " pod="openstack/glance-default-external-api-0" Nov 23 15:02:01 crc kubenswrapper[5050]: I1123 15:02:01.268669 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/4d3c8093-cbaa-4414-9f1d-c7ae7a2b9322-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"4d3c8093-cbaa-4414-9f1d-c7ae7a2b9322\") " pod="openstack/glance-default-external-api-0" Nov 23 15:02:01 crc kubenswrapper[5050]: I1123 15:02:01.271210 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4d3c8093-cbaa-4414-9f1d-c7ae7a2b9322-scripts\") pod \"glance-default-external-api-0\" (UID: \"4d3c8093-cbaa-4414-9f1d-c7ae7a2b9322\") " pod="openstack/glance-default-external-api-0" Nov 23 15:02:01 crc kubenswrapper[5050]: I1123 15:02:01.271563 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4d3c8093-cbaa-4414-9f1d-c7ae7a2b9322-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"4d3c8093-cbaa-4414-9f1d-c7ae7a2b9322\") " pod="openstack/glance-default-external-api-0" Nov 23 15:02:01 crc kubenswrapper[5050]: I1123 15:02:01.272360 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4d3c8093-cbaa-4414-9f1d-c7ae7a2b9322-config-data\") pod \"glance-default-external-api-0\" (UID: \"4d3c8093-cbaa-4414-9f1d-c7ae7a2b9322\") " pod="openstack/glance-default-external-api-0" Nov 23 15:02:01 crc kubenswrapper[5050]: I1123 15:02:01.279949 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4d3c8093-cbaa-4414-9f1d-c7ae7a2b9322-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"4d3c8093-cbaa-4414-9f1d-c7ae7a2b9322\") " pod="openstack/glance-default-external-api-0" Nov 23 15:02:01 crc kubenswrapper[5050]: I1123 15:02:01.287883 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z59db\" (UniqueName: \"kubernetes.io/projected/4d3c8093-cbaa-4414-9f1d-c7ae7a2b9322-kube-api-access-z59db\") pod \"glance-default-external-api-0\" (UID: \"4d3c8093-cbaa-4414-9f1d-c7ae7a2b9322\") " pod="openstack/glance-default-external-api-0" Nov 23 15:02:01 crc kubenswrapper[5050]: I1123 15:02:01.328993 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"4d3c8093-cbaa-4414-9f1d-c7ae7a2b9322\") " pod="openstack/glance-default-external-api-0" Nov 23 15:02:01 crc kubenswrapper[5050]: I1123 15:02:01.372990 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 23 15:02:01 crc kubenswrapper[5050]: I1123 15:02:01.569238 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="83a846d6-b03f-4bc1-ac31-cacf0ee96658" path="/var/lib/kubelet/pods/83a846d6-b03f-4bc1-ac31-cacf0ee96658/volumes" Nov 23 15:02:01 crc kubenswrapper[5050]: I1123 15:02:01.972583 5050 generic.go:334] "Generic (PLEG): container finished" podID="8d700d7b-7ad1-41bc-a0fe-a60f106ae33b" containerID="7c37b6da7c1af7beb9189416f11bacec18531c7a243fc6250c6f74bebc17d8c8" exitCode=0 Nov 23 15:02:01 crc kubenswrapper[5050]: I1123 15:02:01.972642 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"8d700d7b-7ad1-41bc-a0fe-a60f106ae33b","Type":"ContainerDied","Data":"7c37b6da7c1af7beb9189416f11bacec18531c7a243fc6250c6f74bebc17d8c8"} Nov 23 15:02:02 crc kubenswrapper[5050]: I1123 15:02:02.092221 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 23 15:02:02 crc kubenswrapper[5050]: I1123 15:02:02.385985 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 23 15:02:02 crc kubenswrapper[5050]: I1123 15:02:02.493543 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8d700d7b-7ad1-41bc-a0fe-a60f106ae33b-combined-ca-bundle\") pod \"8d700d7b-7ad1-41bc-a0fe-a60f106ae33b\" (UID: \"8d700d7b-7ad1-41bc-a0fe-a60f106ae33b\") " Nov 23 15:02:02 crc kubenswrapper[5050]: I1123 15:02:02.493704 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8d700d7b-7ad1-41bc-a0fe-a60f106ae33b-scripts\") pod \"8d700d7b-7ad1-41bc-a0fe-a60f106ae33b\" (UID: \"8d700d7b-7ad1-41bc-a0fe-a60f106ae33b\") " Nov 23 15:02:02 crc kubenswrapper[5050]: I1123 15:02:02.493807 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8d700d7b-7ad1-41bc-a0fe-a60f106ae33b-config-data\") pod \"8d700d7b-7ad1-41bc-a0fe-a60f106ae33b\" (UID: \"8d700d7b-7ad1-41bc-a0fe-a60f106ae33b\") " Nov 23 15:02:02 crc kubenswrapper[5050]: I1123 15:02:02.493976 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h8hp4\" (UniqueName: \"kubernetes.io/projected/8d700d7b-7ad1-41bc-a0fe-a60f106ae33b-kube-api-access-h8hp4\") pod \"8d700d7b-7ad1-41bc-a0fe-a60f106ae33b\" (UID: \"8d700d7b-7ad1-41bc-a0fe-a60f106ae33b\") " Nov 23 15:02:02 crc kubenswrapper[5050]: I1123 15:02:02.494031 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/8d700d7b-7ad1-41bc-a0fe-a60f106ae33b-httpd-run\") pod \"8d700d7b-7ad1-41bc-a0fe-a60f106ae33b\" (UID: \"8d700d7b-7ad1-41bc-a0fe-a60f106ae33b\") " Nov 23 15:02:02 crc kubenswrapper[5050]: I1123 15:02:02.494096 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"8d700d7b-7ad1-41bc-a0fe-a60f106ae33b\" (UID: \"8d700d7b-7ad1-41bc-a0fe-a60f106ae33b\") " Nov 23 15:02:02 crc kubenswrapper[5050]: I1123 15:02:02.494139 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8d700d7b-7ad1-41bc-a0fe-a60f106ae33b-logs\") 
pod \"8d700d7b-7ad1-41bc-a0fe-a60f106ae33b\" (UID: \"8d700d7b-7ad1-41bc-a0fe-a60f106ae33b\") " Nov 23 15:02:02 crc kubenswrapper[5050]: I1123 15:02:02.494233 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8d700d7b-7ad1-41bc-a0fe-a60f106ae33b-internal-tls-certs\") pod \"8d700d7b-7ad1-41bc-a0fe-a60f106ae33b\" (UID: \"8d700d7b-7ad1-41bc-a0fe-a60f106ae33b\") " Nov 23 15:02:02 crc kubenswrapper[5050]: I1123 15:02:02.496290 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8d700d7b-7ad1-41bc-a0fe-a60f106ae33b-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "8d700d7b-7ad1-41bc-a0fe-a60f106ae33b" (UID: "8d700d7b-7ad1-41bc-a0fe-a60f106ae33b"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 15:02:02 crc kubenswrapper[5050]: I1123 15:02:02.496316 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8d700d7b-7ad1-41bc-a0fe-a60f106ae33b-logs" (OuterVolumeSpecName: "logs") pod "8d700d7b-7ad1-41bc-a0fe-a60f106ae33b" (UID: "8d700d7b-7ad1-41bc-a0fe-a60f106ae33b"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 15:02:02 crc kubenswrapper[5050]: I1123 15:02:02.506508 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8d700d7b-7ad1-41bc-a0fe-a60f106ae33b-kube-api-access-h8hp4" (OuterVolumeSpecName: "kube-api-access-h8hp4") pod "8d700d7b-7ad1-41bc-a0fe-a60f106ae33b" (UID: "8d700d7b-7ad1-41bc-a0fe-a60f106ae33b"). InnerVolumeSpecName "kube-api-access-h8hp4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:02:02 crc kubenswrapper[5050]: I1123 15:02:02.506585 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8d700d7b-7ad1-41bc-a0fe-a60f106ae33b-scripts" (OuterVolumeSpecName: "scripts") pod "8d700d7b-7ad1-41bc-a0fe-a60f106ae33b" (UID: "8d700d7b-7ad1-41bc-a0fe-a60f106ae33b"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:02:02 crc kubenswrapper[5050]: I1123 15:02:02.506704 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage11-crc" (OuterVolumeSpecName: "glance") pod "8d700d7b-7ad1-41bc-a0fe-a60f106ae33b" (UID: "8d700d7b-7ad1-41bc-a0fe-a60f106ae33b"). InnerVolumeSpecName "local-storage11-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 23 15:02:02 crc kubenswrapper[5050]: I1123 15:02:02.562504 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8d700d7b-7ad1-41bc-a0fe-a60f106ae33b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8d700d7b-7ad1-41bc-a0fe-a60f106ae33b" (UID: "8d700d7b-7ad1-41bc-a0fe-a60f106ae33b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:02:02 crc kubenswrapper[5050]: I1123 15:02:02.582911 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8d700d7b-7ad1-41bc-a0fe-a60f106ae33b-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "8d700d7b-7ad1-41bc-a0fe-a60f106ae33b" (UID: "8d700d7b-7ad1-41bc-a0fe-a60f106ae33b"). InnerVolumeSpecName "internal-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:02:02 crc kubenswrapper[5050]: I1123 15:02:02.597689 5050 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8d700d7b-7ad1-41bc-a0fe-a60f106ae33b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 15:02:02 crc kubenswrapper[5050]: I1123 15:02:02.597739 5050 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8d700d7b-7ad1-41bc-a0fe-a60f106ae33b-scripts\") on node \"crc\" DevicePath \"\"" Nov 23 15:02:02 crc kubenswrapper[5050]: I1123 15:02:02.597749 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h8hp4\" (UniqueName: \"kubernetes.io/projected/8d700d7b-7ad1-41bc-a0fe-a60f106ae33b-kube-api-access-h8hp4\") on node \"crc\" DevicePath \"\"" Nov 23 15:02:02 crc kubenswrapper[5050]: I1123 15:02:02.597761 5050 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/8d700d7b-7ad1-41bc-a0fe-a60f106ae33b-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 23 15:02:02 crc kubenswrapper[5050]: I1123 15:02:02.597792 5050 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") on node \"crc\" " Nov 23 15:02:02 crc kubenswrapper[5050]: I1123 15:02:02.597803 5050 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8d700d7b-7ad1-41bc-a0fe-a60f106ae33b-logs\") on node \"crc\" DevicePath \"\"" Nov 23 15:02:02 crc kubenswrapper[5050]: I1123 15:02:02.597812 5050 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8d700d7b-7ad1-41bc-a0fe-a60f106ae33b-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 23 15:02:02 crc kubenswrapper[5050]: I1123 15:02:02.623435 5050 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage11-crc" (UniqueName: "kubernetes.io/local-volume/local-storage11-crc") on node "crc" Nov 23 15:02:02 crc kubenswrapper[5050]: I1123 15:02:02.643852 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8d700d7b-7ad1-41bc-a0fe-a60f106ae33b-config-data" (OuterVolumeSpecName: "config-data") pod "8d700d7b-7ad1-41bc-a0fe-a60f106ae33b" (UID: "8d700d7b-7ad1-41bc-a0fe-a60f106ae33b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:02:02 crc kubenswrapper[5050]: I1123 15:02:02.700984 5050 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8d700d7b-7ad1-41bc-a0fe-a60f106ae33b-config-data\") on node \"crc\" DevicePath \"\"" Nov 23 15:02:02 crc kubenswrapper[5050]: I1123 15:02:02.701019 5050 reconciler_common.go:293] "Volume detached for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") on node \"crc\" DevicePath \"\"" Nov 23 15:02:02 crc kubenswrapper[5050]: I1123 15:02:02.984825 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"8d700d7b-7ad1-41bc-a0fe-a60f106ae33b","Type":"ContainerDied","Data":"b8d222eba1709a414d9b5d966984f29435629b884da50f63cde094aea412d09f"} Nov 23 15:02:02 crc kubenswrapper[5050]: I1123 15:02:02.985088 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0"
Nov 23 15:02:02 crc kubenswrapper[5050]: I1123 15:02:02.985321 5050 scope.go:117] "RemoveContainer" containerID="7c37b6da7c1af7beb9189416f11bacec18531c7a243fc6250c6f74bebc17d8c8"
Nov 23 15:02:02 crc kubenswrapper[5050]: I1123 15:02:02.999591 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"4d3c8093-cbaa-4414-9f1d-c7ae7a2b9322","Type":"ContainerStarted","Data":"9f6dbbcaa59ff563c11d0cdfecbaa890da1c52e228d3d8a74b64182f665c6508"}
Nov 23 15:02:02 crc kubenswrapper[5050]: I1123 15:02:02.999650 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"4d3c8093-cbaa-4414-9f1d-c7ae7a2b9322","Type":"ContainerStarted","Data":"695d9f05d4e86563b322744876bb859b745d0d12fe1faf2a90694532e039fc7f"}
Nov 23 15:02:03 crc kubenswrapper[5050]: I1123 15:02:03.005783 5050 generic.go:334] "Generic (PLEG): container finished" podID="bf22bd66-89a1-426e-b976-5048e892fd0a" containerID="6d8ca1a4fe2226502af880dfc1ca8adeeae1a4d077e080e11fbc07014425f0a5" exitCode=0
Nov 23 15:02:03 crc kubenswrapper[5050]: I1123 15:02:03.005841 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bf22bd66-89a1-426e-b976-5048e892fd0a","Type":"ContainerDied","Data":"6d8ca1a4fe2226502af880dfc1ca8adeeae1a4d077e080e11fbc07014425f0a5"}
Nov 23 15:02:03 crc kubenswrapper[5050]: I1123 15:02:03.026003 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"]
Nov 23 15:02:03 crc kubenswrapper[5050]: I1123 15:02:03.037467 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"]
Nov 23 15:02:03 crc kubenswrapper[5050]: I1123 15:02:03.053058 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"]
Nov 23 15:02:03 crc kubenswrapper[5050]: E1123 15:02:03.053790 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8d700d7b-7ad1-41bc-a0fe-a60f106ae33b" containerName="glance-httpd"
Nov 23 15:02:03 crc kubenswrapper[5050]: I1123 15:02:03.053810 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="8d700d7b-7ad1-41bc-a0fe-a60f106ae33b" containerName="glance-httpd"
Nov 23 15:02:03 crc kubenswrapper[5050]: E1123 15:02:03.053834 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8d700d7b-7ad1-41bc-a0fe-a60f106ae33b" containerName="glance-log"
Nov 23 15:02:03 crc kubenswrapper[5050]: I1123 15:02:03.053860 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="8d700d7b-7ad1-41bc-a0fe-a60f106ae33b" containerName="glance-log"
Nov 23 15:02:03 crc kubenswrapper[5050]: I1123 15:02:03.054127 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="8d700d7b-7ad1-41bc-a0fe-a60f106ae33b" containerName="glance-log"
Nov 23 15:02:03 crc kubenswrapper[5050]: I1123 15:02:03.054899 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="8d700d7b-7ad1-41bc-a0fe-a60f106ae33b" containerName="glance-httpd"
Nov 23 15:02:03 crc kubenswrapper[5050]: I1123 15:02:03.067329 5050 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openstack/glance-default-internal-api-0"
Nov 23 15:02:03 crc kubenswrapper[5050]: I1123 15:02:03.068021 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"]
Nov 23 15:02:03 crc kubenswrapper[5050]: I1123 15:02:03.073703 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data"
Nov 23 15:02:03 crc kubenswrapper[5050]: I1123 15:02:03.074860 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc"
Nov 23 15:02:03 crc kubenswrapper[5050]: I1123 15:02:03.213576 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6c5e9b39-91fa-4f4e-9d95-0599bc22472d-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"6c5e9b39-91fa-4f4e-9d95-0599bc22472d\") " pod="openstack/glance-default-internal-api-0"
Nov 23 15:02:03 crc kubenswrapper[5050]: I1123 15:02:03.213723 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c5e9b39-91fa-4f4e-9d95-0599bc22472d-config-data\") pod \"glance-default-internal-api-0\" (UID: \"6c5e9b39-91fa-4f4e-9d95-0599bc22472d\") " pod="openstack/glance-default-internal-api-0"
Nov 23 15:02:03 crc kubenswrapper[5050]: I1123 15:02:03.213751 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c5e9b39-91fa-4f4e-9d95-0599bc22472d-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"6c5e9b39-91fa-4f4e-9d95-0599bc22472d\") " pod="openstack/glance-default-internal-api-0"
Nov 23 15:02:03 crc kubenswrapper[5050]: I1123 15:02:03.213787 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x24jh\" (UniqueName: \"kubernetes.io/projected/6c5e9b39-91fa-4f4e-9d95-0599bc22472d-kube-api-access-x24jh\") pod \"glance-default-internal-api-0\" (UID: \"6c5e9b39-91fa-4f4e-9d95-0599bc22472d\") " pod="openstack/glance-default-internal-api-0"
Nov 23 15:02:03 crc kubenswrapper[5050]: I1123 15:02:03.213818 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/6c5e9b39-91fa-4f4e-9d95-0599bc22472d-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"6c5e9b39-91fa-4f4e-9d95-0599bc22472d\") " pod="openstack/glance-default-internal-api-0"
Nov 23 15:02:03 crc kubenswrapper[5050]: I1123 15:02:03.214044 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6c5e9b39-91fa-4f4e-9d95-0599bc22472d-scripts\") pod \"glance-default-internal-api-0\" (UID: \"6c5e9b39-91fa-4f4e-9d95-0599bc22472d\") " pod="openstack/glance-default-internal-api-0"
Nov 23 15:02:03 crc kubenswrapper[5050]: I1123 15:02:03.214096 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6c5e9b39-91fa-4f4e-9d95-0599bc22472d-logs\") pod \"glance-default-internal-api-0\" (UID: \"6c5e9b39-91fa-4f4e-9d95-0599bc22472d\") " pod="openstack/glance-default-internal-api-0"
Nov 23 15:02:03 crc kubenswrapper[5050]: I1123 15:02:03.214145 5050 reconciler_common.go:245]
"operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-internal-api-0\" (UID: \"6c5e9b39-91fa-4f4e-9d95-0599bc22472d\") " pod="openstack/glance-default-internal-api-0" Nov 23 15:02:03 crc kubenswrapper[5050]: I1123 15:02:03.316191 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c5e9b39-91fa-4f4e-9d95-0599bc22472d-config-data\") pod \"glance-default-internal-api-0\" (UID: \"6c5e9b39-91fa-4f4e-9d95-0599bc22472d\") " pod="openstack/glance-default-internal-api-0" Nov 23 15:02:03 crc kubenswrapper[5050]: I1123 15:02:03.316242 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c5e9b39-91fa-4f4e-9d95-0599bc22472d-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"6c5e9b39-91fa-4f4e-9d95-0599bc22472d\") " pod="openstack/glance-default-internal-api-0" Nov 23 15:02:03 crc kubenswrapper[5050]: I1123 15:02:03.316266 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x24jh\" (UniqueName: \"kubernetes.io/projected/6c5e9b39-91fa-4f4e-9d95-0599bc22472d-kube-api-access-x24jh\") pod \"glance-default-internal-api-0\" (UID: \"6c5e9b39-91fa-4f4e-9d95-0599bc22472d\") " pod="openstack/glance-default-internal-api-0" Nov 23 15:02:03 crc kubenswrapper[5050]: I1123 15:02:03.316300 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/6c5e9b39-91fa-4f4e-9d95-0599bc22472d-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"6c5e9b39-91fa-4f4e-9d95-0599bc22472d\") " pod="openstack/glance-default-internal-api-0" Nov 23 15:02:03 crc kubenswrapper[5050]: I1123 15:02:03.316338 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6c5e9b39-91fa-4f4e-9d95-0599bc22472d-scripts\") pod \"glance-default-internal-api-0\" (UID: \"6c5e9b39-91fa-4f4e-9d95-0599bc22472d\") " pod="openstack/glance-default-internal-api-0" Nov 23 15:02:03 crc kubenswrapper[5050]: I1123 15:02:03.316353 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6c5e9b39-91fa-4f4e-9d95-0599bc22472d-logs\") pod \"glance-default-internal-api-0\" (UID: \"6c5e9b39-91fa-4f4e-9d95-0599bc22472d\") " pod="openstack/glance-default-internal-api-0" Nov 23 15:02:03 crc kubenswrapper[5050]: I1123 15:02:03.316371 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-internal-api-0\" (UID: \"6c5e9b39-91fa-4f4e-9d95-0599bc22472d\") " pod="openstack/glance-default-internal-api-0" Nov 23 15:02:03 crc kubenswrapper[5050]: I1123 15:02:03.316435 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6c5e9b39-91fa-4f4e-9d95-0599bc22472d-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"6c5e9b39-91fa-4f4e-9d95-0599bc22472d\") " pod="openstack/glance-default-internal-api-0" Nov 23 15:02:03 crc kubenswrapper[5050]: I1123 15:02:03.317850 5050 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage11-crc\" (UniqueName: 
\"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-internal-api-0\" (UID: \"6c5e9b39-91fa-4f4e-9d95-0599bc22472d\") device mount path \"/mnt/openstack/pv11\"" pod="openstack/glance-default-internal-api-0" Nov 23 15:02:03 crc kubenswrapper[5050]: I1123 15:02:03.317895 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/6c5e9b39-91fa-4f4e-9d95-0599bc22472d-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"6c5e9b39-91fa-4f4e-9d95-0599bc22472d\") " pod="openstack/glance-default-internal-api-0" Nov 23 15:02:03 crc kubenswrapper[5050]: I1123 15:02:03.317991 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6c5e9b39-91fa-4f4e-9d95-0599bc22472d-logs\") pod \"glance-default-internal-api-0\" (UID: \"6c5e9b39-91fa-4f4e-9d95-0599bc22472d\") " pod="openstack/glance-default-internal-api-0" Nov 23 15:02:03 crc kubenswrapper[5050]: I1123 15:02:03.328048 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c5e9b39-91fa-4f4e-9d95-0599bc22472d-config-data\") pod \"glance-default-internal-api-0\" (UID: \"6c5e9b39-91fa-4f4e-9d95-0599bc22472d\") " pod="openstack/glance-default-internal-api-0" Nov 23 15:02:03 crc kubenswrapper[5050]: I1123 15:02:03.330678 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6c5e9b39-91fa-4f4e-9d95-0599bc22472d-scripts\") pod \"glance-default-internal-api-0\" (UID: \"6c5e9b39-91fa-4f4e-9d95-0599bc22472d\") " pod="openstack/glance-default-internal-api-0" Nov 23 15:02:03 crc kubenswrapper[5050]: I1123 15:02:03.330775 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6c5e9b39-91fa-4f4e-9d95-0599bc22472d-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"6c5e9b39-91fa-4f4e-9d95-0599bc22472d\") " pod="openstack/glance-default-internal-api-0" Nov 23 15:02:03 crc kubenswrapper[5050]: I1123 15:02:03.337030 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c5e9b39-91fa-4f4e-9d95-0599bc22472d-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"6c5e9b39-91fa-4f4e-9d95-0599bc22472d\") " pod="openstack/glance-default-internal-api-0" Nov 23 15:02:03 crc kubenswrapper[5050]: I1123 15:02:03.365579 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x24jh\" (UniqueName: \"kubernetes.io/projected/6c5e9b39-91fa-4f4e-9d95-0599bc22472d-kube-api-access-x24jh\") pod \"glance-default-internal-api-0\" (UID: \"6c5e9b39-91fa-4f4e-9d95-0599bc22472d\") " pod="openstack/glance-default-internal-api-0" Nov 23 15:02:03 crc kubenswrapper[5050]: I1123 15:02:03.393902 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-internal-api-0\" (UID: \"6c5e9b39-91fa-4f4e-9d95-0599bc22472d\") " pod="openstack/glance-default-internal-api-0" Nov 23 15:02:03 crc kubenswrapper[5050]: I1123 15:02:03.564692 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8d700d7b-7ad1-41bc-a0fe-a60f106ae33b" path="/var/lib/kubelet/pods/8d700d7b-7ad1-41bc-a0fe-a60f106ae33b/volumes" Nov 23 15:02:03 crc kubenswrapper[5050]: I1123 15:02:03.695110 5050 
util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 23 15:02:07 crc kubenswrapper[5050]: I1123 15:02:07.719184 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 23 15:02:07 crc kubenswrapper[5050]: I1123 15:02:07.821118 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2rczq\" (UniqueName: \"kubernetes.io/projected/bf22bd66-89a1-426e-b976-5048e892fd0a-kube-api-access-2rczq\") pod \"bf22bd66-89a1-426e-b976-5048e892fd0a\" (UID: \"bf22bd66-89a1-426e-b976-5048e892fd0a\") " Nov 23 15:02:07 crc kubenswrapper[5050]: I1123 15:02:07.821295 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bf22bd66-89a1-426e-b976-5048e892fd0a-config-data\") pod \"bf22bd66-89a1-426e-b976-5048e892fd0a\" (UID: \"bf22bd66-89a1-426e-b976-5048e892fd0a\") " Nov 23 15:02:07 crc kubenswrapper[5050]: I1123 15:02:07.822159 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/bf22bd66-89a1-426e-b976-5048e892fd0a-sg-core-conf-yaml\") pod \"bf22bd66-89a1-426e-b976-5048e892fd0a\" (UID: \"bf22bd66-89a1-426e-b976-5048e892fd0a\") " Nov 23 15:02:07 crc kubenswrapper[5050]: I1123 15:02:07.822291 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bf22bd66-89a1-426e-b976-5048e892fd0a-combined-ca-bundle\") pod \"bf22bd66-89a1-426e-b976-5048e892fd0a\" (UID: \"bf22bd66-89a1-426e-b976-5048e892fd0a\") " Nov 23 15:02:07 crc kubenswrapper[5050]: I1123 15:02:07.822429 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bf22bd66-89a1-426e-b976-5048e892fd0a-run-httpd\") pod \"bf22bd66-89a1-426e-b976-5048e892fd0a\" (UID: \"bf22bd66-89a1-426e-b976-5048e892fd0a\") " Nov 23 15:02:07 crc kubenswrapper[5050]: I1123 15:02:07.822486 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bf22bd66-89a1-426e-b976-5048e892fd0a-log-httpd\") pod \"bf22bd66-89a1-426e-b976-5048e892fd0a\" (UID: \"bf22bd66-89a1-426e-b976-5048e892fd0a\") " Nov 23 15:02:07 crc kubenswrapper[5050]: I1123 15:02:07.822538 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bf22bd66-89a1-426e-b976-5048e892fd0a-scripts\") pod \"bf22bd66-89a1-426e-b976-5048e892fd0a\" (UID: \"bf22bd66-89a1-426e-b976-5048e892fd0a\") " Nov 23 15:02:07 crc kubenswrapper[5050]: I1123 15:02:07.823324 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bf22bd66-89a1-426e-b976-5048e892fd0a-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "bf22bd66-89a1-426e-b976-5048e892fd0a" (UID: "bf22bd66-89a1-426e-b976-5048e892fd0a"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 15:02:07 crc kubenswrapper[5050]: I1123 15:02:07.823579 5050 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bf22bd66-89a1-426e-b976-5048e892fd0a-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 23 15:02:07 crc kubenswrapper[5050]: I1123 15:02:07.823639 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bf22bd66-89a1-426e-b976-5048e892fd0a-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "bf22bd66-89a1-426e-b976-5048e892fd0a" (UID: "bf22bd66-89a1-426e-b976-5048e892fd0a"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 15:02:07 crc kubenswrapper[5050]: I1123 15:02:07.826604 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf22bd66-89a1-426e-b976-5048e892fd0a-kube-api-access-2rczq" (OuterVolumeSpecName: "kube-api-access-2rczq") pod "bf22bd66-89a1-426e-b976-5048e892fd0a" (UID: "bf22bd66-89a1-426e-b976-5048e892fd0a"). InnerVolumeSpecName "kube-api-access-2rczq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:02:07 crc kubenswrapper[5050]: I1123 15:02:07.858908 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf22bd66-89a1-426e-b976-5048e892fd0a-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "bf22bd66-89a1-426e-b976-5048e892fd0a" (UID: "bf22bd66-89a1-426e-b976-5048e892fd0a"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:02:07 crc kubenswrapper[5050]: I1123 15:02:07.859049 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf22bd66-89a1-426e-b976-5048e892fd0a-scripts" (OuterVolumeSpecName: "scripts") pod "bf22bd66-89a1-426e-b976-5048e892fd0a" (UID: "bf22bd66-89a1-426e-b976-5048e892fd0a"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:02:07 crc kubenswrapper[5050]: I1123 15:02:07.923916 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf22bd66-89a1-426e-b976-5048e892fd0a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "bf22bd66-89a1-426e-b976-5048e892fd0a" (UID: "bf22bd66-89a1-426e-b976-5048e892fd0a"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:02:07 crc kubenswrapper[5050]: I1123 15:02:07.925006 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bf22bd66-89a1-426e-b976-5048e892fd0a-combined-ca-bundle\") pod \"bf22bd66-89a1-426e-b976-5048e892fd0a\" (UID: \"bf22bd66-89a1-426e-b976-5048e892fd0a\") " Nov 23 15:02:07 crc kubenswrapper[5050]: I1123 15:02:07.925694 5050 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bf22bd66-89a1-426e-b976-5048e892fd0a-scripts\") on node \"crc\" DevicePath \"\"" Nov 23 15:02:07 crc kubenswrapper[5050]: I1123 15:02:07.925717 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2rczq\" (UniqueName: \"kubernetes.io/projected/bf22bd66-89a1-426e-b976-5048e892fd0a-kube-api-access-2rczq\") on node \"crc\" DevicePath \"\"" Nov 23 15:02:07 crc kubenswrapper[5050]: I1123 15:02:07.925735 5050 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/bf22bd66-89a1-426e-b976-5048e892fd0a-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 23 15:02:07 crc kubenswrapper[5050]: I1123 15:02:07.925752 5050 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bf22bd66-89a1-426e-b976-5048e892fd0a-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 23 15:02:07 crc kubenswrapper[5050]: W1123 15:02:07.925855 5050 empty_dir.go:500] Warning: Unmount skipped because path does not exist: /var/lib/kubelet/pods/bf22bd66-89a1-426e-b976-5048e892fd0a/volumes/kubernetes.io~secret/combined-ca-bundle Nov 23 15:02:07 crc kubenswrapper[5050]: I1123 15:02:07.925873 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf22bd66-89a1-426e-b976-5048e892fd0a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "bf22bd66-89a1-426e-b976-5048e892fd0a" (UID: "bf22bd66-89a1-426e-b976-5048e892fd0a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:02:07 crc kubenswrapper[5050]: I1123 15:02:07.951165 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf22bd66-89a1-426e-b976-5048e892fd0a-config-data" (OuterVolumeSpecName: "config-data") pod "bf22bd66-89a1-426e-b976-5048e892fd0a" (UID: "bf22bd66-89a1-426e-b976-5048e892fd0a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:02:08 crc kubenswrapper[5050]: I1123 15:02:08.027821 5050 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bf22bd66-89a1-426e-b976-5048e892fd0a-config-data\") on node \"crc\" DevicePath \"\"" Nov 23 15:02:08 crc kubenswrapper[5050]: I1123 15:02:08.027866 5050 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bf22bd66-89a1-426e-b976-5048e892fd0a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 15:02:08 crc kubenswrapper[5050]: I1123 15:02:08.067996 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bf22bd66-89a1-426e-b976-5048e892fd0a","Type":"ContainerDied","Data":"346c65a9631af3081b025ae1e6d22181f7b71a8b4f1d4af1179e4b500c7e33e5"} Nov 23 15:02:08 crc kubenswrapper[5050]: I1123 15:02:08.068117 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0"
Nov 23 15:02:08 crc kubenswrapper[5050]: I1123 15:02:08.114771 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Nov 23 15:02:08 crc kubenswrapper[5050]: I1123 15:02:08.128847 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"]
Nov 23 15:02:08 crc kubenswrapper[5050]: I1123 15:02:08.138075 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"]
Nov 23 15:02:08 crc kubenswrapper[5050]: E1123 15:02:08.138567 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bf22bd66-89a1-426e-b976-5048e892fd0a" containerName="proxy-httpd"
Nov 23 15:02:08 crc kubenswrapper[5050]: I1123 15:02:08.138589 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="bf22bd66-89a1-426e-b976-5048e892fd0a" containerName="proxy-httpd"
Nov 23 15:02:08 crc kubenswrapper[5050]: E1123 15:02:08.138606 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bf22bd66-89a1-426e-b976-5048e892fd0a" containerName="ceilometer-notification-agent"
Nov 23 15:02:08 crc kubenswrapper[5050]: I1123 15:02:08.138615 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="bf22bd66-89a1-426e-b976-5048e892fd0a" containerName="ceilometer-notification-agent"
Nov 23 15:02:08 crc kubenswrapper[5050]: E1123 15:02:08.138633 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bf22bd66-89a1-426e-b976-5048e892fd0a" containerName="sg-core"
Nov 23 15:02:08 crc kubenswrapper[5050]: I1123 15:02:08.138640 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="bf22bd66-89a1-426e-b976-5048e892fd0a" containerName="sg-core"
Nov 23 15:02:08 crc kubenswrapper[5050]: E1123 15:02:08.138663 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bf22bd66-89a1-426e-b976-5048e892fd0a" containerName="ceilometer-central-agent"
Nov 23 15:02:08 crc kubenswrapper[5050]: I1123 15:02:08.138671 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="bf22bd66-89a1-426e-b976-5048e892fd0a" containerName="ceilometer-central-agent"
Nov 23 15:02:08 crc kubenswrapper[5050]: I1123 15:02:08.138876 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="bf22bd66-89a1-426e-b976-5048e892fd0a" containerName="ceilometer-notification-agent"
Nov 23 15:02:08 crc kubenswrapper[5050]: I1123 15:02:08.138890 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="bf22bd66-89a1-426e-b976-5048e892fd0a" containerName="ceilometer-central-agent"
Nov 23 15:02:08 crc kubenswrapper[5050]: I1123 15:02:08.138908 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="bf22bd66-89a1-426e-b976-5048e892fd0a" containerName="sg-core"
Nov 23 15:02:08 crc kubenswrapper[5050]: I1123 15:02:08.138929 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="bf22bd66-89a1-426e-b976-5048e892fd0a" containerName="proxy-httpd"
Nov 23 15:02:08 crc kubenswrapper[5050]: I1123 15:02:08.143520 5050 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openstack/ceilometer-0"
Nov 23 15:02:08 crc kubenswrapper[5050]: I1123 15:02:08.147988 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Nov 23 15:02:08 crc kubenswrapper[5050]: I1123 15:02:08.148425 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts"
Nov 23 15:02:08 crc kubenswrapper[5050]: I1123 15:02:08.152131 5050 scope.go:117] "RemoveContainer" containerID="a8481807947b6125b632d234b7c09c019ca4e117d153545ff0b1bfd657c9904e"
Nov 23 15:02:08 crc kubenswrapper[5050]: I1123 15:02:08.198266 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Nov 23 15:02:08 crc kubenswrapper[5050]: I1123 15:02:08.237600 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/502f3f9b-d57f-4f0b-bca8-c74f139cb28e-log-httpd\") pod \"ceilometer-0\" (UID: \"502f3f9b-d57f-4f0b-bca8-c74f139cb28e\") " pod="openstack/ceilometer-0"
Nov 23 15:02:08 crc kubenswrapper[5050]: I1123 15:02:08.237701 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/502f3f9b-d57f-4f0b-bca8-c74f139cb28e-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"502f3f9b-d57f-4f0b-bca8-c74f139cb28e\") " pod="openstack/ceilometer-0"
Nov 23 15:02:08 crc kubenswrapper[5050]: I1123 15:02:08.237752 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/502f3f9b-d57f-4f0b-bca8-c74f139cb28e-run-httpd\") pod \"ceilometer-0\" (UID: \"502f3f9b-d57f-4f0b-bca8-c74f139cb28e\") " pod="openstack/ceilometer-0"
Nov 23 15:02:08 crc kubenswrapper[5050]: I1123 15:02:08.237922 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n886n\" (UniqueName: \"kubernetes.io/projected/502f3f9b-d57f-4f0b-bca8-c74f139cb28e-kube-api-access-n886n\") pod \"ceilometer-0\" (UID: \"502f3f9b-d57f-4f0b-bca8-c74f139cb28e\") " pod="openstack/ceilometer-0"
Nov 23 15:02:08 crc kubenswrapper[5050]: I1123 15:02:08.238095 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/502f3f9b-d57f-4f0b-bca8-c74f139cb28e-scripts\") pod \"ceilometer-0\" (UID: \"502f3f9b-d57f-4f0b-bca8-c74f139cb28e\") " pod="openstack/ceilometer-0"
Nov 23 15:02:08 crc kubenswrapper[5050]: I1123 15:02:08.238247 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/502f3f9b-d57f-4f0b-bca8-c74f139cb28e-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"502f3f9b-d57f-4f0b-bca8-c74f139cb28e\") " pod="openstack/ceilometer-0"
Nov 23 15:02:08 crc kubenswrapper[5050]: I1123 15:02:08.238294 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/502f3f9b-d57f-4f0b-bca8-c74f139cb28e-config-data\") pod \"ceilometer-0\" (UID: \"502f3f9b-d57f-4f0b-bca8-c74f139cb28e\") " pod="openstack/ceilometer-0"
Nov 23 15:02:08 crc kubenswrapper[5050]: I1123 15:02:08.241957 5050 scope.go:117] "RemoveContainer" containerID="f80c7749236f5263982dab4bf87f3ce304251eab9531a2aa6d4a46707413ac9d"
Nov 23 15:02:08 crc kubenswrapper[5050]: I1123
15:02:08.283326 5050 scope.go:117] "RemoveContainer" containerID="00bffdb05a2cea22a8013de3f7d37366b08b1085052fa214184fef486440f100"
Nov 23 15:02:08 crc kubenswrapper[5050]: I1123 15:02:08.321530 5050 scope.go:117] "RemoveContainer" containerID="427daadd3a86480e5477fc5370f76629ddff3edb6271bef638fd9337073aaf25"
Nov 23 15:02:08 crc kubenswrapper[5050]: I1123 15:02:08.343592 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/502f3f9b-d57f-4f0b-bca8-c74f139cb28e-run-httpd\") pod \"ceilometer-0\" (UID: \"502f3f9b-d57f-4f0b-bca8-c74f139cb28e\") " pod="openstack/ceilometer-0"
Nov 23 15:02:08 crc kubenswrapper[5050]: I1123 15:02:08.343698 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n886n\" (UniqueName: \"kubernetes.io/projected/502f3f9b-d57f-4f0b-bca8-c74f139cb28e-kube-api-access-n886n\") pod \"ceilometer-0\" (UID: \"502f3f9b-d57f-4f0b-bca8-c74f139cb28e\") " pod="openstack/ceilometer-0"
Nov 23 15:02:08 crc kubenswrapper[5050]: I1123 15:02:08.343748 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/502f3f9b-d57f-4f0b-bca8-c74f139cb28e-scripts\") pod \"ceilometer-0\" (UID: \"502f3f9b-d57f-4f0b-bca8-c74f139cb28e\") " pod="openstack/ceilometer-0"
Nov 23 15:02:08 crc kubenswrapper[5050]: I1123 15:02:08.343812 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/502f3f9b-d57f-4f0b-bca8-c74f139cb28e-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"502f3f9b-d57f-4f0b-bca8-c74f139cb28e\") " pod="openstack/ceilometer-0"
Nov 23 15:02:08 crc kubenswrapper[5050]: I1123 15:02:08.343849 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/502f3f9b-d57f-4f0b-bca8-c74f139cb28e-config-data\") pod \"ceilometer-0\" (UID: \"502f3f9b-d57f-4f0b-bca8-c74f139cb28e\") " pod="openstack/ceilometer-0"
Nov 23 15:02:08 crc kubenswrapper[5050]: I1123 15:02:08.343908 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/502f3f9b-d57f-4f0b-bca8-c74f139cb28e-log-httpd\") pod \"ceilometer-0\" (UID: \"502f3f9b-d57f-4f0b-bca8-c74f139cb28e\") " pod="openstack/ceilometer-0"
Nov 23 15:02:08 crc kubenswrapper[5050]: I1123 15:02:08.343944 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/502f3f9b-d57f-4f0b-bca8-c74f139cb28e-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"502f3f9b-d57f-4f0b-bca8-c74f139cb28e\") " pod="openstack/ceilometer-0"
Nov 23 15:02:08 crc kubenswrapper[5050]: I1123 15:02:08.345108 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/502f3f9b-d57f-4f0b-bca8-c74f139cb28e-log-httpd\") pod \"ceilometer-0\" (UID: \"502f3f9b-d57f-4f0b-bca8-c74f139cb28e\") " pod="openstack/ceilometer-0"
Nov 23 15:02:08 crc kubenswrapper[5050]: I1123 15:02:08.345770 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/502f3f9b-d57f-4f0b-bca8-c74f139cb28e-run-httpd\") pod \"ceilometer-0\" (UID: \"502f3f9b-d57f-4f0b-bca8-c74f139cb28e\") " pod="openstack/ceilometer-0"
Nov 23 15:02:08 crc kubenswrapper[5050]: I1123 15:02:08.351029 5050
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/502f3f9b-d57f-4f0b-bca8-c74f139cb28e-scripts\") pod \"ceilometer-0\" (UID: \"502f3f9b-d57f-4f0b-bca8-c74f139cb28e\") " pod="openstack/ceilometer-0"
Nov 23 15:02:08 crc kubenswrapper[5050]: I1123 15:02:08.351847 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/502f3f9b-d57f-4f0b-bca8-c74f139cb28e-config-data\") pod \"ceilometer-0\" (UID: \"502f3f9b-d57f-4f0b-bca8-c74f139cb28e\") " pod="openstack/ceilometer-0"
Nov 23 15:02:08 crc kubenswrapper[5050]: I1123 15:02:08.354810 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/502f3f9b-d57f-4f0b-bca8-c74f139cb28e-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"502f3f9b-d57f-4f0b-bca8-c74f139cb28e\") " pod="openstack/ceilometer-0"
Nov 23 15:02:08 crc kubenswrapper[5050]: I1123 15:02:08.355322 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/502f3f9b-d57f-4f0b-bca8-c74f139cb28e-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"502f3f9b-d57f-4f0b-bca8-c74f139cb28e\") " pod="openstack/ceilometer-0"
Nov 23 15:02:08 crc kubenswrapper[5050]: I1123 15:02:08.369992 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n886n\" (UniqueName: \"kubernetes.io/projected/502f3f9b-d57f-4f0b-bca8-c74f139cb28e-kube-api-access-n886n\") pod \"ceilometer-0\" (UID: \"502f3f9b-d57f-4f0b-bca8-c74f139cb28e\") " pod="openstack/ceilometer-0"
Nov 23 15:02:08 crc kubenswrapper[5050]: I1123 15:02:08.383136 5050 scope.go:117] "RemoveContainer" containerID="6d8ca1a4fe2226502af880dfc1ca8adeeae1a4d077e080e11fbc07014425f0a5"
Nov 23 15:02:08 crc kubenswrapper[5050]: I1123 15:02:08.464836 5050 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openstack/ceilometer-0"
Nov 23 15:02:08 crc kubenswrapper[5050]: I1123 15:02:08.842292 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"]
Nov 23 15:02:08 crc kubenswrapper[5050]: W1123 15:02:08.880946 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6c5e9b39_91fa_4f4e_9d95_0599bc22472d.slice/crio-ead3684cbd6eabb3234f8cced02e8a5b37dce04bd21bcbc733c85ff1de0b6f1c WatchSource:0}: Error finding container ead3684cbd6eabb3234f8cced02e8a5b37dce04bd21bcbc733c85ff1de0b6f1c: Status 404 returned error can't find the container with id ead3684cbd6eabb3234f8cced02e8a5b37dce04bd21bcbc733c85ff1de0b6f1c
Nov 23 15:02:08 crc kubenswrapper[5050]: I1123 15:02:08.971077 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Nov 23 15:02:08 crc kubenswrapper[5050]: W1123 15:02:08.985551 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod502f3f9b_d57f_4f0b_bca8_c74f139cb28e.slice/crio-2842553d3264cb20a1183862ab46d75413f078b9db0d773f351658ea0eebb971 WatchSource:0}: Error finding container 2842553d3264cb20a1183862ab46d75413f078b9db0d773f351658ea0eebb971: Status 404 returned error can't find the container with id 2842553d3264cb20a1183862ab46d75413f078b9db0d773f351658ea0eebb971
Nov 23 15:02:09 crc kubenswrapper[5050]: I1123 15:02:09.087759 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"6c5e9b39-91fa-4f4e-9d95-0599bc22472d","Type":"ContainerStarted","Data":"ead3684cbd6eabb3234f8cced02e8a5b37dce04bd21bcbc733c85ff1de0b6f1c"}
Nov 23 15:02:09 crc kubenswrapper[5050]: I1123 15:02:09.098738 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-dpm95" event={"ID":"8d6f5af2-c95a-47c1-b9c3-2d13eea56e9c","Type":"ContainerStarted","Data":"5f6b86f81e584e0181c2a2f21d3c83b22e43fc8bc0a8211a4b5dcfeb058a4ea0"}
Nov 23 15:02:09 crc kubenswrapper[5050]: I1123 15:02:09.122591 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-db-sync-dpm95" podStartSLOduration=1.777989042 podStartE2EDuration="12.122564367s" podCreationTimestamp="2025-11-23 15:01:57 +0000 UTC" firstStartedPulling="2025-11-23 15:01:57.940235639 +0000 UTC m=+1213.107232124" lastFinishedPulling="2025-11-23 15:02:08.284810964 +0000 UTC m=+1223.451807449" observedRunningTime="2025-11-23 15:02:09.119996425 +0000 UTC m=+1224.286992930" watchObservedRunningTime="2025-11-23 15:02:09.122564367 +0000 UTC m=+1224.289560852"
Nov 23 15:02:09 crc kubenswrapper[5050]: I1123 15:02:09.132709 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"502f3f9b-d57f-4f0b-bca8-c74f139cb28e","Type":"ContainerStarted","Data":"2842553d3264cb20a1183862ab46d75413f078b9db0d773f351658ea0eebb971"}
Nov 23 15:02:09 crc kubenswrapper[5050]: I1123 15:02:09.712823 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf22bd66-89a1-426e-b976-5048e892fd0a" path="/var/lib/kubelet/pods/bf22bd66-89a1-426e-b976-5048e892fd0a/volumes"
Nov 23 15:02:10 crc kubenswrapper[5050]: I1123 15:02:10.150165 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0"
event={"ID":"6c5e9b39-91fa-4f4e-9d95-0599bc22472d","Type":"ContainerStarted","Data":"29d8b6a8f6bcb0d52d1fb31bd328b23184faf043db74c7e0325581a2ae92bc41"} Nov 23 15:02:10 crc kubenswrapper[5050]: I1123 15:02:10.152226 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"4d3c8093-cbaa-4414-9f1d-c7ae7a2b9322","Type":"ContainerStarted","Data":"63cf2a00b105f3566525e540e376dbc5f791f9441863630deaff0d4a49ab2c58"} Nov 23 15:02:10 crc kubenswrapper[5050]: I1123 15:02:10.155902 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"502f3f9b-d57f-4f0b-bca8-c74f139cb28e","Type":"ContainerStarted","Data":"e3edf6026dbde8c1a65c0a3b72bbcf4aca60b485fbef6d305cbf2300aeae879f"} Nov 23 15:02:10 crc kubenswrapper[5050]: I1123 15:02:10.193682 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=10.193663456 podStartE2EDuration="10.193663456s" podCreationTimestamp="2025-11-23 15:02:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 15:02:10.175724756 +0000 UTC m=+1225.342721241" watchObservedRunningTime="2025-11-23 15:02:10.193663456 +0000 UTC m=+1225.360659941" Nov 23 15:02:11 crc kubenswrapper[5050]: I1123 15:02:11.168851 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"6c5e9b39-91fa-4f4e-9d95-0599bc22472d","Type":"ContainerStarted","Data":"afbf2c9723a3c51e2ef343739812462a5a83e6a2cd33bd51d95de96ab628994d"} Nov 23 15:02:11 crc kubenswrapper[5050]: I1123 15:02:11.171293 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"502f3f9b-d57f-4f0b-bca8-c74f139cb28e","Type":"ContainerStarted","Data":"15dcfb17c69865a099d9ce6907920ef90f8b98c5fe76f727295785a19af0728e"} Nov 23 15:02:11 crc kubenswrapper[5050]: I1123 15:02:11.200546 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=8.200518646 podStartE2EDuration="8.200518646s" podCreationTimestamp="2025-11-23 15:02:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 15:02:11.190924119 +0000 UTC m=+1226.357920604" watchObservedRunningTime="2025-11-23 15:02:11.200518646 +0000 UTC m=+1226.367515131" Nov 23 15:02:11 crc kubenswrapper[5050]: I1123 15:02:11.373153 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 23 15:02:11 crc kubenswrapper[5050]: I1123 15:02:11.373209 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 23 15:02:11 crc kubenswrapper[5050]: I1123 15:02:11.412852 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 23 15:02:11 crc kubenswrapper[5050]: I1123 15:02:11.424296 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 23 15:02:12 crc kubenswrapper[5050]: I1123 15:02:12.189089 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"502f3f9b-d57f-4f0b-bca8-c74f139cb28e","Type":"ContainerStarted","Data":"54bca3f2ac608d9e6f781c539a46416d75c9afb9a9f7929f5f053c30b3f9c130"} Nov 23 15:02:12 crc kubenswrapper[5050]: I1123 15:02:12.189553 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 23 15:02:12 crc kubenswrapper[5050]: I1123 15:02:12.189576 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 23 15:02:13 crc kubenswrapper[5050]: I1123 15:02:13.696247 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 23 15:02:13 crc kubenswrapper[5050]: I1123 15:02:13.697839 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 23 15:02:13 crc kubenswrapper[5050]: I1123 15:02:13.758906 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 23 15:02:13 crc kubenswrapper[5050]: I1123 15:02:13.764782 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 23 15:02:14 crc kubenswrapper[5050]: I1123 15:02:14.223892 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"502f3f9b-d57f-4f0b-bca8-c74f139cb28e","Type":"ContainerStarted","Data":"ac42ebbf67a8dcb7ddb5df6c37469b0efbf7f6b0a30370b30b97ed374ffdf9c6"} Nov 23 15:02:14 crc kubenswrapper[5050]: I1123 15:02:14.224679 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 23 15:02:14 crc kubenswrapper[5050]: I1123 15:02:14.224798 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 23 15:02:14 crc kubenswrapper[5050]: I1123 15:02:14.226057 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 23 15:02:14 crc kubenswrapper[5050]: I1123 15:02:14.392734 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 23 15:02:14 crc kubenswrapper[5050]: I1123 15:02:14.419647 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.25455259 podStartE2EDuration="6.419626453s" podCreationTimestamp="2025-11-23 15:02:08 +0000 UTC" firstStartedPulling="2025-11-23 15:02:08.991252291 +0000 UTC m=+1224.158248776" lastFinishedPulling="2025-11-23 15:02:13.156326154 +0000 UTC m=+1228.323322639" observedRunningTime="2025-11-23 15:02:14.26003386 +0000 UTC m=+1229.427030345" watchObservedRunningTime="2025-11-23 15:02:14.419626453 +0000 UTC m=+1229.586622938" Nov 23 15:02:16 crc kubenswrapper[5050]: I1123 15:02:16.371931 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 23 15:02:16 crc kubenswrapper[5050]: I1123 15:02:16.409517 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 23 15:02:16 crc kubenswrapper[5050]: I1123 15:02:16.417138 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 23 15:02:24 crc kubenswrapper[5050]: I1123 15:02:24.376163 5050 generic.go:334] "Generic (PLEG): container finished" 
podID="8d6f5af2-c95a-47c1-b9c3-2d13eea56e9c" containerID="5f6b86f81e584e0181c2a2f21d3c83b22e43fc8bc0a8211a4b5dcfeb058a4ea0" exitCode=0 Nov 23 15:02:24 crc kubenswrapper[5050]: I1123 15:02:24.376256 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-dpm95" event={"ID":"8d6f5af2-c95a-47c1-b9c3-2d13eea56e9c","Type":"ContainerDied","Data":"5f6b86f81e584e0181c2a2f21d3c83b22e43fc8bc0a8211a4b5dcfeb058a4ea0"} Nov 23 15:02:25 crc kubenswrapper[5050]: I1123 15:02:25.852845 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-dpm95" Nov 23 15:02:25 crc kubenswrapper[5050]: I1123 15:02:25.914731 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rlc2s\" (UniqueName: \"kubernetes.io/projected/8d6f5af2-c95a-47c1-b9c3-2d13eea56e9c-kube-api-access-rlc2s\") pod \"8d6f5af2-c95a-47c1-b9c3-2d13eea56e9c\" (UID: \"8d6f5af2-c95a-47c1-b9c3-2d13eea56e9c\") " Nov 23 15:02:25 crc kubenswrapper[5050]: I1123 15:02:25.914799 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8d6f5af2-c95a-47c1-b9c3-2d13eea56e9c-scripts\") pod \"8d6f5af2-c95a-47c1-b9c3-2d13eea56e9c\" (UID: \"8d6f5af2-c95a-47c1-b9c3-2d13eea56e9c\") " Nov 23 15:02:25 crc kubenswrapper[5050]: I1123 15:02:25.914832 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8d6f5af2-c95a-47c1-b9c3-2d13eea56e9c-config-data\") pod \"8d6f5af2-c95a-47c1-b9c3-2d13eea56e9c\" (UID: \"8d6f5af2-c95a-47c1-b9c3-2d13eea56e9c\") " Nov 23 15:02:25 crc kubenswrapper[5050]: I1123 15:02:25.914861 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8d6f5af2-c95a-47c1-b9c3-2d13eea56e9c-combined-ca-bundle\") pod \"8d6f5af2-c95a-47c1-b9c3-2d13eea56e9c\" (UID: \"8d6f5af2-c95a-47c1-b9c3-2d13eea56e9c\") " Nov 23 15:02:25 crc kubenswrapper[5050]: I1123 15:02:25.926686 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8d6f5af2-c95a-47c1-b9c3-2d13eea56e9c-kube-api-access-rlc2s" (OuterVolumeSpecName: "kube-api-access-rlc2s") pod "8d6f5af2-c95a-47c1-b9c3-2d13eea56e9c" (UID: "8d6f5af2-c95a-47c1-b9c3-2d13eea56e9c"). InnerVolumeSpecName "kube-api-access-rlc2s". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:02:25 crc kubenswrapper[5050]: I1123 15:02:25.934671 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8d6f5af2-c95a-47c1-b9c3-2d13eea56e9c-scripts" (OuterVolumeSpecName: "scripts") pod "8d6f5af2-c95a-47c1-b9c3-2d13eea56e9c" (UID: "8d6f5af2-c95a-47c1-b9c3-2d13eea56e9c"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:02:25 crc kubenswrapper[5050]: I1123 15:02:25.959164 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8d6f5af2-c95a-47c1-b9c3-2d13eea56e9c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8d6f5af2-c95a-47c1-b9c3-2d13eea56e9c" (UID: "8d6f5af2-c95a-47c1-b9c3-2d13eea56e9c"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:02:25 crc kubenswrapper[5050]: I1123 15:02:25.971684 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8d6f5af2-c95a-47c1-b9c3-2d13eea56e9c-config-data" (OuterVolumeSpecName: "config-data") pod "8d6f5af2-c95a-47c1-b9c3-2d13eea56e9c" (UID: "8d6f5af2-c95a-47c1-b9c3-2d13eea56e9c"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:02:26 crc kubenswrapper[5050]: I1123 15:02:26.018358 5050 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8d6f5af2-c95a-47c1-b9c3-2d13eea56e9c-scripts\") on node \"crc\" DevicePath \"\"" Nov 23 15:02:26 crc kubenswrapper[5050]: I1123 15:02:26.018714 5050 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8d6f5af2-c95a-47c1-b9c3-2d13eea56e9c-config-data\") on node \"crc\" DevicePath \"\"" Nov 23 15:02:26 crc kubenswrapper[5050]: I1123 15:02:26.018823 5050 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8d6f5af2-c95a-47c1-b9c3-2d13eea56e9c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 15:02:26 crc kubenswrapper[5050]: I1123 15:02:26.018912 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rlc2s\" (UniqueName: \"kubernetes.io/projected/8d6f5af2-c95a-47c1-b9c3-2d13eea56e9c-kube-api-access-rlc2s\") on node \"crc\" DevicePath \"\"" Nov 23 15:02:26 crc kubenswrapper[5050]: I1123 15:02:26.405969 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-dpm95" event={"ID":"8d6f5af2-c95a-47c1-b9c3-2d13eea56e9c","Type":"ContainerDied","Data":"313decd7392548ebdbc72d5f6af7648bcbdce0351cee6f4d5dcc48bf78402d4d"} Nov 23 15:02:26 crc kubenswrapper[5050]: I1123 15:02:26.406435 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="313decd7392548ebdbc72d5f6af7648bcbdce0351cee6f4d5dcc48bf78402d4d" Nov 23 15:02:26 crc kubenswrapper[5050]: I1123 15:02:26.406588 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-dpm95" Nov 23 15:02:26 crc kubenswrapper[5050]: I1123 15:02:26.628349 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 23 15:02:26 crc kubenswrapper[5050]: E1123 15:02:26.629609 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8d6f5af2-c95a-47c1-b9c3-2d13eea56e9c" containerName="nova-cell0-conductor-db-sync" Nov 23 15:02:26 crc kubenswrapper[5050]: I1123 15:02:26.629775 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="8d6f5af2-c95a-47c1-b9c3-2d13eea56e9c" containerName="nova-cell0-conductor-db-sync" Nov 23 15:02:26 crc kubenswrapper[5050]: I1123 15:02:26.630240 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="8d6f5af2-c95a-47c1-b9c3-2d13eea56e9c" containerName="nova-cell0-conductor-db-sync" Nov 23 15:02:26 crc kubenswrapper[5050]: I1123 15:02:26.631533 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-0"
Nov 23 15:02:26 crc kubenswrapper[5050]: I1123 15:02:26.634814 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data"
Nov 23 15:02:26 crc kubenswrapper[5050]: I1123 15:02:26.638933 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-qdpxk"
Nov 23 15:02:26 crc kubenswrapper[5050]: I1123 15:02:26.646525 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"]
Nov 23 15:02:26 crc kubenswrapper[5050]: I1123 15:02:26.735375 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4d1fe4ad-9245-4af5-b378-c908ce72f08c-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"4d1fe4ad-9245-4af5-b378-c908ce72f08c\") " pod="openstack/nova-cell0-conductor-0"
Nov 23 15:02:26 crc kubenswrapper[5050]: I1123 15:02:26.735498 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gjxsg\" (UniqueName: \"kubernetes.io/projected/4d1fe4ad-9245-4af5-b378-c908ce72f08c-kube-api-access-gjxsg\") pod \"nova-cell0-conductor-0\" (UID: \"4d1fe4ad-9245-4af5-b378-c908ce72f08c\") " pod="openstack/nova-cell0-conductor-0"
Nov 23 15:02:26 crc kubenswrapper[5050]: I1123 15:02:26.735625 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4d1fe4ad-9245-4af5-b378-c908ce72f08c-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"4d1fe4ad-9245-4af5-b378-c908ce72f08c\") " pod="openstack/nova-cell0-conductor-0"
Nov 23 15:02:26 crc kubenswrapper[5050]: I1123 15:02:26.837801 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gjxsg\" (UniqueName: \"kubernetes.io/projected/4d1fe4ad-9245-4af5-b378-c908ce72f08c-kube-api-access-gjxsg\") pod \"nova-cell0-conductor-0\" (UID: \"4d1fe4ad-9245-4af5-b378-c908ce72f08c\") " pod="openstack/nova-cell0-conductor-0"
Nov 23 15:02:26 crc kubenswrapper[5050]: I1123 15:02:26.838034 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4d1fe4ad-9245-4af5-b378-c908ce72f08c-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"4d1fe4ad-9245-4af5-b378-c908ce72f08c\") " pod="openstack/nova-cell0-conductor-0"
Nov 23 15:02:26 crc kubenswrapper[5050]: I1123 15:02:26.838282 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4d1fe4ad-9245-4af5-b378-c908ce72f08c-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"4d1fe4ad-9245-4af5-b378-c908ce72f08c\") " pod="openstack/nova-cell0-conductor-0"
Nov 23 15:02:26 crc kubenswrapper[5050]: I1123 15:02:26.844304 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4d1fe4ad-9245-4af5-b378-c908ce72f08c-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"4d1fe4ad-9245-4af5-b378-c908ce72f08c\") " pod="openstack/nova-cell0-conductor-0"
Nov 23 15:02:26 crc kubenswrapper[5050]: I1123 15:02:26.845597 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4d1fe4ad-9245-4af5-b378-c908ce72f08c-config-data\") pod \"nova-cell0-conductor-0\"
(UID: \"4d1fe4ad-9245-4af5-b378-c908ce72f08c\") " pod="openstack/nova-cell0-conductor-0" Nov 23 15:02:26 crc kubenswrapper[5050]: I1123 15:02:26.861881 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gjxsg\" (UniqueName: \"kubernetes.io/projected/4d1fe4ad-9245-4af5-b378-c908ce72f08c-kube-api-access-gjxsg\") pod \"nova-cell0-conductor-0\" (UID: \"4d1fe4ad-9245-4af5-b378-c908ce72f08c\") " pod="openstack/nova-cell0-conductor-0" Nov 23 15:02:26 crc kubenswrapper[5050]: I1123 15:02:26.981307 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 23 15:02:27 crc kubenswrapper[5050]: I1123 15:02:27.459073 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 23 15:02:28 crc kubenswrapper[5050]: I1123 15:02:28.434901 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"4d1fe4ad-9245-4af5-b378-c908ce72f08c","Type":"ContainerStarted","Data":"42a4a3972d0dfdbfa5a41878495e90a1790378398c3c410fbb952adc25a032b1"} Nov 23 15:02:28 crc kubenswrapper[5050]: I1123 15:02:28.435856 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"4d1fe4ad-9245-4af5-b378-c908ce72f08c","Type":"ContainerStarted","Data":"62b396d3a144a1d490a54faf3ddd19396d3c2fc2bd4890fdb04f6dad65320ebe"} Nov 23 15:02:28 crc kubenswrapper[5050]: I1123 15:02:28.435883 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell0-conductor-0" Nov 23 15:02:28 crc kubenswrapper[5050]: I1123 15:02:28.467547 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-0" podStartSLOduration=2.467518629 podStartE2EDuration="2.467518629s" podCreationTimestamp="2025-11-23 15:02:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 15:02:28.454316494 +0000 UTC m=+1243.621312979" watchObservedRunningTime="2025-11-23 15:02:28.467518629 +0000 UTC m=+1243.634515134" Nov 23 15:02:29 crc kubenswrapper[5050]: I1123 15:02:29.224199 5050 patch_prober.go:28] interesting pod/machine-config-daemon-hlrlq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 23 15:02:29 crc kubenswrapper[5050]: I1123 15:02:29.224274 5050 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 23 15:02:37 crc kubenswrapper[5050]: I1123 15:02:37.036420 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell0-conductor-0" Nov 23 15:02:37 crc kubenswrapper[5050]: I1123 15:02:37.632323 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-cell-mapping-j52wg"] Nov 23 15:02:37 crc kubenswrapper[5050]: I1123 15:02:37.636789 5050 util.go:30] "No sandbox for pod can be found. 
Nov 23 15:02:37 crc kubenswrapper[5050]: I1123 15:02:37.641968 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-config-data"
Nov 23 15:02:37 crc kubenswrapper[5050]: I1123 15:02:37.642220 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-scripts"
Nov 23 15:02:37 crc kubenswrapper[5050]: I1123 15:02:37.652387 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-j52wg"]
Nov 23 15:02:37 crc kubenswrapper[5050]: I1123 15:02:37.718711 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/948389bb-db2c-40c2-a458-b54cb0fa94ee-config-data\") pod \"nova-cell0-cell-mapping-j52wg\" (UID: \"948389bb-db2c-40c2-a458-b54cb0fa94ee\") " pod="openstack/nova-cell0-cell-mapping-j52wg"
Nov 23 15:02:37 crc kubenswrapper[5050]: I1123 15:02:37.719058 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dhmbc\" (UniqueName: \"kubernetes.io/projected/948389bb-db2c-40c2-a458-b54cb0fa94ee-kube-api-access-dhmbc\") pod \"nova-cell0-cell-mapping-j52wg\" (UID: \"948389bb-db2c-40c2-a458-b54cb0fa94ee\") " pod="openstack/nova-cell0-cell-mapping-j52wg"
Nov 23 15:02:37 crc kubenswrapper[5050]: I1123 15:02:37.719293 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/948389bb-db2c-40c2-a458-b54cb0fa94ee-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-j52wg\" (UID: \"948389bb-db2c-40c2-a458-b54cb0fa94ee\") " pod="openstack/nova-cell0-cell-mapping-j52wg"
Nov 23 15:02:37 crc kubenswrapper[5050]: I1123 15:02:37.719383 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/948389bb-db2c-40c2-a458-b54cb0fa94ee-scripts\") pod \"nova-cell0-cell-mapping-j52wg\" (UID: \"948389bb-db2c-40c2-a458-b54cb0fa94ee\") " pod="openstack/nova-cell0-cell-mapping-j52wg"
Nov 23 15:02:37 crc kubenswrapper[5050]: I1123 15:02:37.821499 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dhmbc\" (UniqueName: \"kubernetes.io/projected/948389bb-db2c-40c2-a458-b54cb0fa94ee-kube-api-access-dhmbc\") pod \"nova-cell0-cell-mapping-j52wg\" (UID: \"948389bb-db2c-40c2-a458-b54cb0fa94ee\") " pod="openstack/nova-cell0-cell-mapping-j52wg"
Nov 23 15:02:37 crc kubenswrapper[5050]: I1123 15:02:37.821617 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/948389bb-db2c-40c2-a458-b54cb0fa94ee-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-j52wg\" (UID: \"948389bb-db2c-40c2-a458-b54cb0fa94ee\") " pod="openstack/nova-cell0-cell-mapping-j52wg"
Nov 23 15:02:37 crc kubenswrapper[5050]: I1123 15:02:37.821657 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/948389bb-db2c-40c2-a458-b54cb0fa94ee-scripts\") pod \"nova-cell0-cell-mapping-j52wg\" (UID: \"948389bb-db2c-40c2-a458-b54cb0fa94ee\") " pod="openstack/nova-cell0-cell-mapping-j52wg"
Nov 23 15:02:37 crc kubenswrapper[5050]: I1123 15:02:37.821682 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/948389bb-db2c-40c2-a458-b54cb0fa94ee-config-data\") pod \"nova-cell0-cell-mapping-j52wg\" (UID: \"948389bb-db2c-40c2-a458-b54cb0fa94ee\") " pod="openstack/nova-cell0-cell-mapping-j52wg"
(UniqueName: \"kubernetes.io/secret/948389bb-db2c-40c2-a458-b54cb0fa94ee-config-data\") pod \"nova-cell0-cell-mapping-j52wg\" (UID: \"948389bb-db2c-40c2-a458-b54cb0fa94ee\") " pod="openstack/nova-cell0-cell-mapping-j52wg" Nov 23 15:02:37 crc kubenswrapper[5050]: I1123 15:02:37.836022 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/948389bb-db2c-40c2-a458-b54cb0fa94ee-scripts\") pod \"nova-cell0-cell-mapping-j52wg\" (UID: \"948389bb-db2c-40c2-a458-b54cb0fa94ee\") " pod="openstack/nova-cell0-cell-mapping-j52wg" Nov 23 15:02:37 crc kubenswrapper[5050]: I1123 15:02:37.836966 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/948389bb-db2c-40c2-a458-b54cb0fa94ee-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-j52wg\" (UID: \"948389bb-db2c-40c2-a458-b54cb0fa94ee\") " pod="openstack/nova-cell0-cell-mapping-j52wg" Nov 23 15:02:37 crc kubenswrapper[5050]: I1123 15:02:37.839146 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/948389bb-db2c-40c2-a458-b54cb0fa94ee-config-data\") pod \"nova-cell0-cell-mapping-j52wg\" (UID: \"948389bb-db2c-40c2-a458-b54cb0fa94ee\") " pod="openstack/nova-cell0-cell-mapping-j52wg" Nov 23 15:02:37 crc kubenswrapper[5050]: I1123 15:02:37.865817 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 23 15:02:37 crc kubenswrapper[5050]: I1123 15:02:37.867994 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 23 15:02:37 crc kubenswrapper[5050]: I1123 15:02:37.872855 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 23 15:02:37 crc kubenswrapper[5050]: I1123 15:02:37.873251 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dhmbc\" (UniqueName: \"kubernetes.io/projected/948389bb-db2c-40c2-a458-b54cb0fa94ee-kube-api-access-dhmbc\") pod \"nova-cell0-cell-mapping-j52wg\" (UID: \"948389bb-db2c-40c2-a458-b54cb0fa94ee\") " pod="openstack/nova-cell0-cell-mapping-j52wg" Nov 23 15:02:37 crc kubenswrapper[5050]: I1123 15:02:37.876782 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 23 15:02:37 crc kubenswrapper[5050]: I1123 15:02:37.930554 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/275dac05-e3bc-4f6d-ab7b-f95acbb50692-logs\") pod \"nova-api-0\" (UID: \"275dac05-e3bc-4f6d-ab7b-f95acbb50692\") " pod="openstack/nova-api-0" Nov 23 15:02:37 crc kubenswrapper[5050]: I1123 15:02:37.930637 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/275dac05-e3bc-4f6d-ab7b-f95acbb50692-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"275dac05-e3bc-4f6d-ab7b-f95acbb50692\") " pod="openstack/nova-api-0" Nov 23 15:02:37 crc kubenswrapper[5050]: I1123 15:02:37.930666 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/275dac05-e3bc-4f6d-ab7b-f95acbb50692-config-data\") pod \"nova-api-0\" (UID: \"275dac05-e3bc-4f6d-ab7b-f95acbb50692\") " pod="openstack/nova-api-0" Nov 23 15:02:37 crc kubenswrapper[5050]: I1123 15:02:37.930694 5050 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n2g9f\" (UniqueName: \"kubernetes.io/projected/275dac05-e3bc-4f6d-ab7b-f95acbb50692-kube-api-access-n2g9f\") pod \"nova-api-0\" (UID: \"275dac05-e3bc-4f6d-ab7b-f95acbb50692\") " pod="openstack/nova-api-0" Nov 23 15:02:37 crc kubenswrapper[5050]: I1123 15:02:37.956911 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 23 15:02:37 crc kubenswrapper[5050]: I1123 15:02:37.958640 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 23 15:02:37 crc kubenswrapper[5050]: I1123 15:02:37.968601 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 23 15:02:37 crc kubenswrapper[5050]: I1123 15:02:37.973769 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 23 15:02:37 crc kubenswrapper[5050]: I1123 15:02:37.991126 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 23 15:02:37 crc kubenswrapper[5050]: I1123 15:02:37.997851 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 23 15:02:38 crc kubenswrapper[5050]: I1123 15:02:38.008735 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 23 15:02:38 crc kubenswrapper[5050]: I1123 15:02:38.017293 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-j52wg" Nov 23 15:02:38 crc kubenswrapper[5050]: I1123 15:02:38.033234 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a3584331-f147-4f05-9652-397c75f881db-config-data\") pod \"nova-scheduler-0\" (UID: \"a3584331-f147-4f05-9652-397c75f881db\") " pod="openstack/nova-scheduler-0" Nov 23 15:02:38 crc kubenswrapper[5050]: I1123 15:02:38.033304 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/275dac05-e3bc-4f6d-ab7b-f95acbb50692-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"275dac05-e3bc-4f6d-ab7b-f95acbb50692\") " pod="openstack/nova-api-0" Nov 23 15:02:38 crc kubenswrapper[5050]: I1123 15:02:38.033340 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/275dac05-e3bc-4f6d-ab7b-f95acbb50692-config-data\") pod \"nova-api-0\" (UID: \"275dac05-e3bc-4f6d-ab7b-f95acbb50692\") " pod="openstack/nova-api-0" Nov 23 15:02:38 crc kubenswrapper[5050]: I1123 15:02:38.033364 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n2g9f\" (UniqueName: \"kubernetes.io/projected/275dac05-e3bc-4f6d-ab7b-f95acbb50692-kube-api-access-n2g9f\") pod \"nova-api-0\" (UID: \"275dac05-e3bc-4f6d-ab7b-f95acbb50692\") " pod="openstack/nova-api-0" Nov 23 15:02:38 crc kubenswrapper[5050]: I1123 15:02:38.033434 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2965j\" (UniqueName: \"kubernetes.io/projected/a3584331-f147-4f05-9652-397c75f881db-kube-api-access-2965j\") pod \"nova-scheduler-0\" (UID: \"a3584331-f147-4f05-9652-397c75f881db\") " pod="openstack/nova-scheduler-0" Nov 23 15:02:38 crc kubenswrapper[5050]: I1123 15:02:38.033486 5050 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a3584331-f147-4f05-9652-397c75f881db-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"a3584331-f147-4f05-9652-397c75f881db\") " pod="openstack/nova-scheduler-0" Nov 23 15:02:38 crc kubenswrapper[5050]: I1123 15:02:38.033531 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/275dac05-e3bc-4f6d-ab7b-f95acbb50692-logs\") pod \"nova-api-0\" (UID: \"275dac05-e3bc-4f6d-ab7b-f95acbb50692\") " pod="openstack/nova-api-0" Nov 23 15:02:38 crc kubenswrapper[5050]: I1123 15:02:38.034012 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/275dac05-e3bc-4f6d-ab7b-f95acbb50692-logs\") pod \"nova-api-0\" (UID: \"275dac05-e3bc-4f6d-ab7b-f95acbb50692\") " pod="openstack/nova-api-0" Nov 23 15:02:38 crc kubenswrapper[5050]: I1123 15:02:38.039586 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/275dac05-e3bc-4f6d-ab7b-f95acbb50692-config-data\") pod \"nova-api-0\" (UID: \"275dac05-e3bc-4f6d-ab7b-f95acbb50692\") " pod="openstack/nova-api-0" Nov 23 15:02:38 crc kubenswrapper[5050]: I1123 15:02:38.039699 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 23 15:02:38 crc kubenswrapper[5050]: I1123 15:02:38.065327 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/275dac05-e3bc-4f6d-ab7b-f95acbb50692-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"275dac05-e3bc-4f6d-ab7b-f95acbb50692\") " pod="openstack/nova-api-0" Nov 23 15:02:38 crc kubenswrapper[5050]: I1123 15:02:38.083264 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n2g9f\" (UniqueName: \"kubernetes.io/projected/275dac05-e3bc-4f6d-ab7b-f95acbb50692-kube-api-access-n2g9f\") pod \"nova-api-0\" (UID: \"275dac05-e3bc-4f6d-ab7b-f95acbb50692\") " pod="openstack/nova-api-0" Nov 23 15:02:38 crc kubenswrapper[5050]: I1123 15:02:38.137027 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a3584331-f147-4f05-9652-397c75f881db-config-data\") pod \"nova-scheduler-0\" (UID: \"a3584331-f147-4f05-9652-397c75f881db\") " pod="openstack/nova-scheduler-0" Nov 23 15:02:38 crc kubenswrapper[5050]: I1123 15:02:38.137108 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pcr62\" (UniqueName: \"kubernetes.io/projected/42b8cea2-5a21-47d1-a396-adf85162c112-kube-api-access-pcr62\") pod \"nova-metadata-0\" (UID: \"42b8cea2-5a21-47d1-a396-adf85162c112\") " pod="openstack/nova-metadata-0" Nov 23 15:02:38 crc kubenswrapper[5050]: I1123 15:02:38.137139 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/42b8cea2-5a21-47d1-a396-adf85162c112-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"42b8cea2-5a21-47d1-a396-adf85162c112\") " pod="openstack/nova-metadata-0" Nov 23 15:02:38 crc kubenswrapper[5050]: I1123 15:02:38.137179 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/42b8cea2-5a21-47d1-a396-adf85162c112-config-data\") 
pod \"nova-metadata-0\" (UID: \"42b8cea2-5a21-47d1-a396-adf85162c112\") " pod="openstack/nova-metadata-0" Nov 23 15:02:38 crc kubenswrapper[5050]: I1123 15:02:38.137246 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/42b8cea2-5a21-47d1-a396-adf85162c112-logs\") pod \"nova-metadata-0\" (UID: \"42b8cea2-5a21-47d1-a396-adf85162c112\") " pod="openstack/nova-metadata-0" Nov 23 15:02:38 crc kubenswrapper[5050]: I1123 15:02:38.137268 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2965j\" (UniqueName: \"kubernetes.io/projected/a3584331-f147-4f05-9652-397c75f881db-kube-api-access-2965j\") pod \"nova-scheduler-0\" (UID: \"a3584331-f147-4f05-9652-397c75f881db\") " pod="openstack/nova-scheduler-0" Nov 23 15:02:38 crc kubenswrapper[5050]: I1123 15:02:38.137309 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a3584331-f147-4f05-9652-397c75f881db-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"a3584331-f147-4f05-9652-397c75f881db\") " pod="openstack/nova-scheduler-0" Nov 23 15:02:38 crc kubenswrapper[5050]: I1123 15:02:38.140960 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 23 15:02:38 crc kubenswrapper[5050]: I1123 15:02:38.142532 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 23 15:02:38 crc kubenswrapper[5050]: I1123 15:02:38.150928 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Nov 23 15:02:38 crc kubenswrapper[5050]: I1123 15:02:38.151437 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a3584331-f147-4f05-9652-397c75f881db-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"a3584331-f147-4f05-9652-397c75f881db\") " pod="openstack/nova-scheduler-0" Nov 23 15:02:38 crc kubenswrapper[5050]: I1123 15:02:38.164217 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a3584331-f147-4f05-9652-397c75f881db-config-data\") pod \"nova-scheduler-0\" (UID: \"a3584331-f147-4f05-9652-397c75f881db\") " pod="openstack/nova-scheduler-0" Nov 23 15:02:38 crc kubenswrapper[5050]: I1123 15:02:38.174652 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-bccf8f775-vvmp7"] Nov 23 15:02:38 crc kubenswrapper[5050]: I1123 15:02:38.181511 5050 util.go:30] "No sandbox for pod can be found. 
Nov 23 15:02:38 crc kubenswrapper[5050]: I1123 15:02:38.200641 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-bccf8f775-vvmp7"]
Nov 23 15:02:38 crc kubenswrapper[5050]: I1123 15:02:38.201091 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2965j\" (UniqueName: \"kubernetes.io/projected/a3584331-f147-4f05-9652-397c75f881db-kube-api-access-2965j\") pod \"nova-scheduler-0\" (UID: \"a3584331-f147-4f05-9652-397c75f881db\") " pod="openstack/nova-scheduler-0"
Nov 23 15:02:38 crc kubenswrapper[5050]: I1123 15:02:38.233975 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Nov 23 15:02:38 crc kubenswrapper[5050]: I1123 15:02:38.239760 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/caeaf051-b0fe-4fe8-b5e2-730f3d55ffea-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"caeaf051-b0fe-4fe8-b5e2-730f3d55ffea\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 23 15:02:38 crc kubenswrapper[5050]: I1123 15:02:38.239826 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e45241bb-3031-428d-aa6b-370eb2da07b0-dns-svc\") pod \"dnsmasq-dns-bccf8f775-vvmp7\" (UID: \"e45241bb-3031-428d-aa6b-370eb2da07b0\") " pod="openstack/dnsmasq-dns-bccf8f775-vvmp7"
Nov 23 15:02:38 crc kubenswrapper[5050]: I1123 15:02:38.239842 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e45241bb-3031-428d-aa6b-370eb2da07b0-ovsdbserver-sb\") pod \"dnsmasq-dns-bccf8f775-vvmp7\" (UID: \"e45241bb-3031-428d-aa6b-370eb2da07b0\") " pod="openstack/dnsmasq-dns-bccf8f775-vvmp7"
Nov 23 15:02:38 crc kubenswrapper[5050]: I1123 15:02:38.239890 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e45241bb-3031-428d-aa6b-370eb2da07b0-ovsdbserver-nb\") pod \"dnsmasq-dns-bccf8f775-vvmp7\" (UID: \"e45241bb-3031-428d-aa6b-370eb2da07b0\") " pod="openstack/dnsmasq-dns-bccf8f775-vvmp7"
Nov 23 15:02:38 crc kubenswrapper[5050]: I1123 15:02:38.239907 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2tjhd\" (UniqueName: \"kubernetes.io/projected/caeaf051-b0fe-4fe8-b5e2-730f3d55ffea-kube-api-access-2tjhd\") pod \"nova-cell1-novncproxy-0\" (UID: \"caeaf051-b0fe-4fe8-b5e2-730f3d55ffea\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 23 15:02:38 crc kubenswrapper[5050]: I1123 15:02:38.239930 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pcr62\" (UniqueName: \"kubernetes.io/projected/42b8cea2-5a21-47d1-a396-adf85162c112-kube-api-access-pcr62\") pod \"nova-metadata-0\" (UID: \"42b8cea2-5a21-47d1-a396-adf85162c112\") " pod="openstack/nova-metadata-0"
Nov 23 15:02:38 crc kubenswrapper[5050]: I1123 15:02:38.239954 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/42b8cea2-5a21-47d1-a396-adf85162c112-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"42b8cea2-5a21-47d1-a396-adf85162c112\") " pod="openstack/nova-metadata-0"
Nov 23 15:02:38 crc kubenswrapper[5050]: I1123 15:02:38.239980 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4hh96\" (UniqueName: \"kubernetes.io/projected/e45241bb-3031-428d-aa6b-370eb2da07b0-kube-api-access-4hh96\") pod \"dnsmasq-dns-bccf8f775-vvmp7\" (UID: \"e45241bb-3031-428d-aa6b-370eb2da07b0\") " pod="openstack/dnsmasq-dns-bccf8f775-vvmp7"
Nov 23 15:02:38 crc kubenswrapper[5050]: I1123 15:02:38.240008 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/42b8cea2-5a21-47d1-a396-adf85162c112-config-data\") pod \"nova-metadata-0\" (UID: \"42b8cea2-5a21-47d1-a396-adf85162c112\") " pod="openstack/nova-metadata-0"
Nov 23 15:02:38 crc kubenswrapper[5050]: I1123 15:02:38.240033 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e45241bb-3031-428d-aa6b-370eb2da07b0-dns-swift-storage-0\") pod \"dnsmasq-dns-bccf8f775-vvmp7\" (UID: \"e45241bb-3031-428d-aa6b-370eb2da07b0\") " pod="openstack/dnsmasq-dns-bccf8f775-vvmp7"
Nov 23 15:02:38 crc kubenswrapper[5050]: I1123 15:02:38.240059 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e45241bb-3031-428d-aa6b-370eb2da07b0-config\") pod \"dnsmasq-dns-bccf8f775-vvmp7\" (UID: \"e45241bb-3031-428d-aa6b-370eb2da07b0\") " pod="openstack/dnsmasq-dns-bccf8f775-vvmp7"
Nov 23 15:02:38 crc kubenswrapper[5050]: I1123 15:02:38.240076 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/caeaf051-b0fe-4fe8-b5e2-730f3d55ffea-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"caeaf051-b0fe-4fe8-b5e2-730f3d55ffea\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 23 15:02:38 crc kubenswrapper[5050]: I1123 15:02:38.240121 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/42b8cea2-5a21-47d1-a396-adf85162c112-logs\") pod \"nova-metadata-0\" (UID: \"42b8cea2-5a21-47d1-a396-adf85162c112\") " pod="openstack/nova-metadata-0"
Nov 23 15:02:38 crc kubenswrapper[5050]: I1123 15:02:38.241579 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/42b8cea2-5a21-47d1-a396-adf85162c112-logs\") pod \"nova-metadata-0\" (UID: \"42b8cea2-5a21-47d1-a396-adf85162c112\") " pod="openstack/nova-metadata-0"
Nov 23 15:02:38 crc kubenswrapper[5050]: I1123 15:02:38.250468 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/42b8cea2-5a21-47d1-a396-adf85162c112-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"42b8cea2-5a21-47d1-a396-adf85162c112\") " pod="openstack/nova-metadata-0"
Nov 23 15:02:38 crc kubenswrapper[5050]: I1123 15:02:38.266862 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Nov 23 15:02:38 crc kubenswrapper[5050]: I1123 15:02:38.274683 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/42b8cea2-5a21-47d1-a396-adf85162c112-config-data\") pod \"nova-metadata-0\" (UID: \"42b8cea2-5a21-47d1-a396-adf85162c112\") " pod="openstack/nova-metadata-0"
Nov 23 15:02:38 crc kubenswrapper[5050]: I1123 15:02:38.274778 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pcr62\" (UniqueName: \"kubernetes.io/projected/42b8cea2-5a21-47d1-a396-adf85162c112-kube-api-access-pcr62\") pod \"nova-metadata-0\" (UID: \"42b8cea2-5a21-47d1-a396-adf85162c112\") " pod="openstack/nova-metadata-0"
Nov 23 15:02:38 crc kubenswrapper[5050]: I1123 15:02:38.308032 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Nov 23 15:02:38 crc kubenswrapper[5050]: I1123 15:02:38.341758 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e45241bb-3031-428d-aa6b-370eb2da07b0-ovsdbserver-nb\") pod \"dnsmasq-dns-bccf8f775-vvmp7\" (UID: \"e45241bb-3031-428d-aa6b-370eb2da07b0\") " pod="openstack/dnsmasq-dns-bccf8f775-vvmp7"
Nov 23 15:02:38 crc kubenswrapper[5050]: I1123 15:02:38.341813 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2tjhd\" (UniqueName: \"kubernetes.io/projected/caeaf051-b0fe-4fe8-b5e2-730f3d55ffea-kube-api-access-2tjhd\") pod \"nova-cell1-novncproxy-0\" (UID: \"caeaf051-b0fe-4fe8-b5e2-730f3d55ffea\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 23 15:02:38 crc kubenswrapper[5050]: I1123 15:02:38.341851 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4hh96\" (UniqueName: \"kubernetes.io/projected/e45241bb-3031-428d-aa6b-370eb2da07b0-kube-api-access-4hh96\") pod \"dnsmasq-dns-bccf8f775-vvmp7\" (UID: \"e45241bb-3031-428d-aa6b-370eb2da07b0\") " pod="openstack/dnsmasq-dns-bccf8f775-vvmp7"
Nov 23 15:02:38 crc kubenswrapper[5050]: I1123 15:02:38.341886 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e45241bb-3031-428d-aa6b-370eb2da07b0-dns-swift-storage-0\") pod \"dnsmasq-dns-bccf8f775-vvmp7\" (UID: \"e45241bb-3031-428d-aa6b-370eb2da07b0\") " pod="openstack/dnsmasq-dns-bccf8f775-vvmp7"
Nov 23 15:02:38 crc kubenswrapper[5050]: I1123 15:02:38.341913 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e45241bb-3031-428d-aa6b-370eb2da07b0-config\") pod \"dnsmasq-dns-bccf8f775-vvmp7\" (UID: \"e45241bb-3031-428d-aa6b-370eb2da07b0\") " pod="openstack/dnsmasq-dns-bccf8f775-vvmp7"
Nov 23 15:02:38 crc kubenswrapper[5050]: I1123 15:02:38.341934 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/caeaf051-b0fe-4fe8-b5e2-730f3d55ffea-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"caeaf051-b0fe-4fe8-b5e2-730f3d55ffea\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 23 15:02:38 crc kubenswrapper[5050]: I1123 15:02:38.342016 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/caeaf051-b0fe-4fe8-b5e2-730f3d55ffea-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"caeaf051-b0fe-4fe8-b5e2-730f3d55ffea\") " pod="openstack/nova-cell1-novncproxy-0"
(UID: \"caeaf051-b0fe-4fe8-b5e2-730f3d55ffea\") " pod="openstack/nova-cell1-novncproxy-0" Nov 23 15:02:38 crc kubenswrapper[5050]: I1123 15:02:38.342063 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e45241bb-3031-428d-aa6b-370eb2da07b0-dns-svc\") pod \"dnsmasq-dns-bccf8f775-vvmp7\" (UID: \"e45241bb-3031-428d-aa6b-370eb2da07b0\") " pod="openstack/dnsmasq-dns-bccf8f775-vvmp7" Nov 23 15:02:38 crc kubenswrapper[5050]: I1123 15:02:38.342081 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e45241bb-3031-428d-aa6b-370eb2da07b0-ovsdbserver-sb\") pod \"dnsmasq-dns-bccf8f775-vvmp7\" (UID: \"e45241bb-3031-428d-aa6b-370eb2da07b0\") " pod="openstack/dnsmasq-dns-bccf8f775-vvmp7" Nov 23 15:02:38 crc kubenswrapper[5050]: I1123 15:02:38.343209 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e45241bb-3031-428d-aa6b-370eb2da07b0-ovsdbserver-sb\") pod \"dnsmasq-dns-bccf8f775-vvmp7\" (UID: \"e45241bb-3031-428d-aa6b-370eb2da07b0\") " pod="openstack/dnsmasq-dns-bccf8f775-vvmp7" Nov 23 15:02:38 crc kubenswrapper[5050]: I1123 15:02:38.344088 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e45241bb-3031-428d-aa6b-370eb2da07b0-ovsdbserver-nb\") pod \"dnsmasq-dns-bccf8f775-vvmp7\" (UID: \"e45241bb-3031-428d-aa6b-370eb2da07b0\") " pod="openstack/dnsmasq-dns-bccf8f775-vvmp7" Nov 23 15:02:38 crc kubenswrapper[5050]: I1123 15:02:38.345381 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e45241bb-3031-428d-aa6b-370eb2da07b0-config\") pod \"dnsmasq-dns-bccf8f775-vvmp7\" (UID: \"e45241bb-3031-428d-aa6b-370eb2da07b0\") " pod="openstack/dnsmasq-dns-bccf8f775-vvmp7" Nov 23 15:02:38 crc kubenswrapper[5050]: I1123 15:02:38.345929 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e45241bb-3031-428d-aa6b-370eb2da07b0-dns-swift-storage-0\") pod \"dnsmasq-dns-bccf8f775-vvmp7\" (UID: \"e45241bb-3031-428d-aa6b-370eb2da07b0\") " pod="openstack/dnsmasq-dns-bccf8f775-vvmp7" Nov 23 15:02:38 crc kubenswrapper[5050]: I1123 15:02:38.346038 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e45241bb-3031-428d-aa6b-370eb2da07b0-dns-svc\") pod \"dnsmasq-dns-bccf8f775-vvmp7\" (UID: \"e45241bb-3031-428d-aa6b-370eb2da07b0\") " pod="openstack/dnsmasq-dns-bccf8f775-vvmp7" Nov 23 15:02:38 crc kubenswrapper[5050]: I1123 15:02:38.353930 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/caeaf051-b0fe-4fe8-b5e2-730f3d55ffea-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"caeaf051-b0fe-4fe8-b5e2-730f3d55ffea\") " pod="openstack/nova-cell1-novncproxy-0" Nov 23 15:02:38 crc kubenswrapper[5050]: I1123 15:02:38.356134 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/caeaf051-b0fe-4fe8-b5e2-730f3d55ffea-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"caeaf051-b0fe-4fe8-b5e2-730f3d55ffea\") " pod="openstack/nova-cell1-novncproxy-0" Nov 23 15:02:38 crc kubenswrapper[5050]: I1123 15:02:38.368591 5050 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4hh96\" (UniqueName: \"kubernetes.io/projected/e45241bb-3031-428d-aa6b-370eb2da07b0-kube-api-access-4hh96\") pod \"dnsmasq-dns-bccf8f775-vvmp7\" (UID: \"e45241bb-3031-428d-aa6b-370eb2da07b0\") " pod="openstack/dnsmasq-dns-bccf8f775-vvmp7" Nov 23 15:02:38 crc kubenswrapper[5050]: I1123 15:02:38.371117 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2tjhd\" (UniqueName: \"kubernetes.io/projected/caeaf051-b0fe-4fe8-b5e2-730f3d55ffea-kube-api-access-2tjhd\") pod \"nova-cell1-novncproxy-0\" (UID: \"caeaf051-b0fe-4fe8-b5e2-730f3d55ffea\") " pod="openstack/nova-cell1-novncproxy-0" Nov 23 15:02:38 crc kubenswrapper[5050]: I1123 15:02:38.518045 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Nov 23 15:02:38 crc kubenswrapper[5050]: I1123 15:02:38.518586 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 23 15:02:38 crc kubenswrapper[5050]: I1123 15:02:38.555796 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 23 15:02:38 crc kubenswrapper[5050]: I1123 15:02:38.561707 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-bccf8f775-vvmp7" Nov 23 15:02:38 crc kubenswrapper[5050]: I1123 15:02:38.679331 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-j52wg"] Nov 23 15:02:38 crc kubenswrapper[5050]: I1123 15:02:38.889129 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 23 15:02:38 crc kubenswrapper[5050]: I1123 15:02:38.993644 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-db-sync-lwv2w"] Nov 23 15:02:38 crc kubenswrapper[5050]: I1123 15:02:38.995194 5050 util.go:30] "No sandbox for pod can be found. 
Nov 23 15:02:39 crc kubenswrapper[5050]: I1123 15:02:39.002260 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data"
Nov 23 15:02:39 crc kubenswrapper[5050]: I1123 15:02:39.002502 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-scripts"
Nov 23 15:02:39 crc kubenswrapper[5050]: I1123 15:02:39.009512 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-lwv2w"]
Nov 23 15:02:39 crc kubenswrapper[5050]: I1123 15:02:39.028156 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"]
Nov 23 15:02:39 crc kubenswrapper[5050]: W1123 15:02:39.049808 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda3584331_f147_4f05_9652_397c75f881db.slice/crio-bca61fd1fc407cff9e25428cf7595b1190554cdcd8dc40953f61c0223893c2fc WatchSource:0}: Error finding container bca61fd1fc407cff9e25428cf7595b1190554cdcd8dc40953f61c0223893c2fc: Status 404 returned error can't find the container with id bca61fd1fc407cff9e25428cf7595b1190554cdcd8dc40953f61c0223893c2fc
Nov 23 15:02:39 crc kubenswrapper[5050]: I1123 15:02:39.172903 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dkvs2\" (UniqueName: \"kubernetes.io/projected/8d6abc54-c88f-403a-b9ca-8645646c6356-kube-api-access-dkvs2\") pod \"nova-cell1-conductor-db-sync-lwv2w\" (UID: \"8d6abc54-c88f-403a-b9ca-8645646c6356\") " pod="openstack/nova-cell1-conductor-db-sync-lwv2w"
Nov 23 15:02:39 crc kubenswrapper[5050]: I1123 15:02:39.173433 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8d6abc54-c88f-403a-b9ca-8645646c6356-config-data\") pod \"nova-cell1-conductor-db-sync-lwv2w\" (UID: \"8d6abc54-c88f-403a-b9ca-8645646c6356\") " pod="openstack/nova-cell1-conductor-db-sync-lwv2w"
Nov 23 15:02:39 crc kubenswrapper[5050]: I1123 15:02:39.173596 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8d6abc54-c88f-403a-b9ca-8645646c6356-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-lwv2w\" (UID: \"8d6abc54-c88f-403a-b9ca-8645646c6356\") " pod="openstack/nova-cell1-conductor-db-sync-lwv2w"
Nov 23 15:02:39 crc kubenswrapper[5050]: I1123 15:02:39.173630 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8d6abc54-c88f-403a-b9ca-8645646c6356-scripts\") pod \"nova-cell1-conductor-db-sync-lwv2w\" (UID: \"8d6abc54-c88f-403a-b9ca-8645646c6356\") " pod="openstack/nova-cell1-conductor-db-sync-lwv2w"
Nov 23 15:02:39 crc kubenswrapper[5050]: I1123 15:02:39.178236 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"]
Nov 23 15:02:39 crc kubenswrapper[5050]: W1123 15:02:39.185680 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod42b8cea2_5a21_47d1_a396_adf85162c112.slice/crio-b8fa462bd3a94d9c57f70f7d28f33fedf6ab89f8c31978ca99a889ee0419da59 WatchSource:0}: Error finding container b8fa462bd3a94d9c57f70f7d28f33fedf6ab89f8c31978ca99a889ee0419da59: Status 404 returned error can't find the container with id b8fa462bd3a94d9c57f70f7d28f33fedf6ab89f8c31978ca99a889ee0419da59
Nov 23 15:02:39 crc kubenswrapper[5050]: I1123 15:02:39.276081 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8d6abc54-c88f-403a-b9ca-8645646c6356-scripts\") pod \"nova-cell1-conductor-db-sync-lwv2w\" (UID: \"8d6abc54-c88f-403a-b9ca-8645646c6356\") " pod="openstack/nova-cell1-conductor-db-sync-lwv2w"
Nov 23 15:02:39 crc kubenswrapper[5050]: I1123 15:02:39.276156 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dkvs2\" (UniqueName: \"kubernetes.io/projected/8d6abc54-c88f-403a-b9ca-8645646c6356-kube-api-access-dkvs2\") pod \"nova-cell1-conductor-db-sync-lwv2w\" (UID: \"8d6abc54-c88f-403a-b9ca-8645646c6356\") " pod="openstack/nova-cell1-conductor-db-sync-lwv2w"
Nov 23 15:02:39 crc kubenswrapper[5050]: I1123 15:02:39.276275 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8d6abc54-c88f-403a-b9ca-8645646c6356-config-data\") pod \"nova-cell1-conductor-db-sync-lwv2w\" (UID: \"8d6abc54-c88f-403a-b9ca-8645646c6356\") " pod="openstack/nova-cell1-conductor-db-sync-lwv2w"
Nov 23 15:02:39 crc kubenswrapper[5050]: I1123 15:02:39.276319 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8d6abc54-c88f-403a-b9ca-8645646c6356-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-lwv2w\" (UID: \"8d6abc54-c88f-403a-b9ca-8645646c6356\") " pod="openstack/nova-cell1-conductor-db-sync-lwv2w"
Nov 23 15:02:39 crc kubenswrapper[5050]: I1123 15:02:39.284985 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8d6abc54-c88f-403a-b9ca-8645646c6356-scripts\") pod \"nova-cell1-conductor-db-sync-lwv2w\" (UID: \"8d6abc54-c88f-403a-b9ca-8645646c6356\") " pod="openstack/nova-cell1-conductor-db-sync-lwv2w"
Nov 23 15:02:39 crc kubenswrapper[5050]: I1123 15:02:39.285584 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8d6abc54-c88f-403a-b9ca-8645646c6356-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-lwv2w\" (UID: \"8d6abc54-c88f-403a-b9ca-8645646c6356\") " pod="openstack/nova-cell1-conductor-db-sync-lwv2w"
Nov 23 15:02:39 crc kubenswrapper[5050]: I1123 15:02:39.287642 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8d6abc54-c88f-403a-b9ca-8645646c6356-config-data\") pod \"nova-cell1-conductor-db-sync-lwv2w\" (UID: \"8d6abc54-c88f-403a-b9ca-8645646c6356\") " pod="openstack/nova-cell1-conductor-db-sync-lwv2w"
Nov 23 15:02:39 crc kubenswrapper[5050]: I1123 15:02:39.298051 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dkvs2\" (UniqueName: \"kubernetes.io/projected/8d6abc54-c88f-403a-b9ca-8645646c6356-kube-api-access-dkvs2\") pod \"nova-cell1-conductor-db-sync-lwv2w\" (UID: \"8d6abc54-c88f-403a-b9ca-8645646c6356\") " pod="openstack/nova-cell1-conductor-db-sync-lwv2w"
Nov 23 15:02:39 crc kubenswrapper[5050]: I1123 15:02:39.327163 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Nov 23 15:02:39 crc kubenswrapper[5050]: W1123 15:02:39.333942 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podcaeaf051_b0fe_4fe8_b5e2_730f3d55ffea.slice/crio-a52f15da27d000deb369c493ae0415039302bc75233beeb595823a6f5d4986f7 WatchSource:0}: Error finding container a52f15da27d000deb369c493ae0415039302bc75233beeb595823a6f5d4986f7: Status 404 returned error can't find the container with id a52f15da27d000deb369c493ae0415039302bc75233beeb595823a6f5d4986f7
Nov 23 15:02:39 crc kubenswrapper[5050]: I1123 15:02:39.334358 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-lwv2w"
Nov 23 15:02:39 crc kubenswrapper[5050]: I1123 15:02:39.343513 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-bccf8f775-vvmp7"]
Nov 23 15:02:39 crc kubenswrapper[5050]: I1123 15:02:39.602924 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-j52wg" event={"ID":"948389bb-db2c-40c2-a458-b54cb0fa94ee","Type":"ContainerStarted","Data":"dcf12c08d4bf1811af924b4b17ab832619b6e87539c56aca5f56d1ebdc5f3fa1"}
Nov 23 15:02:39 crc kubenswrapper[5050]: I1123 15:02:39.603409 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-j52wg" event={"ID":"948389bb-db2c-40c2-a458-b54cb0fa94ee","Type":"ContainerStarted","Data":"837ae0a119ac7f11e15f136249846bd454790503c96143267bf8829657cf0658"}
Nov 23 15:02:39 crc kubenswrapper[5050]: I1123 15:02:39.603427 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"42b8cea2-5a21-47d1-a396-adf85162c112","Type":"ContainerStarted","Data":"b8fa462bd3a94d9c57f70f7d28f33fedf6ab89f8c31978ca99a889ee0419da59"}
Nov 23 15:02:39 crc kubenswrapper[5050]: I1123 15:02:39.603457 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bccf8f775-vvmp7" event={"ID":"e45241bb-3031-428d-aa6b-370eb2da07b0","Type":"ContainerStarted","Data":"04c5842ec69e7f0d00218bb7f1a31b2cd5f89f5941377d52d8aac40411cd959b"}
Nov 23 15:02:39 crc kubenswrapper[5050]: I1123 15:02:39.604396 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"a3584331-f147-4f05-9652-397c75f881db","Type":"ContainerStarted","Data":"bca61fd1fc407cff9e25428cf7595b1190554cdcd8dc40953f61c0223893c2fc"}
Nov 23 15:02:39 crc kubenswrapper[5050]: I1123 15:02:39.607426 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"275dac05-e3bc-4f6d-ab7b-f95acbb50692","Type":"ContainerStarted","Data":"85831ce8008f5c8fbd7e39fe5ca39a0c9902a0df794d35068c61750e2b8c35de"}
Nov 23 15:02:39 crc kubenswrapper[5050]: I1123 15:02:39.617350 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"caeaf051-b0fe-4fe8-b5e2-730f3d55ffea","Type":"ContainerStarted","Data":"a52f15da27d000deb369c493ae0415039302bc75233beeb595823a6f5d4986f7"}
Nov 23 15:02:39 crc kubenswrapper[5050]: I1123 15:02:39.623677 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-cell-mapping-j52wg" podStartSLOduration=2.623650124 podStartE2EDuration="2.623650124s" podCreationTimestamp="2025-11-23 15:02:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 15:02:39.617785857 +0000 UTC m=+1254.784782342" watchObservedRunningTime="2025-11-23 15:02:39.623650124 +0000 UTC m=+1254.790646609"
Nov 23 15:02:39 crc kubenswrapper[5050]: I1123 15:02:39.901607 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-lwv2w"]
Nov 23 15:02:40 crc kubenswrapper[5050]: I1123 15:02:40.637289 5050 generic.go:334] "Generic (PLEG): container finished" podID="e45241bb-3031-428d-aa6b-370eb2da07b0" containerID="3574580b7c087b3a9edf9ced911fd3c8c37ab2156ac82e545e63949cf9900fe4" exitCode=0
Nov 23 15:02:40 crc kubenswrapper[5050]: I1123 15:02:40.637863 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bccf8f775-vvmp7" event={"ID":"e45241bb-3031-428d-aa6b-370eb2da07b0","Type":"ContainerDied","Data":"3574580b7c087b3a9edf9ced911fd3c8c37ab2156ac82e545e63949cf9900fe4"}
Nov 23 15:02:40 crc kubenswrapper[5050]: I1123 15:02:40.643745 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-lwv2w" event={"ID":"8d6abc54-c88f-403a-b9ca-8645646c6356","Type":"ContainerStarted","Data":"4addc92803e0a7fa069a404a032c72c2edc38d23a3ca83cea0920a981320b2c9"}
Nov 23 15:02:40 crc kubenswrapper[5050]: I1123 15:02:40.644072 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-lwv2w" event={"ID":"8d6abc54-c88f-403a-b9ca-8645646c6356","Type":"ContainerStarted","Data":"de446882d02113cccd961ec8838df1b1ece6d9f0245a8db902801f0cd4901012"}
Nov 23 15:02:40 crc kubenswrapper[5050]: I1123 15:02:40.701497 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-db-sync-lwv2w" podStartSLOduration=2.701474565 podStartE2EDuration="2.701474565s" podCreationTimestamp="2025-11-23 15:02:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 15:02:40.683743041 +0000 UTC m=+1255.850739526" watchObservedRunningTime="2025-11-23 15:02:40.701474565 +0000 UTC m=+1255.868471050"
Nov 23 15:02:42 crc kubenswrapper[5050]: I1123 15:02:42.609594 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"]
Nov 23 15:02:42 crc kubenswrapper[5050]: I1123 15:02:42.633759 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Nov 23 15:02:43 crc kubenswrapper[5050]: I1123 15:02:43.686257 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"caeaf051-b0fe-4fe8-b5e2-730f3d55ffea","Type":"ContainerStarted","Data":"9f803b54b404ac268f060fd8bd8081ce74ab291b70f2179751f9593b66f757ed"}
Nov 23 15:02:43 crc kubenswrapper[5050]: I1123 15:02:43.686402 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-novncproxy-0" podUID="caeaf051-b0fe-4fe8-b5e2-730f3d55ffea" containerName="nova-cell1-novncproxy-novncproxy" containerID="cri-o://9f803b54b404ac268f060fd8bd8081ce74ab291b70f2179751f9593b66f757ed" gracePeriod=30
Nov 23 15:02:43 crc kubenswrapper[5050]: I1123 15:02:43.697924 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"42b8cea2-5a21-47d1-a396-adf85162c112","Type":"ContainerStarted","Data":"968328401f917e84f63fc1901a5525c9adb36802468f022bf5d3019d9d98bf10"}
Nov 23 15:02:43 crc kubenswrapper[5050]: I1123 15:02:43.704280 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bccf8f775-vvmp7" event={"ID":"e45241bb-3031-428d-aa6b-370eb2da07b0","Type":"ContainerStarted","Data":"2f9d72ef841a0d2c3f7445d31d2346eca403597faa2f4d79199c4bee1a01cdb9"}
Nov 23 15:02:43 crc kubenswrapper[5050]: I1123 15:02:43.705386 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-bccf8f775-vvmp7"
Nov 23 15:02:43 crc kubenswrapper[5050]: I1123 15:02:43.714978 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"a3584331-f147-4f05-9652-397c75f881db","Type":"ContainerStarted","Data":"4697a6223c18966f8b2511b5be8e28ee2b37cdeaaeff04a38673897b95bcd990"}
Nov 23 15:02:43 crc kubenswrapper[5050]: I1123 15:02:43.731997 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"275dac05-e3bc-4f6d-ab7b-f95acbb50692","Type":"ContainerStarted","Data":"d2f15cce3da3c5052a0fcc24b40f9ff904f283f118a7418f605a1e21276839e4"}
Nov 23 15:02:43 crc kubenswrapper[5050]: I1123 15:02:43.743288 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.019908494 podStartE2EDuration="5.743265182s" podCreationTimestamp="2025-11-23 15:02:38 +0000 UTC" firstStartedPulling="2025-11-23 15:02:39.337224503 +0000 UTC m=+1254.504220988" lastFinishedPulling="2025-11-23 15:02:43.060581191 +0000 UTC m=+1258.227577676" observedRunningTime="2025-11-23 15:02:43.725116286 +0000 UTC m=+1258.892112771" watchObservedRunningTime="2025-11-23 15:02:43.743265182 +0000 UTC m=+1258.910261667"
Nov 23 15:02:43 crc kubenswrapper[5050]: I1123 15:02:43.750923 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.7544652899999997 podStartE2EDuration="6.750896468s" podCreationTimestamp="2025-11-23 15:02:37 +0000 UTC" firstStartedPulling="2025-11-23 15:02:39.062691401 +0000 UTC m=+1254.229687886" lastFinishedPulling="2025-11-23 15:02:43.059122579 +0000 UTC m=+1258.226119064" observedRunningTime="2025-11-23 15:02:43.741050679 +0000 UTC m=+1258.908047154" watchObservedRunningTime="2025-11-23 15:02:43.750896468 +0000 UTC m=+1258.917892953"
Nov 23 15:02:43 crc kubenswrapper[5050]: I1123 15:02:43.767359 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-bccf8f775-vvmp7" podStartSLOduration=5.767334486 podStartE2EDuration="5.767334486s" podCreationTimestamp="2025-11-23 15:02:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 15:02:43.759135713 +0000 UTC m=+1258.926132198" watchObservedRunningTime="2025-11-23 15:02:43.767334486 +0000 UTC m=+1258.934330971"
Nov 23 15:02:43 crc kubenswrapper[5050]: I1123 15:02:43.790407 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.643470496 podStartE2EDuration="6.790383231s" podCreationTimestamp="2025-11-23 15:02:37 +0000 UTC" firstStartedPulling="2025-11-23 15:02:38.915355834 +0000 UTC m=+1254.082352339" lastFinishedPulling="2025-11-23 15:02:43.062268589 +0000 UTC m=+1258.229265074" observedRunningTime="2025-11-23 15:02:43.7790872 +0000 UTC m=+1258.946083695" watchObservedRunningTime="2025-11-23 15:02:43.790383231 +0000 UTC m=+1258.957379716"
Nov 23 15:02:44 crc kubenswrapper[5050]: I1123 15:02:44.744114 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"42b8cea2-5a21-47d1-a396-adf85162c112","Type":"ContainerStarted","Data":"326fe87bfaeb7a2e07ab165fc0a2799be4b74ac5195e58626f07c6170d844c08"}
Nov 23 15:02:44 crc kubenswrapper[5050]: I1123 15:02:44.744363 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="42b8cea2-5a21-47d1-a396-adf85162c112" containerName="nova-metadata-log" containerID="cri-o://968328401f917e84f63fc1901a5525c9adb36802468f022bf5d3019d9d98bf10" gracePeriod=30
Nov 23 15:02:44 crc kubenswrapper[5050]: I1123 15:02:44.745242 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="42b8cea2-5a21-47d1-a396-adf85162c112" containerName="nova-metadata-metadata" containerID="cri-o://326fe87bfaeb7a2e07ab165fc0a2799be4b74ac5195e58626f07c6170d844c08" gracePeriod=30
Nov 23 15:02:44 crc kubenswrapper[5050]: I1123 15:02:44.755810 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"275dac05-e3bc-4f6d-ab7b-f95acbb50692","Type":"ContainerStarted","Data":"97453c1c0289846eb214645e2ffc35025120692c5b4ce5d3b7e6b518b04dba89"}
Nov 23 15:02:44 crc kubenswrapper[5050]: I1123 15:02:44.791833 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=3.927637472 podStartE2EDuration="7.791799131s" podCreationTimestamp="2025-11-23 15:02:37 +0000 UTC" firstStartedPulling="2025-11-23 15:02:39.19496393 +0000 UTC m=+1254.361960415" lastFinishedPulling="2025-11-23 15:02:43.059125579 +0000 UTC m=+1258.226122074" observedRunningTime="2025-11-23 15:02:44.781726065 +0000 UTC m=+1259.948722570" watchObservedRunningTime="2025-11-23 15:02:44.791799131 +0000 UTC m=+1259.958795626"
Nov 23 15:02:44 crc kubenswrapper[5050]: I1123 15:02:44.832229 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"]
Nov 23 15:02:44 crc kubenswrapper[5050]: I1123 15:02:44.832841 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/kube-state-metrics-0" podUID="3782bc88-94a6-4434-bd20-a48caad31d31" containerName="kube-state-metrics" containerID="cri-o://a325cd944d3e8806653ffd40ad1ee7a791f66ddbf95d52aae6b757339a46df2c" gracePeriod=30
Nov 23 15:02:45 crc kubenswrapper[5050]: I1123 15:02:45.435912 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Nov 23 15:02:45 crc kubenswrapper[5050]: I1123 15:02:45.441015 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0"
Nov 23 15:02:45 crc kubenswrapper[5050]: I1123 15:02:45.537035 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pcr62\" (UniqueName: \"kubernetes.io/projected/42b8cea2-5a21-47d1-a396-adf85162c112-kube-api-access-pcr62\") pod \"42b8cea2-5a21-47d1-a396-adf85162c112\" (UID: \"42b8cea2-5a21-47d1-a396-adf85162c112\") "
Nov 23 15:02:45 crc kubenswrapper[5050]: I1123 15:02:45.537422 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/42b8cea2-5a21-47d1-a396-adf85162c112-logs\") pod \"42b8cea2-5a21-47d1-a396-adf85162c112\" (UID: \"42b8cea2-5a21-47d1-a396-adf85162c112\") "
Nov 23 15:02:45 crc kubenswrapper[5050]: I1123 15:02:45.537488 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/42b8cea2-5a21-47d1-a396-adf85162c112-config-data\") pod \"42b8cea2-5a21-47d1-a396-adf85162c112\" (UID: \"42b8cea2-5a21-47d1-a396-adf85162c112\") "
Nov 23 15:02:45 crc kubenswrapper[5050]: I1123 15:02:45.537545 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d8dj5\" (UniqueName: \"kubernetes.io/projected/3782bc88-94a6-4434-bd20-a48caad31d31-kube-api-access-d8dj5\") pod \"3782bc88-94a6-4434-bd20-a48caad31d31\" (UID: \"3782bc88-94a6-4434-bd20-a48caad31d31\") "
Nov 23 15:02:45 crc kubenswrapper[5050]: I1123 15:02:45.537721 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/42b8cea2-5a21-47d1-a396-adf85162c112-combined-ca-bundle\") pod \"42b8cea2-5a21-47d1-a396-adf85162c112\" (UID: \"42b8cea2-5a21-47d1-a396-adf85162c112\") "
Nov 23 15:02:45 crc kubenswrapper[5050]: I1123 15:02:45.537772 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/42b8cea2-5a21-47d1-a396-adf85162c112-logs" (OuterVolumeSpecName: "logs") pod "42b8cea2-5a21-47d1-a396-adf85162c112" (UID: "42b8cea2-5a21-47d1-a396-adf85162c112"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 23 15:02:45 crc kubenswrapper[5050]: I1123 15:02:45.538506 5050 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/42b8cea2-5a21-47d1-a396-adf85162c112-logs\") on node \"crc\" DevicePath \"\""
Nov 23 15:02:45 crc kubenswrapper[5050]: I1123 15:02:45.547635 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3782bc88-94a6-4434-bd20-a48caad31d31-kube-api-access-d8dj5" (OuterVolumeSpecName: "kube-api-access-d8dj5") pod "3782bc88-94a6-4434-bd20-a48caad31d31" (UID: "3782bc88-94a6-4434-bd20-a48caad31d31"). InnerVolumeSpecName "kube-api-access-d8dj5". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 23 15:02:45 crc kubenswrapper[5050]: I1123 15:02:45.569436 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/42b8cea2-5a21-47d1-a396-adf85162c112-kube-api-access-pcr62" (OuterVolumeSpecName: "kube-api-access-pcr62") pod "42b8cea2-5a21-47d1-a396-adf85162c112" (UID: "42b8cea2-5a21-47d1-a396-adf85162c112"). InnerVolumeSpecName "kube-api-access-pcr62". PluginName "kubernetes.io/projected", VolumeGidValue ""
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:02:45 crc kubenswrapper[5050]: I1123 15:02:45.638125 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/42b8cea2-5a21-47d1-a396-adf85162c112-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "42b8cea2-5a21-47d1-a396-adf85162c112" (UID: "42b8cea2-5a21-47d1-a396-adf85162c112"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:02:45 crc kubenswrapper[5050]: I1123 15:02:45.641684 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d8dj5\" (UniqueName: \"kubernetes.io/projected/3782bc88-94a6-4434-bd20-a48caad31d31-kube-api-access-d8dj5\") on node \"crc\" DevicePath \"\"" Nov 23 15:02:45 crc kubenswrapper[5050]: I1123 15:02:45.641720 5050 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/42b8cea2-5a21-47d1-a396-adf85162c112-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 15:02:45 crc kubenswrapper[5050]: I1123 15:02:45.641731 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pcr62\" (UniqueName: \"kubernetes.io/projected/42b8cea2-5a21-47d1-a396-adf85162c112-kube-api-access-pcr62\") on node \"crc\" DevicePath \"\"" Nov 23 15:02:45 crc kubenswrapper[5050]: I1123 15:02:45.648734 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/42b8cea2-5a21-47d1-a396-adf85162c112-config-data" (OuterVolumeSpecName: "config-data") pod "42b8cea2-5a21-47d1-a396-adf85162c112" (UID: "42b8cea2-5a21-47d1-a396-adf85162c112"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:02:45 crc kubenswrapper[5050]: I1123 15:02:45.744608 5050 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/42b8cea2-5a21-47d1-a396-adf85162c112-config-data\") on node \"crc\" DevicePath \"\"" Nov 23 15:02:45 crc kubenswrapper[5050]: I1123 15:02:45.786555 5050 generic.go:334] "Generic (PLEG): container finished" podID="3782bc88-94a6-4434-bd20-a48caad31d31" containerID="a325cd944d3e8806653ffd40ad1ee7a791f66ddbf95d52aae6b757339a46df2c" exitCode=2 Nov 23 15:02:45 crc kubenswrapper[5050]: I1123 15:02:45.786690 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"3782bc88-94a6-4434-bd20-a48caad31d31","Type":"ContainerDied","Data":"a325cd944d3e8806653ffd40ad1ee7a791f66ddbf95d52aae6b757339a46df2c"} Nov 23 15:02:45 crc kubenswrapper[5050]: I1123 15:02:45.786717 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 23 15:02:45 crc kubenswrapper[5050]: I1123 15:02:45.786753 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"3782bc88-94a6-4434-bd20-a48caad31d31","Type":"ContainerDied","Data":"bf1e4171575efaa26d2cb4a06de28504e1cfb9153b7ac0e02def9b607bb0f494"} Nov 23 15:02:45 crc kubenswrapper[5050]: I1123 15:02:45.786784 5050 scope.go:117] "RemoveContainer" containerID="a325cd944d3e8806653ffd40ad1ee7a791f66ddbf95d52aae6b757339a46df2c" Nov 23 15:02:45 crc kubenswrapper[5050]: I1123 15:02:45.790072 5050 generic.go:334] "Generic (PLEG): container finished" podID="42b8cea2-5a21-47d1-a396-adf85162c112" containerID="326fe87bfaeb7a2e07ab165fc0a2799be4b74ac5195e58626f07c6170d844c08" exitCode=0 Nov 23 15:02:45 crc kubenswrapper[5050]: I1123 15:02:45.790104 5050 generic.go:334] "Generic (PLEG): container finished" podID="42b8cea2-5a21-47d1-a396-adf85162c112" containerID="968328401f917e84f63fc1901a5525c9adb36802468f022bf5d3019d9d98bf10" exitCode=143 Nov 23 15:02:45 crc kubenswrapper[5050]: I1123 15:02:45.790990 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 23 15:02:45 crc kubenswrapper[5050]: I1123 15:02:45.791856 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"42b8cea2-5a21-47d1-a396-adf85162c112","Type":"ContainerDied","Data":"326fe87bfaeb7a2e07ab165fc0a2799be4b74ac5195e58626f07c6170d844c08"} Nov 23 15:02:45 crc kubenswrapper[5050]: I1123 15:02:45.791926 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"42b8cea2-5a21-47d1-a396-adf85162c112","Type":"ContainerDied","Data":"968328401f917e84f63fc1901a5525c9adb36802468f022bf5d3019d9d98bf10"} Nov 23 15:02:45 crc kubenswrapper[5050]: I1123 15:02:45.791947 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"42b8cea2-5a21-47d1-a396-adf85162c112","Type":"ContainerDied","Data":"b8fa462bd3a94d9c57f70f7d28f33fedf6ab89f8c31978ca99a889ee0419da59"} Nov 23 15:02:45 crc kubenswrapper[5050]: I1123 15:02:45.815217 5050 scope.go:117] "RemoveContainer" containerID="a325cd944d3e8806653ffd40ad1ee7a791f66ddbf95d52aae6b757339a46df2c" Nov 23 15:02:45 crc kubenswrapper[5050]: E1123 15:02:45.815839 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a325cd944d3e8806653ffd40ad1ee7a791f66ddbf95d52aae6b757339a46df2c\": container with ID starting with a325cd944d3e8806653ffd40ad1ee7a791f66ddbf95d52aae6b757339a46df2c not found: ID does not exist" containerID="a325cd944d3e8806653ffd40ad1ee7a791f66ddbf95d52aae6b757339a46df2c" Nov 23 15:02:45 crc kubenswrapper[5050]: I1123 15:02:45.815883 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a325cd944d3e8806653ffd40ad1ee7a791f66ddbf95d52aae6b757339a46df2c"} err="failed to get container status \"a325cd944d3e8806653ffd40ad1ee7a791f66ddbf95d52aae6b757339a46df2c\": rpc error: code = NotFound desc = could not find container \"a325cd944d3e8806653ffd40ad1ee7a791f66ddbf95d52aae6b757339a46df2c\": container with ID starting with a325cd944d3e8806653ffd40ad1ee7a791f66ddbf95d52aae6b757339a46df2c not found: ID does not exist" Nov 23 15:02:45 crc kubenswrapper[5050]: I1123 15:02:45.815913 5050 scope.go:117] "RemoveContainer" containerID="326fe87bfaeb7a2e07ab165fc0a2799be4b74ac5195e58626f07c6170d844c08" Nov 
23 15:02:45 crc kubenswrapper[5050]: I1123 15:02:45.831276 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 23 15:02:45 crc kubenswrapper[5050]: I1123 15:02:45.852394 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 23 15:02:45 crc kubenswrapper[5050]: I1123 15:02:45.860229 5050 scope.go:117] "RemoveContainer" containerID="968328401f917e84f63fc1901a5525c9adb36802468f022bf5d3019d9d98bf10" Nov 23 15:02:45 crc kubenswrapper[5050]: I1123 15:02:45.877395 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Nov 23 15:02:45 crc kubenswrapper[5050]: E1123 15:02:45.878249 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3782bc88-94a6-4434-bd20-a48caad31d31" containerName="kube-state-metrics" Nov 23 15:02:45 crc kubenswrapper[5050]: I1123 15:02:45.878346 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="3782bc88-94a6-4434-bd20-a48caad31d31" containerName="kube-state-metrics" Nov 23 15:02:45 crc kubenswrapper[5050]: E1123 15:02:45.878432 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="42b8cea2-5a21-47d1-a396-adf85162c112" containerName="nova-metadata-metadata" Nov 23 15:02:45 crc kubenswrapper[5050]: I1123 15:02:45.878512 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="42b8cea2-5a21-47d1-a396-adf85162c112" containerName="nova-metadata-metadata" Nov 23 15:02:45 crc kubenswrapper[5050]: E1123 15:02:45.878611 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="42b8cea2-5a21-47d1-a396-adf85162c112" containerName="nova-metadata-log" Nov 23 15:02:45 crc kubenswrapper[5050]: I1123 15:02:45.878677 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="42b8cea2-5a21-47d1-a396-adf85162c112" containerName="nova-metadata-log" Nov 23 15:02:45 crc kubenswrapper[5050]: I1123 15:02:45.878946 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="42b8cea2-5a21-47d1-a396-adf85162c112" containerName="nova-metadata-log" Nov 23 15:02:45 crc kubenswrapper[5050]: I1123 15:02:45.879014 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="42b8cea2-5a21-47d1-a396-adf85162c112" containerName="nova-metadata-metadata" Nov 23 15:02:45 crc kubenswrapper[5050]: I1123 15:02:45.879086 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="3782bc88-94a6-4434-bd20-a48caad31d31" containerName="kube-state-metrics" Nov 23 15:02:45 crc kubenswrapper[5050]: I1123 15:02:45.880028 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 23 15:02:45 crc kubenswrapper[5050]: I1123 15:02:45.884037 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-kube-state-metrics-svc" Nov 23 15:02:45 crc kubenswrapper[5050]: I1123 15:02:45.902994 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 23 15:02:45 crc kubenswrapper[5050]: I1123 15:02:45.907759 5050 scope.go:117] "RemoveContainer" containerID="326fe87bfaeb7a2e07ab165fc0a2799be4b74ac5195e58626f07c6170d844c08" Nov 23 15:02:45 crc kubenswrapper[5050]: E1123 15:02:45.909397 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"326fe87bfaeb7a2e07ab165fc0a2799be4b74ac5195e58626f07c6170d844c08\": container with ID starting with 326fe87bfaeb7a2e07ab165fc0a2799be4b74ac5195e58626f07c6170d844c08 not found: ID does not exist" containerID="326fe87bfaeb7a2e07ab165fc0a2799be4b74ac5195e58626f07c6170d844c08" Nov 23 15:02:45 crc kubenswrapper[5050]: I1123 15:02:45.909450 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"326fe87bfaeb7a2e07ab165fc0a2799be4b74ac5195e58626f07c6170d844c08"} err="failed to get container status \"326fe87bfaeb7a2e07ab165fc0a2799be4b74ac5195e58626f07c6170d844c08\": rpc error: code = NotFound desc = could not find container \"326fe87bfaeb7a2e07ab165fc0a2799be4b74ac5195e58626f07c6170d844c08\": container with ID starting with 326fe87bfaeb7a2e07ab165fc0a2799be4b74ac5195e58626f07c6170d844c08 not found: ID does not exist" Nov 23 15:02:45 crc kubenswrapper[5050]: I1123 15:02:45.909480 5050 scope.go:117] "RemoveContainer" containerID="968328401f917e84f63fc1901a5525c9adb36802468f022bf5d3019d9d98bf10" Nov 23 15:02:45 crc kubenswrapper[5050]: E1123 15:02:45.909726 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"968328401f917e84f63fc1901a5525c9adb36802468f022bf5d3019d9d98bf10\": container with ID starting with 968328401f917e84f63fc1901a5525c9adb36802468f022bf5d3019d9d98bf10 not found: ID does not exist" containerID="968328401f917e84f63fc1901a5525c9adb36802468f022bf5d3019d9d98bf10" Nov 23 15:02:45 crc kubenswrapper[5050]: I1123 15:02:45.909753 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"968328401f917e84f63fc1901a5525c9adb36802468f022bf5d3019d9d98bf10"} err="failed to get container status \"968328401f917e84f63fc1901a5525c9adb36802468f022bf5d3019d9d98bf10\": rpc error: code = NotFound desc = could not find container \"968328401f917e84f63fc1901a5525c9adb36802468f022bf5d3019d9d98bf10\": container with ID starting with 968328401f917e84f63fc1901a5525c9adb36802468f022bf5d3019d9d98bf10 not found: ID does not exist" Nov 23 15:02:45 crc kubenswrapper[5050]: I1123 15:02:45.909772 5050 scope.go:117] "RemoveContainer" containerID="326fe87bfaeb7a2e07ab165fc0a2799be4b74ac5195e58626f07c6170d844c08" Nov 23 15:02:45 crc kubenswrapper[5050]: I1123 15:02:45.910505 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"326fe87bfaeb7a2e07ab165fc0a2799be4b74ac5195e58626f07c6170d844c08"} err="failed to get container status \"326fe87bfaeb7a2e07ab165fc0a2799be4b74ac5195e58626f07c6170d844c08\": rpc error: code = NotFound desc = could not find container \"326fe87bfaeb7a2e07ab165fc0a2799be4b74ac5195e58626f07c6170d844c08\": container with ID starting with 
326fe87bfaeb7a2e07ab165fc0a2799be4b74ac5195e58626f07c6170d844c08 not found: ID does not exist" Nov 23 15:02:45 crc kubenswrapper[5050]: I1123 15:02:45.910531 5050 scope.go:117] "RemoveContainer" containerID="968328401f917e84f63fc1901a5525c9adb36802468f022bf5d3019d9d98bf10" Nov 23 15:02:45 crc kubenswrapper[5050]: I1123 15:02:45.910725 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"968328401f917e84f63fc1901a5525c9adb36802468f022bf5d3019d9d98bf10"} err="failed to get container status \"968328401f917e84f63fc1901a5525c9adb36802468f022bf5d3019d9d98bf10\": rpc error: code = NotFound desc = could not find container \"968328401f917e84f63fc1901a5525c9adb36802468f022bf5d3019d9d98bf10\": container with ID starting with 968328401f917e84f63fc1901a5525c9adb36802468f022bf5d3019d9d98bf10 not found: ID does not exist" Nov 23 15:02:45 crc kubenswrapper[5050]: I1123 15:02:45.917779 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"kube-state-metrics-tls-config" Nov 23 15:02:45 crc kubenswrapper[5050]: I1123 15:02:45.959838 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/94bbca67-2998-4727-bbe1-95d54277f4aa-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"94bbca67-2998-4727-bbe1-95d54277f4aa\") " pod="openstack/kube-state-metrics-0" Nov 23 15:02:45 crc kubenswrapper[5050]: I1123 15:02:45.960010 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7mrbk\" (UniqueName: \"kubernetes.io/projected/94bbca67-2998-4727-bbe1-95d54277f4aa-kube-api-access-7mrbk\") pod \"kube-state-metrics-0\" (UID: \"94bbca67-2998-4727-bbe1-95d54277f4aa\") " pod="openstack/kube-state-metrics-0" Nov 23 15:02:45 crc kubenswrapper[5050]: I1123 15:02:45.960175 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/94bbca67-2998-4727-bbe1-95d54277f4aa-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"94bbca67-2998-4727-bbe1-95d54277f4aa\") " pod="openstack/kube-state-metrics-0" Nov 23 15:02:45 crc kubenswrapper[5050]: I1123 15:02:45.960211 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/94bbca67-2998-4727-bbe1-95d54277f4aa-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"94bbca67-2998-4727-bbe1-95d54277f4aa\") " pod="openstack/kube-state-metrics-0" Nov 23 15:02:45 crc kubenswrapper[5050]: I1123 15:02:45.989939 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 23 15:02:45 crc kubenswrapper[5050]: I1123 15:02:45.997676 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 23 15:02:46 crc kubenswrapper[5050]: I1123 15:02:46.015026 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 23 15:02:46 crc kubenswrapper[5050]: I1123 15:02:46.021037 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 23 15:02:46 crc kubenswrapper[5050]: I1123 15:02:46.023887 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Nov 23 15:02:46 crc kubenswrapper[5050]: I1123 15:02:46.033681 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 23 15:02:46 crc kubenswrapper[5050]: I1123 15:02:46.033900 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 23 15:02:46 crc kubenswrapper[5050]: I1123 15:02:46.062242 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/94bbca67-2998-4727-bbe1-95d54277f4aa-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"94bbca67-2998-4727-bbe1-95d54277f4aa\") " pod="openstack/kube-state-metrics-0" Nov 23 15:02:46 crc kubenswrapper[5050]: I1123 15:02:46.062617 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/94bbca67-2998-4727-bbe1-95d54277f4aa-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"94bbca67-2998-4727-bbe1-95d54277f4aa\") " pod="openstack/kube-state-metrics-0" Nov 23 15:02:46 crc kubenswrapper[5050]: I1123 15:02:46.062758 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/94bbca67-2998-4727-bbe1-95d54277f4aa-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"94bbca67-2998-4727-bbe1-95d54277f4aa\") " pod="openstack/kube-state-metrics-0" Nov 23 15:02:46 crc kubenswrapper[5050]: I1123 15:02:46.062959 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7mrbk\" (UniqueName: \"kubernetes.io/projected/94bbca67-2998-4727-bbe1-95d54277f4aa-kube-api-access-7mrbk\") pod \"kube-state-metrics-0\" (UID: \"94bbca67-2998-4727-bbe1-95d54277f4aa\") " pod="openstack/kube-state-metrics-0" Nov 23 15:02:46 crc kubenswrapper[5050]: I1123 15:02:46.070619 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/94bbca67-2998-4727-bbe1-95d54277f4aa-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"94bbca67-2998-4727-bbe1-95d54277f4aa\") " pod="openstack/kube-state-metrics-0" Nov 23 15:02:46 crc kubenswrapper[5050]: I1123 15:02:46.073074 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/94bbca67-2998-4727-bbe1-95d54277f4aa-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"94bbca67-2998-4727-bbe1-95d54277f4aa\") " pod="openstack/kube-state-metrics-0" Nov 23 15:02:46 crc kubenswrapper[5050]: I1123 15:02:46.073120 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/94bbca67-2998-4727-bbe1-95d54277f4aa-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"94bbca67-2998-4727-bbe1-95d54277f4aa\") " pod="openstack/kube-state-metrics-0" Nov 23 15:02:46 crc kubenswrapper[5050]: I1123 15:02:46.087662 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7mrbk\" (UniqueName: \"kubernetes.io/projected/94bbca67-2998-4727-bbe1-95d54277f4aa-kube-api-access-7mrbk\") pod 
\"kube-state-metrics-0\" (UID: \"94bbca67-2998-4727-bbe1-95d54277f4aa\") " pod="openstack/kube-state-metrics-0" Nov 23 15:02:46 crc kubenswrapper[5050]: I1123 15:02:46.165909 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c45b316d-f5ad-4371-a968-a3927907eb63-logs\") pod \"nova-metadata-0\" (UID: \"c45b316d-f5ad-4371-a968-a3927907eb63\") " pod="openstack/nova-metadata-0" Nov 23 15:02:46 crc kubenswrapper[5050]: I1123 15:02:46.166470 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c45b316d-f5ad-4371-a968-a3927907eb63-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"c45b316d-f5ad-4371-a968-a3927907eb63\") " pod="openstack/nova-metadata-0" Nov 23 15:02:46 crc kubenswrapper[5050]: I1123 15:02:46.166683 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c45b316d-f5ad-4371-a968-a3927907eb63-config-data\") pod \"nova-metadata-0\" (UID: \"c45b316d-f5ad-4371-a968-a3927907eb63\") " pod="openstack/nova-metadata-0" Nov 23 15:02:46 crc kubenswrapper[5050]: I1123 15:02:46.166745 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/c45b316d-f5ad-4371-a968-a3927907eb63-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"c45b316d-f5ad-4371-a968-a3927907eb63\") " pod="openstack/nova-metadata-0" Nov 23 15:02:46 crc kubenswrapper[5050]: I1123 15:02:46.166926 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jhpqj\" (UniqueName: \"kubernetes.io/projected/c45b316d-f5ad-4371-a968-a3927907eb63-kube-api-access-jhpqj\") pod \"nova-metadata-0\" (UID: \"c45b316d-f5ad-4371-a968-a3927907eb63\") " pod="openstack/nova-metadata-0" Nov 23 15:02:46 crc kubenswrapper[5050]: I1123 15:02:46.268729 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c45b316d-f5ad-4371-a968-a3927907eb63-config-data\") pod \"nova-metadata-0\" (UID: \"c45b316d-f5ad-4371-a968-a3927907eb63\") " pod="openstack/nova-metadata-0" Nov 23 15:02:46 crc kubenswrapper[5050]: I1123 15:02:46.268816 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/c45b316d-f5ad-4371-a968-a3927907eb63-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"c45b316d-f5ad-4371-a968-a3927907eb63\") " pod="openstack/nova-metadata-0" Nov 23 15:02:46 crc kubenswrapper[5050]: I1123 15:02:46.268884 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jhpqj\" (UniqueName: \"kubernetes.io/projected/c45b316d-f5ad-4371-a968-a3927907eb63-kube-api-access-jhpqj\") pod \"nova-metadata-0\" (UID: \"c45b316d-f5ad-4371-a968-a3927907eb63\") " pod="openstack/nova-metadata-0" Nov 23 15:02:46 crc kubenswrapper[5050]: I1123 15:02:46.268919 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c45b316d-f5ad-4371-a968-a3927907eb63-logs\") pod \"nova-metadata-0\" (UID: \"c45b316d-f5ad-4371-a968-a3927907eb63\") " pod="openstack/nova-metadata-0" Nov 23 15:02:46 crc kubenswrapper[5050]: I1123 15:02:46.269041 5050 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c45b316d-f5ad-4371-a968-a3927907eb63-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"c45b316d-f5ad-4371-a968-a3927907eb63\") " pod="openstack/nova-metadata-0" Nov 23 15:02:46 crc kubenswrapper[5050]: I1123 15:02:46.269736 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c45b316d-f5ad-4371-a968-a3927907eb63-logs\") pod \"nova-metadata-0\" (UID: \"c45b316d-f5ad-4371-a968-a3927907eb63\") " pod="openstack/nova-metadata-0" Nov 23 15:02:46 crc kubenswrapper[5050]: I1123 15:02:46.273623 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/c45b316d-f5ad-4371-a968-a3927907eb63-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"c45b316d-f5ad-4371-a968-a3927907eb63\") " pod="openstack/nova-metadata-0" Nov 23 15:02:46 crc kubenswrapper[5050]: I1123 15:02:46.274523 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c45b316d-f5ad-4371-a968-a3927907eb63-config-data\") pod \"nova-metadata-0\" (UID: \"c45b316d-f5ad-4371-a968-a3927907eb63\") " pod="openstack/nova-metadata-0" Nov 23 15:02:46 crc kubenswrapper[5050]: I1123 15:02:46.277306 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c45b316d-f5ad-4371-a968-a3927907eb63-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"c45b316d-f5ad-4371-a968-a3927907eb63\") " pod="openstack/nova-metadata-0" Nov 23 15:02:46 crc kubenswrapper[5050]: I1123 15:02:46.289207 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 23 15:02:46 crc kubenswrapper[5050]: I1123 15:02:46.295009 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jhpqj\" (UniqueName: \"kubernetes.io/projected/c45b316d-f5ad-4371-a968-a3927907eb63-kube-api-access-jhpqj\") pod \"nova-metadata-0\" (UID: \"c45b316d-f5ad-4371-a968-a3927907eb63\") " pod="openstack/nova-metadata-0" Nov 23 15:02:46 crc kubenswrapper[5050]: I1123 15:02:46.356324 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 23 15:02:46 crc kubenswrapper[5050]: I1123 15:02:46.814951 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 23 15:02:46 crc kubenswrapper[5050]: I1123 15:02:46.942232 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 23 15:02:47 crc kubenswrapper[5050]: I1123 15:02:47.237410 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 23 15:02:47 crc kubenswrapper[5050]: I1123 15:02:47.238327 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="502f3f9b-d57f-4f0b-bca8-c74f139cb28e" containerName="ceilometer-central-agent" containerID="cri-o://e3edf6026dbde8c1a65c0a3b72bbcf4aca60b485fbef6d305cbf2300aeae879f" gracePeriod=30 Nov 23 15:02:47 crc kubenswrapper[5050]: I1123 15:02:47.238463 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="502f3f9b-d57f-4f0b-bca8-c74f139cb28e" containerName="ceilometer-notification-agent" containerID="cri-o://15dcfb17c69865a099d9ce6907920ef90f8b98c5fe76f727295785a19af0728e" gracePeriod=30 Nov 23 15:02:47 crc kubenswrapper[5050]: I1123 15:02:47.238536 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="502f3f9b-d57f-4f0b-bca8-c74f139cb28e" containerName="proxy-httpd" containerID="cri-o://ac42ebbf67a8dcb7ddb5df6c37469b0efbf7f6b0a30370b30b97ed374ffdf9c6" gracePeriod=30 Nov 23 15:02:47 crc kubenswrapper[5050]: I1123 15:02:47.239362 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="502f3f9b-d57f-4f0b-bca8-c74f139cb28e" containerName="sg-core" containerID="cri-o://54bca3f2ac608d9e6f781c539a46416d75c9afb9a9f7929f5f053c30b3f9c130" gracePeriod=30 Nov 23 15:02:47 crc kubenswrapper[5050]: I1123 15:02:47.570597 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3782bc88-94a6-4434-bd20-a48caad31d31" path="/var/lib/kubelet/pods/3782bc88-94a6-4434-bd20-a48caad31d31/volumes" Nov 23 15:02:47 crc kubenswrapper[5050]: I1123 15:02:47.576066 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="42b8cea2-5a21-47d1-a396-adf85162c112" path="/var/lib/kubelet/pods/42b8cea2-5a21-47d1-a396-adf85162c112/volumes" Nov 23 15:02:47 crc kubenswrapper[5050]: I1123 15:02:47.820097 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"c45b316d-f5ad-4371-a968-a3927907eb63","Type":"ContainerStarted","Data":"9ee80eb80cbd17c14058c2b6eace52a03ffd4a27178ff174aa48b78fafb8d129"} Nov 23 15:02:47 crc kubenswrapper[5050]: I1123 15:02:47.820167 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"c45b316d-f5ad-4371-a968-a3927907eb63","Type":"ContainerStarted","Data":"4aa6cc0c3077e4fcc0d2127d516146fdf9ca651a7b2d1826c650f3e5a0ad7522"} Nov 23 15:02:47 crc kubenswrapper[5050]: I1123 15:02:47.820182 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"c45b316d-f5ad-4371-a968-a3927907eb63","Type":"ContainerStarted","Data":"a5646679d72792d51f3378148477c380beef58d3ec0af26f139be77e9e94959e"} Nov 23 15:02:47 crc kubenswrapper[5050]: I1123 15:02:47.831382 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" 
event={"ID":"94bbca67-2998-4727-bbe1-95d54277f4aa","Type":"ContainerStarted","Data":"7ddde7be0ab80cfcd7eacb38e126a697a0d2f4d855ad70062b839e6d933396d5"} Nov 23 15:02:47 crc kubenswrapper[5050]: I1123 15:02:47.831427 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"94bbca67-2998-4727-bbe1-95d54277f4aa","Type":"ContainerStarted","Data":"2adb3c2f6378a26e445982e9e114aa13e08fbdc11745d0270575d6708316c8c9"} Nov 23 15:02:47 crc kubenswrapper[5050]: I1123 15:02:47.831844 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Nov 23 15:02:47 crc kubenswrapper[5050]: I1123 15:02:47.853529 5050 generic.go:334] "Generic (PLEG): container finished" podID="502f3f9b-d57f-4f0b-bca8-c74f139cb28e" containerID="ac42ebbf67a8dcb7ddb5df6c37469b0efbf7f6b0a30370b30b97ed374ffdf9c6" exitCode=0 Nov 23 15:02:47 crc kubenswrapper[5050]: I1123 15:02:47.853575 5050 generic.go:334] "Generic (PLEG): container finished" podID="502f3f9b-d57f-4f0b-bca8-c74f139cb28e" containerID="54bca3f2ac608d9e6f781c539a46416d75c9afb9a9f7929f5f053c30b3f9c130" exitCode=2 Nov 23 15:02:47 crc kubenswrapper[5050]: I1123 15:02:47.853587 5050 generic.go:334] "Generic (PLEG): container finished" podID="502f3f9b-d57f-4f0b-bca8-c74f139cb28e" containerID="e3edf6026dbde8c1a65c0a3b72bbcf4aca60b485fbef6d305cbf2300aeae879f" exitCode=0 Nov 23 15:02:47 crc kubenswrapper[5050]: I1123 15:02:47.853619 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"502f3f9b-d57f-4f0b-bca8-c74f139cb28e","Type":"ContainerDied","Data":"ac42ebbf67a8dcb7ddb5df6c37469b0efbf7f6b0a30370b30b97ed374ffdf9c6"} Nov 23 15:02:47 crc kubenswrapper[5050]: I1123 15:02:47.853656 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"502f3f9b-d57f-4f0b-bca8-c74f139cb28e","Type":"ContainerDied","Data":"54bca3f2ac608d9e6f781c539a46416d75c9afb9a9f7929f5f053c30b3f9c130"} Nov 23 15:02:47 crc kubenswrapper[5050]: I1123 15:02:47.853671 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"502f3f9b-d57f-4f0b-bca8-c74f139cb28e","Type":"ContainerDied","Data":"e3edf6026dbde8c1a65c0a3b72bbcf4aca60b485fbef6d305cbf2300aeae879f"} Nov 23 15:02:47 crc kubenswrapper[5050]: I1123 15:02:47.867148 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.867124551 podStartE2EDuration="2.867124551s" podCreationTimestamp="2025-11-23 15:02:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 15:02:47.857623081 +0000 UTC m=+1263.024619556" watchObservedRunningTime="2025-11-23 15:02:47.867124551 +0000 UTC m=+1263.034121036" Nov 23 15:02:47 crc kubenswrapper[5050]: I1123 15:02:47.906893 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=2.48313678 podStartE2EDuration="2.906870241s" podCreationTimestamp="2025-11-23 15:02:45 +0000 UTC" firstStartedPulling="2025-11-23 15:02:46.825315224 +0000 UTC m=+1261.992311709" lastFinishedPulling="2025-11-23 15:02:47.249048695 +0000 UTC m=+1262.416045170" observedRunningTime="2025-11-23 15:02:47.900726686 +0000 UTC m=+1263.067723171" watchObservedRunningTime="2025-11-23 15:02:47.906870241 +0000 UTC m=+1263.073866726" Nov 23 15:02:48 crc kubenswrapper[5050]: I1123 15:02:48.267496 5050 kubelet.go:2542] "SyncLoop 
(probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 23 15:02:48 crc kubenswrapper[5050]: I1123 15:02:48.267858 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 23 15:02:48 crc kubenswrapper[5050]: I1123 15:02:48.308485 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Nov 23 15:02:48 crc kubenswrapper[5050]: I1123 15:02:48.308554 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Nov 23 15:02:48 crc kubenswrapper[5050]: I1123 15:02:48.370840 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Nov 23 15:02:48 crc kubenswrapper[5050]: I1123 15:02:48.557284 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Nov 23 15:02:48 crc kubenswrapper[5050]: I1123 15:02:48.563774 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-bccf8f775-vvmp7" Nov 23 15:02:48 crc kubenswrapper[5050]: I1123 15:02:48.683961 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6578955fd5-fsg9f"] Nov 23 15:02:48 crc kubenswrapper[5050]: I1123 15:02:48.684937 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6578955fd5-fsg9f" podUID="e7c35996-d4df-4886-afb0-bb77ddcce582" containerName="dnsmasq-dns" containerID="cri-o://2af7e607e49b66afd56010e6ff512ce5c8e5aa25061c01f470156f5bd99de973" gracePeriod=10 Nov 23 15:02:48 crc kubenswrapper[5050]: I1123 15:02:48.881190 5050 generic.go:334] "Generic (PLEG): container finished" podID="e7c35996-d4df-4886-afb0-bb77ddcce582" containerID="2af7e607e49b66afd56010e6ff512ce5c8e5aa25061c01f470156f5bd99de973" exitCode=0 Nov 23 15:02:48 crc kubenswrapper[5050]: I1123 15:02:48.883688 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6578955fd5-fsg9f" event={"ID":"e7c35996-d4df-4886-afb0-bb77ddcce582","Type":"ContainerDied","Data":"2af7e607e49b66afd56010e6ff512ce5c8e5aa25061c01f470156f5bd99de973"} Nov 23 15:02:48 crc kubenswrapper[5050]: I1123 15:02:48.919329 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Nov 23 15:02:49 crc kubenswrapper[5050]: I1123 15:02:49.335289 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6578955fd5-fsg9f" Nov 23 15:02:49 crc kubenswrapper[5050]: I1123 15:02:49.350716 5050 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="275dac05-e3bc-4f6d-ab7b-f95acbb50692" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.184:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 23 15:02:49 crc kubenswrapper[5050]: I1123 15:02:49.350841 5050 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="275dac05-e3bc-4f6d-ab7b-f95acbb50692" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.184:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 23 15:02:49 crc kubenswrapper[5050]: I1123 15:02:49.384776 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7c35996-d4df-4886-afb0-bb77ddcce582-config\") pod \"e7c35996-d4df-4886-afb0-bb77ddcce582\" (UID: \"e7c35996-d4df-4886-afb0-bb77ddcce582\") " Nov 23 15:02:49 crc kubenswrapper[5050]: I1123 15:02:49.385031 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e7c35996-d4df-4886-afb0-bb77ddcce582-dns-svc\") pod \"e7c35996-d4df-4886-afb0-bb77ddcce582\" (UID: \"e7c35996-d4df-4886-afb0-bb77ddcce582\") " Nov 23 15:02:49 crc kubenswrapper[5050]: I1123 15:02:49.385242 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e7c35996-d4df-4886-afb0-bb77ddcce582-ovsdbserver-nb\") pod \"e7c35996-d4df-4886-afb0-bb77ddcce582\" (UID: \"e7c35996-d4df-4886-afb0-bb77ddcce582\") " Nov 23 15:02:49 crc kubenswrapper[5050]: I1123 15:02:49.385276 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e7c35996-d4df-4886-afb0-bb77ddcce582-dns-swift-storage-0\") pod \"e7c35996-d4df-4886-afb0-bb77ddcce582\" (UID: \"e7c35996-d4df-4886-afb0-bb77ddcce582\") " Nov 23 15:02:49 crc kubenswrapper[5050]: I1123 15:02:49.385345 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e7c35996-d4df-4886-afb0-bb77ddcce582-ovsdbserver-sb\") pod \"e7c35996-d4df-4886-afb0-bb77ddcce582\" (UID: \"e7c35996-d4df-4886-afb0-bb77ddcce582\") " Nov 23 15:02:49 crc kubenswrapper[5050]: I1123 15:02:49.385386 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dzxhn\" (UniqueName: \"kubernetes.io/projected/e7c35996-d4df-4886-afb0-bb77ddcce582-kube-api-access-dzxhn\") pod \"e7c35996-d4df-4886-afb0-bb77ddcce582\" (UID: \"e7c35996-d4df-4886-afb0-bb77ddcce582\") " Nov 23 15:02:49 crc kubenswrapper[5050]: I1123 15:02:49.394520 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e7c35996-d4df-4886-afb0-bb77ddcce582-kube-api-access-dzxhn" (OuterVolumeSpecName: "kube-api-access-dzxhn") pod "e7c35996-d4df-4886-afb0-bb77ddcce582" (UID: "e7c35996-d4df-4886-afb0-bb77ddcce582"). InnerVolumeSpecName "kube-api-access-dzxhn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:02:49 crc kubenswrapper[5050]: I1123 15:02:49.487540 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dzxhn\" (UniqueName: \"kubernetes.io/projected/e7c35996-d4df-4886-afb0-bb77ddcce582-kube-api-access-dzxhn\") on node \"crc\" DevicePath \"\"" Nov 23 15:02:49 crc kubenswrapper[5050]: I1123 15:02:49.489612 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e7c35996-d4df-4886-afb0-bb77ddcce582-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "e7c35996-d4df-4886-afb0-bb77ddcce582" (UID: "e7c35996-d4df-4886-afb0-bb77ddcce582"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 15:02:49 crc kubenswrapper[5050]: I1123 15:02:49.491416 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e7c35996-d4df-4886-afb0-bb77ddcce582-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "e7c35996-d4df-4886-afb0-bb77ddcce582" (UID: "e7c35996-d4df-4886-afb0-bb77ddcce582"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 15:02:49 crc kubenswrapper[5050]: I1123 15:02:49.514978 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e7c35996-d4df-4886-afb0-bb77ddcce582-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "e7c35996-d4df-4886-afb0-bb77ddcce582" (UID: "e7c35996-d4df-4886-afb0-bb77ddcce582"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 15:02:49 crc kubenswrapper[5050]: I1123 15:02:49.520929 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e7c35996-d4df-4886-afb0-bb77ddcce582-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "e7c35996-d4df-4886-afb0-bb77ddcce582" (UID: "e7c35996-d4df-4886-afb0-bb77ddcce582"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 15:02:49 crc kubenswrapper[5050]: I1123 15:02:49.543600 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e7c35996-d4df-4886-afb0-bb77ddcce582-config" (OuterVolumeSpecName: "config") pod "e7c35996-d4df-4886-afb0-bb77ddcce582" (UID: "e7c35996-d4df-4886-afb0-bb77ddcce582"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 15:02:49 crc kubenswrapper[5050]: I1123 15:02:49.589976 5050 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e7c35996-d4df-4886-afb0-bb77ddcce582-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 23 15:02:49 crc kubenswrapper[5050]: I1123 15:02:49.590015 5050 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e7c35996-d4df-4886-afb0-bb77ddcce582-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 23 15:02:49 crc kubenswrapper[5050]: I1123 15:02:49.590027 5050 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e7c35996-d4df-4886-afb0-bb77ddcce582-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 23 15:02:49 crc kubenswrapper[5050]: I1123 15:02:49.590040 5050 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7c35996-d4df-4886-afb0-bb77ddcce582-config\") on node \"crc\" DevicePath \"\"" Nov 23 15:02:49 crc kubenswrapper[5050]: I1123 15:02:49.590051 5050 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e7c35996-d4df-4886-afb0-bb77ddcce582-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 23 15:02:49 crc kubenswrapper[5050]: I1123 15:02:49.895514 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6578955fd5-fsg9f" event={"ID":"e7c35996-d4df-4886-afb0-bb77ddcce582","Type":"ContainerDied","Data":"b17e940bb4e5cd3e205a69c18c23bb1cbbbe240c7731622c47327906c38db1f2"} Nov 23 15:02:49 crc kubenswrapper[5050]: I1123 15:02:49.895617 5050 scope.go:117] "RemoveContainer" containerID="2af7e607e49b66afd56010e6ff512ce5c8e5aa25061c01f470156f5bd99de973" Nov 23 15:02:49 crc kubenswrapper[5050]: I1123 15:02:49.895830 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6578955fd5-fsg9f" Nov 23 15:02:49 crc kubenswrapper[5050]: I1123 15:02:49.900120 5050 generic.go:334] "Generic (PLEG): container finished" podID="8d6abc54-c88f-403a-b9ca-8645646c6356" containerID="4addc92803e0a7fa069a404a032c72c2edc38d23a3ca83cea0920a981320b2c9" exitCode=0 Nov 23 15:02:49 crc kubenswrapper[5050]: I1123 15:02:49.900219 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-lwv2w" event={"ID":"8d6abc54-c88f-403a-b9ca-8645646c6356","Type":"ContainerDied","Data":"4addc92803e0a7fa069a404a032c72c2edc38d23a3ca83cea0920a981320b2c9"} Nov 23 15:02:49 crc kubenswrapper[5050]: I1123 15:02:49.906780 5050 generic.go:334] "Generic (PLEG): container finished" podID="948389bb-db2c-40c2-a458-b54cb0fa94ee" containerID="dcf12c08d4bf1811af924b4b17ab832619b6e87539c56aca5f56d1ebdc5f3fa1" exitCode=0 Nov 23 15:02:49 crc kubenswrapper[5050]: I1123 15:02:49.906876 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-j52wg" event={"ID":"948389bb-db2c-40c2-a458-b54cb0fa94ee","Type":"ContainerDied","Data":"dcf12c08d4bf1811af924b4b17ab832619b6e87539c56aca5f56d1ebdc5f3fa1"} Nov 23 15:02:49 crc kubenswrapper[5050]: I1123 15:02:49.926549 5050 scope.go:117] "RemoveContainer" containerID="63f3f91319babce14e1ed652d7d8092e6fc2e31738c0165be8ce19658f3b5175" Nov 23 15:02:49 crc kubenswrapper[5050]: I1123 15:02:49.968459 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6578955fd5-fsg9f"] Nov 23 15:02:49 crc kubenswrapper[5050]: I1123 15:02:49.979166 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6578955fd5-fsg9f"] Nov 23 15:02:51 crc kubenswrapper[5050]: I1123 15:02:51.356950 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 23 15:02:51 crc kubenswrapper[5050]: I1123 15:02:51.357520 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 23 15:02:51 crc kubenswrapper[5050]: I1123 15:02:51.366693 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-j52wg" Nov 23 15:02:51 crc kubenswrapper[5050]: I1123 15:02:51.375985 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-lwv2w" Nov 23 15:02:51 crc kubenswrapper[5050]: I1123 15:02:51.551250 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dhmbc\" (UniqueName: \"kubernetes.io/projected/948389bb-db2c-40c2-a458-b54cb0fa94ee-kube-api-access-dhmbc\") pod \"948389bb-db2c-40c2-a458-b54cb0fa94ee\" (UID: \"948389bb-db2c-40c2-a458-b54cb0fa94ee\") " Nov 23 15:02:51 crc kubenswrapper[5050]: I1123 15:02:51.552696 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/948389bb-db2c-40c2-a458-b54cb0fa94ee-config-data\") pod \"948389bb-db2c-40c2-a458-b54cb0fa94ee\" (UID: \"948389bb-db2c-40c2-a458-b54cb0fa94ee\") " Nov 23 15:02:51 crc kubenswrapper[5050]: I1123 15:02:51.552879 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dkvs2\" (UniqueName: \"kubernetes.io/projected/8d6abc54-c88f-403a-b9ca-8645646c6356-kube-api-access-dkvs2\") pod \"8d6abc54-c88f-403a-b9ca-8645646c6356\" (UID: \"8d6abc54-c88f-403a-b9ca-8645646c6356\") " Nov 23 15:02:51 crc kubenswrapper[5050]: I1123 15:02:51.553233 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8d6abc54-c88f-403a-b9ca-8645646c6356-config-data\") pod \"8d6abc54-c88f-403a-b9ca-8645646c6356\" (UID: \"8d6abc54-c88f-403a-b9ca-8645646c6356\") " Nov 23 15:02:51 crc kubenswrapper[5050]: I1123 15:02:51.553323 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/948389bb-db2c-40c2-a458-b54cb0fa94ee-combined-ca-bundle\") pod \"948389bb-db2c-40c2-a458-b54cb0fa94ee\" (UID: \"948389bb-db2c-40c2-a458-b54cb0fa94ee\") " Nov 23 15:02:51 crc kubenswrapper[5050]: I1123 15:02:51.553557 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8d6abc54-c88f-403a-b9ca-8645646c6356-combined-ca-bundle\") pod \"8d6abc54-c88f-403a-b9ca-8645646c6356\" (UID: \"8d6abc54-c88f-403a-b9ca-8645646c6356\") " Nov 23 15:02:51 crc kubenswrapper[5050]: I1123 15:02:51.553724 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/948389bb-db2c-40c2-a458-b54cb0fa94ee-scripts\") pod \"948389bb-db2c-40c2-a458-b54cb0fa94ee\" (UID: \"948389bb-db2c-40c2-a458-b54cb0fa94ee\") " Nov 23 15:02:51 crc kubenswrapper[5050]: I1123 15:02:51.553837 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8d6abc54-c88f-403a-b9ca-8645646c6356-scripts\") pod \"8d6abc54-c88f-403a-b9ca-8645646c6356\" (UID: \"8d6abc54-c88f-403a-b9ca-8645646c6356\") " Nov 23 15:02:51 crc kubenswrapper[5050]: I1123 15:02:51.561504 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/948389bb-db2c-40c2-a458-b54cb0fa94ee-kube-api-access-dhmbc" (OuterVolumeSpecName: "kube-api-access-dhmbc") pod "948389bb-db2c-40c2-a458-b54cb0fa94ee" (UID: "948389bb-db2c-40c2-a458-b54cb0fa94ee"). InnerVolumeSpecName "kube-api-access-dhmbc". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:02:51 crc kubenswrapper[5050]: I1123 15:02:51.561621 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8d6abc54-c88f-403a-b9ca-8645646c6356-kube-api-access-dkvs2" (OuterVolumeSpecName: "kube-api-access-dkvs2") pod "8d6abc54-c88f-403a-b9ca-8645646c6356" (UID: "8d6abc54-c88f-403a-b9ca-8645646c6356"). InnerVolumeSpecName "kube-api-access-dkvs2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:02:51 crc kubenswrapper[5050]: I1123 15:02:51.562143 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/948389bb-db2c-40c2-a458-b54cb0fa94ee-scripts" (OuterVolumeSpecName: "scripts") pod "948389bb-db2c-40c2-a458-b54cb0fa94ee" (UID: "948389bb-db2c-40c2-a458-b54cb0fa94ee"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:02:51 crc kubenswrapper[5050]: I1123 15:02:51.564305 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8d6abc54-c88f-403a-b9ca-8645646c6356-scripts" (OuterVolumeSpecName: "scripts") pod "8d6abc54-c88f-403a-b9ca-8645646c6356" (UID: "8d6abc54-c88f-403a-b9ca-8645646c6356"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:02:51 crc kubenswrapper[5050]: I1123 15:02:51.564575 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e7c35996-d4df-4886-afb0-bb77ddcce582" path="/var/lib/kubelet/pods/e7c35996-d4df-4886-afb0-bb77ddcce582/volumes" Nov 23 15:02:51 crc kubenswrapper[5050]: I1123 15:02:51.594177 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/948389bb-db2c-40c2-a458-b54cb0fa94ee-config-data" (OuterVolumeSpecName: "config-data") pod "948389bb-db2c-40c2-a458-b54cb0fa94ee" (UID: "948389bb-db2c-40c2-a458-b54cb0fa94ee"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:02:51 crc kubenswrapper[5050]: I1123 15:02:51.595354 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/948389bb-db2c-40c2-a458-b54cb0fa94ee-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "948389bb-db2c-40c2-a458-b54cb0fa94ee" (UID: "948389bb-db2c-40c2-a458-b54cb0fa94ee"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:02:51 crc kubenswrapper[5050]: I1123 15:02:51.598134 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8d6abc54-c88f-403a-b9ca-8645646c6356-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8d6abc54-c88f-403a-b9ca-8645646c6356" (UID: "8d6abc54-c88f-403a-b9ca-8645646c6356"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:02:51 crc kubenswrapper[5050]: I1123 15:02:51.600596 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8d6abc54-c88f-403a-b9ca-8645646c6356-config-data" (OuterVolumeSpecName: "config-data") pod "8d6abc54-c88f-403a-b9ca-8645646c6356" (UID: "8d6abc54-c88f-403a-b9ca-8645646c6356"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:02:51 crc kubenswrapper[5050]: I1123 15:02:51.656853 5050 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/948389bb-db2c-40c2-a458-b54cb0fa94ee-scripts\") on node \"crc\" DevicePath \"\"" Nov 23 15:02:51 crc kubenswrapper[5050]: I1123 15:02:51.656897 5050 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8d6abc54-c88f-403a-b9ca-8645646c6356-scripts\") on node \"crc\" DevicePath \"\"" Nov 23 15:02:51 crc kubenswrapper[5050]: I1123 15:02:51.656911 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dhmbc\" (UniqueName: \"kubernetes.io/projected/948389bb-db2c-40c2-a458-b54cb0fa94ee-kube-api-access-dhmbc\") on node \"crc\" DevicePath \"\"" Nov 23 15:02:51 crc kubenswrapper[5050]: I1123 15:02:51.656929 5050 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/948389bb-db2c-40c2-a458-b54cb0fa94ee-config-data\") on node \"crc\" DevicePath \"\"" Nov 23 15:02:51 crc kubenswrapper[5050]: I1123 15:02:51.656943 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dkvs2\" (UniqueName: \"kubernetes.io/projected/8d6abc54-c88f-403a-b9ca-8645646c6356-kube-api-access-dkvs2\") on node \"crc\" DevicePath \"\"" Nov 23 15:02:51 crc kubenswrapper[5050]: I1123 15:02:51.656958 5050 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8d6abc54-c88f-403a-b9ca-8645646c6356-config-data\") on node \"crc\" DevicePath \"\"" Nov 23 15:02:51 crc kubenswrapper[5050]: I1123 15:02:51.656971 5050 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/948389bb-db2c-40c2-a458-b54cb0fa94ee-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 15:02:51 crc kubenswrapper[5050]: I1123 15:02:51.657015 5050 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8d6abc54-c88f-403a-b9ca-8645646c6356-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 15:02:51 crc kubenswrapper[5050]: I1123 15:02:51.944278 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-lwv2w" Nov 23 15:02:51 crc kubenswrapper[5050]: I1123 15:02:51.944258 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-lwv2w" event={"ID":"8d6abc54-c88f-403a-b9ca-8645646c6356","Type":"ContainerDied","Data":"de446882d02113cccd961ec8838df1b1ece6d9f0245a8db902801f0cd4901012"} Nov 23 15:02:51 crc kubenswrapper[5050]: I1123 15:02:51.944506 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="de446882d02113cccd961ec8838df1b1ece6d9f0245a8db902801f0cd4901012" Nov 23 15:02:51 crc kubenswrapper[5050]: I1123 15:02:51.948036 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-j52wg" event={"ID":"948389bb-db2c-40c2-a458-b54cb0fa94ee","Type":"ContainerDied","Data":"837ae0a119ac7f11e15f136249846bd454790503c96143267bf8829657cf0658"} Nov 23 15:02:51 crc kubenswrapper[5050]: I1123 15:02:51.948122 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="837ae0a119ac7f11e15f136249846bd454790503c96143267bf8829657cf0658" Nov 23 15:02:51 crc kubenswrapper[5050]: I1123 15:02:51.948096 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-j52wg" Nov 23 15:02:52 crc kubenswrapper[5050]: I1123 15:02:52.060625 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 23 15:02:52 crc kubenswrapper[5050]: E1123 15:02:52.061115 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e7c35996-d4df-4886-afb0-bb77ddcce582" containerName="init" Nov 23 15:02:52 crc kubenswrapper[5050]: I1123 15:02:52.061139 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="e7c35996-d4df-4886-afb0-bb77ddcce582" containerName="init" Nov 23 15:02:52 crc kubenswrapper[5050]: E1123 15:02:52.061168 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8d6abc54-c88f-403a-b9ca-8645646c6356" containerName="nova-cell1-conductor-db-sync" Nov 23 15:02:52 crc kubenswrapper[5050]: I1123 15:02:52.061180 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="8d6abc54-c88f-403a-b9ca-8645646c6356" containerName="nova-cell1-conductor-db-sync" Nov 23 15:02:52 crc kubenswrapper[5050]: E1123 15:02:52.061209 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e7c35996-d4df-4886-afb0-bb77ddcce582" containerName="dnsmasq-dns" Nov 23 15:02:52 crc kubenswrapper[5050]: I1123 15:02:52.061245 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="e7c35996-d4df-4886-afb0-bb77ddcce582" containerName="dnsmasq-dns" Nov 23 15:02:52 crc kubenswrapper[5050]: E1123 15:02:52.061271 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="948389bb-db2c-40c2-a458-b54cb0fa94ee" containerName="nova-manage" Nov 23 15:02:52 crc kubenswrapper[5050]: I1123 15:02:52.061362 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="948389bb-db2c-40c2-a458-b54cb0fa94ee" containerName="nova-manage" Nov 23 15:02:52 crc kubenswrapper[5050]: I1123 15:02:52.061584 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="8d6abc54-c88f-403a-b9ca-8645646c6356" containerName="nova-cell1-conductor-db-sync" Nov 23 15:02:52 crc kubenswrapper[5050]: I1123 15:02:52.061605 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="e7c35996-d4df-4886-afb0-bb77ddcce582" containerName="dnsmasq-dns" Nov 23 15:02:52 crc kubenswrapper[5050]: I1123 15:02:52.061620 5050 
memory_manager.go:354] "RemoveStaleState removing state" podUID="948389bb-db2c-40c2-a458-b54cb0fa94ee" containerName="nova-manage" Nov 23 15:02:52 crc kubenswrapper[5050]: I1123 15:02:52.062663 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 23 15:02:52 crc kubenswrapper[5050]: I1123 15:02:52.067786 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Nov 23 15:02:52 crc kubenswrapper[5050]: I1123 15:02:52.071588 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 23 15:02:52 crc kubenswrapper[5050]: I1123 15:02:52.134237 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 23 15:02:52 crc kubenswrapper[5050]: I1123 15:02:52.134798 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="275dac05-e3bc-4f6d-ab7b-f95acbb50692" containerName="nova-api-log" containerID="cri-o://d2f15cce3da3c5052a0fcc24b40f9ff904f283f118a7418f605a1e21276839e4" gracePeriod=30 Nov 23 15:02:52 crc kubenswrapper[5050]: I1123 15:02:52.135539 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="275dac05-e3bc-4f6d-ab7b-f95acbb50692" containerName="nova-api-api" containerID="cri-o://97453c1c0289846eb214645e2ffc35025120692c5b4ce5d3b7e6b518b04dba89" gracePeriod=30 Nov 23 15:02:52 crc kubenswrapper[5050]: I1123 15:02:52.154334 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 23 15:02:52 crc kubenswrapper[5050]: I1123 15:02:52.154601 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="a3584331-f147-4f05-9652-397c75f881db" containerName="nova-scheduler-scheduler" containerID="cri-o://4697a6223c18966f8b2511b5be8e28ee2b37cdeaaeff04a38673897b95bcd990" gracePeriod=30 Nov 23 15:02:52 crc kubenswrapper[5050]: I1123 15:02:52.172423 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/75e360ca-c84a-4806-a86a-86924a639cfc-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"75e360ca-c84a-4806-a86a-86924a639cfc\") " pod="openstack/nova-cell1-conductor-0" Nov 23 15:02:52 crc kubenswrapper[5050]: I1123 15:02:52.172516 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6v5qt\" (UniqueName: \"kubernetes.io/projected/75e360ca-c84a-4806-a86a-86924a639cfc-kube-api-access-6v5qt\") pod \"nova-cell1-conductor-0\" (UID: \"75e360ca-c84a-4806-a86a-86924a639cfc\") " pod="openstack/nova-cell1-conductor-0" Nov 23 15:02:52 crc kubenswrapper[5050]: I1123 15:02:52.172602 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/75e360ca-c84a-4806-a86a-86924a639cfc-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"75e360ca-c84a-4806-a86a-86924a639cfc\") " pod="openstack/nova-cell1-conductor-0" Nov 23 15:02:52 crc kubenswrapper[5050]: I1123 15:02:52.209151 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 23 15:02:52 crc kubenswrapper[5050]: I1123 15:02:52.209839 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="c45b316d-f5ad-4371-a968-a3927907eb63" 
containerName="nova-metadata-log" containerID="cri-o://4aa6cc0c3077e4fcc0d2127d516146fdf9ca651a7b2d1826c650f3e5a0ad7522" gracePeriod=30 Nov 23 15:02:52 crc kubenswrapper[5050]: I1123 15:02:52.209984 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="c45b316d-f5ad-4371-a968-a3927907eb63" containerName="nova-metadata-metadata" containerID="cri-o://9ee80eb80cbd17c14058c2b6eace52a03ffd4a27178ff174aa48b78fafb8d129" gracePeriod=30 Nov 23 15:02:52 crc kubenswrapper[5050]: I1123 15:02:52.274347 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/75e360ca-c84a-4806-a86a-86924a639cfc-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"75e360ca-c84a-4806-a86a-86924a639cfc\") " pod="openstack/nova-cell1-conductor-0" Nov 23 15:02:52 crc kubenswrapper[5050]: I1123 15:02:52.274422 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6v5qt\" (UniqueName: \"kubernetes.io/projected/75e360ca-c84a-4806-a86a-86924a639cfc-kube-api-access-6v5qt\") pod \"nova-cell1-conductor-0\" (UID: \"75e360ca-c84a-4806-a86a-86924a639cfc\") " pod="openstack/nova-cell1-conductor-0" Nov 23 15:02:52 crc kubenswrapper[5050]: I1123 15:02:52.274548 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/75e360ca-c84a-4806-a86a-86924a639cfc-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"75e360ca-c84a-4806-a86a-86924a639cfc\") " pod="openstack/nova-cell1-conductor-0" Nov 23 15:02:52 crc kubenswrapper[5050]: I1123 15:02:52.284615 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/75e360ca-c84a-4806-a86a-86924a639cfc-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"75e360ca-c84a-4806-a86a-86924a639cfc\") " pod="openstack/nova-cell1-conductor-0" Nov 23 15:02:52 crc kubenswrapper[5050]: I1123 15:02:52.285097 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/75e360ca-c84a-4806-a86a-86924a639cfc-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"75e360ca-c84a-4806-a86a-86924a639cfc\") " pod="openstack/nova-cell1-conductor-0" Nov 23 15:02:52 crc kubenswrapper[5050]: I1123 15:02:52.298073 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6v5qt\" (UniqueName: \"kubernetes.io/projected/75e360ca-c84a-4806-a86a-86924a639cfc-kube-api-access-6v5qt\") pod \"nova-cell1-conductor-0\" (UID: \"75e360ca-c84a-4806-a86a-86924a639cfc\") " pod="openstack/nova-cell1-conductor-0" Nov 23 15:02:52 crc kubenswrapper[5050]: I1123 15:02:52.391178 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 23 15:02:52 crc kubenswrapper[5050]: I1123 15:02:52.897399 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 23 15:02:52 crc kubenswrapper[5050]: I1123 15:02:52.971359 5050 generic.go:334] "Generic (PLEG): container finished" podID="275dac05-e3bc-4f6d-ab7b-f95acbb50692" containerID="d2f15cce3da3c5052a0fcc24b40f9ff904f283f118a7418f605a1e21276839e4" exitCode=143 Nov 23 15:02:52 crc kubenswrapper[5050]: I1123 15:02:52.971456 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"275dac05-e3bc-4f6d-ab7b-f95acbb50692","Type":"ContainerDied","Data":"d2f15cce3da3c5052a0fcc24b40f9ff904f283f118a7418f605a1e21276839e4"} Nov 23 15:02:52 crc kubenswrapper[5050]: I1123 15:02:52.988725 5050 generic.go:334] "Generic (PLEG): container finished" podID="c45b316d-f5ad-4371-a968-a3927907eb63" containerID="9ee80eb80cbd17c14058c2b6eace52a03ffd4a27178ff174aa48b78fafb8d129" exitCode=0 Nov 23 15:02:52 crc kubenswrapper[5050]: I1123 15:02:52.988767 5050 generic.go:334] "Generic (PLEG): container finished" podID="c45b316d-f5ad-4371-a968-a3927907eb63" containerID="4aa6cc0c3077e4fcc0d2127d516146fdf9ca651a7b2d1826c650f3e5a0ad7522" exitCode=143 Nov 23 15:02:52 crc kubenswrapper[5050]: I1123 15:02:52.988804 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"c45b316d-f5ad-4371-a968-a3927907eb63","Type":"ContainerDied","Data":"9ee80eb80cbd17c14058c2b6eace52a03ffd4a27178ff174aa48b78fafb8d129"} Nov 23 15:02:52 crc kubenswrapper[5050]: I1123 15:02:52.988842 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"c45b316d-f5ad-4371-a968-a3927907eb63","Type":"ContainerDied","Data":"4aa6cc0c3077e4fcc0d2127d516146fdf9ca651a7b2d1826c650f3e5a0ad7522"} Nov 23 15:02:52 crc kubenswrapper[5050]: I1123 15:02:52.988853 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"c45b316d-f5ad-4371-a968-a3927907eb63","Type":"ContainerDied","Data":"a5646679d72792d51f3378148477c380beef58d3ec0af26f139be77e9e94959e"} Nov 23 15:02:52 crc kubenswrapper[5050]: I1123 15:02:52.988876 5050 scope.go:117] "RemoveContainer" containerID="9ee80eb80cbd17c14058c2b6eace52a03ffd4a27178ff174aa48b78fafb8d129" Nov 23 15:02:52 crc kubenswrapper[5050]: I1123 15:02:52.989120 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 23 15:02:52 crc kubenswrapper[5050]: I1123 15:02:52.991249 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c45b316d-f5ad-4371-a968-a3927907eb63-combined-ca-bundle\") pod \"c45b316d-f5ad-4371-a968-a3927907eb63\" (UID: \"c45b316d-f5ad-4371-a968-a3927907eb63\") " Nov 23 15:02:52 crc kubenswrapper[5050]: I1123 15:02:52.991340 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/c45b316d-f5ad-4371-a968-a3927907eb63-nova-metadata-tls-certs\") pod \"c45b316d-f5ad-4371-a968-a3927907eb63\" (UID: \"c45b316d-f5ad-4371-a968-a3927907eb63\") " Nov 23 15:02:52 crc kubenswrapper[5050]: I1123 15:02:52.991453 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jhpqj\" (UniqueName: \"kubernetes.io/projected/c45b316d-f5ad-4371-a968-a3927907eb63-kube-api-access-jhpqj\") pod \"c45b316d-f5ad-4371-a968-a3927907eb63\" (UID: \"c45b316d-f5ad-4371-a968-a3927907eb63\") " Nov 23 15:02:52 crc kubenswrapper[5050]: I1123 15:02:52.991547 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c45b316d-f5ad-4371-a968-a3927907eb63-logs\") pod \"c45b316d-f5ad-4371-a968-a3927907eb63\" (UID: \"c45b316d-f5ad-4371-a968-a3927907eb63\") " Nov 23 15:02:52 crc kubenswrapper[5050]: I1123 15:02:52.991608 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c45b316d-f5ad-4371-a968-a3927907eb63-config-data\") pod \"c45b316d-f5ad-4371-a968-a3927907eb63\" (UID: \"c45b316d-f5ad-4371-a968-a3927907eb63\") " Nov 23 15:02:52 crc kubenswrapper[5050]: I1123 15:02:52.992297 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c45b316d-f5ad-4371-a968-a3927907eb63-logs" (OuterVolumeSpecName: "logs") pod "c45b316d-f5ad-4371-a968-a3927907eb63" (UID: "c45b316d-f5ad-4371-a968-a3927907eb63"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 15:02:53 crc kubenswrapper[5050]: I1123 15:02:52.999652 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c45b316d-f5ad-4371-a968-a3927907eb63-kube-api-access-jhpqj" (OuterVolumeSpecName: "kube-api-access-jhpqj") pod "c45b316d-f5ad-4371-a968-a3927907eb63" (UID: "c45b316d-f5ad-4371-a968-a3927907eb63"). InnerVolumeSpecName "kube-api-access-jhpqj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:02:53 crc kubenswrapper[5050]: I1123 15:02:53.009202 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 23 15:02:53 crc kubenswrapper[5050]: I1123 15:02:53.025273 5050 scope.go:117] "RemoveContainer" containerID="4aa6cc0c3077e4fcc0d2127d516146fdf9ca651a7b2d1826c650f3e5a0ad7522" Nov 23 15:02:53 crc kubenswrapper[5050]: I1123 15:02:53.027611 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c45b316d-f5ad-4371-a968-a3927907eb63-config-data" (OuterVolumeSpecName: "config-data") pod "c45b316d-f5ad-4371-a968-a3927907eb63" (UID: "c45b316d-f5ad-4371-a968-a3927907eb63"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:02:53 crc kubenswrapper[5050]: I1123 15:02:53.031317 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c45b316d-f5ad-4371-a968-a3927907eb63-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c45b316d-f5ad-4371-a968-a3927907eb63" (UID: "c45b316d-f5ad-4371-a968-a3927907eb63"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:02:53 crc kubenswrapper[5050]: I1123 15:02:53.048136 5050 scope.go:117] "RemoveContainer" containerID="9ee80eb80cbd17c14058c2b6eace52a03ffd4a27178ff174aa48b78fafb8d129" Nov 23 15:02:53 crc kubenswrapper[5050]: E1123 15:02:53.049773 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9ee80eb80cbd17c14058c2b6eace52a03ffd4a27178ff174aa48b78fafb8d129\": container with ID starting with 9ee80eb80cbd17c14058c2b6eace52a03ffd4a27178ff174aa48b78fafb8d129 not found: ID does not exist" containerID="9ee80eb80cbd17c14058c2b6eace52a03ffd4a27178ff174aa48b78fafb8d129" Nov 23 15:02:53 crc kubenswrapper[5050]: I1123 15:02:53.049868 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9ee80eb80cbd17c14058c2b6eace52a03ffd4a27178ff174aa48b78fafb8d129"} err="failed to get container status \"9ee80eb80cbd17c14058c2b6eace52a03ffd4a27178ff174aa48b78fafb8d129\": rpc error: code = NotFound desc = could not find container \"9ee80eb80cbd17c14058c2b6eace52a03ffd4a27178ff174aa48b78fafb8d129\": container with ID starting with 9ee80eb80cbd17c14058c2b6eace52a03ffd4a27178ff174aa48b78fafb8d129 not found: ID does not exist" Nov 23 15:02:53 crc kubenswrapper[5050]: I1123 15:02:53.049902 5050 scope.go:117] "RemoveContainer" containerID="4aa6cc0c3077e4fcc0d2127d516146fdf9ca651a7b2d1826c650f3e5a0ad7522" Nov 23 15:02:53 crc kubenswrapper[5050]: E1123 15:02:53.050439 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4aa6cc0c3077e4fcc0d2127d516146fdf9ca651a7b2d1826c650f3e5a0ad7522\": container with ID starting with 4aa6cc0c3077e4fcc0d2127d516146fdf9ca651a7b2d1826c650f3e5a0ad7522 not found: ID does not exist" containerID="4aa6cc0c3077e4fcc0d2127d516146fdf9ca651a7b2d1826c650f3e5a0ad7522" Nov 23 15:02:53 crc kubenswrapper[5050]: I1123 15:02:53.050481 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4aa6cc0c3077e4fcc0d2127d516146fdf9ca651a7b2d1826c650f3e5a0ad7522"} err="failed to get container status \"4aa6cc0c3077e4fcc0d2127d516146fdf9ca651a7b2d1826c650f3e5a0ad7522\": rpc error: code = NotFound desc = could not find container \"4aa6cc0c3077e4fcc0d2127d516146fdf9ca651a7b2d1826c650f3e5a0ad7522\": container with ID starting with 4aa6cc0c3077e4fcc0d2127d516146fdf9ca651a7b2d1826c650f3e5a0ad7522 not found: ID does not exist" Nov 23 15:02:53 crc kubenswrapper[5050]: I1123 15:02:53.050499 5050 scope.go:117] "RemoveContainer" containerID="9ee80eb80cbd17c14058c2b6eace52a03ffd4a27178ff174aa48b78fafb8d129" Nov 23 15:02:53 crc kubenswrapper[5050]: I1123 15:02:53.050921 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9ee80eb80cbd17c14058c2b6eace52a03ffd4a27178ff174aa48b78fafb8d129"} err="failed to get container status \"9ee80eb80cbd17c14058c2b6eace52a03ffd4a27178ff174aa48b78fafb8d129\": rpc error: code = NotFound desc = could not find container 
\"9ee80eb80cbd17c14058c2b6eace52a03ffd4a27178ff174aa48b78fafb8d129\": container with ID starting with 9ee80eb80cbd17c14058c2b6eace52a03ffd4a27178ff174aa48b78fafb8d129 not found: ID does not exist" Nov 23 15:02:53 crc kubenswrapper[5050]: I1123 15:02:53.050980 5050 scope.go:117] "RemoveContainer" containerID="4aa6cc0c3077e4fcc0d2127d516146fdf9ca651a7b2d1826c650f3e5a0ad7522" Nov 23 15:02:53 crc kubenswrapper[5050]: I1123 15:02:53.053549 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4aa6cc0c3077e4fcc0d2127d516146fdf9ca651a7b2d1826c650f3e5a0ad7522"} err="failed to get container status \"4aa6cc0c3077e4fcc0d2127d516146fdf9ca651a7b2d1826c650f3e5a0ad7522\": rpc error: code = NotFound desc = could not find container \"4aa6cc0c3077e4fcc0d2127d516146fdf9ca651a7b2d1826c650f3e5a0ad7522\": container with ID starting with 4aa6cc0c3077e4fcc0d2127d516146fdf9ca651a7b2d1826c650f3e5a0ad7522 not found: ID does not exist" Nov 23 15:02:53 crc kubenswrapper[5050]: I1123 15:02:53.063025 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c45b316d-f5ad-4371-a968-a3927907eb63-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "c45b316d-f5ad-4371-a968-a3927907eb63" (UID: "c45b316d-f5ad-4371-a968-a3927907eb63"). InnerVolumeSpecName "nova-metadata-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:02:53 crc kubenswrapper[5050]: I1123 15:02:53.093900 5050 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c45b316d-f5ad-4371-a968-a3927907eb63-logs\") on node \"crc\" DevicePath \"\"" Nov 23 15:02:53 crc kubenswrapper[5050]: I1123 15:02:53.093931 5050 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c45b316d-f5ad-4371-a968-a3927907eb63-config-data\") on node \"crc\" DevicePath \"\"" Nov 23 15:02:53 crc kubenswrapper[5050]: I1123 15:02:53.093941 5050 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c45b316d-f5ad-4371-a968-a3927907eb63-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 15:02:53 crc kubenswrapper[5050]: I1123 15:02:53.093952 5050 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/c45b316d-f5ad-4371-a968-a3927907eb63-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 23 15:02:53 crc kubenswrapper[5050]: I1123 15:02:53.093963 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jhpqj\" (UniqueName: \"kubernetes.io/projected/c45b316d-f5ad-4371-a968-a3927907eb63-kube-api-access-jhpqj\") on node \"crc\" DevicePath \"\"" Nov 23 15:02:53 crc kubenswrapper[5050]: E1123 15:02:53.316741 5050 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="4697a6223c18966f8b2511b5be8e28ee2b37cdeaaeff04a38673897b95bcd990" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 23 15:02:53 crc kubenswrapper[5050]: E1123 15:02:53.318810 5050 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="4697a6223c18966f8b2511b5be8e28ee2b37cdeaaeff04a38673897b95bcd990" 
cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 23 15:02:53 crc kubenswrapper[5050]: E1123 15:02:53.332898 5050 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="4697a6223c18966f8b2511b5be8e28ee2b37cdeaaeff04a38673897b95bcd990" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 23 15:02:53 crc kubenswrapper[5050]: E1123 15:02:53.332963 5050 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="a3584331-f147-4f05-9652-397c75f881db" containerName="nova-scheduler-scheduler" Nov 23 15:02:53 crc kubenswrapper[5050]: I1123 15:02:53.395914 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 23 15:02:53 crc kubenswrapper[5050]: I1123 15:02:53.404754 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 23 15:02:53 crc kubenswrapper[5050]: I1123 15:02:53.425043 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 23 15:02:53 crc kubenswrapper[5050]: E1123 15:02:53.425566 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c45b316d-f5ad-4371-a968-a3927907eb63" containerName="nova-metadata-metadata" Nov 23 15:02:53 crc kubenswrapper[5050]: I1123 15:02:53.425588 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="c45b316d-f5ad-4371-a968-a3927907eb63" containerName="nova-metadata-metadata" Nov 23 15:02:53 crc kubenswrapper[5050]: E1123 15:02:53.425607 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c45b316d-f5ad-4371-a968-a3927907eb63" containerName="nova-metadata-log" Nov 23 15:02:53 crc kubenswrapper[5050]: I1123 15:02:53.425615 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="c45b316d-f5ad-4371-a968-a3927907eb63" containerName="nova-metadata-log" Nov 23 15:02:53 crc kubenswrapper[5050]: I1123 15:02:53.425818 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="c45b316d-f5ad-4371-a968-a3927907eb63" containerName="nova-metadata-metadata" Nov 23 15:02:53 crc kubenswrapper[5050]: I1123 15:02:53.425839 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="c45b316d-f5ad-4371-a968-a3927907eb63" containerName="nova-metadata-log" Nov 23 15:02:53 crc kubenswrapper[5050]: I1123 15:02:53.426992 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 23 15:02:53 crc kubenswrapper[5050]: I1123 15:02:53.430580 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 23 15:02:53 crc kubenswrapper[5050]: I1123 15:02:53.430856 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Nov 23 15:02:53 crc kubenswrapper[5050]: I1123 15:02:53.438960 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 23 15:02:53 crc kubenswrapper[5050]: I1123 15:02:53.503683 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7jhsd\" (UniqueName: \"kubernetes.io/projected/f4de9500-7d8f-4619-8ae3-579d82bd7f1c-kube-api-access-7jhsd\") pod \"nova-metadata-0\" (UID: \"f4de9500-7d8f-4619-8ae3-579d82bd7f1c\") " pod="openstack/nova-metadata-0" Nov 23 15:02:53 crc kubenswrapper[5050]: I1123 15:02:53.504273 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f4de9500-7d8f-4619-8ae3-579d82bd7f1c-config-data\") pod \"nova-metadata-0\" (UID: \"f4de9500-7d8f-4619-8ae3-579d82bd7f1c\") " pod="openstack/nova-metadata-0" Nov 23 15:02:53 crc kubenswrapper[5050]: I1123 15:02:53.504322 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f4de9500-7d8f-4619-8ae3-579d82bd7f1c-logs\") pod \"nova-metadata-0\" (UID: \"f4de9500-7d8f-4619-8ae3-579d82bd7f1c\") " pod="openstack/nova-metadata-0" Nov 23 15:02:53 crc kubenswrapper[5050]: I1123 15:02:53.504395 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/f4de9500-7d8f-4619-8ae3-579d82bd7f1c-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"f4de9500-7d8f-4619-8ae3-579d82bd7f1c\") " pod="openstack/nova-metadata-0" Nov 23 15:02:53 crc kubenswrapper[5050]: I1123 15:02:53.504505 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f4de9500-7d8f-4619-8ae3-579d82bd7f1c-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"f4de9500-7d8f-4619-8ae3-579d82bd7f1c\") " pod="openstack/nova-metadata-0" Nov 23 15:02:53 crc kubenswrapper[5050]: I1123 15:02:53.564368 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c45b316d-f5ad-4371-a968-a3927907eb63" path="/var/lib/kubelet/pods/c45b316d-f5ad-4371-a968-a3927907eb63/volumes" Nov 23 15:02:53 crc kubenswrapper[5050]: I1123 15:02:53.607023 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7jhsd\" (UniqueName: \"kubernetes.io/projected/f4de9500-7d8f-4619-8ae3-579d82bd7f1c-kube-api-access-7jhsd\") pod \"nova-metadata-0\" (UID: \"f4de9500-7d8f-4619-8ae3-579d82bd7f1c\") " pod="openstack/nova-metadata-0" Nov 23 15:02:53 crc kubenswrapper[5050]: I1123 15:02:53.607129 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f4de9500-7d8f-4619-8ae3-579d82bd7f1c-config-data\") pod \"nova-metadata-0\" (UID: \"f4de9500-7d8f-4619-8ae3-579d82bd7f1c\") " pod="openstack/nova-metadata-0" Nov 23 15:02:53 crc kubenswrapper[5050]: I1123 15:02:53.607157 5050 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f4de9500-7d8f-4619-8ae3-579d82bd7f1c-logs\") pod \"nova-metadata-0\" (UID: \"f4de9500-7d8f-4619-8ae3-579d82bd7f1c\") " pod="openstack/nova-metadata-0" Nov 23 15:02:53 crc kubenswrapper[5050]: I1123 15:02:53.607196 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/f4de9500-7d8f-4619-8ae3-579d82bd7f1c-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"f4de9500-7d8f-4619-8ae3-579d82bd7f1c\") " pod="openstack/nova-metadata-0" Nov 23 15:02:53 crc kubenswrapper[5050]: I1123 15:02:53.607236 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f4de9500-7d8f-4619-8ae3-579d82bd7f1c-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"f4de9500-7d8f-4619-8ae3-579d82bd7f1c\") " pod="openstack/nova-metadata-0" Nov 23 15:02:53 crc kubenswrapper[5050]: I1123 15:02:53.608408 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f4de9500-7d8f-4619-8ae3-579d82bd7f1c-logs\") pod \"nova-metadata-0\" (UID: \"f4de9500-7d8f-4619-8ae3-579d82bd7f1c\") " pod="openstack/nova-metadata-0" Nov 23 15:02:53 crc kubenswrapper[5050]: I1123 15:02:53.617140 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/f4de9500-7d8f-4619-8ae3-579d82bd7f1c-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"f4de9500-7d8f-4619-8ae3-579d82bd7f1c\") " pod="openstack/nova-metadata-0" Nov 23 15:02:53 crc kubenswrapper[5050]: I1123 15:02:53.619621 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f4de9500-7d8f-4619-8ae3-579d82bd7f1c-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"f4de9500-7d8f-4619-8ae3-579d82bd7f1c\") " pod="openstack/nova-metadata-0" Nov 23 15:02:53 crc kubenswrapper[5050]: I1123 15:02:53.627174 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f4de9500-7d8f-4619-8ae3-579d82bd7f1c-config-data\") pod \"nova-metadata-0\" (UID: \"f4de9500-7d8f-4619-8ae3-579d82bd7f1c\") " pod="openstack/nova-metadata-0" Nov 23 15:02:53 crc kubenswrapper[5050]: I1123 15:02:53.630848 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7jhsd\" (UniqueName: \"kubernetes.io/projected/f4de9500-7d8f-4619-8ae3-579d82bd7f1c-kube-api-access-7jhsd\") pod \"nova-metadata-0\" (UID: \"f4de9500-7d8f-4619-8ae3-579d82bd7f1c\") " pod="openstack/nova-metadata-0" Nov 23 15:02:53 crc kubenswrapper[5050]: I1123 15:02:53.745338 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 23 15:02:54 crc kubenswrapper[5050]: I1123 15:02:54.011885 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"75e360ca-c84a-4806-a86a-86924a639cfc","Type":"ContainerStarted","Data":"dc18dcf10f3de24a9d22f69583ddef25bdb552099564e7a29e69ffc7a506a441"} Nov 23 15:02:54 crc kubenswrapper[5050]: I1123 15:02:54.012358 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"75e360ca-c84a-4806-a86a-86924a639cfc","Type":"ContainerStarted","Data":"db7d68dc7333fc40bc147bb79c9d5959119d3d682ba64ee4ad8ec86aa29faffc"} Nov 23 15:02:54 crc kubenswrapper[5050]: I1123 15:02:54.012624 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-conductor-0" Nov 23 15:02:54 crc kubenswrapper[5050]: I1123 15:02:54.018279 5050 generic.go:334] "Generic (PLEG): container finished" podID="502f3f9b-d57f-4f0b-bca8-c74f139cb28e" containerID="15dcfb17c69865a099d9ce6907920ef90f8b98c5fe76f727295785a19af0728e" exitCode=0 Nov 23 15:02:54 crc kubenswrapper[5050]: I1123 15:02:54.018327 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"502f3f9b-d57f-4f0b-bca8-c74f139cb28e","Type":"ContainerDied","Data":"15dcfb17c69865a099d9ce6907920ef90f8b98c5fe76f727295785a19af0728e"} Nov 23 15:02:54 crc kubenswrapper[5050]: I1123 15:02:54.203145 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 23 15:02:54 crc kubenswrapper[5050]: I1123 15:02:54.228843 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-0" podStartSLOduration=2.22879952 podStartE2EDuration="2.22879952s" podCreationTimestamp="2025-11-23 15:02:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 15:02:54.034908989 +0000 UTC m=+1269.201905464" watchObservedRunningTime="2025-11-23 15:02:54.22879952 +0000 UTC m=+1269.395796015" Nov 23 15:02:54 crc kubenswrapper[5050]: I1123 15:02:54.328160 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/502f3f9b-d57f-4f0b-bca8-c74f139cb28e-log-httpd\") pod \"502f3f9b-d57f-4f0b-bca8-c74f139cb28e\" (UID: \"502f3f9b-d57f-4f0b-bca8-c74f139cb28e\") " Nov 23 15:02:54 crc kubenswrapper[5050]: I1123 15:02:54.328320 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n886n\" (UniqueName: \"kubernetes.io/projected/502f3f9b-d57f-4f0b-bca8-c74f139cb28e-kube-api-access-n886n\") pod \"502f3f9b-d57f-4f0b-bca8-c74f139cb28e\" (UID: \"502f3f9b-d57f-4f0b-bca8-c74f139cb28e\") " Nov 23 15:02:54 crc kubenswrapper[5050]: I1123 15:02:54.328425 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/502f3f9b-d57f-4f0b-bca8-c74f139cb28e-config-data\") pod \"502f3f9b-d57f-4f0b-bca8-c74f139cb28e\" (UID: \"502f3f9b-d57f-4f0b-bca8-c74f139cb28e\") " Nov 23 15:02:54 crc kubenswrapper[5050]: I1123 15:02:54.328555 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/502f3f9b-d57f-4f0b-bca8-c74f139cb28e-run-httpd\") pod \"502f3f9b-d57f-4f0b-bca8-c74f139cb28e\" (UID: \"502f3f9b-d57f-4f0b-bca8-c74f139cb28e\") " Nov 23 15:02:54 crc 
kubenswrapper[5050]: I1123 15:02:54.328608 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/502f3f9b-d57f-4f0b-bca8-c74f139cb28e-scripts\") pod \"502f3f9b-d57f-4f0b-bca8-c74f139cb28e\" (UID: \"502f3f9b-d57f-4f0b-bca8-c74f139cb28e\") " Nov 23 15:02:54 crc kubenswrapper[5050]: I1123 15:02:54.328629 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/502f3f9b-d57f-4f0b-bca8-c74f139cb28e-sg-core-conf-yaml\") pod \"502f3f9b-d57f-4f0b-bca8-c74f139cb28e\" (UID: \"502f3f9b-d57f-4f0b-bca8-c74f139cb28e\") " Nov 23 15:02:54 crc kubenswrapper[5050]: I1123 15:02:54.328674 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/502f3f9b-d57f-4f0b-bca8-c74f139cb28e-combined-ca-bundle\") pod \"502f3f9b-d57f-4f0b-bca8-c74f139cb28e\" (UID: \"502f3f9b-d57f-4f0b-bca8-c74f139cb28e\") " Nov 23 15:02:54 crc kubenswrapper[5050]: I1123 15:02:54.329331 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/502f3f9b-d57f-4f0b-bca8-c74f139cb28e-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "502f3f9b-d57f-4f0b-bca8-c74f139cb28e" (UID: "502f3f9b-d57f-4f0b-bca8-c74f139cb28e"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 15:02:54 crc kubenswrapper[5050]: I1123 15:02:54.329986 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/502f3f9b-d57f-4f0b-bca8-c74f139cb28e-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "502f3f9b-d57f-4f0b-bca8-c74f139cb28e" (UID: "502f3f9b-d57f-4f0b-bca8-c74f139cb28e"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 15:02:54 crc kubenswrapper[5050]: I1123 15:02:54.337580 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/502f3f9b-d57f-4f0b-bca8-c74f139cb28e-scripts" (OuterVolumeSpecName: "scripts") pod "502f3f9b-d57f-4f0b-bca8-c74f139cb28e" (UID: "502f3f9b-d57f-4f0b-bca8-c74f139cb28e"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:02:54 crc kubenswrapper[5050]: I1123 15:02:54.338848 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/502f3f9b-d57f-4f0b-bca8-c74f139cb28e-kube-api-access-n886n" (OuterVolumeSpecName: "kube-api-access-n886n") pod "502f3f9b-d57f-4f0b-bca8-c74f139cb28e" (UID: "502f3f9b-d57f-4f0b-bca8-c74f139cb28e"). InnerVolumeSpecName "kube-api-access-n886n". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:02:54 crc kubenswrapper[5050]: I1123 15:02:54.377223 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 23 15:02:54 crc kubenswrapper[5050]: I1123 15:02:54.379667 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/502f3f9b-d57f-4f0b-bca8-c74f139cb28e-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "502f3f9b-d57f-4f0b-bca8-c74f139cb28e" (UID: "502f3f9b-d57f-4f0b-bca8-c74f139cb28e"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:02:54 crc kubenswrapper[5050]: I1123 15:02:54.434158 5050 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/502f3f9b-d57f-4f0b-bca8-c74f139cb28e-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 23 15:02:54 crc kubenswrapper[5050]: I1123 15:02:54.434200 5050 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/502f3f9b-d57f-4f0b-bca8-c74f139cb28e-scripts\") on node \"crc\" DevicePath \"\"" Nov 23 15:02:54 crc kubenswrapper[5050]: I1123 15:02:54.434213 5050 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/502f3f9b-d57f-4f0b-bca8-c74f139cb28e-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 23 15:02:54 crc kubenswrapper[5050]: I1123 15:02:54.434224 5050 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/502f3f9b-d57f-4f0b-bca8-c74f139cb28e-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 23 15:02:54 crc kubenswrapper[5050]: I1123 15:02:54.434235 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n886n\" (UniqueName: \"kubernetes.io/projected/502f3f9b-d57f-4f0b-bca8-c74f139cb28e-kube-api-access-n886n\") on node \"crc\" DevicePath \"\"" Nov 23 15:02:54 crc kubenswrapper[5050]: I1123 15:02:54.452575 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/502f3f9b-d57f-4f0b-bca8-c74f139cb28e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "502f3f9b-d57f-4f0b-bca8-c74f139cb28e" (UID: "502f3f9b-d57f-4f0b-bca8-c74f139cb28e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:02:54 crc kubenswrapper[5050]: I1123 15:02:54.474610 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/502f3f9b-d57f-4f0b-bca8-c74f139cb28e-config-data" (OuterVolumeSpecName: "config-data") pod "502f3f9b-d57f-4f0b-bca8-c74f139cb28e" (UID: "502f3f9b-d57f-4f0b-bca8-c74f139cb28e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:02:54 crc kubenswrapper[5050]: I1123 15:02:54.537699 5050 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/502f3f9b-d57f-4f0b-bca8-c74f139cb28e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 15:02:54 crc kubenswrapper[5050]: I1123 15:02:54.537755 5050 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/502f3f9b-d57f-4f0b-bca8-c74f139cb28e-config-data\") on node \"crc\" DevicePath \"\"" Nov 23 15:02:55 crc kubenswrapper[5050]: I1123 15:02:55.041780 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"502f3f9b-d57f-4f0b-bca8-c74f139cb28e","Type":"ContainerDied","Data":"2842553d3264cb20a1183862ab46d75413f078b9db0d773f351658ea0eebb971"} Nov 23 15:02:55 crc kubenswrapper[5050]: I1123 15:02:55.041800 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 23 15:02:55 crc kubenswrapper[5050]: I1123 15:02:55.041908 5050 scope.go:117] "RemoveContainer" containerID="ac42ebbf67a8dcb7ddb5df6c37469b0efbf7f6b0a30370b30b97ed374ffdf9c6" Nov 23 15:02:55 crc kubenswrapper[5050]: I1123 15:02:55.045958 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"f4de9500-7d8f-4619-8ae3-579d82bd7f1c","Type":"ContainerStarted","Data":"1ee1ee6664dd9194d64de4206b0ed4e97fb294a0083812d930930b7bf8f4b006"} Nov 23 15:02:55 crc kubenswrapper[5050]: I1123 15:02:55.046142 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"f4de9500-7d8f-4619-8ae3-579d82bd7f1c","Type":"ContainerStarted","Data":"e16dd4c5f277e9f18697d190962419a79baf44289a3f0d3db53a7fee7cd11b17"} Nov 23 15:02:55 crc kubenswrapper[5050]: I1123 15:02:55.046174 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"f4de9500-7d8f-4619-8ae3-579d82bd7f1c","Type":"ContainerStarted","Data":"036fdf0e0ab5937434f51543d7c9fa914cdfe5d2773c436a85129647783dbd75"} Nov 23 15:02:55 crc kubenswrapper[5050]: I1123 15:02:55.097515 5050 scope.go:117] "RemoveContainer" containerID="54bca3f2ac608d9e6f781c539a46416d75c9afb9a9f7929f5f053c30b3f9c130" Nov 23 15:02:55 crc kubenswrapper[5050]: I1123 15:02:55.102266 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.102212831 podStartE2EDuration="2.102212831s" podCreationTimestamp="2025-11-23 15:02:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 15:02:55.073706361 +0000 UTC m=+1270.240702866" watchObservedRunningTime="2025-11-23 15:02:55.102212831 +0000 UTC m=+1270.269209356" Nov 23 15:02:55 crc kubenswrapper[5050]: I1123 15:02:55.121576 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 23 15:02:55 crc kubenswrapper[5050]: I1123 15:02:55.133579 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 23 15:02:55 crc kubenswrapper[5050]: I1123 15:02:55.147130 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 23 15:02:55 crc kubenswrapper[5050]: E1123 15:02:55.147631 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="502f3f9b-d57f-4f0b-bca8-c74f139cb28e" containerName="ceilometer-notification-agent" Nov 23 15:02:55 crc kubenswrapper[5050]: I1123 15:02:55.147647 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="502f3f9b-d57f-4f0b-bca8-c74f139cb28e" containerName="ceilometer-notification-agent" Nov 23 15:02:55 crc kubenswrapper[5050]: E1123 15:02:55.147691 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="502f3f9b-d57f-4f0b-bca8-c74f139cb28e" containerName="ceilometer-central-agent" Nov 23 15:02:55 crc kubenswrapper[5050]: I1123 15:02:55.147697 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="502f3f9b-d57f-4f0b-bca8-c74f139cb28e" containerName="ceilometer-central-agent" Nov 23 15:02:55 crc kubenswrapper[5050]: E1123 15:02:55.147710 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="502f3f9b-d57f-4f0b-bca8-c74f139cb28e" containerName="sg-core" Nov 23 15:02:55 crc kubenswrapper[5050]: I1123 15:02:55.147716 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="502f3f9b-d57f-4f0b-bca8-c74f139cb28e" containerName="sg-core" Nov 23 15:02:55 
crc kubenswrapper[5050]: E1123 15:02:55.147980 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="502f3f9b-d57f-4f0b-bca8-c74f139cb28e" containerName="proxy-httpd" Nov 23 15:02:55 crc kubenswrapper[5050]: I1123 15:02:55.147990 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="502f3f9b-d57f-4f0b-bca8-c74f139cb28e" containerName="proxy-httpd" Nov 23 15:02:55 crc kubenswrapper[5050]: I1123 15:02:55.148185 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="502f3f9b-d57f-4f0b-bca8-c74f139cb28e" containerName="ceilometer-central-agent" Nov 23 15:02:55 crc kubenswrapper[5050]: I1123 15:02:55.148197 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="502f3f9b-d57f-4f0b-bca8-c74f139cb28e" containerName="sg-core" Nov 23 15:02:55 crc kubenswrapper[5050]: I1123 15:02:55.148206 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="502f3f9b-d57f-4f0b-bca8-c74f139cb28e" containerName="proxy-httpd" Nov 23 15:02:55 crc kubenswrapper[5050]: I1123 15:02:55.148222 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="502f3f9b-d57f-4f0b-bca8-c74f139cb28e" containerName="ceilometer-notification-agent" Nov 23 15:02:55 crc kubenswrapper[5050]: I1123 15:02:55.154913 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 23 15:02:55 crc kubenswrapper[5050]: I1123 15:02:55.162487 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 23 15:02:55 crc kubenswrapper[5050]: I1123 15:02:55.162782 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 23 15:02:55 crc kubenswrapper[5050]: I1123 15:02:55.169093 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Nov 23 15:02:55 crc kubenswrapper[5050]: I1123 15:02:55.193504 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 23 15:02:55 crc kubenswrapper[5050]: I1123 15:02:55.196805 5050 scope.go:117] "RemoveContainer" containerID="15dcfb17c69865a099d9ce6907920ef90f8b98c5fe76f727295785a19af0728e" Nov 23 15:02:55 crc kubenswrapper[5050]: I1123 15:02:55.226163 5050 scope.go:117] "RemoveContainer" containerID="e3edf6026dbde8c1a65c0a3b72bbcf4aca60b485fbef6d305cbf2300aeae879f" Nov 23 15:02:55 crc kubenswrapper[5050]: I1123 15:02:55.255792 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vmr77\" (UniqueName: \"kubernetes.io/projected/3023809c-c0ef-4911-8dcc-7cdd678e71c3-kube-api-access-vmr77\") pod \"ceilometer-0\" (UID: \"3023809c-c0ef-4911-8dcc-7cdd678e71c3\") " pod="openstack/ceilometer-0" Nov 23 15:02:55 crc kubenswrapper[5050]: I1123 15:02:55.255890 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3023809c-c0ef-4911-8dcc-7cdd678e71c3-run-httpd\") pod \"ceilometer-0\" (UID: \"3023809c-c0ef-4911-8dcc-7cdd678e71c3\") " pod="openstack/ceilometer-0" Nov 23 15:02:55 crc kubenswrapper[5050]: I1123 15:02:55.255948 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/3023809c-c0ef-4911-8dcc-7cdd678e71c3-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"3023809c-c0ef-4911-8dcc-7cdd678e71c3\") " pod="openstack/ceilometer-0" Nov 23 15:02:55 crc kubenswrapper[5050]: 
I1123 15:02:55.256012 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3023809c-c0ef-4911-8dcc-7cdd678e71c3-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"3023809c-c0ef-4911-8dcc-7cdd678e71c3\") " pod="openstack/ceilometer-0" Nov 23 15:02:55 crc kubenswrapper[5050]: I1123 15:02:55.256036 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3023809c-c0ef-4911-8dcc-7cdd678e71c3-log-httpd\") pod \"ceilometer-0\" (UID: \"3023809c-c0ef-4911-8dcc-7cdd678e71c3\") " pod="openstack/ceilometer-0" Nov 23 15:02:55 crc kubenswrapper[5050]: I1123 15:02:55.256085 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/3023809c-c0ef-4911-8dcc-7cdd678e71c3-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"3023809c-c0ef-4911-8dcc-7cdd678e71c3\") " pod="openstack/ceilometer-0" Nov 23 15:02:55 crc kubenswrapper[5050]: I1123 15:02:55.256105 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3023809c-c0ef-4911-8dcc-7cdd678e71c3-scripts\") pod \"ceilometer-0\" (UID: \"3023809c-c0ef-4911-8dcc-7cdd678e71c3\") " pod="openstack/ceilometer-0" Nov 23 15:02:55 crc kubenswrapper[5050]: I1123 15:02:55.256125 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3023809c-c0ef-4911-8dcc-7cdd678e71c3-config-data\") pod \"ceilometer-0\" (UID: \"3023809c-c0ef-4911-8dcc-7cdd678e71c3\") " pod="openstack/ceilometer-0" Nov 23 15:02:55 crc kubenswrapper[5050]: I1123 15:02:55.357578 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/3023809c-c0ef-4911-8dcc-7cdd678e71c3-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"3023809c-c0ef-4911-8dcc-7cdd678e71c3\") " pod="openstack/ceilometer-0" Nov 23 15:02:55 crc kubenswrapper[5050]: I1123 15:02:55.357642 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3023809c-c0ef-4911-8dcc-7cdd678e71c3-scripts\") pod \"ceilometer-0\" (UID: \"3023809c-c0ef-4911-8dcc-7cdd678e71c3\") " pod="openstack/ceilometer-0" Nov 23 15:02:55 crc kubenswrapper[5050]: I1123 15:02:55.357693 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3023809c-c0ef-4911-8dcc-7cdd678e71c3-config-data\") pod \"ceilometer-0\" (UID: \"3023809c-c0ef-4911-8dcc-7cdd678e71c3\") " pod="openstack/ceilometer-0" Nov 23 15:02:55 crc kubenswrapper[5050]: I1123 15:02:55.357739 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vmr77\" (UniqueName: \"kubernetes.io/projected/3023809c-c0ef-4911-8dcc-7cdd678e71c3-kube-api-access-vmr77\") pod \"ceilometer-0\" (UID: \"3023809c-c0ef-4911-8dcc-7cdd678e71c3\") " pod="openstack/ceilometer-0" Nov 23 15:02:55 crc kubenswrapper[5050]: I1123 15:02:55.357795 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3023809c-c0ef-4911-8dcc-7cdd678e71c3-run-httpd\") pod \"ceilometer-0\" (UID: \"3023809c-c0ef-4911-8dcc-7cdd678e71c3\") " 
pod="openstack/ceilometer-0" Nov 23 15:02:55 crc kubenswrapper[5050]: I1123 15:02:55.357839 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/3023809c-c0ef-4911-8dcc-7cdd678e71c3-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"3023809c-c0ef-4911-8dcc-7cdd678e71c3\") " pod="openstack/ceilometer-0" Nov 23 15:02:55 crc kubenswrapper[5050]: I1123 15:02:55.357891 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3023809c-c0ef-4911-8dcc-7cdd678e71c3-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"3023809c-c0ef-4911-8dcc-7cdd678e71c3\") " pod="openstack/ceilometer-0" Nov 23 15:02:55 crc kubenswrapper[5050]: I1123 15:02:55.357917 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3023809c-c0ef-4911-8dcc-7cdd678e71c3-log-httpd\") pod \"ceilometer-0\" (UID: \"3023809c-c0ef-4911-8dcc-7cdd678e71c3\") " pod="openstack/ceilometer-0" Nov 23 15:02:55 crc kubenswrapper[5050]: I1123 15:02:55.358684 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3023809c-c0ef-4911-8dcc-7cdd678e71c3-log-httpd\") pod \"ceilometer-0\" (UID: \"3023809c-c0ef-4911-8dcc-7cdd678e71c3\") " pod="openstack/ceilometer-0" Nov 23 15:02:55 crc kubenswrapper[5050]: I1123 15:02:55.358954 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3023809c-c0ef-4911-8dcc-7cdd678e71c3-run-httpd\") pod \"ceilometer-0\" (UID: \"3023809c-c0ef-4911-8dcc-7cdd678e71c3\") " pod="openstack/ceilometer-0" Nov 23 15:02:55 crc kubenswrapper[5050]: I1123 15:02:55.363506 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3023809c-c0ef-4911-8dcc-7cdd678e71c3-config-data\") pod \"ceilometer-0\" (UID: \"3023809c-c0ef-4911-8dcc-7cdd678e71c3\") " pod="openstack/ceilometer-0" Nov 23 15:02:55 crc kubenswrapper[5050]: I1123 15:02:55.366173 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/3023809c-c0ef-4911-8dcc-7cdd678e71c3-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"3023809c-c0ef-4911-8dcc-7cdd678e71c3\") " pod="openstack/ceilometer-0" Nov 23 15:02:55 crc kubenswrapper[5050]: I1123 15:02:55.366208 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/3023809c-c0ef-4911-8dcc-7cdd678e71c3-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"3023809c-c0ef-4911-8dcc-7cdd678e71c3\") " pod="openstack/ceilometer-0" Nov 23 15:02:55 crc kubenswrapper[5050]: I1123 15:02:55.366225 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3023809c-c0ef-4911-8dcc-7cdd678e71c3-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"3023809c-c0ef-4911-8dcc-7cdd678e71c3\") " pod="openstack/ceilometer-0" Nov 23 15:02:55 crc kubenswrapper[5050]: I1123 15:02:55.366727 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3023809c-c0ef-4911-8dcc-7cdd678e71c3-scripts\") pod \"ceilometer-0\" (UID: \"3023809c-c0ef-4911-8dcc-7cdd678e71c3\") " pod="openstack/ceilometer-0" Nov 23 15:02:55 crc 
kubenswrapper[5050]: I1123 15:02:55.385341 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vmr77\" (UniqueName: \"kubernetes.io/projected/3023809c-c0ef-4911-8dcc-7cdd678e71c3-kube-api-access-vmr77\") pod \"ceilometer-0\" (UID: \"3023809c-c0ef-4911-8dcc-7cdd678e71c3\") " pod="openstack/ceilometer-0" Nov 23 15:02:55 crc kubenswrapper[5050]: I1123 15:02:55.505561 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 23 15:02:55 crc kubenswrapper[5050]: I1123 15:02:55.607132 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="502f3f9b-d57f-4f0b-bca8-c74f139cb28e" path="/var/lib/kubelet/pods/502f3f9b-d57f-4f0b-bca8-c74f139cb28e/volumes" Nov 23 15:02:55 crc kubenswrapper[5050]: I1123 15:02:55.753772 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 23 15:02:55 crc kubenswrapper[5050]: I1123 15:02:55.872123 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/275dac05-e3bc-4f6d-ab7b-f95acbb50692-config-data\") pod \"275dac05-e3bc-4f6d-ab7b-f95acbb50692\" (UID: \"275dac05-e3bc-4f6d-ab7b-f95acbb50692\") " Nov 23 15:02:55 crc kubenswrapper[5050]: I1123 15:02:55.872301 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/275dac05-e3bc-4f6d-ab7b-f95acbb50692-combined-ca-bundle\") pod \"275dac05-e3bc-4f6d-ab7b-f95acbb50692\" (UID: \"275dac05-e3bc-4f6d-ab7b-f95acbb50692\") " Nov 23 15:02:55 crc kubenswrapper[5050]: I1123 15:02:55.872395 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n2g9f\" (UniqueName: \"kubernetes.io/projected/275dac05-e3bc-4f6d-ab7b-f95acbb50692-kube-api-access-n2g9f\") pod \"275dac05-e3bc-4f6d-ab7b-f95acbb50692\" (UID: \"275dac05-e3bc-4f6d-ab7b-f95acbb50692\") " Nov 23 15:02:55 crc kubenswrapper[5050]: I1123 15:02:55.873318 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/275dac05-e3bc-4f6d-ab7b-f95acbb50692-logs\") pod \"275dac05-e3bc-4f6d-ab7b-f95acbb50692\" (UID: \"275dac05-e3bc-4f6d-ab7b-f95acbb50692\") " Nov 23 15:02:55 crc kubenswrapper[5050]: I1123 15:02:55.873955 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/275dac05-e3bc-4f6d-ab7b-f95acbb50692-logs" (OuterVolumeSpecName: "logs") pod "275dac05-e3bc-4f6d-ab7b-f95acbb50692" (UID: "275dac05-e3bc-4f6d-ab7b-f95acbb50692"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 15:02:55 crc kubenswrapper[5050]: I1123 15:02:55.890840 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/275dac05-e3bc-4f6d-ab7b-f95acbb50692-kube-api-access-n2g9f" (OuterVolumeSpecName: "kube-api-access-n2g9f") pod "275dac05-e3bc-4f6d-ab7b-f95acbb50692" (UID: "275dac05-e3bc-4f6d-ab7b-f95acbb50692"). InnerVolumeSpecName "kube-api-access-n2g9f". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:02:55 crc kubenswrapper[5050]: I1123 15:02:55.913664 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/275dac05-e3bc-4f6d-ab7b-f95acbb50692-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "275dac05-e3bc-4f6d-ab7b-f95acbb50692" (UID: "275dac05-e3bc-4f6d-ab7b-f95acbb50692"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:02:55 crc kubenswrapper[5050]: I1123 15:02:55.923507 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/275dac05-e3bc-4f6d-ab7b-f95acbb50692-config-data" (OuterVolumeSpecName: "config-data") pod "275dac05-e3bc-4f6d-ab7b-f95acbb50692" (UID: "275dac05-e3bc-4f6d-ab7b-f95acbb50692"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:02:55 crc kubenswrapper[5050]: I1123 15:02:55.976294 5050 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/275dac05-e3bc-4f6d-ab7b-f95acbb50692-config-data\") on node \"crc\" DevicePath \"\"" Nov 23 15:02:55 crc kubenswrapper[5050]: I1123 15:02:55.976333 5050 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/275dac05-e3bc-4f6d-ab7b-f95acbb50692-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 15:02:55 crc kubenswrapper[5050]: I1123 15:02:55.976346 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n2g9f\" (UniqueName: \"kubernetes.io/projected/275dac05-e3bc-4f6d-ab7b-f95acbb50692-kube-api-access-n2g9f\") on node \"crc\" DevicePath \"\"" Nov 23 15:02:55 crc kubenswrapper[5050]: I1123 15:02:55.976356 5050 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/275dac05-e3bc-4f6d-ab7b-f95acbb50692-logs\") on node \"crc\" DevicePath \"\"" Nov 23 15:02:55 crc kubenswrapper[5050]: I1123 15:02:55.981388 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 23 15:02:56 crc kubenswrapper[5050]: I1123 15:02:56.033809 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 23 15:02:56 crc kubenswrapper[5050]: I1123 15:02:56.059830 5050 generic.go:334] "Generic (PLEG): container finished" podID="275dac05-e3bc-4f6d-ab7b-f95acbb50692" containerID="97453c1c0289846eb214645e2ffc35025120692c5b4ce5d3b7e6b518b04dba89" exitCode=0 Nov 23 15:02:56 crc kubenswrapper[5050]: I1123 15:02:56.059904 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"275dac05-e3bc-4f6d-ab7b-f95acbb50692","Type":"ContainerDied","Data":"97453c1c0289846eb214645e2ffc35025120692c5b4ce5d3b7e6b518b04dba89"} Nov 23 15:02:56 crc kubenswrapper[5050]: I1123 15:02:56.059942 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"275dac05-e3bc-4f6d-ab7b-f95acbb50692","Type":"ContainerDied","Data":"85831ce8008f5c8fbd7e39fe5ca39a0c9902a0df794d35068c61750e2b8c35de"} Nov 23 15:02:56 crc kubenswrapper[5050]: I1123 15:02:56.059961 5050 scope.go:117] "RemoveContainer" containerID="97453c1c0289846eb214645e2ffc35025120692c5b4ce5d3b7e6b518b04dba89" Nov 23 15:02:56 crc kubenswrapper[5050]: I1123 15:02:56.060102 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 23 15:02:56 crc kubenswrapper[5050]: I1123 15:02:56.084900 5050 generic.go:334] "Generic (PLEG): container finished" podID="a3584331-f147-4f05-9652-397c75f881db" containerID="4697a6223c18966f8b2511b5be8e28ee2b37cdeaaeff04a38673897b95bcd990" exitCode=0 Nov 23 15:02:56 crc kubenswrapper[5050]: I1123 15:02:56.084977 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"a3584331-f147-4f05-9652-397c75f881db","Type":"ContainerDied","Data":"4697a6223c18966f8b2511b5be8e28ee2b37cdeaaeff04a38673897b95bcd990"} Nov 23 15:02:56 crc kubenswrapper[5050]: I1123 15:02:56.085010 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"a3584331-f147-4f05-9652-397c75f881db","Type":"ContainerDied","Data":"bca61fd1fc407cff9e25428cf7595b1190554cdcd8dc40953f61c0223893c2fc"} Nov 23 15:02:56 crc kubenswrapper[5050]: I1123 15:02:56.085079 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 23 15:02:56 crc kubenswrapper[5050]: I1123 15:02:56.087770 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3023809c-c0ef-4911-8dcc-7cdd678e71c3","Type":"ContainerStarted","Data":"46b3fbfe84fd5573dd279adf4db4bc0149c22655aa155ba1a49f92dd5c40b2e8"} Nov 23 15:02:56 crc kubenswrapper[5050]: I1123 15:02:56.136017 5050 scope.go:117] "RemoveContainer" containerID="d2f15cce3da3c5052a0fcc24b40f9ff904f283f118a7418f605a1e21276839e4" Nov 23 15:02:56 crc kubenswrapper[5050]: I1123 15:02:56.156586 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 23 15:02:56 crc kubenswrapper[5050]: I1123 15:02:56.166905 5050 scope.go:117] "RemoveContainer" containerID="97453c1c0289846eb214645e2ffc35025120692c5b4ce5d3b7e6b518b04dba89" Nov 23 15:02:56 crc kubenswrapper[5050]: E1123 15:02:56.167721 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"97453c1c0289846eb214645e2ffc35025120692c5b4ce5d3b7e6b518b04dba89\": container with ID starting with 97453c1c0289846eb214645e2ffc35025120692c5b4ce5d3b7e6b518b04dba89 not found: ID does not exist" containerID="97453c1c0289846eb214645e2ffc35025120692c5b4ce5d3b7e6b518b04dba89" Nov 23 15:02:56 crc kubenswrapper[5050]: I1123 15:02:56.167780 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"97453c1c0289846eb214645e2ffc35025120692c5b4ce5d3b7e6b518b04dba89"} err="failed to get container status \"97453c1c0289846eb214645e2ffc35025120692c5b4ce5d3b7e6b518b04dba89\": rpc error: code = NotFound desc = could not find container \"97453c1c0289846eb214645e2ffc35025120692c5b4ce5d3b7e6b518b04dba89\": container with ID starting with 97453c1c0289846eb214645e2ffc35025120692c5b4ce5d3b7e6b518b04dba89 not found: ID does not exist" Nov 23 15:02:56 crc kubenswrapper[5050]: I1123 15:02:56.167818 5050 scope.go:117] "RemoveContainer" containerID="d2f15cce3da3c5052a0fcc24b40f9ff904f283f118a7418f605a1e21276839e4" Nov 23 15:02:56 crc kubenswrapper[5050]: E1123 15:02:56.169153 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d2f15cce3da3c5052a0fcc24b40f9ff904f283f118a7418f605a1e21276839e4\": container with ID starting with d2f15cce3da3c5052a0fcc24b40f9ff904f283f118a7418f605a1e21276839e4 not found: ID does not exist" 
containerID="d2f15cce3da3c5052a0fcc24b40f9ff904f283f118a7418f605a1e21276839e4" Nov 23 15:02:56 crc kubenswrapper[5050]: I1123 15:02:56.169184 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d2f15cce3da3c5052a0fcc24b40f9ff904f283f118a7418f605a1e21276839e4"} err="failed to get container status \"d2f15cce3da3c5052a0fcc24b40f9ff904f283f118a7418f605a1e21276839e4\": rpc error: code = NotFound desc = could not find container \"d2f15cce3da3c5052a0fcc24b40f9ff904f283f118a7418f605a1e21276839e4\": container with ID starting with d2f15cce3da3c5052a0fcc24b40f9ff904f283f118a7418f605a1e21276839e4 not found: ID does not exist" Nov 23 15:02:56 crc kubenswrapper[5050]: I1123 15:02:56.169201 5050 scope.go:117] "RemoveContainer" containerID="4697a6223c18966f8b2511b5be8e28ee2b37cdeaaeff04a38673897b95bcd990" Nov 23 15:02:56 crc kubenswrapper[5050]: I1123 15:02:56.175304 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 23 15:02:56 crc kubenswrapper[5050]: I1123 15:02:56.187834 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 23 15:02:56 crc kubenswrapper[5050]: E1123 15:02:56.188398 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="275dac05-e3bc-4f6d-ab7b-f95acbb50692" containerName="nova-api-api" Nov 23 15:02:56 crc kubenswrapper[5050]: I1123 15:02:56.188420 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="275dac05-e3bc-4f6d-ab7b-f95acbb50692" containerName="nova-api-api" Nov 23 15:02:56 crc kubenswrapper[5050]: E1123 15:02:56.188464 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a3584331-f147-4f05-9652-397c75f881db" containerName="nova-scheduler-scheduler" Nov 23 15:02:56 crc kubenswrapper[5050]: I1123 15:02:56.188474 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="a3584331-f147-4f05-9652-397c75f881db" containerName="nova-scheduler-scheduler" Nov 23 15:02:56 crc kubenswrapper[5050]: E1123 15:02:56.188494 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="275dac05-e3bc-4f6d-ab7b-f95acbb50692" containerName="nova-api-log" Nov 23 15:02:56 crc kubenswrapper[5050]: I1123 15:02:56.188500 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="275dac05-e3bc-4f6d-ab7b-f95acbb50692" containerName="nova-api-log" Nov 23 15:02:56 crc kubenswrapper[5050]: I1123 15:02:56.188732 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="275dac05-e3bc-4f6d-ab7b-f95acbb50692" containerName="nova-api-log" Nov 23 15:02:56 crc kubenswrapper[5050]: I1123 15:02:56.188751 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="275dac05-e3bc-4f6d-ab7b-f95acbb50692" containerName="nova-api-api" Nov 23 15:02:56 crc kubenswrapper[5050]: I1123 15:02:56.188762 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="a3584331-f147-4f05-9652-397c75f881db" containerName="nova-scheduler-scheduler" Nov 23 15:02:56 crc kubenswrapper[5050]: I1123 15:02:56.189883 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 23 15:02:56 crc kubenswrapper[5050]: I1123 15:02:56.194317 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2965j\" (UniqueName: \"kubernetes.io/projected/a3584331-f147-4f05-9652-397c75f881db-kube-api-access-2965j\") pod \"a3584331-f147-4f05-9652-397c75f881db\" (UID: \"a3584331-f147-4f05-9652-397c75f881db\") " Nov 23 15:02:56 crc kubenswrapper[5050]: I1123 15:02:56.194434 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a3584331-f147-4f05-9652-397c75f881db-combined-ca-bundle\") pod \"a3584331-f147-4f05-9652-397c75f881db\" (UID: \"a3584331-f147-4f05-9652-397c75f881db\") " Nov 23 15:02:56 crc kubenswrapper[5050]: I1123 15:02:56.194671 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a3584331-f147-4f05-9652-397c75f881db-config-data\") pod \"a3584331-f147-4f05-9652-397c75f881db\" (UID: \"a3584331-f147-4f05-9652-397c75f881db\") " Nov 23 15:02:56 crc kubenswrapper[5050]: I1123 15:02:56.196416 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 23 15:02:56 crc kubenswrapper[5050]: I1123 15:02:56.196993 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 23 15:02:56 crc kubenswrapper[5050]: I1123 15:02:56.204044 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a3584331-f147-4f05-9652-397c75f881db-kube-api-access-2965j" (OuterVolumeSpecName: "kube-api-access-2965j") pod "a3584331-f147-4f05-9652-397c75f881db" (UID: "a3584331-f147-4f05-9652-397c75f881db"). InnerVolumeSpecName "kube-api-access-2965j". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:02:56 crc kubenswrapper[5050]: I1123 15:02:56.214598 5050 scope.go:117] "RemoveContainer" containerID="4697a6223c18966f8b2511b5be8e28ee2b37cdeaaeff04a38673897b95bcd990" Nov 23 15:02:56 crc kubenswrapper[5050]: E1123 15:02:56.215615 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4697a6223c18966f8b2511b5be8e28ee2b37cdeaaeff04a38673897b95bcd990\": container with ID starting with 4697a6223c18966f8b2511b5be8e28ee2b37cdeaaeff04a38673897b95bcd990 not found: ID does not exist" containerID="4697a6223c18966f8b2511b5be8e28ee2b37cdeaaeff04a38673897b95bcd990" Nov 23 15:02:56 crc kubenswrapper[5050]: I1123 15:02:56.215686 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4697a6223c18966f8b2511b5be8e28ee2b37cdeaaeff04a38673897b95bcd990"} err="failed to get container status \"4697a6223c18966f8b2511b5be8e28ee2b37cdeaaeff04a38673897b95bcd990\": rpc error: code = NotFound desc = could not find container \"4697a6223c18966f8b2511b5be8e28ee2b37cdeaaeff04a38673897b95bcd990\": container with ID starting with 4697a6223c18966f8b2511b5be8e28ee2b37cdeaaeff04a38673897b95bcd990 not found: ID does not exist" Nov 23 15:02:56 crc kubenswrapper[5050]: I1123 15:02:56.230245 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a3584331-f147-4f05-9652-397c75f881db-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a3584331-f147-4f05-9652-397c75f881db" (UID: "a3584331-f147-4f05-9652-397c75f881db"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:02:56 crc kubenswrapper[5050]: I1123 15:02:56.237416 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a3584331-f147-4f05-9652-397c75f881db-config-data" (OuterVolumeSpecName: "config-data") pod "a3584331-f147-4f05-9652-397c75f881db" (UID: "a3584331-f147-4f05-9652-397c75f881db"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:02:56 crc kubenswrapper[5050]: I1123 15:02:56.298826 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e1ed84f3-a226-4de3-b5f1-7ba557c1ae4d-logs\") pod \"nova-api-0\" (UID: \"e1ed84f3-a226-4de3-b5f1-7ba557c1ae4d\") " pod="openstack/nova-api-0" Nov 23 15:02:56 crc kubenswrapper[5050]: I1123 15:02:56.298944 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qz5dn\" (UniqueName: \"kubernetes.io/projected/e1ed84f3-a226-4de3-b5f1-7ba557c1ae4d-kube-api-access-qz5dn\") pod \"nova-api-0\" (UID: \"e1ed84f3-a226-4de3-b5f1-7ba557c1ae4d\") " pod="openstack/nova-api-0" Nov 23 15:02:56 crc kubenswrapper[5050]: I1123 15:02:56.299023 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e1ed84f3-a226-4de3-b5f1-7ba557c1ae4d-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"e1ed84f3-a226-4de3-b5f1-7ba557c1ae4d\") " pod="openstack/nova-api-0" Nov 23 15:02:56 crc kubenswrapper[5050]: I1123 15:02:56.299404 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e1ed84f3-a226-4de3-b5f1-7ba557c1ae4d-config-data\") pod \"nova-api-0\" (UID: \"e1ed84f3-a226-4de3-b5f1-7ba557c1ae4d\") " pod="openstack/nova-api-0" Nov 23 15:02:56 crc kubenswrapper[5050]: I1123 15:02:56.299506 5050 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a3584331-f147-4f05-9652-397c75f881db-config-data\") on node \"crc\" DevicePath \"\"" Nov 23 15:02:56 crc kubenswrapper[5050]: I1123 15:02:56.299525 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2965j\" (UniqueName: \"kubernetes.io/projected/a3584331-f147-4f05-9652-397c75f881db-kube-api-access-2965j\") on node \"crc\" DevicePath \"\"" Nov 23 15:02:56 crc kubenswrapper[5050]: I1123 15:02:56.299562 5050 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a3584331-f147-4f05-9652-397c75f881db-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 15:02:56 crc kubenswrapper[5050]: I1123 15:02:56.313818 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Nov 23 15:02:56 crc kubenswrapper[5050]: I1123 15:02:56.401858 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e1ed84f3-a226-4de3-b5f1-7ba557c1ae4d-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"e1ed84f3-a226-4de3-b5f1-7ba557c1ae4d\") " pod="openstack/nova-api-0" Nov 23 15:02:56 crc kubenswrapper[5050]: I1123 15:02:56.404331 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e1ed84f3-a226-4de3-b5f1-7ba557c1ae4d-config-data\") 
pod \"nova-api-0\" (UID: \"e1ed84f3-a226-4de3-b5f1-7ba557c1ae4d\") " pod="openstack/nova-api-0" Nov 23 15:02:56 crc kubenswrapper[5050]: I1123 15:02:56.411135 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e1ed84f3-a226-4de3-b5f1-7ba557c1ae4d-logs\") pod \"nova-api-0\" (UID: \"e1ed84f3-a226-4de3-b5f1-7ba557c1ae4d\") " pod="openstack/nova-api-0" Nov 23 15:02:56 crc kubenswrapper[5050]: I1123 15:02:56.411372 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qz5dn\" (UniqueName: \"kubernetes.io/projected/e1ed84f3-a226-4de3-b5f1-7ba557c1ae4d-kube-api-access-qz5dn\") pod \"nova-api-0\" (UID: \"e1ed84f3-a226-4de3-b5f1-7ba557c1ae4d\") " pod="openstack/nova-api-0" Nov 23 15:02:56 crc kubenswrapper[5050]: I1123 15:02:56.414280 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e1ed84f3-a226-4de3-b5f1-7ba557c1ae4d-logs\") pod \"nova-api-0\" (UID: \"e1ed84f3-a226-4de3-b5f1-7ba557c1ae4d\") " pod="openstack/nova-api-0" Nov 23 15:02:56 crc kubenswrapper[5050]: I1123 15:02:56.415294 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e1ed84f3-a226-4de3-b5f1-7ba557c1ae4d-config-data\") pod \"nova-api-0\" (UID: \"e1ed84f3-a226-4de3-b5f1-7ba557c1ae4d\") " pod="openstack/nova-api-0" Nov 23 15:02:56 crc kubenswrapper[5050]: I1123 15:02:56.415722 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e1ed84f3-a226-4de3-b5f1-7ba557c1ae4d-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"e1ed84f3-a226-4de3-b5f1-7ba557c1ae4d\") " pod="openstack/nova-api-0" Nov 23 15:02:56 crc kubenswrapper[5050]: I1123 15:02:56.436699 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qz5dn\" (UniqueName: \"kubernetes.io/projected/e1ed84f3-a226-4de3-b5f1-7ba557c1ae4d-kube-api-access-qz5dn\") pod \"nova-api-0\" (UID: \"e1ed84f3-a226-4de3-b5f1-7ba557c1ae4d\") " pod="openstack/nova-api-0" Nov 23 15:02:56 crc kubenswrapper[5050]: I1123 15:02:56.437240 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 23 15:02:56 crc kubenswrapper[5050]: I1123 15:02:56.463352 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Nov 23 15:02:56 crc kubenswrapper[5050]: I1123 15:02:56.492753 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 23 15:02:56 crc kubenswrapper[5050]: I1123 15:02:56.500795 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 23 15:02:56 crc kubenswrapper[5050]: I1123 15:02:56.505175 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 23 15:02:56 crc kubenswrapper[5050]: I1123 15:02:56.509238 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 23 15:02:56 crc kubenswrapper[5050]: I1123 15:02:56.530787 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 23 15:02:56 crc kubenswrapper[5050]: I1123 15:02:56.645192 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mw777\" (UniqueName: \"kubernetes.io/projected/ebc90782-9288-409b-97b6-06eedca6af69-kube-api-access-mw777\") pod \"nova-scheduler-0\" (UID: \"ebc90782-9288-409b-97b6-06eedca6af69\") " pod="openstack/nova-scheduler-0" Nov 23 15:02:56 crc kubenswrapper[5050]: I1123 15:02:56.645302 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ebc90782-9288-409b-97b6-06eedca6af69-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"ebc90782-9288-409b-97b6-06eedca6af69\") " pod="openstack/nova-scheduler-0" Nov 23 15:02:56 crc kubenswrapper[5050]: I1123 15:02:56.645376 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ebc90782-9288-409b-97b6-06eedca6af69-config-data\") pod \"nova-scheduler-0\" (UID: \"ebc90782-9288-409b-97b6-06eedca6af69\") " pod="openstack/nova-scheduler-0" Nov 23 15:02:56 crc kubenswrapper[5050]: I1123 15:02:56.748174 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ebc90782-9288-409b-97b6-06eedca6af69-config-data\") pod \"nova-scheduler-0\" (UID: \"ebc90782-9288-409b-97b6-06eedca6af69\") " pod="openstack/nova-scheduler-0" Nov 23 15:02:56 crc kubenswrapper[5050]: I1123 15:02:56.748259 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mw777\" (UniqueName: \"kubernetes.io/projected/ebc90782-9288-409b-97b6-06eedca6af69-kube-api-access-mw777\") pod \"nova-scheduler-0\" (UID: \"ebc90782-9288-409b-97b6-06eedca6af69\") " pod="openstack/nova-scheduler-0" Nov 23 15:02:56 crc kubenswrapper[5050]: I1123 15:02:56.748407 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ebc90782-9288-409b-97b6-06eedca6af69-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"ebc90782-9288-409b-97b6-06eedca6af69\") " pod="openstack/nova-scheduler-0" Nov 23 15:02:56 crc kubenswrapper[5050]: I1123 15:02:56.756689 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ebc90782-9288-409b-97b6-06eedca6af69-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"ebc90782-9288-409b-97b6-06eedca6af69\") " pod="openstack/nova-scheduler-0" Nov 23 15:02:56 crc kubenswrapper[5050]: I1123 15:02:56.758860 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ebc90782-9288-409b-97b6-06eedca6af69-config-data\") pod \"nova-scheduler-0\" (UID: \"ebc90782-9288-409b-97b6-06eedca6af69\") " pod="openstack/nova-scheduler-0" Nov 23 15:02:56 crc kubenswrapper[5050]: I1123 15:02:56.770250 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mw777\" (UniqueName: \"kubernetes.io/projected/ebc90782-9288-409b-97b6-06eedca6af69-kube-api-access-mw777\") pod \"nova-scheduler-0\" (UID: \"ebc90782-9288-409b-97b6-06eedca6af69\") " pod="openstack/nova-scheduler-0" 
Nov 23 15:02:56 crc kubenswrapper[5050]: I1123 15:02:56.915372 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 23 15:02:56 crc kubenswrapper[5050]: I1123 15:02:56.992856 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 23 15:02:57 crc kubenswrapper[5050]: I1123 15:02:57.109413 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"e1ed84f3-a226-4de3-b5f1-7ba557c1ae4d","Type":"ContainerStarted","Data":"b5b3c1368e59165a35d258332b1f23a5f44294a50081f5dde9d74d23d541f9bc"} Nov 23 15:02:57 crc kubenswrapper[5050]: I1123 15:02:57.158176 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3023809c-c0ef-4911-8dcc-7cdd678e71c3","Type":"ContainerStarted","Data":"efd9102008f33ce4b28d7c9c6a0fbc1001c14e99a0ece0af0a6e35b9e9062161"} Nov 23 15:02:57 crc kubenswrapper[5050]: I1123 15:02:57.474315 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 23 15:02:57 crc kubenswrapper[5050]: I1123 15:02:57.572654 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="275dac05-e3bc-4f6d-ab7b-f95acbb50692" path="/var/lib/kubelet/pods/275dac05-e3bc-4f6d-ab7b-f95acbb50692/volumes" Nov 23 15:02:57 crc kubenswrapper[5050]: I1123 15:02:57.573583 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a3584331-f147-4f05-9652-397c75f881db" path="/var/lib/kubelet/pods/a3584331-f147-4f05-9652-397c75f881db/volumes" Nov 23 15:02:58 crc kubenswrapper[5050]: I1123 15:02:58.177368 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3023809c-c0ef-4911-8dcc-7cdd678e71c3","Type":"ContainerStarted","Data":"55aa601d0049f276c0ae585090ac3d9812f82dd2ee3827db00dd15d9ff7f7f94"} Nov 23 15:02:58 crc kubenswrapper[5050]: I1123 15:02:58.179666 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"ebc90782-9288-409b-97b6-06eedca6af69","Type":"ContainerStarted","Data":"28797149a593494315d5778047a603218f729df770a8fe6758c9321d3bf2ef2e"} Nov 23 15:02:58 crc kubenswrapper[5050]: I1123 15:02:58.179703 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"ebc90782-9288-409b-97b6-06eedca6af69","Type":"ContainerStarted","Data":"866a94a802d2c4398c5c3f315e312e6ca12312e71616f1ea2bdd915ebec060bd"} Nov 23 15:02:58 crc kubenswrapper[5050]: I1123 15:02:58.182214 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"e1ed84f3-a226-4de3-b5f1-7ba557c1ae4d","Type":"ContainerStarted","Data":"2be0f7e13323ed3b94345fa2e9c64a5885580a2c8bcbfb005d4144dd16705054"} Nov 23 15:02:58 crc kubenswrapper[5050]: I1123 15:02:58.182282 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"e1ed84f3-a226-4de3-b5f1-7ba557c1ae4d","Type":"ContainerStarted","Data":"d2931ae007bc8543cad15627c35800ea000cc388ab6a6b562277bd995e34a4b7"} Nov 23 15:02:58 crc kubenswrapper[5050]: I1123 15:02:58.216966 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.216940381 podStartE2EDuration="2.216940381s" podCreationTimestamp="2025-11-23 15:02:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 15:02:58.204566909 +0000 UTC m=+1273.371563394" 
watchObservedRunningTime="2025-11-23 15:02:58.216940381 +0000 UTC m=+1273.383936866" Nov 23 15:02:58 crc kubenswrapper[5050]: I1123 15:02:58.228121 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.228100228 podStartE2EDuration="2.228100228s" podCreationTimestamp="2025-11-23 15:02:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 15:02:58.223075895 +0000 UTC m=+1273.390072400" watchObservedRunningTime="2025-11-23 15:02:58.228100228 +0000 UTC m=+1273.395096713" Nov 23 15:02:58 crc kubenswrapper[5050]: I1123 15:02:58.746059 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 23 15:02:58 crc kubenswrapper[5050]: I1123 15:02:58.746518 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 23 15:02:59 crc kubenswrapper[5050]: I1123 15:02:59.201832 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3023809c-c0ef-4911-8dcc-7cdd678e71c3","Type":"ContainerStarted","Data":"2846726b4a01de78ddce3c273b5cb13f09fe8655c34c83dbca6ba1ca50437720"} Nov 23 15:02:59 crc kubenswrapper[5050]: I1123 15:02:59.224560 5050 patch_prober.go:28] interesting pod/machine-config-daemon-hlrlq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 23 15:02:59 crc kubenswrapper[5050]: I1123 15:02:59.224680 5050 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 23 15:03:00 crc kubenswrapper[5050]: I1123 15:03:00.241153 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3023809c-c0ef-4911-8dcc-7cdd678e71c3","Type":"ContainerStarted","Data":"f9593a90f1a76e06505bfefaef0f5644dc8b4d4115d5ae465e03292937435228"} Nov 23 15:03:00 crc kubenswrapper[5050]: I1123 15:03:00.242344 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 23 15:03:00 crc kubenswrapper[5050]: I1123 15:03:00.263955 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.041063734 podStartE2EDuration="5.263939197s" podCreationTimestamp="2025-11-23 15:02:55 +0000 UTC" firstStartedPulling="2025-11-23 15:02:55.992798272 +0000 UTC m=+1271.159794767" lastFinishedPulling="2025-11-23 15:02:59.215673735 +0000 UTC m=+1274.382670230" observedRunningTime="2025-11-23 15:03:00.261015484 +0000 UTC m=+1275.428011969" watchObservedRunningTime="2025-11-23 15:03:00.263939197 +0000 UTC m=+1275.430935682" Nov 23 15:03:01 crc kubenswrapper[5050]: I1123 15:03:01.915778 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Nov 23 15:03:02 crc kubenswrapper[5050]: I1123 15:03:02.430212 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-conductor-0" Nov 23 15:03:03 crc kubenswrapper[5050]: I1123 15:03:03.745662 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" 
status="unhealthy" pod="openstack/nova-metadata-0" Nov 23 15:03:03 crc kubenswrapper[5050]: I1123 15:03:03.750261 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 23 15:03:04 crc kubenswrapper[5050]: I1123 15:03:04.763607 5050 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="f4de9500-7d8f-4619-8ae3-579d82bd7f1c" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.193:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 23 15:03:04 crc kubenswrapper[5050]: I1123 15:03:04.763707 5050 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="f4de9500-7d8f-4619-8ae3-579d82bd7f1c" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.193:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 23 15:03:06 crc kubenswrapper[5050]: I1123 15:03:06.511103 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 23 15:03:06 crc kubenswrapper[5050]: I1123 15:03:06.511885 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 23 15:03:06 crc kubenswrapper[5050]: I1123 15:03:06.915979 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Nov 23 15:03:06 crc kubenswrapper[5050]: I1123 15:03:06.990765 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Nov 23 15:03:07 crc kubenswrapper[5050]: I1123 15:03:07.356320 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Nov 23 15:03:07 crc kubenswrapper[5050]: I1123 15:03:07.593696 5050 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="e1ed84f3-a226-4de3-b5f1-7ba557c1ae4d" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.195:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 23 15:03:07 crc kubenswrapper[5050]: I1123 15:03:07.593713 5050 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="e1ed84f3-a226-4de3-b5f1-7ba557c1ae4d" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.195:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 23 15:03:09 crc kubenswrapper[5050]: E1123 15:03:09.680518 5050 fsHandler.go:119] failed to collect filesystem stats - rootDiskErr: could not stat "/var/lib/containers/storage/overlay/d325d68d525c3b55ad45a1647773032717c8556813b4d458ac209a2b7bf9ccb2/diff" to get inode usage: stat /var/lib/containers/storage/overlay/d325d68d525c3b55ad45a1647773032717c8556813b4d458ac209a2b7bf9ccb2/diff: no such file or directory, extraDiskErr: could not stat "/var/log/pods/openstack_ceilometer-0_502f3f9b-d57f-4f0b-bca8-c74f139cb28e/ceilometer-central-agent/0.log" to get inode usage: stat /var/log/pods/openstack_ceilometer-0_502f3f9b-d57f-4f0b-bca8-c74f139cb28e/ceilometer-central-agent/0.log: no such file or directory Nov 23 15:03:10 crc kubenswrapper[5050]: E1123 15:03:10.584929 5050 fsHandler.go:119] failed to collect filesystem stats - rootDiskErr: could not stat "/var/lib/containers/storage/overlay/0fac6990db504183c9024651322c8be37eab9446d2ac863b528c38f323821d28/diff" to get inode usage: stat 
/var/lib/containers/storage/overlay/0fac6990db504183c9024651322c8be37eab9446d2ac863b528c38f323821d28/diff: no such file or directory, extraDiskErr: could not stat "/var/log/pods/openstack_ceilometer-0_502f3f9b-d57f-4f0b-bca8-c74f139cb28e/ceilometer-notification-agent/0.log" to get inode usage: stat /var/log/pods/openstack_ceilometer-0_502f3f9b-d57f-4f0b-bca8-c74f139cb28e/ceilometer-notification-agent/0.log: no such file or directory Nov 23 15:03:11 crc kubenswrapper[5050]: E1123 15:03:11.275361 5050 fsHandler.go:119] failed to collect filesystem stats - rootDiskErr: could not stat "/var/lib/containers/storage/overlay/9d344dd822500b3389ac425f563a6d0e78adcc1e7a836346f0a17bd3a692f902/diff" to get inode usage: stat /var/lib/containers/storage/overlay/9d344dd822500b3389ac425f563a6d0e78adcc1e7a836346f0a17bd3a692f902/diff: no such file or directory, extraDiskErr: could not stat "/var/log/pods/openstack_ceilometer-0_502f3f9b-d57f-4f0b-bca8-c74f139cb28e/sg-core/0.log" to get inode usage: stat /var/log/pods/openstack_ceilometer-0_502f3f9b-d57f-4f0b-bca8-c74f139cb28e/sg-core/0.log: no such file or directory Nov 23 15:03:13 crc kubenswrapper[5050]: E1123 15:03:13.382403 5050 fsHandler.go:119] failed to collect filesystem stats - rootDiskErr: could not stat "/var/lib/containers/storage/overlay/448ad7d3a9800296377594efe6631c791ce0211fd04297e50a277af35d814ac0/diff" to get inode usage: stat /var/lib/containers/storage/overlay/448ad7d3a9800296377594efe6631c791ce0211fd04297e50a277af35d814ac0/diff: no such file or directory, extraDiskErr: could not stat "/var/log/pods/openstack_ceilometer-0_502f3f9b-d57f-4f0b-bca8-c74f139cb28e/proxy-httpd/0.log" to get inode usage: stat /var/log/pods/openstack_ceilometer-0_502f3f9b-d57f-4f0b-bca8-c74f139cb28e/proxy-httpd/0.log: no such file or directory Nov 23 15:03:13 crc kubenswrapper[5050]: I1123 15:03:13.812044 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 23 15:03:13 crc kubenswrapper[5050]: I1123 15:03:13.815524 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 23 15:03:13 crc kubenswrapper[5050]: I1123 15:03:13.816762 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 23 15:03:13 crc kubenswrapper[5050]: W1123 15:03:13.933880 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc45b316d_f5ad_4371_a968_a3927907eb63.slice/crio-a5646679d72792d51f3378148477c380beef58d3ec0af26f139be77e9e94959e WatchSource:0}: Error finding container a5646679d72792d51f3378148477c380beef58d3ec0af26f139be77e9e94959e: Status 404 returned error can't find the container with id a5646679d72792d51f3378148477c380beef58d3ec0af26f139be77e9e94959e Nov 23 15:03:13 crc kubenswrapper[5050]: W1123 15:03:13.936609 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc45b316d_f5ad_4371_a968_a3927907eb63.slice/crio-4aa6cc0c3077e4fcc0d2127d516146fdf9ca651a7b2d1826c650f3e5a0ad7522.scope WatchSource:0}: Error finding container 4aa6cc0c3077e4fcc0d2127d516146fdf9ca651a7b2d1826c650f3e5a0ad7522: Status 404 returned error can't find the container with id 4aa6cc0c3077e4fcc0d2127d516146fdf9ca651a7b2d1826c650f3e5a0ad7522 Nov 23 15:03:13 crc kubenswrapper[5050]: W1123 15:03:13.937152 5050 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc45b316d_f5ad_4371_a968_a3927907eb63.slice/crio-9ee80eb80cbd17c14058c2b6eace52a03ffd4a27178ff174aa48b78fafb8d129.scope WatchSource:0}: Error finding container 9ee80eb80cbd17c14058c2b6eace52a03ffd4a27178ff174aa48b78fafb8d129: Status 404 returned error can't find the container with id 9ee80eb80cbd17c14058c2b6eace52a03ffd4a27178ff174aa48b78fafb8d129 Nov 23 15:03:14 crc kubenswrapper[5050]: E1123 15:03:14.184214 5050 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod948389bb_db2c_40c2_a458_b54cb0fa94ee.slice/crio-837ae0a119ac7f11e15f136249846bd454790503c96143267bf8829657cf0658\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc45b316d_f5ad_4371_a968_a3927907eb63.slice/crio-conmon-9ee80eb80cbd17c14058c2b6eace52a03ffd4a27178ff174aa48b78fafb8d129.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8d6abc54_c88f_403a_b9ca_8645646c6356.slice/crio-conmon-4addc92803e0a7fa069a404a032c72c2edc38d23a3ca83cea0920a981320b2c9.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode7c35996_d4df_4886_afb0_bb77ddcce582.slice/crio-b17e940bb4e5cd3e205a69c18c23bb1cbbbe240c7731622c47327906c38db1f2\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8d6abc54_c88f_403a_b9ca_8645646c6356.slice/crio-de446882d02113cccd961ec8838df1b1ece6d9f0245a8db902801f0cd4901012\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod502f3f9b_d57f_4f0b_bca8_c74f139cb28e.slice/crio-e3edf6026dbde8c1a65c0a3b72bbcf4aca60b485fbef6d305cbf2300aeae879f.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc45b316d_f5ad_4371_a968_a3927907eb63.slice/crio-conmon-4aa6cc0c3077e4fcc0d2127d516146fdf9ca651a7b2d1826c650f3e5a0ad7522.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podcaeaf051_b0fe_4fe8_b5e2_730f3d55ffea.slice/crio-9f803b54b404ac268f060fd8bd8081ce74ab291b70f2179751f9593b66f757ed.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8d6abc54_c88f_403a_b9ca_8645646c6356.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod275dac05_e3bc_4f6d_ab7b_f95acbb50692.slice/crio-conmon-97453c1c0289846eb214645e2ffc35025120692c5b4ce5d3b7e6b518b04dba89.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod275dac05_e3bc_4f6d_ab7b_f95acbb50692.slice/crio-85831ce8008f5c8fbd7e39fe5ca39a0c9902a0df794d35068c61750e2b8c35de\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod275dac05_e3bc_4f6d_ab7b_f95acbb50692.slice/crio-d2f15cce3da3c5052a0fcc24b40f9ff904f283f118a7418f605a1e21276839e4.scope\": RecentStats: unable to find data in memory cache], 
[\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod502f3f9b_d57f_4f0b_bca8_c74f139cb28e.slice/crio-2842553d3264cb20a1183862ab46d75413f078b9db0d773f351658ea0eebb971\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod948389bb_db2c_40c2_a458_b54cb0fa94ee.slice/crio-conmon-dcf12c08d4bf1811af924b4b17ab832619b6e87539c56aca5f56d1ebdc5f3fa1.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod948389bb_db2c_40c2_a458_b54cb0fa94ee.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda3584331_f147_4f05_9652_397c75f881db.slice/crio-conmon-4697a6223c18966f8b2511b5be8e28ee2b37cdeaaeff04a38673897b95bcd990.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode7c35996_d4df_4886_afb0_bb77ddcce582.slice/crio-conmon-2af7e607e49b66afd56010e6ff512ce5c8e5aa25061c01f470156f5bd99de973.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc45b316d_f5ad_4371_a968_a3927907eb63.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode7c35996_d4df_4886_afb0_bb77ddcce582.slice/crio-2af7e607e49b66afd56010e6ff512ce5c8e5aa25061c01f470156f5bd99de973.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod502f3f9b_d57f_4f0b_bca8_c74f139cb28e.slice/crio-15dcfb17c69865a099d9ce6907920ef90f8b98c5fe76f727295785a19af0728e.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod502f3f9b_d57f_4f0b_bca8_c74f139cb28e.slice/crio-conmon-e3edf6026dbde8c1a65c0a3b72bbcf4aca60b485fbef6d305cbf2300aeae879f.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod502f3f9b_d57f_4f0b_bca8_c74f139cb28e.slice/crio-conmon-15dcfb17c69865a099d9ce6907920ef90f8b98c5fe76f727295785a19af0728e.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda3584331_f147_4f05_9652_397c75f881db.slice/crio-bca61fd1fc407cff9e25428cf7595b1190554cdcd8dc40953f61c0223893c2fc\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod948389bb_db2c_40c2_a458_b54cb0fa94ee.slice/crio-dcf12c08d4bf1811af924b4b17ab832619b6e87539c56aca5f56d1ebdc5f3fa1.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode7c35996_d4df_4886_afb0_bb77ddcce582.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod275dac05_e3bc_4f6d_ab7b_f95acbb50692.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podcaeaf051_b0fe_4fe8_b5e2_730f3d55ffea.slice/crio-conmon-9f803b54b404ac268f060fd8bd8081ce74ab291b70f2179751f9593b66f757ed.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda3584331_f147_4f05_9652_397c75f881db.slice\": RecentStats: unable to find data in memory cache], 
[\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8d6abc54_c88f_403a_b9ca_8645646c6356.slice/crio-4addc92803e0a7fa069a404a032c72c2edc38d23a3ca83cea0920a981320b2c9.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod502f3f9b_d57f_4f0b_bca8_c74f139cb28e.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod275dac05_e3bc_4f6d_ab7b_f95acbb50692.slice/crio-97453c1c0289846eb214645e2ffc35025120692c5b4ce5d3b7e6b518b04dba89.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda3584331_f147_4f05_9652_397c75f881db.slice/crio-4697a6223c18966f8b2511b5be8e28ee2b37cdeaaeff04a38673897b95bcd990.scope\": RecentStats: unable to find data in memory cache]" Nov 23 15:03:14 crc kubenswrapper[5050]: I1123 15:03:14.369027 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 23 15:03:14 crc kubenswrapper[5050]: I1123 15:03:14.402014 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/caeaf051-b0fe-4fe8-b5e2-730f3d55ffea-combined-ca-bundle\") pod \"caeaf051-b0fe-4fe8-b5e2-730f3d55ffea\" (UID: \"caeaf051-b0fe-4fe8-b5e2-730f3d55ffea\") " Nov 23 15:03:14 crc kubenswrapper[5050]: I1123 15:03:14.402098 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2tjhd\" (UniqueName: \"kubernetes.io/projected/caeaf051-b0fe-4fe8-b5e2-730f3d55ffea-kube-api-access-2tjhd\") pod \"caeaf051-b0fe-4fe8-b5e2-730f3d55ffea\" (UID: \"caeaf051-b0fe-4fe8-b5e2-730f3d55ffea\") " Nov 23 15:03:14 crc kubenswrapper[5050]: I1123 15:03:14.402315 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/caeaf051-b0fe-4fe8-b5e2-730f3d55ffea-config-data\") pod \"caeaf051-b0fe-4fe8-b5e2-730f3d55ffea\" (UID: \"caeaf051-b0fe-4fe8-b5e2-730f3d55ffea\") " Nov 23 15:03:14 crc kubenswrapper[5050]: I1123 15:03:14.419400 5050 generic.go:334] "Generic (PLEG): container finished" podID="caeaf051-b0fe-4fe8-b5e2-730f3d55ffea" containerID="9f803b54b404ac268f060fd8bd8081ce74ab291b70f2179751f9593b66f757ed" exitCode=137 Nov 23 15:03:14 crc kubenswrapper[5050]: I1123 15:03:14.419518 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 23 15:03:14 crc kubenswrapper[5050]: I1123 15:03:14.419517 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"caeaf051-b0fe-4fe8-b5e2-730f3d55ffea","Type":"ContainerDied","Data":"9f803b54b404ac268f060fd8bd8081ce74ab291b70f2179751f9593b66f757ed"} Nov 23 15:03:14 crc kubenswrapper[5050]: I1123 15:03:14.419598 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"caeaf051-b0fe-4fe8-b5e2-730f3d55ffea","Type":"ContainerDied","Data":"a52f15da27d000deb369c493ae0415039302bc75233beeb595823a6f5d4986f7"} Nov 23 15:03:14 crc kubenswrapper[5050]: I1123 15:03:14.419621 5050 scope.go:117] "RemoveContainer" containerID="9f803b54b404ac268f060fd8bd8081ce74ab291b70f2179751f9593b66f757ed" Nov 23 15:03:14 crc kubenswrapper[5050]: I1123 15:03:14.432155 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/caeaf051-b0fe-4fe8-b5e2-730f3d55ffea-kube-api-access-2tjhd" (OuterVolumeSpecName: "kube-api-access-2tjhd") pod "caeaf051-b0fe-4fe8-b5e2-730f3d55ffea" (UID: "caeaf051-b0fe-4fe8-b5e2-730f3d55ffea"). InnerVolumeSpecName "kube-api-access-2tjhd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:03:14 crc kubenswrapper[5050]: I1123 15:03:14.438167 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 23 15:03:14 crc kubenswrapper[5050]: I1123 15:03:14.456793 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/caeaf051-b0fe-4fe8-b5e2-730f3d55ffea-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "caeaf051-b0fe-4fe8-b5e2-730f3d55ffea" (UID: "caeaf051-b0fe-4fe8-b5e2-730f3d55ffea"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:03:14 crc kubenswrapper[5050]: I1123 15:03:14.469905 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/caeaf051-b0fe-4fe8-b5e2-730f3d55ffea-config-data" (OuterVolumeSpecName: "config-data") pod "caeaf051-b0fe-4fe8-b5e2-730f3d55ffea" (UID: "caeaf051-b0fe-4fe8-b5e2-730f3d55ffea"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:03:14 crc kubenswrapper[5050]: I1123 15:03:14.505811 5050 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/caeaf051-b0fe-4fe8-b5e2-730f3d55ffea-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 15:03:14 crc kubenswrapper[5050]: I1123 15:03:14.505853 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2tjhd\" (UniqueName: \"kubernetes.io/projected/caeaf051-b0fe-4fe8-b5e2-730f3d55ffea-kube-api-access-2tjhd\") on node \"crc\" DevicePath \"\"" Nov 23 15:03:14 crc kubenswrapper[5050]: I1123 15:03:14.505868 5050 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/caeaf051-b0fe-4fe8-b5e2-730f3d55ffea-config-data\") on node \"crc\" DevicePath \"\"" Nov 23 15:03:14 crc kubenswrapper[5050]: I1123 15:03:14.557520 5050 scope.go:117] "RemoveContainer" containerID="9f803b54b404ac268f060fd8bd8081ce74ab291b70f2179751f9593b66f757ed" Nov 23 15:03:14 crc kubenswrapper[5050]: E1123 15:03:14.558132 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9f803b54b404ac268f060fd8bd8081ce74ab291b70f2179751f9593b66f757ed\": container with ID starting with 9f803b54b404ac268f060fd8bd8081ce74ab291b70f2179751f9593b66f757ed not found: ID does not exist" containerID="9f803b54b404ac268f060fd8bd8081ce74ab291b70f2179751f9593b66f757ed" Nov 23 15:03:14 crc kubenswrapper[5050]: I1123 15:03:14.558165 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9f803b54b404ac268f060fd8bd8081ce74ab291b70f2179751f9593b66f757ed"} err="failed to get container status \"9f803b54b404ac268f060fd8bd8081ce74ab291b70f2179751f9593b66f757ed\": rpc error: code = NotFound desc = could not find container \"9f803b54b404ac268f060fd8bd8081ce74ab291b70f2179751f9593b66f757ed\": container with ID starting with 9f803b54b404ac268f060fd8bd8081ce74ab291b70f2179751f9593b66f757ed not found: ID does not exist" Nov 23 15:03:14 crc kubenswrapper[5050]: I1123 15:03:14.772557 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 23 15:03:14 crc kubenswrapper[5050]: I1123 15:03:14.786937 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 23 15:03:14 crc kubenswrapper[5050]: I1123 15:03:14.814326 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 23 15:03:14 crc kubenswrapper[5050]: E1123 15:03:14.814850 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="caeaf051-b0fe-4fe8-b5e2-730f3d55ffea" containerName="nova-cell1-novncproxy-novncproxy" Nov 23 15:03:14 crc kubenswrapper[5050]: I1123 15:03:14.814868 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="caeaf051-b0fe-4fe8-b5e2-730f3d55ffea" containerName="nova-cell1-novncproxy-novncproxy" Nov 23 15:03:14 crc kubenswrapper[5050]: I1123 15:03:14.815123 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="caeaf051-b0fe-4fe8-b5e2-730f3d55ffea" containerName="nova-cell1-novncproxy-novncproxy" Nov 23 15:03:14 crc kubenswrapper[5050]: I1123 15:03:14.815957 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 23 15:03:14 crc kubenswrapper[5050]: I1123 15:03:14.818744 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-public-svc" Nov 23 15:03:14 crc kubenswrapper[5050]: I1123 15:03:14.820371 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Nov 23 15:03:14 crc kubenswrapper[5050]: I1123 15:03:14.821107 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-vencrypt" Nov 23 15:03:14 crc kubenswrapper[5050]: I1123 15:03:14.825858 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 23 15:03:14 crc kubenswrapper[5050]: I1123 15:03:14.916117 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/d3d96734-832d-489e-9fed-a4eb705f41d7-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"d3d96734-832d-489e-9fed-a4eb705f41d7\") " pod="openstack/nova-cell1-novncproxy-0" Nov 23 15:03:14 crc kubenswrapper[5050]: I1123 15:03:14.916176 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d3d96734-832d-489e-9fed-a4eb705f41d7-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"d3d96734-832d-489e-9fed-a4eb705f41d7\") " pod="openstack/nova-cell1-novncproxy-0" Nov 23 15:03:14 crc kubenswrapper[5050]: I1123 15:03:14.916211 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n7k9k\" (UniqueName: \"kubernetes.io/projected/d3d96734-832d-489e-9fed-a4eb705f41d7-kube-api-access-n7k9k\") pod \"nova-cell1-novncproxy-0\" (UID: \"d3d96734-832d-489e-9fed-a4eb705f41d7\") " pod="openstack/nova-cell1-novncproxy-0" Nov 23 15:03:14 crc kubenswrapper[5050]: I1123 15:03:14.916246 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d3d96734-832d-489e-9fed-a4eb705f41d7-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"d3d96734-832d-489e-9fed-a4eb705f41d7\") " pod="openstack/nova-cell1-novncproxy-0" Nov 23 15:03:14 crc kubenswrapper[5050]: I1123 15:03:14.916272 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/d3d96734-832d-489e-9fed-a4eb705f41d7-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"d3d96734-832d-489e-9fed-a4eb705f41d7\") " pod="openstack/nova-cell1-novncproxy-0" Nov 23 15:03:15 crc kubenswrapper[5050]: I1123 15:03:15.019330 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n7k9k\" (UniqueName: \"kubernetes.io/projected/d3d96734-832d-489e-9fed-a4eb705f41d7-kube-api-access-n7k9k\") pod \"nova-cell1-novncproxy-0\" (UID: \"d3d96734-832d-489e-9fed-a4eb705f41d7\") " pod="openstack/nova-cell1-novncproxy-0" Nov 23 15:03:15 crc kubenswrapper[5050]: I1123 15:03:15.019431 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d3d96734-832d-489e-9fed-a4eb705f41d7-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"d3d96734-832d-489e-9fed-a4eb705f41d7\") " pod="openstack/nova-cell1-novncproxy-0" Nov 
23 15:03:15 crc kubenswrapper[5050]: I1123 15:03:15.019496 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/d3d96734-832d-489e-9fed-a4eb705f41d7-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"d3d96734-832d-489e-9fed-a4eb705f41d7\") " pod="openstack/nova-cell1-novncproxy-0" Nov 23 15:03:15 crc kubenswrapper[5050]: I1123 15:03:15.019711 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/d3d96734-832d-489e-9fed-a4eb705f41d7-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"d3d96734-832d-489e-9fed-a4eb705f41d7\") " pod="openstack/nova-cell1-novncproxy-0" Nov 23 15:03:15 crc kubenswrapper[5050]: I1123 15:03:15.019744 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d3d96734-832d-489e-9fed-a4eb705f41d7-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"d3d96734-832d-489e-9fed-a4eb705f41d7\") " pod="openstack/nova-cell1-novncproxy-0" Nov 23 15:03:15 crc kubenswrapper[5050]: I1123 15:03:15.028532 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d3d96734-832d-489e-9fed-a4eb705f41d7-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"d3d96734-832d-489e-9fed-a4eb705f41d7\") " pod="openstack/nova-cell1-novncproxy-0" Nov 23 15:03:15 crc kubenswrapper[5050]: I1123 15:03:15.029901 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d3d96734-832d-489e-9fed-a4eb705f41d7-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"d3d96734-832d-489e-9fed-a4eb705f41d7\") " pod="openstack/nova-cell1-novncproxy-0" Nov 23 15:03:15 crc kubenswrapper[5050]: I1123 15:03:15.029950 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/d3d96734-832d-489e-9fed-a4eb705f41d7-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"d3d96734-832d-489e-9fed-a4eb705f41d7\") " pod="openstack/nova-cell1-novncproxy-0" Nov 23 15:03:15 crc kubenswrapper[5050]: I1123 15:03:15.033649 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/d3d96734-832d-489e-9fed-a4eb705f41d7-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"d3d96734-832d-489e-9fed-a4eb705f41d7\") " pod="openstack/nova-cell1-novncproxy-0" Nov 23 15:03:15 crc kubenswrapper[5050]: I1123 15:03:15.048688 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n7k9k\" (UniqueName: \"kubernetes.io/projected/d3d96734-832d-489e-9fed-a4eb705f41d7-kube-api-access-n7k9k\") pod \"nova-cell1-novncproxy-0\" (UID: \"d3d96734-832d-489e-9fed-a4eb705f41d7\") " pod="openstack/nova-cell1-novncproxy-0" Nov 23 15:03:15 crc kubenswrapper[5050]: I1123 15:03:15.137916 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 23 15:03:15 crc kubenswrapper[5050]: I1123 15:03:15.564264 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="caeaf051-b0fe-4fe8-b5e2-730f3d55ffea" path="/var/lib/kubelet/pods/caeaf051-b0fe-4fe8-b5e2-730f3d55ffea/volumes" Nov 23 15:03:15 crc kubenswrapper[5050]: I1123 15:03:15.645362 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 23 15:03:15 crc kubenswrapper[5050]: W1123 15:03:15.653962 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd3d96734_832d_489e_9fed_a4eb705f41d7.slice/crio-c3ccda55e8a7cc4faf0cb8b489ad3654e6ccbae866c0291dff60a6ad34b673e9 WatchSource:0}: Error finding container c3ccda55e8a7cc4faf0cb8b489ad3654e6ccbae866c0291dff60a6ad34b673e9: Status 404 returned error can't find the container with id c3ccda55e8a7cc4faf0cb8b489ad3654e6ccbae866c0291dff60a6ad34b673e9 Nov 23 15:03:16 crc kubenswrapper[5050]: I1123 15:03:16.450579 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"d3d96734-832d-489e-9fed-a4eb705f41d7","Type":"ContainerStarted","Data":"dc0413953c30be6f1a27357706432a328f29ebeedb2176f058654bf6f70fcf23"} Nov 23 15:03:16 crc kubenswrapper[5050]: I1123 15:03:16.451007 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"d3d96734-832d-489e-9fed-a4eb705f41d7","Type":"ContainerStarted","Data":"c3ccda55e8a7cc4faf0cb8b489ad3654e6ccbae866c0291dff60a6ad34b673e9"} Nov 23 15:03:16 crc kubenswrapper[5050]: I1123 15:03:16.473172 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.473142149 podStartE2EDuration="2.473142149s" podCreationTimestamp="2025-11-23 15:03:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 15:03:16.472424209 +0000 UTC m=+1291.639420724" watchObservedRunningTime="2025-11-23 15:03:16.473142149 +0000 UTC m=+1291.640138634" Nov 23 15:03:16 crc kubenswrapper[5050]: I1123 15:03:16.517420 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 23 15:03:16 crc kubenswrapper[5050]: I1123 15:03:16.517552 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 23 15:03:16 crc kubenswrapper[5050]: I1123 15:03:16.518596 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 23 15:03:16 crc kubenswrapper[5050]: I1123 15:03:16.518704 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 23 15:03:16 crc kubenswrapper[5050]: I1123 15:03:16.522740 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 23 15:03:16 crc kubenswrapper[5050]: I1123 15:03:16.524480 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 23 15:03:16 crc kubenswrapper[5050]: I1123 15:03:16.790172 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-cd5cbd7b9-vptxh"] Nov 23 15:03:16 crc kubenswrapper[5050]: I1123 15:03:16.792590 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-cd5cbd7b9-vptxh" Nov 23 15:03:16 crc kubenswrapper[5050]: I1123 15:03:16.820828 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-cd5cbd7b9-vptxh"] Nov 23 15:03:16 crc kubenswrapper[5050]: I1123 15:03:16.864871 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e7a4774e-f304-40ca-8595-bbe9c381f466-ovsdbserver-nb\") pod \"dnsmasq-dns-cd5cbd7b9-vptxh\" (UID: \"e7a4774e-f304-40ca-8595-bbe9c381f466\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-vptxh" Nov 23 15:03:16 crc kubenswrapper[5050]: I1123 15:03:16.864929 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nxqvj\" (UniqueName: \"kubernetes.io/projected/e7a4774e-f304-40ca-8595-bbe9c381f466-kube-api-access-nxqvj\") pod \"dnsmasq-dns-cd5cbd7b9-vptxh\" (UID: \"e7a4774e-f304-40ca-8595-bbe9c381f466\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-vptxh" Nov 23 15:03:16 crc kubenswrapper[5050]: I1123 15:03:16.864957 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7a4774e-f304-40ca-8595-bbe9c381f466-config\") pod \"dnsmasq-dns-cd5cbd7b9-vptxh\" (UID: \"e7a4774e-f304-40ca-8595-bbe9c381f466\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-vptxh" Nov 23 15:03:16 crc kubenswrapper[5050]: I1123 15:03:16.864988 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e7a4774e-f304-40ca-8595-bbe9c381f466-dns-svc\") pod \"dnsmasq-dns-cd5cbd7b9-vptxh\" (UID: \"e7a4774e-f304-40ca-8595-bbe9c381f466\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-vptxh" Nov 23 15:03:16 crc kubenswrapper[5050]: I1123 15:03:16.865019 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e7a4774e-f304-40ca-8595-bbe9c381f466-ovsdbserver-sb\") pod \"dnsmasq-dns-cd5cbd7b9-vptxh\" (UID: \"e7a4774e-f304-40ca-8595-bbe9c381f466\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-vptxh" Nov 23 15:03:16 crc kubenswrapper[5050]: I1123 15:03:16.865050 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e7a4774e-f304-40ca-8595-bbe9c381f466-dns-swift-storage-0\") pod \"dnsmasq-dns-cd5cbd7b9-vptxh\" (UID: \"e7a4774e-f304-40ca-8595-bbe9c381f466\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-vptxh" Nov 23 15:03:16 crc kubenswrapper[5050]: I1123 15:03:16.967338 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e7a4774e-f304-40ca-8595-bbe9c381f466-ovsdbserver-nb\") pod \"dnsmasq-dns-cd5cbd7b9-vptxh\" (UID: \"e7a4774e-f304-40ca-8595-bbe9c381f466\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-vptxh" Nov 23 15:03:16 crc kubenswrapper[5050]: I1123 15:03:16.967396 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nxqvj\" (UniqueName: \"kubernetes.io/projected/e7a4774e-f304-40ca-8595-bbe9c381f466-kube-api-access-nxqvj\") pod \"dnsmasq-dns-cd5cbd7b9-vptxh\" (UID: \"e7a4774e-f304-40ca-8595-bbe9c381f466\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-vptxh" Nov 23 15:03:16 crc kubenswrapper[5050]: I1123 15:03:16.967420 5050 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7a4774e-f304-40ca-8595-bbe9c381f466-config\") pod \"dnsmasq-dns-cd5cbd7b9-vptxh\" (UID: \"e7a4774e-f304-40ca-8595-bbe9c381f466\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-vptxh" Nov 23 15:03:16 crc kubenswrapper[5050]: I1123 15:03:16.967462 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e7a4774e-f304-40ca-8595-bbe9c381f466-dns-svc\") pod \"dnsmasq-dns-cd5cbd7b9-vptxh\" (UID: \"e7a4774e-f304-40ca-8595-bbe9c381f466\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-vptxh" Nov 23 15:03:16 crc kubenswrapper[5050]: I1123 15:03:16.967490 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e7a4774e-f304-40ca-8595-bbe9c381f466-ovsdbserver-sb\") pod \"dnsmasq-dns-cd5cbd7b9-vptxh\" (UID: \"e7a4774e-f304-40ca-8595-bbe9c381f466\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-vptxh" Nov 23 15:03:16 crc kubenswrapper[5050]: I1123 15:03:16.967522 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e7a4774e-f304-40ca-8595-bbe9c381f466-dns-swift-storage-0\") pod \"dnsmasq-dns-cd5cbd7b9-vptxh\" (UID: \"e7a4774e-f304-40ca-8595-bbe9c381f466\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-vptxh" Nov 23 15:03:16 crc kubenswrapper[5050]: I1123 15:03:16.968886 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e7a4774e-f304-40ca-8595-bbe9c381f466-dns-svc\") pod \"dnsmasq-dns-cd5cbd7b9-vptxh\" (UID: \"e7a4774e-f304-40ca-8595-bbe9c381f466\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-vptxh" Nov 23 15:03:16 crc kubenswrapper[5050]: I1123 15:03:16.968956 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e7a4774e-f304-40ca-8595-bbe9c381f466-ovsdbserver-sb\") pod \"dnsmasq-dns-cd5cbd7b9-vptxh\" (UID: \"e7a4774e-f304-40ca-8595-bbe9c381f466\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-vptxh" Nov 23 15:03:16 crc kubenswrapper[5050]: I1123 15:03:16.969038 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e7a4774e-f304-40ca-8595-bbe9c381f466-dns-swift-storage-0\") pod \"dnsmasq-dns-cd5cbd7b9-vptxh\" (UID: \"e7a4774e-f304-40ca-8595-bbe9c381f466\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-vptxh" Nov 23 15:03:16 crc kubenswrapper[5050]: I1123 15:03:16.969592 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7a4774e-f304-40ca-8595-bbe9c381f466-config\") pod \"dnsmasq-dns-cd5cbd7b9-vptxh\" (UID: \"e7a4774e-f304-40ca-8595-bbe9c381f466\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-vptxh" Nov 23 15:03:16 crc kubenswrapper[5050]: I1123 15:03:16.970044 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e7a4774e-f304-40ca-8595-bbe9c381f466-ovsdbserver-nb\") pod \"dnsmasq-dns-cd5cbd7b9-vptxh\" (UID: \"e7a4774e-f304-40ca-8595-bbe9c381f466\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-vptxh" Nov 23 15:03:16 crc kubenswrapper[5050]: I1123 15:03:16.987799 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nxqvj\" (UniqueName: 
\"kubernetes.io/projected/e7a4774e-f304-40ca-8595-bbe9c381f466-kube-api-access-nxqvj\") pod \"dnsmasq-dns-cd5cbd7b9-vptxh\" (UID: \"e7a4774e-f304-40ca-8595-bbe9c381f466\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-vptxh" Nov 23 15:03:17 crc kubenswrapper[5050]: I1123 15:03:17.122774 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-cd5cbd7b9-vptxh" Nov 23 15:03:17 crc kubenswrapper[5050]: I1123 15:03:17.648213 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-cd5cbd7b9-vptxh"] Nov 23 15:03:18 crc kubenswrapper[5050]: I1123 15:03:18.504784 5050 generic.go:334] "Generic (PLEG): container finished" podID="e7a4774e-f304-40ca-8595-bbe9c381f466" containerID="ff35e18b1c173ef8289abf48d1745ad3d991c922a44ba70cf39ceac46dec1cc2" exitCode=0 Nov 23 15:03:18 crc kubenswrapper[5050]: I1123 15:03:18.504980 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cd5cbd7b9-vptxh" event={"ID":"e7a4774e-f304-40ca-8595-bbe9c381f466","Type":"ContainerDied","Data":"ff35e18b1c173ef8289abf48d1745ad3d991c922a44ba70cf39ceac46dec1cc2"} Nov 23 15:03:18 crc kubenswrapper[5050]: I1123 15:03:18.508744 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cd5cbd7b9-vptxh" event={"ID":"e7a4774e-f304-40ca-8595-bbe9c381f466","Type":"ContainerStarted","Data":"7ed36c96e2576c13dab1973b1bc4f2e61749170e1aa055bd02a656954ba7520c"} Nov 23 15:03:19 crc kubenswrapper[5050]: I1123 15:03:19.294987 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 23 15:03:19 crc kubenswrapper[5050]: I1123 15:03:19.296379 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="3023809c-c0ef-4911-8dcc-7cdd678e71c3" containerName="proxy-httpd" containerID="cri-o://f9593a90f1a76e06505bfefaef0f5644dc8b4d4115d5ae465e03292937435228" gracePeriod=30 Nov 23 15:03:19 crc kubenswrapper[5050]: I1123 15:03:19.296403 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="3023809c-c0ef-4911-8dcc-7cdd678e71c3" containerName="ceilometer-notification-agent" containerID="cri-o://55aa601d0049f276c0ae585090ac3d9812f82dd2ee3827db00dd15d9ff7f7f94" gracePeriod=30 Nov 23 15:03:19 crc kubenswrapper[5050]: I1123 15:03:19.296381 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="3023809c-c0ef-4911-8dcc-7cdd678e71c3" containerName="sg-core" containerID="cri-o://2846726b4a01de78ddce3c273b5cb13f09fe8655c34c83dbca6ba1ca50437720" gracePeriod=30 Nov 23 15:03:19 crc kubenswrapper[5050]: I1123 15:03:19.302822 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="3023809c-c0ef-4911-8dcc-7cdd678e71c3" containerName="ceilometer-central-agent" containerID="cri-o://efd9102008f33ce4b28d7c9c6a0fbc1001c14e99a0ece0af0a6e35b9e9062161" gracePeriod=30 Nov 23 15:03:19 crc kubenswrapper[5050]: I1123 15:03:19.312545 5050 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="3023809c-c0ef-4911-8dcc-7cdd678e71c3" containerName="proxy-httpd" probeResult="failure" output="Get \"https://10.217.0.194:3000/\": write tcp 10.217.0.2:52338->10.217.0.194:3000: write: broken pipe" Nov 23 15:03:19 crc kubenswrapper[5050]: I1123 15:03:19.533264 5050 generic.go:334] "Generic (PLEG): container finished" podID="3023809c-c0ef-4911-8dcc-7cdd678e71c3" 
containerID="2846726b4a01de78ddce3c273b5cb13f09fe8655c34c83dbca6ba1ca50437720" exitCode=2 Nov 23 15:03:19 crc kubenswrapper[5050]: I1123 15:03:19.533346 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3023809c-c0ef-4911-8dcc-7cdd678e71c3","Type":"ContainerDied","Data":"2846726b4a01de78ddce3c273b5cb13f09fe8655c34c83dbca6ba1ca50437720"} Nov 23 15:03:19 crc kubenswrapper[5050]: I1123 15:03:19.535724 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cd5cbd7b9-vptxh" event={"ID":"e7a4774e-f304-40ca-8595-bbe9c381f466","Type":"ContainerStarted","Data":"063525e2dec5a92b0de43140afc955072b70de2dd16ff0f882b43cc83a5c5cb3"} Nov 23 15:03:19 crc kubenswrapper[5050]: I1123 15:03:19.535895 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-cd5cbd7b9-vptxh" Nov 23 15:03:20 crc kubenswrapper[5050]: I1123 15:03:20.028217 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-cd5cbd7b9-vptxh" podStartSLOduration=4.028182383 podStartE2EDuration="4.028182383s" podCreationTimestamp="2025-11-23 15:03:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 15:03:19.572324368 +0000 UTC m=+1294.739320853" watchObservedRunningTime="2025-11-23 15:03:20.028182383 +0000 UTC m=+1295.195178878" Nov 23 15:03:20 crc kubenswrapper[5050]: I1123 15:03:20.033028 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 23 15:03:20 crc kubenswrapper[5050]: I1123 15:03:20.033401 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="e1ed84f3-a226-4de3-b5f1-7ba557c1ae4d" containerName="nova-api-log" containerID="cri-o://d2931ae007bc8543cad15627c35800ea000cc388ab6a6b562277bd995e34a4b7" gracePeriod=30 Nov 23 15:03:20 crc kubenswrapper[5050]: I1123 15:03:20.033503 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="e1ed84f3-a226-4de3-b5f1-7ba557c1ae4d" containerName="nova-api-api" containerID="cri-o://2be0f7e13323ed3b94345fa2e9c64a5885580a2c8bcbfb005d4144dd16705054" gracePeriod=30 Nov 23 15:03:20 crc kubenswrapper[5050]: I1123 15:03:20.138763 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Nov 23 15:03:20 crc kubenswrapper[5050]: I1123 15:03:20.549557 5050 generic.go:334] "Generic (PLEG): container finished" podID="e1ed84f3-a226-4de3-b5f1-7ba557c1ae4d" containerID="d2931ae007bc8543cad15627c35800ea000cc388ab6a6b562277bd995e34a4b7" exitCode=143 Nov 23 15:03:20 crc kubenswrapper[5050]: I1123 15:03:20.549641 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"e1ed84f3-a226-4de3-b5f1-7ba557c1ae4d","Type":"ContainerDied","Data":"d2931ae007bc8543cad15627c35800ea000cc388ab6a6b562277bd995e34a4b7"} Nov 23 15:03:20 crc kubenswrapper[5050]: I1123 15:03:20.554702 5050 generic.go:334] "Generic (PLEG): container finished" podID="3023809c-c0ef-4911-8dcc-7cdd678e71c3" containerID="f9593a90f1a76e06505bfefaef0f5644dc8b4d4115d5ae465e03292937435228" exitCode=0 Nov 23 15:03:20 crc kubenswrapper[5050]: I1123 15:03:20.554812 5050 generic.go:334] "Generic (PLEG): container finished" podID="3023809c-c0ef-4911-8dcc-7cdd678e71c3" containerID="efd9102008f33ce4b28d7c9c6a0fbc1001c14e99a0ece0af0a6e35b9e9062161" exitCode=0 Nov 23 15:03:20 crc kubenswrapper[5050]: 
I1123 15:03:20.555687 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3023809c-c0ef-4911-8dcc-7cdd678e71c3","Type":"ContainerDied","Data":"f9593a90f1a76e06505bfefaef0f5644dc8b4d4115d5ae465e03292937435228"} Nov 23 15:03:20 crc kubenswrapper[5050]: I1123 15:03:20.555794 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3023809c-c0ef-4911-8dcc-7cdd678e71c3","Type":"ContainerDied","Data":"efd9102008f33ce4b28d7c9c6a0fbc1001c14e99a0ece0af0a6e35b9e9062161"} Nov 23 15:03:23 crc kubenswrapper[5050]: I1123 15:03:23.607318 5050 generic.go:334] "Generic (PLEG): container finished" podID="e1ed84f3-a226-4de3-b5f1-7ba557c1ae4d" containerID="2be0f7e13323ed3b94345fa2e9c64a5885580a2c8bcbfb005d4144dd16705054" exitCode=0 Nov 23 15:03:23 crc kubenswrapper[5050]: I1123 15:03:23.608293 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"e1ed84f3-a226-4de3-b5f1-7ba557c1ae4d","Type":"ContainerDied","Data":"2be0f7e13323ed3b94345fa2e9c64a5885580a2c8bcbfb005d4144dd16705054"} Nov 23 15:03:23 crc kubenswrapper[5050]: I1123 15:03:23.615424 5050 generic.go:334] "Generic (PLEG): container finished" podID="3023809c-c0ef-4911-8dcc-7cdd678e71c3" containerID="55aa601d0049f276c0ae585090ac3d9812f82dd2ee3827db00dd15d9ff7f7f94" exitCode=0 Nov 23 15:03:23 crc kubenswrapper[5050]: I1123 15:03:23.615493 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3023809c-c0ef-4911-8dcc-7cdd678e71c3","Type":"ContainerDied","Data":"55aa601d0049f276c0ae585090ac3d9812f82dd2ee3827db00dd15d9ff7f7f94"} Nov 23 15:03:23 crc kubenswrapper[5050]: I1123 15:03:23.758607 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 23 15:03:23 crc kubenswrapper[5050]: I1123 15:03:23.869330 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qz5dn\" (UniqueName: \"kubernetes.io/projected/e1ed84f3-a226-4de3-b5f1-7ba557c1ae4d-kube-api-access-qz5dn\") pod \"e1ed84f3-a226-4de3-b5f1-7ba557c1ae4d\" (UID: \"e1ed84f3-a226-4de3-b5f1-7ba557c1ae4d\") " Nov 23 15:03:23 crc kubenswrapper[5050]: I1123 15:03:23.869622 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e1ed84f3-a226-4de3-b5f1-7ba557c1ae4d-logs\") pod \"e1ed84f3-a226-4de3-b5f1-7ba557c1ae4d\" (UID: \"e1ed84f3-a226-4de3-b5f1-7ba557c1ae4d\") " Nov 23 15:03:23 crc kubenswrapper[5050]: I1123 15:03:23.869666 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e1ed84f3-a226-4de3-b5f1-7ba557c1ae4d-config-data\") pod \"e1ed84f3-a226-4de3-b5f1-7ba557c1ae4d\" (UID: \"e1ed84f3-a226-4de3-b5f1-7ba557c1ae4d\") " Nov 23 15:03:23 crc kubenswrapper[5050]: I1123 15:03:23.869804 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e1ed84f3-a226-4de3-b5f1-7ba557c1ae4d-combined-ca-bundle\") pod \"e1ed84f3-a226-4de3-b5f1-7ba557c1ae4d\" (UID: \"e1ed84f3-a226-4de3-b5f1-7ba557c1ae4d\") " Nov 23 15:03:23 crc kubenswrapper[5050]: I1123 15:03:23.870668 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e1ed84f3-a226-4de3-b5f1-7ba557c1ae4d-logs" (OuterVolumeSpecName: "logs") pod "e1ed84f3-a226-4de3-b5f1-7ba557c1ae4d" (UID: 
"e1ed84f3-a226-4de3-b5f1-7ba557c1ae4d"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 15:03:23 crc kubenswrapper[5050]: I1123 15:03:23.878315 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e1ed84f3-a226-4de3-b5f1-7ba557c1ae4d-kube-api-access-qz5dn" (OuterVolumeSpecName: "kube-api-access-qz5dn") pod "e1ed84f3-a226-4de3-b5f1-7ba557c1ae4d" (UID: "e1ed84f3-a226-4de3-b5f1-7ba557c1ae4d"). InnerVolumeSpecName "kube-api-access-qz5dn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:03:23 crc kubenswrapper[5050]: I1123 15:03:23.924648 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 23 15:03:23 crc kubenswrapper[5050]: I1123 15:03:23.926589 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e1ed84f3-a226-4de3-b5f1-7ba557c1ae4d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e1ed84f3-a226-4de3-b5f1-7ba557c1ae4d" (UID: "e1ed84f3-a226-4de3-b5f1-7ba557c1ae4d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:03:23 crc kubenswrapper[5050]: I1123 15:03:23.939628 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e1ed84f3-a226-4de3-b5f1-7ba557c1ae4d-config-data" (OuterVolumeSpecName: "config-data") pod "e1ed84f3-a226-4de3-b5f1-7ba557c1ae4d" (UID: "e1ed84f3-a226-4de3-b5f1-7ba557c1ae4d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:03:23 crc kubenswrapper[5050]: I1123 15:03:23.973792 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qz5dn\" (UniqueName: \"kubernetes.io/projected/e1ed84f3-a226-4de3-b5f1-7ba557c1ae4d-kube-api-access-qz5dn\") on node \"crc\" DevicePath \"\"" Nov 23 15:03:23 crc kubenswrapper[5050]: I1123 15:03:23.974087 5050 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e1ed84f3-a226-4de3-b5f1-7ba557c1ae4d-logs\") on node \"crc\" DevicePath \"\"" Nov 23 15:03:23 crc kubenswrapper[5050]: I1123 15:03:23.974194 5050 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e1ed84f3-a226-4de3-b5f1-7ba557c1ae4d-config-data\") on node \"crc\" DevicePath \"\"" Nov 23 15:03:23 crc kubenswrapper[5050]: I1123 15:03:23.974258 5050 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e1ed84f3-a226-4de3-b5f1-7ba557c1ae4d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 15:03:24 crc kubenswrapper[5050]: I1123 15:03:24.075696 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3023809c-c0ef-4911-8dcc-7cdd678e71c3-log-httpd\") pod \"3023809c-c0ef-4911-8dcc-7cdd678e71c3\" (UID: \"3023809c-c0ef-4911-8dcc-7cdd678e71c3\") " Nov 23 15:03:24 crc kubenswrapper[5050]: I1123 15:03:24.075877 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3023809c-c0ef-4911-8dcc-7cdd678e71c3-scripts\") pod \"3023809c-c0ef-4911-8dcc-7cdd678e71c3\" (UID: \"3023809c-c0ef-4911-8dcc-7cdd678e71c3\") " Nov 23 15:03:24 crc kubenswrapper[5050]: I1123 15:03:24.075910 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vmr77\" 
(UniqueName: \"kubernetes.io/projected/3023809c-c0ef-4911-8dcc-7cdd678e71c3-kube-api-access-vmr77\") pod \"3023809c-c0ef-4911-8dcc-7cdd678e71c3\" (UID: \"3023809c-c0ef-4911-8dcc-7cdd678e71c3\") " Nov 23 15:03:24 crc kubenswrapper[5050]: I1123 15:03:24.075998 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3023809c-c0ef-4911-8dcc-7cdd678e71c3-config-data\") pod \"3023809c-c0ef-4911-8dcc-7cdd678e71c3\" (UID: \"3023809c-c0ef-4911-8dcc-7cdd678e71c3\") " Nov 23 15:03:24 crc kubenswrapper[5050]: I1123 15:03:24.076034 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/3023809c-c0ef-4911-8dcc-7cdd678e71c3-ceilometer-tls-certs\") pod \"3023809c-c0ef-4911-8dcc-7cdd678e71c3\" (UID: \"3023809c-c0ef-4911-8dcc-7cdd678e71c3\") " Nov 23 15:03:24 crc kubenswrapper[5050]: I1123 15:03:24.076057 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/3023809c-c0ef-4911-8dcc-7cdd678e71c3-sg-core-conf-yaml\") pod \"3023809c-c0ef-4911-8dcc-7cdd678e71c3\" (UID: \"3023809c-c0ef-4911-8dcc-7cdd678e71c3\") " Nov 23 15:03:24 crc kubenswrapper[5050]: I1123 15:03:24.076086 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3023809c-c0ef-4911-8dcc-7cdd678e71c3-run-httpd\") pod \"3023809c-c0ef-4911-8dcc-7cdd678e71c3\" (UID: \"3023809c-c0ef-4911-8dcc-7cdd678e71c3\") " Nov 23 15:03:24 crc kubenswrapper[5050]: I1123 15:03:24.076127 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3023809c-c0ef-4911-8dcc-7cdd678e71c3-combined-ca-bundle\") pod \"3023809c-c0ef-4911-8dcc-7cdd678e71c3\" (UID: \"3023809c-c0ef-4911-8dcc-7cdd678e71c3\") " Nov 23 15:03:24 crc kubenswrapper[5050]: I1123 15:03:24.091215 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3023809c-c0ef-4911-8dcc-7cdd678e71c3-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "3023809c-c0ef-4911-8dcc-7cdd678e71c3" (UID: "3023809c-c0ef-4911-8dcc-7cdd678e71c3"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 15:03:24 crc kubenswrapper[5050]: I1123 15:03:24.100620 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3023809c-c0ef-4911-8dcc-7cdd678e71c3-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "3023809c-c0ef-4911-8dcc-7cdd678e71c3" (UID: "3023809c-c0ef-4911-8dcc-7cdd678e71c3"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 15:03:24 crc kubenswrapper[5050]: I1123 15:03:24.109764 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3023809c-c0ef-4911-8dcc-7cdd678e71c3-scripts" (OuterVolumeSpecName: "scripts") pod "3023809c-c0ef-4911-8dcc-7cdd678e71c3" (UID: "3023809c-c0ef-4911-8dcc-7cdd678e71c3"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:03:24 crc kubenswrapper[5050]: I1123 15:03:24.121170 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3023809c-c0ef-4911-8dcc-7cdd678e71c3-kube-api-access-vmr77" (OuterVolumeSpecName: "kube-api-access-vmr77") pod "3023809c-c0ef-4911-8dcc-7cdd678e71c3" (UID: "3023809c-c0ef-4911-8dcc-7cdd678e71c3"). InnerVolumeSpecName "kube-api-access-vmr77". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:03:24 crc kubenswrapper[5050]: I1123 15:03:24.128011 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3023809c-c0ef-4911-8dcc-7cdd678e71c3-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "3023809c-c0ef-4911-8dcc-7cdd678e71c3" (UID: "3023809c-c0ef-4911-8dcc-7cdd678e71c3"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:03:24 crc kubenswrapper[5050]: I1123 15:03:24.164169 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3023809c-c0ef-4911-8dcc-7cdd678e71c3-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "3023809c-c0ef-4911-8dcc-7cdd678e71c3" (UID: "3023809c-c0ef-4911-8dcc-7cdd678e71c3"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:03:24 crc kubenswrapper[5050]: I1123 15:03:24.178859 5050 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/3023809c-c0ef-4911-8dcc-7cdd678e71c3-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 23 15:03:24 crc kubenswrapper[5050]: I1123 15:03:24.178896 5050 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/3023809c-c0ef-4911-8dcc-7cdd678e71c3-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 23 15:03:24 crc kubenswrapper[5050]: I1123 15:03:24.178907 5050 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3023809c-c0ef-4911-8dcc-7cdd678e71c3-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 23 15:03:24 crc kubenswrapper[5050]: I1123 15:03:24.178916 5050 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3023809c-c0ef-4911-8dcc-7cdd678e71c3-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 23 15:03:24 crc kubenswrapper[5050]: I1123 15:03:24.178924 5050 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3023809c-c0ef-4911-8dcc-7cdd678e71c3-scripts\") on node \"crc\" DevicePath \"\"" Nov 23 15:03:24 crc kubenswrapper[5050]: I1123 15:03:24.178933 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vmr77\" (UniqueName: \"kubernetes.io/projected/3023809c-c0ef-4911-8dcc-7cdd678e71c3-kube-api-access-vmr77\") on node \"crc\" DevicePath \"\"" Nov 23 15:03:24 crc kubenswrapper[5050]: I1123 15:03:24.196700 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3023809c-c0ef-4911-8dcc-7cdd678e71c3-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3023809c-c0ef-4911-8dcc-7cdd678e71c3" (UID: "3023809c-c0ef-4911-8dcc-7cdd678e71c3"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:03:24 crc kubenswrapper[5050]: I1123 15:03:24.241732 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3023809c-c0ef-4911-8dcc-7cdd678e71c3-config-data" (OuterVolumeSpecName: "config-data") pod "3023809c-c0ef-4911-8dcc-7cdd678e71c3" (UID: "3023809c-c0ef-4911-8dcc-7cdd678e71c3"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:03:24 crc kubenswrapper[5050]: I1123 15:03:24.281319 5050 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3023809c-c0ef-4911-8dcc-7cdd678e71c3-config-data\") on node \"crc\" DevicePath \"\"" Nov 23 15:03:24 crc kubenswrapper[5050]: I1123 15:03:24.281381 5050 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3023809c-c0ef-4911-8dcc-7cdd678e71c3-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 15:03:24 crc kubenswrapper[5050]: I1123 15:03:24.645898 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 23 15:03:24 crc kubenswrapper[5050]: I1123 15:03:24.645904 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"e1ed84f3-a226-4de3-b5f1-7ba557c1ae4d","Type":"ContainerDied","Data":"b5b3c1368e59165a35d258332b1f23a5f44294a50081f5dde9d74d23d541f9bc"} Nov 23 15:03:24 crc kubenswrapper[5050]: I1123 15:03:24.647170 5050 scope.go:117] "RemoveContainer" containerID="2be0f7e13323ed3b94345fa2e9c64a5885580a2c8bcbfb005d4144dd16705054" Nov 23 15:03:24 crc kubenswrapper[5050]: I1123 15:03:24.653538 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3023809c-c0ef-4911-8dcc-7cdd678e71c3","Type":"ContainerDied","Data":"46b3fbfe84fd5573dd279adf4db4bc0149c22655aa155ba1a49f92dd5c40b2e8"} Nov 23 15:03:24 crc kubenswrapper[5050]: I1123 15:03:24.653670 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 23 15:03:24 crc kubenswrapper[5050]: I1123 15:03:24.691345 5050 scope.go:117] "RemoveContainer" containerID="d2931ae007bc8543cad15627c35800ea000cc388ab6a6b562277bd995e34a4b7" Nov 23 15:03:24 crc kubenswrapper[5050]: I1123 15:03:24.702088 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 23 15:03:24 crc kubenswrapper[5050]: I1123 15:03:24.715475 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 23 15:03:24 crc kubenswrapper[5050]: I1123 15:03:24.718904 5050 scope.go:117] "RemoveContainer" containerID="f9593a90f1a76e06505bfefaef0f5644dc8b4d4115d5ae465e03292937435228" Nov 23 15:03:24 crc kubenswrapper[5050]: I1123 15:03:24.731822 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 23 15:03:24 crc kubenswrapper[5050]: I1123 15:03:24.742589 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 23 15:03:24 crc kubenswrapper[5050]: I1123 15:03:24.753199 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 23 15:03:24 crc kubenswrapper[5050]: E1123 15:03:24.753734 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3023809c-c0ef-4911-8dcc-7cdd678e71c3" containerName="proxy-httpd" Nov 23 15:03:24 crc kubenswrapper[5050]: I1123 15:03:24.753752 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="3023809c-c0ef-4911-8dcc-7cdd678e71c3" containerName="proxy-httpd" Nov 23 15:03:24 crc kubenswrapper[5050]: E1123 15:03:24.753773 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3023809c-c0ef-4911-8dcc-7cdd678e71c3" containerName="sg-core" Nov 23 15:03:24 crc kubenswrapper[5050]: I1123 15:03:24.753781 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="3023809c-c0ef-4911-8dcc-7cdd678e71c3" containerName="sg-core" Nov 23 15:03:24 crc kubenswrapper[5050]: E1123 15:03:24.753792 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3023809c-c0ef-4911-8dcc-7cdd678e71c3" containerName="ceilometer-notification-agent" Nov 23 15:03:24 crc kubenswrapper[5050]: I1123 15:03:24.753799 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="3023809c-c0ef-4911-8dcc-7cdd678e71c3" containerName="ceilometer-notification-agent" Nov 23 15:03:24 crc kubenswrapper[5050]: E1123 15:03:24.753808 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3023809c-c0ef-4911-8dcc-7cdd678e71c3" containerName="ceilometer-central-agent" Nov 23 15:03:24 crc kubenswrapper[5050]: I1123 15:03:24.753815 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="3023809c-c0ef-4911-8dcc-7cdd678e71c3" containerName="ceilometer-central-agent" Nov 23 15:03:24 crc kubenswrapper[5050]: E1123 15:03:24.753827 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e1ed84f3-a226-4de3-b5f1-7ba557c1ae4d" containerName="nova-api-api" Nov 23 15:03:24 crc kubenswrapper[5050]: I1123 15:03:24.753833 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="e1ed84f3-a226-4de3-b5f1-7ba557c1ae4d" containerName="nova-api-api" Nov 23 15:03:24 crc kubenswrapper[5050]: E1123 15:03:24.753847 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e1ed84f3-a226-4de3-b5f1-7ba557c1ae4d" containerName="nova-api-log" Nov 23 15:03:24 crc kubenswrapper[5050]: I1123 15:03:24.753853 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="e1ed84f3-a226-4de3-b5f1-7ba557c1ae4d" containerName="nova-api-log" Nov 23 15:03:24 crc 
kubenswrapper[5050]: I1123 15:03:24.754037 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="e1ed84f3-a226-4de3-b5f1-7ba557c1ae4d" containerName="nova-api-log" Nov 23 15:03:24 crc kubenswrapper[5050]: I1123 15:03:24.754049 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="3023809c-c0ef-4911-8dcc-7cdd678e71c3" containerName="ceilometer-notification-agent" Nov 23 15:03:24 crc kubenswrapper[5050]: I1123 15:03:24.754061 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="3023809c-c0ef-4911-8dcc-7cdd678e71c3" containerName="sg-core" Nov 23 15:03:24 crc kubenswrapper[5050]: I1123 15:03:24.754076 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="e1ed84f3-a226-4de3-b5f1-7ba557c1ae4d" containerName="nova-api-api" Nov 23 15:03:24 crc kubenswrapper[5050]: I1123 15:03:24.754090 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="3023809c-c0ef-4911-8dcc-7cdd678e71c3" containerName="ceilometer-central-agent" Nov 23 15:03:24 crc kubenswrapper[5050]: I1123 15:03:24.754096 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="3023809c-c0ef-4911-8dcc-7cdd678e71c3" containerName="proxy-httpd" Nov 23 15:03:24 crc kubenswrapper[5050]: I1123 15:03:24.755228 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 23 15:03:24 crc kubenswrapper[5050]: I1123 15:03:24.760211 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Nov 23 15:03:24 crc kubenswrapper[5050]: I1123 15:03:24.760701 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 23 15:03:24 crc kubenswrapper[5050]: I1123 15:03:24.764551 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 23 15:03:24 crc kubenswrapper[5050]: I1123 15:03:24.768688 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 23 15:03:24 crc kubenswrapper[5050]: I1123 15:03:24.769183 5050 scope.go:117] "RemoveContainer" containerID="2846726b4a01de78ddce3c273b5cb13f09fe8655c34c83dbca6ba1ca50437720" Nov 23 15:03:24 crc kubenswrapper[5050]: I1123 15:03:24.776021 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Nov 23 15:03:24 crc kubenswrapper[5050]: I1123 15:03:24.779793 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Nov 23 15:03:24 crc kubenswrapper[5050]: I1123 15:03:24.780014 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 23 15:03:24 crc kubenswrapper[5050]: I1123 15:03:24.780201 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 23 15:03:24 crc kubenswrapper[5050]: I1123 15:03:24.803720 5050 scope.go:117] "RemoveContainer" containerID="55aa601d0049f276c0ae585090ac3d9812f82dd2ee3827db00dd15d9ff7f7f94" Nov 23 15:03:24 crc kubenswrapper[5050]: I1123 15:03:24.822533 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 23 15:03:24 crc kubenswrapper[5050]: I1123 15:03:24.832738 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 23 15:03:24 crc kubenswrapper[5050]: I1123 15:03:24.833034 5050 scope.go:117] "RemoveContainer" containerID="efd9102008f33ce4b28d7c9c6a0fbc1001c14e99a0ece0af0a6e35b9e9062161" Nov 23 15:03:24 crc kubenswrapper[5050]: I1123 15:03:24.902203 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e9a154a7-ee98-4732-bbc6-e0324fcf97c5-logs\") pod \"nova-api-0\" (UID: \"e9a154a7-ee98-4732-bbc6-e0324fcf97c5\") " pod="openstack/nova-api-0" Nov 23 15:03:24 crc kubenswrapper[5050]: I1123 15:03:24.902282 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/528697e8-1ddc-4ab0-aa0e-008631905a4c-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"528697e8-1ddc-4ab0-aa0e-008631905a4c\") " pod="openstack/ceilometer-0" Nov 23 15:03:24 crc kubenswrapper[5050]: I1123 15:03:24.902313 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/528697e8-1ddc-4ab0-aa0e-008631905a4c-run-httpd\") pod \"ceilometer-0\" (UID: \"528697e8-1ddc-4ab0-aa0e-008631905a4c\") " pod="openstack/ceilometer-0" Nov 23 15:03:24 crc kubenswrapper[5050]: I1123 15:03:24.902388 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/528697e8-1ddc-4ab0-aa0e-008631905a4c-log-httpd\") pod \"ceilometer-0\" (UID: \"528697e8-1ddc-4ab0-aa0e-008631905a4c\") " pod="openstack/ceilometer-0" Nov 23 15:03:24 crc kubenswrapper[5050]: I1123 15:03:24.902418 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e9a154a7-ee98-4732-bbc6-e0324fcf97c5-config-data\") pod \"nova-api-0\" (UID: \"e9a154a7-ee98-4732-bbc6-e0324fcf97c5\") " pod="openstack/nova-api-0" Nov 23 15:03:24 crc kubenswrapper[5050]: I1123 15:03:24.902479 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/528697e8-1ddc-4ab0-aa0e-008631905a4c-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"528697e8-1ddc-4ab0-aa0e-008631905a4c\") " pod="openstack/ceilometer-0" Nov 23 15:03:24 crc kubenswrapper[5050]: I1123 15:03:24.902688 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e9a154a7-ee98-4732-bbc6-e0324fcf97c5-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"e9a154a7-ee98-4732-bbc6-e0324fcf97c5\") " pod="openstack/nova-api-0" Nov 23 15:03:24 crc kubenswrapper[5050]: I1123 15:03:24.902875 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/528697e8-1ddc-4ab0-aa0e-008631905a4c-scripts\") pod \"ceilometer-0\" (UID: \"528697e8-1ddc-4ab0-aa0e-008631905a4c\") " pod="openstack/ceilometer-0" Nov 23 15:03:24 crc kubenswrapper[5050]: I1123 15:03:24.903023 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/528697e8-1ddc-4ab0-aa0e-008631905a4c-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"528697e8-1ddc-4ab0-aa0e-008631905a4c\") " pod="openstack/ceilometer-0" Nov 23 15:03:24 crc kubenswrapper[5050]: I1123 15:03:24.903068 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5s6nb\" (UniqueName: \"kubernetes.io/projected/528697e8-1ddc-4ab0-aa0e-008631905a4c-kube-api-access-5s6nb\") pod \"ceilometer-0\" (UID: \"528697e8-1ddc-4ab0-aa0e-008631905a4c\") " pod="openstack/ceilometer-0" Nov 23 15:03:24 crc kubenswrapper[5050]: I1123 15:03:24.903113 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e9a154a7-ee98-4732-bbc6-e0324fcf97c5-public-tls-certs\") pod \"nova-api-0\" (UID: \"e9a154a7-ee98-4732-bbc6-e0324fcf97c5\") " pod="openstack/nova-api-0" Nov 23 15:03:24 crc kubenswrapper[5050]: I1123 15:03:24.903208 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e9a154a7-ee98-4732-bbc6-e0324fcf97c5-internal-tls-certs\") pod \"nova-api-0\" (UID: \"e9a154a7-ee98-4732-bbc6-e0324fcf97c5\") " pod="openstack/nova-api-0" Nov 23 15:03:24 crc kubenswrapper[5050]: I1123 15:03:24.903251 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/528697e8-1ddc-4ab0-aa0e-008631905a4c-config-data\") pod \"ceilometer-0\" (UID: \"528697e8-1ddc-4ab0-aa0e-008631905a4c\") " pod="openstack/ceilometer-0" Nov 23 15:03:24 crc kubenswrapper[5050]: I1123 15:03:24.903315 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zdzjw\" (UniqueName: \"kubernetes.io/projected/e9a154a7-ee98-4732-bbc6-e0324fcf97c5-kube-api-access-zdzjw\") pod \"nova-api-0\" (UID: \"e9a154a7-ee98-4732-bbc6-e0324fcf97c5\") " pod="openstack/nova-api-0" Nov 23 15:03:25 crc kubenswrapper[5050]: I1123 15:03:25.004670 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/528697e8-1ddc-4ab0-aa0e-008631905a4c-config-data\") pod \"ceilometer-0\" (UID: \"528697e8-1ddc-4ab0-aa0e-008631905a4c\") " 
pod="openstack/ceilometer-0" Nov 23 15:03:25 crc kubenswrapper[5050]: I1123 15:03:25.004735 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zdzjw\" (UniqueName: \"kubernetes.io/projected/e9a154a7-ee98-4732-bbc6-e0324fcf97c5-kube-api-access-zdzjw\") pod \"nova-api-0\" (UID: \"e9a154a7-ee98-4732-bbc6-e0324fcf97c5\") " pod="openstack/nova-api-0" Nov 23 15:03:25 crc kubenswrapper[5050]: I1123 15:03:25.004781 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e9a154a7-ee98-4732-bbc6-e0324fcf97c5-logs\") pod \"nova-api-0\" (UID: \"e9a154a7-ee98-4732-bbc6-e0324fcf97c5\") " pod="openstack/nova-api-0" Nov 23 15:03:25 crc kubenswrapper[5050]: I1123 15:03:25.004802 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/528697e8-1ddc-4ab0-aa0e-008631905a4c-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"528697e8-1ddc-4ab0-aa0e-008631905a4c\") " pod="openstack/ceilometer-0" Nov 23 15:03:25 crc kubenswrapper[5050]: I1123 15:03:25.004818 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/528697e8-1ddc-4ab0-aa0e-008631905a4c-run-httpd\") pod \"ceilometer-0\" (UID: \"528697e8-1ddc-4ab0-aa0e-008631905a4c\") " pod="openstack/ceilometer-0" Nov 23 15:03:25 crc kubenswrapper[5050]: I1123 15:03:25.004838 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/528697e8-1ddc-4ab0-aa0e-008631905a4c-log-httpd\") pod \"ceilometer-0\" (UID: \"528697e8-1ddc-4ab0-aa0e-008631905a4c\") " pod="openstack/ceilometer-0" Nov 23 15:03:25 crc kubenswrapper[5050]: I1123 15:03:25.004979 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e9a154a7-ee98-4732-bbc6-e0324fcf97c5-config-data\") pod \"nova-api-0\" (UID: \"e9a154a7-ee98-4732-bbc6-e0324fcf97c5\") " pod="openstack/nova-api-0" Nov 23 15:03:25 crc kubenswrapper[5050]: I1123 15:03:25.005119 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/528697e8-1ddc-4ab0-aa0e-008631905a4c-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"528697e8-1ddc-4ab0-aa0e-008631905a4c\") " pod="openstack/ceilometer-0" Nov 23 15:03:25 crc kubenswrapper[5050]: I1123 15:03:25.005236 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e9a154a7-ee98-4732-bbc6-e0324fcf97c5-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"e9a154a7-ee98-4732-bbc6-e0324fcf97c5\") " pod="openstack/nova-api-0" Nov 23 15:03:25 crc kubenswrapper[5050]: I1123 15:03:25.005293 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/528697e8-1ddc-4ab0-aa0e-008631905a4c-scripts\") pod \"ceilometer-0\" (UID: \"528697e8-1ddc-4ab0-aa0e-008631905a4c\") " pod="openstack/ceilometer-0" Nov 23 15:03:25 crc kubenswrapper[5050]: I1123 15:03:25.005362 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/528697e8-1ddc-4ab0-aa0e-008631905a4c-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"528697e8-1ddc-4ab0-aa0e-008631905a4c\") " pod="openstack/ceilometer-0" 
Nov 23 15:03:25 crc kubenswrapper[5050]: I1123 15:03:25.005386 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5s6nb\" (UniqueName: \"kubernetes.io/projected/528697e8-1ddc-4ab0-aa0e-008631905a4c-kube-api-access-5s6nb\") pod \"ceilometer-0\" (UID: \"528697e8-1ddc-4ab0-aa0e-008631905a4c\") " pod="openstack/ceilometer-0" Nov 23 15:03:25 crc kubenswrapper[5050]: I1123 15:03:25.005471 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e9a154a7-ee98-4732-bbc6-e0324fcf97c5-public-tls-certs\") pod \"nova-api-0\" (UID: \"e9a154a7-ee98-4732-bbc6-e0324fcf97c5\") " pod="openstack/nova-api-0" Nov 23 15:03:25 crc kubenswrapper[5050]: I1123 15:03:25.005629 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e9a154a7-ee98-4732-bbc6-e0324fcf97c5-internal-tls-certs\") pod \"nova-api-0\" (UID: \"e9a154a7-ee98-4732-bbc6-e0324fcf97c5\") " pod="openstack/nova-api-0" Nov 23 15:03:25 crc kubenswrapper[5050]: I1123 15:03:25.005886 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/528697e8-1ddc-4ab0-aa0e-008631905a4c-run-httpd\") pod \"ceilometer-0\" (UID: \"528697e8-1ddc-4ab0-aa0e-008631905a4c\") " pod="openstack/ceilometer-0" Nov 23 15:03:25 crc kubenswrapper[5050]: I1123 15:03:25.006414 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e9a154a7-ee98-4732-bbc6-e0324fcf97c5-logs\") pod \"nova-api-0\" (UID: \"e9a154a7-ee98-4732-bbc6-e0324fcf97c5\") " pod="openstack/nova-api-0" Nov 23 15:03:25 crc kubenswrapper[5050]: I1123 15:03:25.006501 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/528697e8-1ddc-4ab0-aa0e-008631905a4c-log-httpd\") pod \"ceilometer-0\" (UID: \"528697e8-1ddc-4ab0-aa0e-008631905a4c\") " pod="openstack/ceilometer-0" Nov 23 15:03:25 crc kubenswrapper[5050]: I1123 15:03:25.013988 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e9a154a7-ee98-4732-bbc6-e0324fcf97c5-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"e9a154a7-ee98-4732-bbc6-e0324fcf97c5\") " pod="openstack/nova-api-0" Nov 23 15:03:25 crc kubenswrapper[5050]: I1123 15:03:25.015774 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/528697e8-1ddc-4ab0-aa0e-008631905a4c-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"528697e8-1ddc-4ab0-aa0e-008631905a4c\") " pod="openstack/ceilometer-0" Nov 23 15:03:25 crc kubenswrapper[5050]: I1123 15:03:25.016207 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/528697e8-1ddc-4ab0-aa0e-008631905a4c-config-data\") pod \"ceilometer-0\" (UID: \"528697e8-1ddc-4ab0-aa0e-008631905a4c\") " pod="openstack/ceilometer-0" Nov 23 15:03:25 crc kubenswrapper[5050]: I1123 15:03:25.016254 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/528697e8-1ddc-4ab0-aa0e-008631905a4c-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"528697e8-1ddc-4ab0-aa0e-008631905a4c\") " pod="openstack/ceilometer-0" Nov 23 15:03:25 crc kubenswrapper[5050]: I1123 15:03:25.017347 
5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/528697e8-1ddc-4ab0-aa0e-008631905a4c-scripts\") pod \"ceilometer-0\" (UID: \"528697e8-1ddc-4ab0-aa0e-008631905a4c\") " pod="openstack/ceilometer-0" Nov 23 15:03:25 crc kubenswrapper[5050]: I1123 15:03:25.023036 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e9a154a7-ee98-4732-bbc6-e0324fcf97c5-internal-tls-certs\") pod \"nova-api-0\" (UID: \"e9a154a7-ee98-4732-bbc6-e0324fcf97c5\") " pod="openstack/nova-api-0" Nov 23 15:03:25 crc kubenswrapper[5050]: I1123 15:03:25.025794 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e9a154a7-ee98-4732-bbc6-e0324fcf97c5-public-tls-certs\") pod \"nova-api-0\" (UID: \"e9a154a7-ee98-4732-bbc6-e0324fcf97c5\") " pod="openstack/nova-api-0" Nov 23 15:03:25 crc kubenswrapper[5050]: I1123 15:03:25.027541 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e9a154a7-ee98-4732-bbc6-e0324fcf97c5-config-data\") pod \"nova-api-0\" (UID: \"e9a154a7-ee98-4732-bbc6-e0324fcf97c5\") " pod="openstack/nova-api-0" Nov 23 15:03:25 crc kubenswrapper[5050]: I1123 15:03:25.027854 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zdzjw\" (UniqueName: \"kubernetes.io/projected/e9a154a7-ee98-4732-bbc6-e0324fcf97c5-kube-api-access-zdzjw\") pod \"nova-api-0\" (UID: \"e9a154a7-ee98-4732-bbc6-e0324fcf97c5\") " pod="openstack/nova-api-0" Nov 23 15:03:25 crc kubenswrapper[5050]: I1123 15:03:25.028643 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5s6nb\" (UniqueName: \"kubernetes.io/projected/528697e8-1ddc-4ab0-aa0e-008631905a4c-kube-api-access-5s6nb\") pod \"ceilometer-0\" (UID: \"528697e8-1ddc-4ab0-aa0e-008631905a4c\") " pod="openstack/ceilometer-0" Nov 23 15:03:25 crc kubenswrapper[5050]: I1123 15:03:25.029108 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/528697e8-1ddc-4ab0-aa0e-008631905a4c-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"528697e8-1ddc-4ab0-aa0e-008631905a4c\") " pod="openstack/ceilometer-0" Nov 23 15:03:25 crc kubenswrapper[5050]: I1123 15:03:25.098645 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 23 15:03:25 crc kubenswrapper[5050]: I1123 15:03:25.104072 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 23 15:03:25 crc kubenswrapper[5050]: I1123 15:03:25.140000 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-cell1-novncproxy-0" Nov 23 15:03:25 crc kubenswrapper[5050]: I1123 15:03:25.223177 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-cell1-novncproxy-0" Nov 23 15:03:25 crc kubenswrapper[5050]: I1123 15:03:25.568326 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3023809c-c0ef-4911-8dcc-7cdd678e71c3" path="/var/lib/kubelet/pods/3023809c-c0ef-4911-8dcc-7cdd678e71c3/volumes" Nov 23 15:03:25 crc kubenswrapper[5050]: I1123 15:03:25.569563 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e1ed84f3-a226-4de3-b5f1-7ba557c1ae4d" path="/var/lib/kubelet/pods/e1ed84f3-a226-4de3-b5f1-7ba557c1ae4d/volumes" Nov 23 15:03:25 crc kubenswrapper[5050]: I1123 15:03:25.703198 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 23 15:03:25 crc kubenswrapper[5050]: I1123 15:03:25.716516 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-novncproxy-0" Nov 23 15:03:25 crc kubenswrapper[5050]: I1123 15:03:25.717860 5050 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 23 15:03:25 crc kubenswrapper[5050]: I1123 15:03:25.733983 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 23 15:03:25 crc kubenswrapper[5050]: I1123 15:03:25.869247 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-cell-mapping-q7jct"] Nov 23 15:03:25 crc kubenswrapper[5050]: I1123 15:03:25.872074 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-q7jct" Nov 23 15:03:25 crc kubenswrapper[5050]: I1123 15:03:25.876041 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-config-data" Nov 23 15:03:25 crc kubenswrapper[5050]: I1123 15:03:25.876344 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-scripts" Nov 23 15:03:25 crc kubenswrapper[5050]: I1123 15:03:25.881398 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-q7jct"] Nov 23 15:03:26 crc kubenswrapper[5050]: I1123 15:03:26.031212 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cff4dbdd-ad13-4f74-beb1-9d9658c4b25e-scripts\") pod \"nova-cell1-cell-mapping-q7jct\" (UID: \"cff4dbdd-ad13-4f74-beb1-9d9658c4b25e\") " pod="openstack/nova-cell1-cell-mapping-q7jct" Nov 23 15:03:26 crc kubenswrapper[5050]: I1123 15:03:26.031288 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cff4dbdd-ad13-4f74-beb1-9d9658c4b25e-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-q7jct\" (UID: \"cff4dbdd-ad13-4f74-beb1-9d9658c4b25e\") " pod="openstack/nova-cell1-cell-mapping-q7jct" Nov 23 15:03:26 crc kubenswrapper[5050]: I1123 15:03:26.031424 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cff4dbdd-ad13-4f74-beb1-9d9658c4b25e-config-data\") pod \"nova-cell1-cell-mapping-q7jct\" (UID: \"cff4dbdd-ad13-4f74-beb1-9d9658c4b25e\") " pod="openstack/nova-cell1-cell-mapping-q7jct" Nov 23 15:03:26 crc kubenswrapper[5050]: I1123 15:03:26.031604 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5nz8k\" (UniqueName: \"kubernetes.io/projected/cff4dbdd-ad13-4f74-beb1-9d9658c4b25e-kube-api-access-5nz8k\") pod \"nova-cell1-cell-mapping-q7jct\" (UID: \"cff4dbdd-ad13-4f74-beb1-9d9658c4b25e\") " pod="openstack/nova-cell1-cell-mapping-q7jct" Nov 23 15:03:26 crc kubenswrapper[5050]: I1123 15:03:26.133678 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cff4dbdd-ad13-4f74-beb1-9d9658c4b25e-config-data\") pod \"nova-cell1-cell-mapping-q7jct\" (UID: \"cff4dbdd-ad13-4f74-beb1-9d9658c4b25e\") " pod="openstack/nova-cell1-cell-mapping-q7jct" Nov 23 15:03:26 crc kubenswrapper[5050]: I1123 15:03:26.134075 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5nz8k\" (UniqueName: \"kubernetes.io/projected/cff4dbdd-ad13-4f74-beb1-9d9658c4b25e-kube-api-access-5nz8k\") pod \"nova-cell1-cell-mapping-q7jct\" (UID: \"cff4dbdd-ad13-4f74-beb1-9d9658c4b25e\") " pod="openstack/nova-cell1-cell-mapping-q7jct" Nov 23 15:03:26 crc kubenswrapper[5050]: I1123 15:03:26.134151 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cff4dbdd-ad13-4f74-beb1-9d9658c4b25e-scripts\") pod \"nova-cell1-cell-mapping-q7jct\" (UID: \"cff4dbdd-ad13-4f74-beb1-9d9658c4b25e\") " pod="openstack/nova-cell1-cell-mapping-q7jct" Nov 23 15:03:26 crc kubenswrapper[5050]: I1123 15:03:26.134189 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/cff4dbdd-ad13-4f74-beb1-9d9658c4b25e-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-q7jct\" (UID: \"cff4dbdd-ad13-4f74-beb1-9d9658c4b25e\") " pod="openstack/nova-cell1-cell-mapping-q7jct" Nov 23 15:03:26 crc kubenswrapper[5050]: I1123 15:03:26.138797 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cff4dbdd-ad13-4f74-beb1-9d9658c4b25e-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-q7jct\" (UID: \"cff4dbdd-ad13-4f74-beb1-9d9658c4b25e\") " pod="openstack/nova-cell1-cell-mapping-q7jct" Nov 23 15:03:26 crc kubenswrapper[5050]: I1123 15:03:26.139315 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cff4dbdd-ad13-4f74-beb1-9d9658c4b25e-config-data\") pod \"nova-cell1-cell-mapping-q7jct\" (UID: \"cff4dbdd-ad13-4f74-beb1-9d9658c4b25e\") " pod="openstack/nova-cell1-cell-mapping-q7jct" Nov 23 15:03:26 crc kubenswrapper[5050]: I1123 15:03:26.152205 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cff4dbdd-ad13-4f74-beb1-9d9658c4b25e-scripts\") pod \"nova-cell1-cell-mapping-q7jct\" (UID: \"cff4dbdd-ad13-4f74-beb1-9d9658c4b25e\") " pod="openstack/nova-cell1-cell-mapping-q7jct" Nov 23 15:03:26 crc kubenswrapper[5050]: I1123 15:03:26.161065 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5nz8k\" (UniqueName: \"kubernetes.io/projected/cff4dbdd-ad13-4f74-beb1-9d9658c4b25e-kube-api-access-5nz8k\") pod \"nova-cell1-cell-mapping-q7jct\" (UID: \"cff4dbdd-ad13-4f74-beb1-9d9658c4b25e\") " pod="openstack/nova-cell1-cell-mapping-q7jct" Nov 23 15:03:26 crc kubenswrapper[5050]: I1123 15:03:26.242452 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-q7jct" Nov 23 15:03:26 crc kubenswrapper[5050]: I1123 15:03:26.686892 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"e9a154a7-ee98-4732-bbc6-e0324fcf97c5","Type":"ContainerStarted","Data":"0988185576d9ff8866bc58410524df2d8104159e463e976e14db88026c0d28a3"} Nov 23 15:03:26 crc kubenswrapper[5050]: I1123 15:03:26.688097 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"e9a154a7-ee98-4732-bbc6-e0324fcf97c5","Type":"ContainerStarted","Data":"e21136cc2438205b1f830210e02b1ea8a018ed293faf14a51aba45f916483d10"} Nov 23 15:03:26 crc kubenswrapper[5050]: I1123 15:03:26.688119 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"e9a154a7-ee98-4732-bbc6-e0324fcf97c5","Type":"ContainerStarted","Data":"c0963a0a51ee16d950521f7cc86b01bf90e0892b3fde7261ba439513c098cb9d"} Nov 23 15:03:26 crc kubenswrapper[5050]: I1123 15:03:26.691362 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"528697e8-1ddc-4ab0-aa0e-008631905a4c","Type":"ContainerStarted","Data":"54bb6bc8b8fe2be064d6b1b7c3753c2cce5da608c4510b93b32b5cb3298cf7f7"} Nov 23 15:03:26 crc kubenswrapper[5050]: I1123 15:03:26.691403 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"528697e8-1ddc-4ab0-aa0e-008631905a4c","Type":"ContainerStarted","Data":"0460cd1868cf32b301bc534ae55e7cf03d3645a42adf46dedf5dd733b165c3c2"} Nov 23 15:03:26 crc kubenswrapper[5050]: I1123 15:03:26.725123 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.725093538 podStartE2EDuration="2.725093538s" podCreationTimestamp="2025-11-23 15:03:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 15:03:26.705396868 +0000 UTC m=+1301.872393353" watchObservedRunningTime="2025-11-23 15:03:26.725093538 +0000 UTC m=+1301.892090063" Nov 23 15:03:26 crc kubenswrapper[5050]: I1123 15:03:26.766769 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-q7jct"] Nov 23 15:03:26 crc kubenswrapper[5050]: W1123 15:03:26.768567 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podcff4dbdd_ad13_4f74_beb1_9d9658c4b25e.slice/crio-7f39cbf0b27091ed3a0a75abcd403c5bf7443e24f591cba7470aa075bb1969b1 WatchSource:0}: Error finding container 7f39cbf0b27091ed3a0a75abcd403c5bf7443e24f591cba7470aa075bb1969b1: Status 404 returned error can't find the container with id 7f39cbf0b27091ed3a0a75abcd403c5bf7443e24f591cba7470aa075bb1969b1 Nov 23 15:03:27 crc kubenswrapper[5050]: I1123 15:03:27.126346 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-cd5cbd7b9-vptxh" Nov 23 15:03:27 crc kubenswrapper[5050]: I1123 15:03:27.203880 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-bccf8f775-vvmp7"] Nov 23 15:03:27 crc kubenswrapper[5050]: I1123 15:03:27.204153 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-bccf8f775-vvmp7" podUID="e45241bb-3031-428d-aa6b-370eb2da07b0" containerName="dnsmasq-dns" containerID="cri-o://2f9d72ef841a0d2c3f7445d31d2346eca403597faa2f4d79199c4bee1a01cdb9" gracePeriod=10 Nov 23 15:03:27 crc 
kubenswrapper[5050]: I1123 15:03:27.725163 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"528697e8-1ddc-4ab0-aa0e-008631905a4c","Type":"ContainerStarted","Data":"888990a4380d2a527f44498ff0a147a611909bb72bf4f2fad48c2ae5096b8882"} Nov 23 15:03:27 crc kubenswrapper[5050]: I1123 15:03:27.729192 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-q7jct" event={"ID":"cff4dbdd-ad13-4f74-beb1-9d9658c4b25e","Type":"ContainerStarted","Data":"e5a03ed08028089ecf5d63d4900a3f9f939e1772c72456526c7bcdd023ba7ed7"} Nov 23 15:03:27 crc kubenswrapper[5050]: I1123 15:03:27.729300 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-q7jct" event={"ID":"cff4dbdd-ad13-4f74-beb1-9d9658c4b25e","Type":"ContainerStarted","Data":"7f39cbf0b27091ed3a0a75abcd403c5bf7443e24f591cba7470aa075bb1969b1"} Nov 23 15:03:27 crc kubenswrapper[5050]: I1123 15:03:27.732589 5050 generic.go:334] "Generic (PLEG): container finished" podID="e45241bb-3031-428d-aa6b-370eb2da07b0" containerID="2f9d72ef841a0d2c3f7445d31d2346eca403597faa2f4d79199c4bee1a01cdb9" exitCode=0 Nov 23 15:03:27 crc kubenswrapper[5050]: I1123 15:03:27.733070 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bccf8f775-vvmp7" event={"ID":"e45241bb-3031-428d-aa6b-370eb2da07b0","Type":"ContainerDied","Data":"2f9d72ef841a0d2c3f7445d31d2346eca403597faa2f4d79199c4bee1a01cdb9"} Nov 23 15:03:27 crc kubenswrapper[5050]: I1123 15:03:27.759921 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-cell-mapping-q7jct" podStartSLOduration=2.759893627 podStartE2EDuration="2.759893627s" podCreationTimestamp="2025-11-23 15:03:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 15:03:27.750525031 +0000 UTC m=+1302.917521526" watchObservedRunningTime="2025-11-23 15:03:27.759893627 +0000 UTC m=+1302.926890112" Nov 23 15:03:27 crc kubenswrapper[5050]: I1123 15:03:27.825181 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-bccf8f775-vvmp7" Nov 23 15:03:27 crc kubenswrapper[5050]: I1123 15:03:27.919491 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e45241bb-3031-428d-aa6b-370eb2da07b0-dns-swift-storage-0\") pod \"e45241bb-3031-428d-aa6b-370eb2da07b0\" (UID: \"e45241bb-3031-428d-aa6b-370eb2da07b0\") " Nov 23 15:03:27 crc kubenswrapper[5050]: I1123 15:03:27.919606 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4hh96\" (UniqueName: \"kubernetes.io/projected/e45241bb-3031-428d-aa6b-370eb2da07b0-kube-api-access-4hh96\") pod \"e45241bb-3031-428d-aa6b-370eb2da07b0\" (UID: \"e45241bb-3031-428d-aa6b-370eb2da07b0\") " Nov 23 15:03:27 crc kubenswrapper[5050]: I1123 15:03:27.919683 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e45241bb-3031-428d-aa6b-370eb2da07b0-ovsdbserver-sb\") pod \"e45241bb-3031-428d-aa6b-370eb2da07b0\" (UID: \"e45241bb-3031-428d-aa6b-370eb2da07b0\") " Nov 23 15:03:27 crc kubenswrapper[5050]: I1123 15:03:27.919725 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e45241bb-3031-428d-aa6b-370eb2da07b0-dns-svc\") pod \"e45241bb-3031-428d-aa6b-370eb2da07b0\" (UID: \"e45241bb-3031-428d-aa6b-370eb2da07b0\") " Nov 23 15:03:27 crc kubenswrapper[5050]: I1123 15:03:27.919831 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e45241bb-3031-428d-aa6b-370eb2da07b0-ovsdbserver-nb\") pod \"e45241bb-3031-428d-aa6b-370eb2da07b0\" (UID: \"e45241bb-3031-428d-aa6b-370eb2da07b0\") " Nov 23 15:03:27 crc kubenswrapper[5050]: I1123 15:03:27.919866 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e45241bb-3031-428d-aa6b-370eb2da07b0-config\") pod \"e45241bb-3031-428d-aa6b-370eb2da07b0\" (UID: \"e45241bb-3031-428d-aa6b-370eb2da07b0\") " Nov 23 15:03:27 crc kubenswrapper[5050]: I1123 15:03:27.927203 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e45241bb-3031-428d-aa6b-370eb2da07b0-kube-api-access-4hh96" (OuterVolumeSpecName: "kube-api-access-4hh96") pod "e45241bb-3031-428d-aa6b-370eb2da07b0" (UID: "e45241bb-3031-428d-aa6b-370eb2da07b0"). InnerVolumeSpecName "kube-api-access-4hh96". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:03:27 crc kubenswrapper[5050]: I1123 15:03:27.975911 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e45241bb-3031-428d-aa6b-370eb2da07b0-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "e45241bb-3031-428d-aa6b-370eb2da07b0" (UID: "e45241bb-3031-428d-aa6b-370eb2da07b0"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 15:03:27 crc kubenswrapper[5050]: I1123 15:03:27.985020 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e45241bb-3031-428d-aa6b-370eb2da07b0-config" (OuterVolumeSpecName: "config") pod "e45241bb-3031-428d-aa6b-370eb2da07b0" (UID: "e45241bb-3031-428d-aa6b-370eb2da07b0"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 15:03:27 crc kubenswrapper[5050]: I1123 15:03:27.992673 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e45241bb-3031-428d-aa6b-370eb2da07b0-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "e45241bb-3031-428d-aa6b-370eb2da07b0" (UID: "e45241bb-3031-428d-aa6b-370eb2da07b0"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 15:03:28 crc kubenswrapper[5050]: I1123 15:03:27.999997 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e45241bb-3031-428d-aa6b-370eb2da07b0-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "e45241bb-3031-428d-aa6b-370eb2da07b0" (UID: "e45241bb-3031-428d-aa6b-370eb2da07b0"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 15:03:28 crc kubenswrapper[5050]: I1123 15:03:28.022283 5050 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e45241bb-3031-428d-aa6b-370eb2da07b0-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 23 15:03:28 crc kubenswrapper[5050]: I1123 15:03:28.022329 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4hh96\" (UniqueName: \"kubernetes.io/projected/e45241bb-3031-428d-aa6b-370eb2da07b0-kube-api-access-4hh96\") on node \"crc\" DevicePath \"\"" Nov 23 15:03:28 crc kubenswrapper[5050]: I1123 15:03:28.022341 5050 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e45241bb-3031-428d-aa6b-370eb2da07b0-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 23 15:03:28 crc kubenswrapper[5050]: I1123 15:03:28.022350 5050 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e45241bb-3031-428d-aa6b-370eb2da07b0-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 23 15:03:28 crc kubenswrapper[5050]: I1123 15:03:28.022363 5050 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e45241bb-3031-428d-aa6b-370eb2da07b0-config\") on node \"crc\" DevicePath \"\"" Nov 23 15:03:28 crc kubenswrapper[5050]: I1123 15:03:28.029882 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e45241bb-3031-428d-aa6b-370eb2da07b0-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "e45241bb-3031-428d-aa6b-370eb2da07b0" (UID: "e45241bb-3031-428d-aa6b-370eb2da07b0"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 15:03:28 crc kubenswrapper[5050]: I1123 15:03:28.124839 5050 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e45241bb-3031-428d-aa6b-370eb2da07b0-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 23 15:03:28 crc kubenswrapper[5050]: I1123 15:03:28.744213 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"528697e8-1ddc-4ab0-aa0e-008631905a4c","Type":"ContainerStarted","Data":"0db0ab1375fb7487d196ec345c0738e0de0a29049ea856246e4379363ccc3e46"} Nov 23 15:03:28 crc kubenswrapper[5050]: I1123 15:03:28.747863 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-bccf8f775-vvmp7" Nov 23 15:03:28 crc kubenswrapper[5050]: I1123 15:03:28.747901 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bccf8f775-vvmp7" event={"ID":"e45241bb-3031-428d-aa6b-370eb2da07b0","Type":"ContainerDied","Data":"04c5842ec69e7f0d00218bb7f1a31b2cd5f89f5941377d52d8aac40411cd959b"} Nov 23 15:03:28 crc kubenswrapper[5050]: I1123 15:03:28.747938 5050 scope.go:117] "RemoveContainer" containerID="2f9d72ef841a0d2c3f7445d31d2346eca403597faa2f4d79199c4bee1a01cdb9" Nov 23 15:03:28 crc kubenswrapper[5050]: I1123 15:03:28.772646 5050 scope.go:117] "RemoveContainer" containerID="3574580b7c087b3a9edf9ced911fd3c8c37ab2156ac82e545e63949cf9900fe4" Nov 23 15:03:28 crc kubenswrapper[5050]: I1123 15:03:28.805921 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-bccf8f775-vvmp7"] Nov 23 15:03:28 crc kubenswrapper[5050]: I1123 15:03:28.816196 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-bccf8f775-vvmp7"] Nov 23 15:03:29 crc kubenswrapper[5050]: I1123 15:03:29.224220 5050 patch_prober.go:28] interesting pod/machine-config-daemon-hlrlq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 23 15:03:29 crc kubenswrapper[5050]: I1123 15:03:29.224695 5050 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 23 15:03:29 crc kubenswrapper[5050]: I1123 15:03:29.224767 5050 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" Nov 23 15:03:29 crc kubenswrapper[5050]: I1123 15:03:29.226264 5050 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"f369955faef6b1a99a27dfa755c0fc81c7c636113d3009cdb4311b4b6c0018d4"} pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 23 15:03:29 crc kubenswrapper[5050]: I1123 15:03:29.226402 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" containerID="cri-o://f369955faef6b1a99a27dfa755c0fc81c7c636113d3009cdb4311b4b6c0018d4" gracePeriod=600 Nov 23 15:03:29 crc kubenswrapper[5050]: I1123 15:03:29.564767 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e45241bb-3031-428d-aa6b-370eb2da07b0" path="/var/lib/kubelet/pods/e45241bb-3031-428d-aa6b-370eb2da07b0/volumes" Nov 23 15:03:29 crc kubenswrapper[5050]: I1123 15:03:29.763106 5050 generic.go:334] "Generic (PLEG): container finished" podID="1d998909-9470-47ef-87e8-b34f0473682f" containerID="f369955faef6b1a99a27dfa755c0fc81c7c636113d3009cdb4311b4b6c0018d4" exitCode=0 Nov 23 15:03:29 crc kubenswrapper[5050]: I1123 15:03:29.763198 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" 
event={"ID":"1d998909-9470-47ef-87e8-b34f0473682f","Type":"ContainerDied","Data":"f369955faef6b1a99a27dfa755c0fc81c7c636113d3009cdb4311b4b6c0018d4"} Nov 23 15:03:29 crc kubenswrapper[5050]: I1123 15:03:29.763271 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" event={"ID":"1d998909-9470-47ef-87e8-b34f0473682f","Type":"ContainerStarted","Data":"beff7179d387676ffa12d4809954c41d9fc6861adf4f28da488173863b61b9bd"} Nov 23 15:03:29 crc kubenswrapper[5050]: I1123 15:03:29.763300 5050 scope.go:117] "RemoveContainer" containerID="2f1968a148f9134c159c0b58dbe311e0c835edf7fb66133145fa860c5ae063e0" Nov 23 15:03:29 crc kubenswrapper[5050]: I1123 15:03:29.767422 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"528697e8-1ddc-4ab0-aa0e-008631905a4c","Type":"ContainerStarted","Data":"5f913c69d91ed75b757a4d06691b6d5cb9dec9b9a7273d12971e80ccd91082db"} Nov 23 15:03:29 crc kubenswrapper[5050]: I1123 15:03:29.767545 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 23 15:03:29 crc kubenswrapper[5050]: I1123 15:03:29.811582 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.619291713 podStartE2EDuration="5.811556326s" podCreationTimestamp="2025-11-23 15:03:24 +0000 UTC" firstStartedPulling="2025-11-23 15:03:25.71738161 +0000 UTC m=+1300.884378135" lastFinishedPulling="2025-11-23 15:03:28.909646263 +0000 UTC m=+1304.076642748" observedRunningTime="2025-11-23 15:03:29.803166287 +0000 UTC m=+1304.970162782" watchObservedRunningTime="2025-11-23 15:03:29.811556326 +0000 UTC m=+1304.978552801" Nov 23 15:03:32 crc kubenswrapper[5050]: I1123 15:03:32.823638 5050 generic.go:334] "Generic (PLEG): container finished" podID="cff4dbdd-ad13-4f74-beb1-9d9658c4b25e" containerID="e5a03ed08028089ecf5d63d4900a3f9f939e1772c72456526c7bcdd023ba7ed7" exitCode=0 Nov 23 15:03:32 crc kubenswrapper[5050]: I1123 15:03:32.823770 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-q7jct" event={"ID":"cff4dbdd-ad13-4f74-beb1-9d9658c4b25e","Type":"ContainerDied","Data":"e5a03ed08028089ecf5d63d4900a3f9f939e1772c72456526c7bcdd023ba7ed7"} Nov 23 15:03:34 crc kubenswrapper[5050]: I1123 15:03:34.326682 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-q7jct" Nov 23 15:03:34 crc kubenswrapper[5050]: I1123 15:03:34.509508 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cff4dbdd-ad13-4f74-beb1-9d9658c4b25e-combined-ca-bundle\") pod \"cff4dbdd-ad13-4f74-beb1-9d9658c4b25e\" (UID: \"cff4dbdd-ad13-4f74-beb1-9d9658c4b25e\") " Nov 23 15:03:34 crc kubenswrapper[5050]: I1123 15:03:34.509571 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5nz8k\" (UniqueName: \"kubernetes.io/projected/cff4dbdd-ad13-4f74-beb1-9d9658c4b25e-kube-api-access-5nz8k\") pod \"cff4dbdd-ad13-4f74-beb1-9d9658c4b25e\" (UID: \"cff4dbdd-ad13-4f74-beb1-9d9658c4b25e\") " Nov 23 15:03:34 crc kubenswrapper[5050]: I1123 15:03:34.509615 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cff4dbdd-ad13-4f74-beb1-9d9658c4b25e-scripts\") pod \"cff4dbdd-ad13-4f74-beb1-9d9658c4b25e\" (UID: \"cff4dbdd-ad13-4f74-beb1-9d9658c4b25e\") " Nov 23 15:03:34 crc kubenswrapper[5050]: I1123 15:03:34.509851 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cff4dbdd-ad13-4f74-beb1-9d9658c4b25e-config-data\") pod \"cff4dbdd-ad13-4f74-beb1-9d9658c4b25e\" (UID: \"cff4dbdd-ad13-4f74-beb1-9d9658c4b25e\") " Nov 23 15:03:34 crc kubenswrapper[5050]: I1123 15:03:34.517311 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cff4dbdd-ad13-4f74-beb1-9d9658c4b25e-scripts" (OuterVolumeSpecName: "scripts") pod "cff4dbdd-ad13-4f74-beb1-9d9658c4b25e" (UID: "cff4dbdd-ad13-4f74-beb1-9d9658c4b25e"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:03:34 crc kubenswrapper[5050]: I1123 15:03:34.518741 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cff4dbdd-ad13-4f74-beb1-9d9658c4b25e-kube-api-access-5nz8k" (OuterVolumeSpecName: "kube-api-access-5nz8k") pod "cff4dbdd-ad13-4f74-beb1-9d9658c4b25e" (UID: "cff4dbdd-ad13-4f74-beb1-9d9658c4b25e"). InnerVolumeSpecName "kube-api-access-5nz8k". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:03:34 crc kubenswrapper[5050]: I1123 15:03:34.547744 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cff4dbdd-ad13-4f74-beb1-9d9658c4b25e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "cff4dbdd-ad13-4f74-beb1-9d9658c4b25e" (UID: "cff4dbdd-ad13-4f74-beb1-9d9658c4b25e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:03:34 crc kubenswrapper[5050]: I1123 15:03:34.549753 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cff4dbdd-ad13-4f74-beb1-9d9658c4b25e-config-data" (OuterVolumeSpecName: "config-data") pod "cff4dbdd-ad13-4f74-beb1-9d9658c4b25e" (UID: "cff4dbdd-ad13-4f74-beb1-9d9658c4b25e"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:03:34 crc kubenswrapper[5050]: I1123 15:03:34.611903 5050 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cff4dbdd-ad13-4f74-beb1-9d9658c4b25e-scripts\") on node \"crc\" DevicePath \"\"" Nov 23 15:03:34 crc kubenswrapper[5050]: I1123 15:03:34.612485 5050 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cff4dbdd-ad13-4f74-beb1-9d9658c4b25e-config-data\") on node \"crc\" DevicePath \"\"" Nov 23 15:03:34 crc kubenswrapper[5050]: I1123 15:03:34.612503 5050 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cff4dbdd-ad13-4f74-beb1-9d9658c4b25e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 15:03:34 crc kubenswrapper[5050]: I1123 15:03:34.612517 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5nz8k\" (UniqueName: \"kubernetes.io/projected/cff4dbdd-ad13-4f74-beb1-9d9658c4b25e-kube-api-access-5nz8k\") on node \"crc\" DevicePath \"\"" Nov 23 15:03:34 crc kubenswrapper[5050]: I1123 15:03:34.855020 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-q7jct" event={"ID":"cff4dbdd-ad13-4f74-beb1-9d9658c4b25e","Type":"ContainerDied","Data":"7f39cbf0b27091ed3a0a75abcd403c5bf7443e24f591cba7470aa075bb1969b1"} Nov 23 15:03:34 crc kubenswrapper[5050]: I1123 15:03:34.855068 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7f39cbf0b27091ed3a0a75abcd403c5bf7443e24f591cba7470aa075bb1969b1" Nov 23 15:03:34 crc kubenswrapper[5050]: I1123 15:03:34.855122 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-q7jct" Nov 23 15:03:35 crc kubenswrapper[5050]: I1123 15:03:35.052615 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 23 15:03:35 crc kubenswrapper[5050]: I1123 15:03:35.052900 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="ebc90782-9288-409b-97b6-06eedca6af69" containerName="nova-scheduler-scheduler" containerID="cri-o://28797149a593494315d5778047a603218f729df770a8fe6758c9321d3bf2ef2e" gracePeriod=30 Nov 23 15:03:35 crc kubenswrapper[5050]: I1123 15:03:35.066404 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 23 15:03:35 crc kubenswrapper[5050]: I1123 15:03:35.066765 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="e9a154a7-ee98-4732-bbc6-e0324fcf97c5" containerName="nova-api-log" containerID="cri-o://e21136cc2438205b1f830210e02b1ea8a018ed293faf14a51aba45f916483d10" gracePeriod=30 Nov 23 15:03:35 crc kubenswrapper[5050]: I1123 15:03:35.066853 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="e9a154a7-ee98-4732-bbc6-e0324fcf97c5" containerName="nova-api-api" containerID="cri-o://0988185576d9ff8866bc58410524df2d8104159e463e976e14db88026c0d28a3" gracePeriod=30 Nov 23 15:03:35 crc kubenswrapper[5050]: I1123 15:03:35.107992 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 23 15:03:35 crc kubenswrapper[5050]: I1123 15:03:35.108247 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="f4de9500-7d8f-4619-8ae3-579d82bd7f1c" 
containerName="nova-metadata-log" containerID="cri-o://e16dd4c5f277e9f18697d190962419a79baf44289a3f0d3db53a7fee7cd11b17" gracePeriod=30 Nov 23 15:03:35 crc kubenswrapper[5050]: I1123 15:03:35.108401 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="f4de9500-7d8f-4619-8ae3-579d82bd7f1c" containerName="nova-metadata-metadata" containerID="cri-o://1ee1ee6664dd9194d64de4206b0ed4e97fb294a0083812d930930b7bf8f4b006" gracePeriod=30 Nov 23 15:03:35 crc kubenswrapper[5050]: I1123 15:03:35.730893 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 23 15:03:35 crc kubenswrapper[5050]: I1123 15:03:35.840226 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e9a154a7-ee98-4732-bbc6-e0324fcf97c5-logs\") pod \"e9a154a7-ee98-4732-bbc6-e0324fcf97c5\" (UID: \"e9a154a7-ee98-4732-bbc6-e0324fcf97c5\") " Nov 23 15:03:35 crc kubenswrapper[5050]: I1123 15:03:35.840315 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e9a154a7-ee98-4732-bbc6-e0324fcf97c5-combined-ca-bundle\") pod \"e9a154a7-ee98-4732-bbc6-e0324fcf97c5\" (UID: \"e9a154a7-ee98-4732-bbc6-e0324fcf97c5\") " Nov 23 15:03:35 crc kubenswrapper[5050]: I1123 15:03:35.840403 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e9a154a7-ee98-4732-bbc6-e0324fcf97c5-public-tls-certs\") pod \"e9a154a7-ee98-4732-bbc6-e0324fcf97c5\" (UID: \"e9a154a7-ee98-4732-bbc6-e0324fcf97c5\") " Nov 23 15:03:35 crc kubenswrapper[5050]: I1123 15:03:35.840430 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e9a154a7-ee98-4732-bbc6-e0324fcf97c5-internal-tls-certs\") pod \"e9a154a7-ee98-4732-bbc6-e0324fcf97c5\" (UID: \"e9a154a7-ee98-4732-bbc6-e0324fcf97c5\") " Nov 23 15:03:35 crc kubenswrapper[5050]: I1123 15:03:35.840647 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e9a154a7-ee98-4732-bbc6-e0324fcf97c5-config-data\") pod \"e9a154a7-ee98-4732-bbc6-e0324fcf97c5\" (UID: \"e9a154a7-ee98-4732-bbc6-e0324fcf97c5\") " Nov 23 15:03:35 crc kubenswrapper[5050]: I1123 15:03:35.840716 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zdzjw\" (UniqueName: \"kubernetes.io/projected/e9a154a7-ee98-4732-bbc6-e0324fcf97c5-kube-api-access-zdzjw\") pod \"e9a154a7-ee98-4732-bbc6-e0324fcf97c5\" (UID: \"e9a154a7-ee98-4732-bbc6-e0324fcf97c5\") " Nov 23 15:03:35 crc kubenswrapper[5050]: I1123 15:03:35.842686 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e9a154a7-ee98-4732-bbc6-e0324fcf97c5-logs" (OuterVolumeSpecName: "logs") pod "e9a154a7-ee98-4732-bbc6-e0324fcf97c5" (UID: "e9a154a7-ee98-4732-bbc6-e0324fcf97c5"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 15:03:35 crc kubenswrapper[5050]: I1123 15:03:35.850154 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e9a154a7-ee98-4732-bbc6-e0324fcf97c5-kube-api-access-zdzjw" (OuterVolumeSpecName: "kube-api-access-zdzjw") pod "e9a154a7-ee98-4732-bbc6-e0324fcf97c5" (UID: "e9a154a7-ee98-4732-bbc6-e0324fcf97c5"). InnerVolumeSpecName "kube-api-access-zdzjw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:03:35 crc kubenswrapper[5050]: I1123 15:03:35.866561 5050 generic.go:334] "Generic (PLEG): container finished" podID="f4de9500-7d8f-4619-8ae3-579d82bd7f1c" containerID="e16dd4c5f277e9f18697d190962419a79baf44289a3f0d3db53a7fee7cd11b17" exitCode=143 Nov 23 15:03:35 crc kubenswrapper[5050]: I1123 15:03:35.866868 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"f4de9500-7d8f-4619-8ae3-579d82bd7f1c","Type":"ContainerDied","Data":"e16dd4c5f277e9f18697d190962419a79baf44289a3f0d3db53a7fee7cd11b17"} Nov 23 15:03:35 crc kubenswrapper[5050]: I1123 15:03:35.870305 5050 generic.go:334] "Generic (PLEG): container finished" podID="e9a154a7-ee98-4732-bbc6-e0324fcf97c5" containerID="0988185576d9ff8866bc58410524df2d8104159e463e976e14db88026c0d28a3" exitCode=0 Nov 23 15:03:35 crc kubenswrapper[5050]: I1123 15:03:35.870402 5050 generic.go:334] "Generic (PLEG): container finished" podID="e9a154a7-ee98-4732-bbc6-e0324fcf97c5" containerID="e21136cc2438205b1f830210e02b1ea8a018ed293faf14a51aba45f916483d10" exitCode=143 Nov 23 15:03:35 crc kubenswrapper[5050]: I1123 15:03:35.870486 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"e9a154a7-ee98-4732-bbc6-e0324fcf97c5","Type":"ContainerDied","Data":"0988185576d9ff8866bc58410524df2d8104159e463e976e14db88026c0d28a3"} Nov 23 15:03:35 crc kubenswrapper[5050]: I1123 15:03:35.870562 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"e9a154a7-ee98-4732-bbc6-e0324fcf97c5","Type":"ContainerDied","Data":"e21136cc2438205b1f830210e02b1ea8a018ed293faf14a51aba45f916483d10"} Nov 23 15:03:35 crc kubenswrapper[5050]: I1123 15:03:35.870651 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"e9a154a7-ee98-4732-bbc6-e0324fcf97c5","Type":"ContainerDied","Data":"c0963a0a51ee16d950521f7cc86b01bf90e0892b3fde7261ba439513c098cb9d"} Nov 23 15:03:35 crc kubenswrapper[5050]: I1123 15:03:35.870724 5050 scope.go:117] "RemoveContainer" containerID="0988185576d9ff8866bc58410524df2d8104159e463e976e14db88026c0d28a3" Nov 23 15:03:35 crc kubenswrapper[5050]: I1123 15:03:35.870911 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 23 15:03:35 crc kubenswrapper[5050]: I1123 15:03:35.880477 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e9a154a7-ee98-4732-bbc6-e0324fcf97c5-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e9a154a7-ee98-4732-bbc6-e0324fcf97c5" (UID: "e9a154a7-ee98-4732-bbc6-e0324fcf97c5"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:03:35 crc kubenswrapper[5050]: I1123 15:03:35.886829 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e9a154a7-ee98-4732-bbc6-e0324fcf97c5-config-data" (OuterVolumeSpecName: "config-data") pod "e9a154a7-ee98-4732-bbc6-e0324fcf97c5" (UID: "e9a154a7-ee98-4732-bbc6-e0324fcf97c5"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:03:35 crc kubenswrapper[5050]: I1123 15:03:35.917658 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e9a154a7-ee98-4732-bbc6-e0324fcf97c5-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "e9a154a7-ee98-4732-bbc6-e0324fcf97c5" (UID: "e9a154a7-ee98-4732-bbc6-e0324fcf97c5"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:03:35 crc kubenswrapper[5050]: I1123 15:03:35.917929 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e9a154a7-ee98-4732-bbc6-e0324fcf97c5-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "e9a154a7-ee98-4732-bbc6-e0324fcf97c5" (UID: "e9a154a7-ee98-4732-bbc6-e0324fcf97c5"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:03:35 crc kubenswrapper[5050]: I1123 15:03:35.918251 5050 scope.go:117] "RemoveContainer" containerID="e21136cc2438205b1f830210e02b1ea8a018ed293faf14a51aba45f916483d10" Nov 23 15:03:35 crc kubenswrapper[5050]: I1123 15:03:35.943670 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zdzjw\" (UniqueName: \"kubernetes.io/projected/e9a154a7-ee98-4732-bbc6-e0324fcf97c5-kube-api-access-zdzjw\") on node \"crc\" DevicePath \"\"" Nov 23 15:03:35 crc kubenswrapper[5050]: I1123 15:03:35.943722 5050 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e9a154a7-ee98-4732-bbc6-e0324fcf97c5-logs\") on node \"crc\" DevicePath \"\"" Nov 23 15:03:35 crc kubenswrapper[5050]: I1123 15:03:35.943733 5050 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e9a154a7-ee98-4732-bbc6-e0324fcf97c5-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 15:03:35 crc kubenswrapper[5050]: I1123 15:03:35.943742 5050 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e9a154a7-ee98-4732-bbc6-e0324fcf97c5-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 23 15:03:35 crc kubenswrapper[5050]: I1123 15:03:35.943750 5050 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e9a154a7-ee98-4732-bbc6-e0324fcf97c5-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 23 15:03:35 crc kubenswrapper[5050]: I1123 15:03:35.943773 5050 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e9a154a7-ee98-4732-bbc6-e0324fcf97c5-config-data\") on node \"crc\" DevicePath \"\"" Nov 23 15:03:35 crc kubenswrapper[5050]: I1123 15:03:35.945720 5050 scope.go:117] "RemoveContainer" containerID="0988185576d9ff8866bc58410524df2d8104159e463e976e14db88026c0d28a3" Nov 23 15:03:35 crc kubenswrapper[5050]: E1123 15:03:35.946308 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"0988185576d9ff8866bc58410524df2d8104159e463e976e14db88026c0d28a3\": container with ID starting with 0988185576d9ff8866bc58410524df2d8104159e463e976e14db88026c0d28a3 not found: ID does not exist" containerID="0988185576d9ff8866bc58410524df2d8104159e463e976e14db88026c0d28a3" Nov 23 15:03:35 crc kubenswrapper[5050]: I1123 15:03:35.946346 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0988185576d9ff8866bc58410524df2d8104159e463e976e14db88026c0d28a3"} err="failed to get container status \"0988185576d9ff8866bc58410524df2d8104159e463e976e14db88026c0d28a3\": rpc error: code = NotFound desc = could not find container \"0988185576d9ff8866bc58410524df2d8104159e463e976e14db88026c0d28a3\": container with ID starting with 0988185576d9ff8866bc58410524df2d8104159e463e976e14db88026c0d28a3 not found: ID does not exist" Nov 23 15:03:35 crc kubenswrapper[5050]: I1123 15:03:35.946388 5050 scope.go:117] "RemoveContainer" containerID="e21136cc2438205b1f830210e02b1ea8a018ed293faf14a51aba45f916483d10" Nov 23 15:03:35 crc kubenswrapper[5050]: E1123 15:03:35.946894 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e21136cc2438205b1f830210e02b1ea8a018ed293faf14a51aba45f916483d10\": container with ID starting with e21136cc2438205b1f830210e02b1ea8a018ed293faf14a51aba45f916483d10 not found: ID does not exist" containerID="e21136cc2438205b1f830210e02b1ea8a018ed293faf14a51aba45f916483d10" Nov 23 15:03:35 crc kubenswrapper[5050]: I1123 15:03:35.946936 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e21136cc2438205b1f830210e02b1ea8a018ed293faf14a51aba45f916483d10"} err="failed to get container status \"e21136cc2438205b1f830210e02b1ea8a018ed293faf14a51aba45f916483d10\": rpc error: code = NotFound desc = could not find container \"e21136cc2438205b1f830210e02b1ea8a018ed293faf14a51aba45f916483d10\": container with ID starting with e21136cc2438205b1f830210e02b1ea8a018ed293faf14a51aba45f916483d10 not found: ID does not exist" Nov 23 15:03:35 crc kubenswrapper[5050]: I1123 15:03:35.946958 5050 scope.go:117] "RemoveContainer" containerID="0988185576d9ff8866bc58410524df2d8104159e463e976e14db88026c0d28a3" Nov 23 15:03:35 crc kubenswrapper[5050]: I1123 15:03:35.947302 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0988185576d9ff8866bc58410524df2d8104159e463e976e14db88026c0d28a3"} err="failed to get container status \"0988185576d9ff8866bc58410524df2d8104159e463e976e14db88026c0d28a3\": rpc error: code = NotFound desc = could not find container \"0988185576d9ff8866bc58410524df2d8104159e463e976e14db88026c0d28a3\": container with ID starting with 0988185576d9ff8866bc58410524df2d8104159e463e976e14db88026c0d28a3 not found: ID does not exist" Nov 23 15:03:35 crc kubenswrapper[5050]: I1123 15:03:35.947348 5050 scope.go:117] "RemoveContainer" containerID="e21136cc2438205b1f830210e02b1ea8a018ed293faf14a51aba45f916483d10" Nov 23 15:03:35 crc kubenswrapper[5050]: I1123 15:03:35.947813 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e21136cc2438205b1f830210e02b1ea8a018ed293faf14a51aba45f916483d10"} err="failed to get container status \"e21136cc2438205b1f830210e02b1ea8a018ed293faf14a51aba45f916483d10\": rpc error: code = NotFound desc = could not find container \"e21136cc2438205b1f830210e02b1ea8a018ed293faf14a51aba45f916483d10\": container with ID starting with 
e21136cc2438205b1f830210e02b1ea8a018ed293faf14a51aba45f916483d10 not found: ID does not exist" Nov 23 15:03:36 crc kubenswrapper[5050]: I1123 15:03:36.208413 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 23 15:03:36 crc kubenswrapper[5050]: I1123 15:03:36.218004 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 23 15:03:36 crc kubenswrapper[5050]: I1123 15:03:36.247538 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 23 15:03:36 crc kubenswrapper[5050]: E1123 15:03:36.248016 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cff4dbdd-ad13-4f74-beb1-9d9658c4b25e" containerName="nova-manage" Nov 23 15:03:36 crc kubenswrapper[5050]: I1123 15:03:36.248033 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="cff4dbdd-ad13-4f74-beb1-9d9658c4b25e" containerName="nova-manage" Nov 23 15:03:36 crc kubenswrapper[5050]: E1123 15:03:36.248054 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e9a154a7-ee98-4732-bbc6-e0324fcf97c5" containerName="nova-api-api" Nov 23 15:03:36 crc kubenswrapper[5050]: I1123 15:03:36.248064 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="e9a154a7-ee98-4732-bbc6-e0324fcf97c5" containerName="nova-api-api" Nov 23 15:03:36 crc kubenswrapper[5050]: E1123 15:03:36.248078 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e9a154a7-ee98-4732-bbc6-e0324fcf97c5" containerName="nova-api-log" Nov 23 15:03:36 crc kubenswrapper[5050]: I1123 15:03:36.248084 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="e9a154a7-ee98-4732-bbc6-e0324fcf97c5" containerName="nova-api-log" Nov 23 15:03:36 crc kubenswrapper[5050]: E1123 15:03:36.248096 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e45241bb-3031-428d-aa6b-370eb2da07b0" containerName="dnsmasq-dns" Nov 23 15:03:36 crc kubenswrapper[5050]: I1123 15:03:36.248102 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="e45241bb-3031-428d-aa6b-370eb2da07b0" containerName="dnsmasq-dns" Nov 23 15:03:36 crc kubenswrapper[5050]: E1123 15:03:36.248144 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e45241bb-3031-428d-aa6b-370eb2da07b0" containerName="init" Nov 23 15:03:36 crc kubenswrapper[5050]: I1123 15:03:36.248149 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="e45241bb-3031-428d-aa6b-370eb2da07b0" containerName="init" Nov 23 15:03:36 crc kubenswrapper[5050]: I1123 15:03:36.248325 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="e9a154a7-ee98-4732-bbc6-e0324fcf97c5" containerName="nova-api-log" Nov 23 15:03:36 crc kubenswrapper[5050]: I1123 15:03:36.248338 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="e9a154a7-ee98-4732-bbc6-e0324fcf97c5" containerName="nova-api-api" Nov 23 15:03:36 crc kubenswrapper[5050]: I1123 15:03:36.248352 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="cff4dbdd-ad13-4f74-beb1-9d9658c4b25e" containerName="nova-manage" Nov 23 15:03:36 crc kubenswrapper[5050]: I1123 15:03:36.248364 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="e45241bb-3031-428d-aa6b-370eb2da07b0" containerName="dnsmasq-dns" Nov 23 15:03:36 crc kubenswrapper[5050]: I1123 15:03:36.249416 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 23 15:03:36 crc kubenswrapper[5050]: I1123 15:03:36.254993 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Nov 23 15:03:36 crc kubenswrapper[5050]: I1123 15:03:36.255229 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 23 15:03:36 crc kubenswrapper[5050]: I1123 15:03:36.255230 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Nov 23 15:03:36 crc kubenswrapper[5050]: I1123 15:03:36.260296 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 23 15:03:36 crc kubenswrapper[5050]: I1123 15:03:36.352976 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6c3ae457-67ab-4c0b-a17e-a8264215793b-logs\") pod \"nova-api-0\" (UID: \"6c3ae457-67ab-4c0b-a17e-a8264215793b\") " pod="openstack/nova-api-0" Nov 23 15:03:36 crc kubenswrapper[5050]: I1123 15:03:36.353018 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6c3ae457-67ab-4c0b-a17e-a8264215793b-internal-tls-certs\") pod \"nova-api-0\" (UID: \"6c3ae457-67ab-4c0b-a17e-a8264215793b\") " pod="openstack/nova-api-0" Nov 23 15:03:36 crc kubenswrapper[5050]: I1123 15:03:36.353064 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/6c3ae457-67ab-4c0b-a17e-a8264215793b-public-tls-certs\") pod \"nova-api-0\" (UID: \"6c3ae457-67ab-4c0b-a17e-a8264215793b\") " pod="openstack/nova-api-0" Nov 23 15:03:36 crc kubenswrapper[5050]: I1123 15:03:36.353120 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c3ae457-67ab-4c0b-a17e-a8264215793b-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"6c3ae457-67ab-4c0b-a17e-a8264215793b\") " pod="openstack/nova-api-0" Nov 23 15:03:36 crc kubenswrapper[5050]: I1123 15:03:36.353322 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c3ae457-67ab-4c0b-a17e-a8264215793b-config-data\") pod \"nova-api-0\" (UID: \"6c3ae457-67ab-4c0b-a17e-a8264215793b\") " pod="openstack/nova-api-0" Nov 23 15:03:36 crc kubenswrapper[5050]: I1123 15:03:36.353390 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4q8sq\" (UniqueName: \"kubernetes.io/projected/6c3ae457-67ab-4c0b-a17e-a8264215793b-kube-api-access-4q8sq\") pod \"nova-api-0\" (UID: \"6c3ae457-67ab-4c0b-a17e-a8264215793b\") " pod="openstack/nova-api-0" Nov 23 15:03:36 crc kubenswrapper[5050]: I1123 15:03:36.455572 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/6c3ae457-67ab-4c0b-a17e-a8264215793b-public-tls-certs\") pod \"nova-api-0\" (UID: \"6c3ae457-67ab-4c0b-a17e-a8264215793b\") " pod="openstack/nova-api-0" Nov 23 15:03:36 crc kubenswrapper[5050]: I1123 15:03:36.455644 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c3ae457-67ab-4c0b-a17e-a8264215793b-combined-ca-bundle\") pod 
\"nova-api-0\" (UID: \"6c3ae457-67ab-4c0b-a17e-a8264215793b\") " pod="openstack/nova-api-0" Nov 23 15:03:36 crc kubenswrapper[5050]: I1123 15:03:36.455699 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c3ae457-67ab-4c0b-a17e-a8264215793b-config-data\") pod \"nova-api-0\" (UID: \"6c3ae457-67ab-4c0b-a17e-a8264215793b\") " pod="openstack/nova-api-0" Nov 23 15:03:36 crc kubenswrapper[5050]: I1123 15:03:36.455725 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4q8sq\" (UniqueName: \"kubernetes.io/projected/6c3ae457-67ab-4c0b-a17e-a8264215793b-kube-api-access-4q8sq\") pod \"nova-api-0\" (UID: \"6c3ae457-67ab-4c0b-a17e-a8264215793b\") " pod="openstack/nova-api-0" Nov 23 15:03:36 crc kubenswrapper[5050]: I1123 15:03:36.455829 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6c3ae457-67ab-4c0b-a17e-a8264215793b-logs\") pod \"nova-api-0\" (UID: \"6c3ae457-67ab-4c0b-a17e-a8264215793b\") " pod="openstack/nova-api-0" Nov 23 15:03:36 crc kubenswrapper[5050]: I1123 15:03:36.455851 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6c3ae457-67ab-4c0b-a17e-a8264215793b-internal-tls-certs\") pod \"nova-api-0\" (UID: \"6c3ae457-67ab-4c0b-a17e-a8264215793b\") " pod="openstack/nova-api-0" Nov 23 15:03:36 crc kubenswrapper[5050]: I1123 15:03:36.456538 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6c3ae457-67ab-4c0b-a17e-a8264215793b-logs\") pod \"nova-api-0\" (UID: \"6c3ae457-67ab-4c0b-a17e-a8264215793b\") " pod="openstack/nova-api-0" Nov 23 15:03:36 crc kubenswrapper[5050]: I1123 15:03:36.461296 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/6c3ae457-67ab-4c0b-a17e-a8264215793b-public-tls-certs\") pod \"nova-api-0\" (UID: \"6c3ae457-67ab-4c0b-a17e-a8264215793b\") " pod="openstack/nova-api-0" Nov 23 15:03:36 crc kubenswrapper[5050]: I1123 15:03:36.461643 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c3ae457-67ab-4c0b-a17e-a8264215793b-config-data\") pod \"nova-api-0\" (UID: \"6c3ae457-67ab-4c0b-a17e-a8264215793b\") " pod="openstack/nova-api-0" Nov 23 15:03:36 crc kubenswrapper[5050]: I1123 15:03:36.462857 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c3ae457-67ab-4c0b-a17e-a8264215793b-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"6c3ae457-67ab-4c0b-a17e-a8264215793b\") " pod="openstack/nova-api-0" Nov 23 15:03:36 crc kubenswrapper[5050]: I1123 15:03:36.465752 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6c3ae457-67ab-4c0b-a17e-a8264215793b-internal-tls-certs\") pod \"nova-api-0\" (UID: \"6c3ae457-67ab-4c0b-a17e-a8264215793b\") " pod="openstack/nova-api-0" Nov 23 15:03:36 crc kubenswrapper[5050]: I1123 15:03:36.488605 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4q8sq\" (UniqueName: \"kubernetes.io/projected/6c3ae457-67ab-4c0b-a17e-a8264215793b-kube-api-access-4q8sq\") pod \"nova-api-0\" (UID: \"6c3ae457-67ab-4c0b-a17e-a8264215793b\") " 
pod="openstack/nova-api-0" Nov 23 15:03:36 crc kubenswrapper[5050]: I1123 15:03:36.610698 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 23 15:03:36 crc kubenswrapper[5050]: I1123 15:03:36.856583 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 23 15:03:36 crc kubenswrapper[5050]: I1123 15:03:36.910114 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ebc90782-9288-409b-97b6-06eedca6af69-combined-ca-bundle\") pod \"ebc90782-9288-409b-97b6-06eedca6af69\" (UID: \"ebc90782-9288-409b-97b6-06eedca6af69\") " Nov 23 15:03:36 crc kubenswrapper[5050]: I1123 15:03:36.918634 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mw777\" (UniqueName: \"kubernetes.io/projected/ebc90782-9288-409b-97b6-06eedca6af69-kube-api-access-mw777\") pod \"ebc90782-9288-409b-97b6-06eedca6af69\" (UID: \"ebc90782-9288-409b-97b6-06eedca6af69\") " Nov 23 15:03:36 crc kubenswrapper[5050]: I1123 15:03:36.918707 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ebc90782-9288-409b-97b6-06eedca6af69-config-data\") pod \"ebc90782-9288-409b-97b6-06eedca6af69\" (UID: \"ebc90782-9288-409b-97b6-06eedca6af69\") " Nov 23 15:03:36 crc kubenswrapper[5050]: I1123 15:03:36.927530 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ebc90782-9288-409b-97b6-06eedca6af69-kube-api-access-mw777" (OuterVolumeSpecName: "kube-api-access-mw777") pod "ebc90782-9288-409b-97b6-06eedca6af69" (UID: "ebc90782-9288-409b-97b6-06eedca6af69"). InnerVolumeSpecName "kube-api-access-mw777". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:03:36 crc kubenswrapper[5050]: I1123 15:03:36.960212 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ebc90782-9288-409b-97b6-06eedca6af69-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ebc90782-9288-409b-97b6-06eedca6af69" (UID: "ebc90782-9288-409b-97b6-06eedca6af69"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:03:36 crc kubenswrapper[5050]: I1123 15:03:36.965336 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ebc90782-9288-409b-97b6-06eedca6af69-config-data" (OuterVolumeSpecName: "config-data") pod "ebc90782-9288-409b-97b6-06eedca6af69" (UID: "ebc90782-9288-409b-97b6-06eedca6af69"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:03:36 crc kubenswrapper[5050]: I1123 15:03:36.965949 5050 generic.go:334] "Generic (PLEG): container finished" podID="ebc90782-9288-409b-97b6-06eedca6af69" containerID="28797149a593494315d5778047a603218f729df770a8fe6758c9321d3bf2ef2e" exitCode=0 Nov 23 15:03:36 crc kubenswrapper[5050]: I1123 15:03:36.966026 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"ebc90782-9288-409b-97b6-06eedca6af69","Type":"ContainerDied","Data":"28797149a593494315d5778047a603218f729df770a8fe6758c9321d3bf2ef2e"} Nov 23 15:03:36 crc kubenswrapper[5050]: I1123 15:03:36.966058 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"ebc90782-9288-409b-97b6-06eedca6af69","Type":"ContainerDied","Data":"866a94a802d2c4398c5c3f315e312e6ca12312e71616f1ea2bdd915ebec060bd"} Nov 23 15:03:36 crc kubenswrapper[5050]: I1123 15:03:36.966078 5050 scope.go:117] "RemoveContainer" containerID="28797149a593494315d5778047a603218f729df770a8fe6758c9321d3bf2ef2e" Nov 23 15:03:36 crc kubenswrapper[5050]: I1123 15:03:36.966236 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 23 15:03:37 crc kubenswrapper[5050]: I1123 15:03:37.006687 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 23 15:03:37 crc kubenswrapper[5050]: I1123 15:03:37.021894 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Nov 23 15:03:37 crc kubenswrapper[5050]: I1123 15:03:37.023878 5050 scope.go:117] "RemoveContainer" containerID="28797149a593494315d5778047a603218f729df770a8fe6758c9321d3bf2ef2e" Nov 23 15:03:37 crc kubenswrapper[5050]: E1123 15:03:37.024573 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"28797149a593494315d5778047a603218f729df770a8fe6758c9321d3bf2ef2e\": container with ID starting with 28797149a593494315d5778047a603218f729df770a8fe6758c9321d3bf2ef2e not found: ID does not exist" containerID="28797149a593494315d5778047a603218f729df770a8fe6758c9321d3bf2ef2e" Nov 23 15:03:37 crc kubenswrapper[5050]: I1123 15:03:37.024605 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"28797149a593494315d5778047a603218f729df770a8fe6758c9321d3bf2ef2e"} err="failed to get container status \"28797149a593494315d5778047a603218f729df770a8fe6758c9321d3bf2ef2e\": rpc error: code = NotFound desc = could not find container \"28797149a593494315d5778047a603218f729df770a8fe6758c9321d3bf2ef2e\": container with ID starting with 28797149a593494315d5778047a603218f729df770a8fe6758c9321d3bf2ef2e not found: ID does not exist" Nov 23 15:03:37 crc kubenswrapper[5050]: I1123 15:03:37.028091 5050 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ebc90782-9288-409b-97b6-06eedca6af69-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 15:03:37 crc kubenswrapper[5050]: I1123 15:03:37.028178 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mw777\" (UniqueName: \"kubernetes.io/projected/ebc90782-9288-409b-97b6-06eedca6af69-kube-api-access-mw777\") on node \"crc\" DevicePath \"\"" Nov 23 15:03:37 crc kubenswrapper[5050]: I1123 15:03:37.028196 5050 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/ebc90782-9288-409b-97b6-06eedca6af69-config-data\") on node \"crc\" DevicePath \"\"" Nov 23 15:03:37 crc kubenswrapper[5050]: I1123 15:03:37.040744 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 23 15:03:37 crc kubenswrapper[5050]: E1123 15:03:37.041308 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ebc90782-9288-409b-97b6-06eedca6af69" containerName="nova-scheduler-scheduler" Nov 23 15:03:37 crc kubenswrapper[5050]: I1123 15:03:37.041322 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="ebc90782-9288-409b-97b6-06eedca6af69" containerName="nova-scheduler-scheduler" Nov 23 15:03:37 crc kubenswrapper[5050]: I1123 15:03:37.041812 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="ebc90782-9288-409b-97b6-06eedca6af69" containerName="nova-scheduler-scheduler" Nov 23 15:03:37 crc kubenswrapper[5050]: I1123 15:03:37.042625 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 23 15:03:37 crc kubenswrapper[5050]: I1123 15:03:37.050141 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 23 15:03:37 crc kubenswrapper[5050]: I1123 15:03:37.054015 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 23 15:03:37 crc kubenswrapper[5050]: I1123 15:03:37.106645 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 23 15:03:37 crc kubenswrapper[5050]: W1123 15:03:37.108978 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6c3ae457_67ab_4c0b_a17e_a8264215793b.slice/crio-b9fab5ec2e83c0b4f3126e5998e522d339d6d799423ac29756bb22b80703041c WatchSource:0}: Error finding container b9fab5ec2e83c0b4f3126e5998e522d339d6d799423ac29756bb22b80703041c: Status 404 returned error can't find the container with id b9fab5ec2e83c0b4f3126e5998e522d339d6d799423ac29756bb22b80703041c Nov 23 15:03:37 crc kubenswrapper[5050]: I1123 15:03:37.130363 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/634f01f5-daae-4e5d-811f-5d406bfee9c1-config-data\") pod \"nova-scheduler-0\" (UID: \"634f01f5-daae-4e5d-811f-5d406bfee9c1\") " pod="openstack/nova-scheduler-0" Nov 23 15:03:37 crc kubenswrapper[5050]: I1123 15:03:37.130476 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/634f01f5-daae-4e5d-811f-5d406bfee9c1-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"634f01f5-daae-4e5d-811f-5d406bfee9c1\") " pod="openstack/nova-scheduler-0" Nov 23 15:03:37 crc kubenswrapper[5050]: I1123 15:03:37.130522 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-242qc\" (UniqueName: \"kubernetes.io/projected/634f01f5-daae-4e5d-811f-5d406bfee9c1-kube-api-access-242qc\") pod \"nova-scheduler-0\" (UID: \"634f01f5-daae-4e5d-811f-5d406bfee9c1\") " pod="openstack/nova-scheduler-0" Nov 23 15:03:37 crc kubenswrapper[5050]: I1123 15:03:37.232397 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/634f01f5-daae-4e5d-811f-5d406bfee9c1-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: 
\"634f01f5-daae-4e5d-811f-5d406bfee9c1\") " pod="openstack/nova-scheduler-0" Nov 23 15:03:37 crc kubenswrapper[5050]: I1123 15:03:37.232533 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-242qc\" (UniqueName: \"kubernetes.io/projected/634f01f5-daae-4e5d-811f-5d406bfee9c1-kube-api-access-242qc\") pod \"nova-scheduler-0\" (UID: \"634f01f5-daae-4e5d-811f-5d406bfee9c1\") " pod="openstack/nova-scheduler-0" Nov 23 15:03:37 crc kubenswrapper[5050]: I1123 15:03:37.232693 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/634f01f5-daae-4e5d-811f-5d406bfee9c1-config-data\") pod \"nova-scheduler-0\" (UID: \"634f01f5-daae-4e5d-811f-5d406bfee9c1\") " pod="openstack/nova-scheduler-0" Nov 23 15:03:37 crc kubenswrapper[5050]: I1123 15:03:37.239563 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/634f01f5-daae-4e5d-811f-5d406bfee9c1-config-data\") pod \"nova-scheduler-0\" (UID: \"634f01f5-daae-4e5d-811f-5d406bfee9c1\") " pod="openstack/nova-scheduler-0" Nov 23 15:03:37 crc kubenswrapper[5050]: I1123 15:03:37.242645 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/634f01f5-daae-4e5d-811f-5d406bfee9c1-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"634f01f5-daae-4e5d-811f-5d406bfee9c1\") " pod="openstack/nova-scheduler-0" Nov 23 15:03:37 crc kubenswrapper[5050]: I1123 15:03:37.251053 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-242qc\" (UniqueName: \"kubernetes.io/projected/634f01f5-daae-4e5d-811f-5d406bfee9c1-kube-api-access-242qc\") pod \"nova-scheduler-0\" (UID: \"634f01f5-daae-4e5d-811f-5d406bfee9c1\") " pod="openstack/nova-scheduler-0" Nov 23 15:03:37 crc kubenswrapper[5050]: I1123 15:03:37.361904 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 23 15:03:37 crc kubenswrapper[5050]: I1123 15:03:37.567766 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e9a154a7-ee98-4732-bbc6-e0324fcf97c5" path="/var/lib/kubelet/pods/e9a154a7-ee98-4732-bbc6-e0324fcf97c5/volumes" Nov 23 15:03:37 crc kubenswrapper[5050]: I1123 15:03:37.570051 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ebc90782-9288-409b-97b6-06eedca6af69" path="/var/lib/kubelet/pods/ebc90782-9288-409b-97b6-06eedca6af69/volumes" Nov 23 15:03:37 crc kubenswrapper[5050]: W1123 15:03:37.902093 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod634f01f5_daae_4e5d_811f_5d406bfee9c1.slice/crio-f3294b8f7f62c5e00c386225e88f7200776a716d7da4fb46d5facdef8781a296 WatchSource:0}: Error finding container f3294b8f7f62c5e00c386225e88f7200776a716d7da4fb46d5facdef8781a296: Status 404 returned error can't find the container with id f3294b8f7f62c5e00c386225e88f7200776a716d7da4fb46d5facdef8781a296 Nov 23 15:03:37 crc kubenswrapper[5050]: I1123 15:03:37.903733 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 23 15:03:37 crc kubenswrapper[5050]: I1123 15:03:37.995982 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"6c3ae457-67ab-4c0b-a17e-a8264215793b","Type":"ContainerStarted","Data":"11eb99c3ddd7ac1add778be8c07cea9de7ac571edec1bae0cd0e936ceb624706"} Nov 23 15:03:37 crc kubenswrapper[5050]: I1123 15:03:37.996508 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"6c3ae457-67ab-4c0b-a17e-a8264215793b","Type":"ContainerStarted","Data":"b0a2c2bac24d49f1139383209407c61098dfcccf1627c8ba79ec2b65ec146335"} Nov 23 15:03:37 crc kubenswrapper[5050]: I1123 15:03:37.996526 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"6c3ae457-67ab-4c0b-a17e-a8264215793b","Type":"ContainerStarted","Data":"b9fab5ec2e83c0b4f3126e5998e522d339d6d799423ac29756bb22b80703041c"} Nov 23 15:03:38 crc kubenswrapper[5050]: I1123 15:03:38.006091 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"634f01f5-daae-4e5d-811f-5d406bfee9c1","Type":"ContainerStarted","Data":"f3294b8f7f62c5e00c386225e88f7200776a716d7da4fb46d5facdef8781a296"} Nov 23 15:03:38 crc kubenswrapper[5050]: I1123 15:03:38.031017 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.030978849 podStartE2EDuration="2.030978849s" podCreationTimestamp="2025-11-23 15:03:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 15:03:38.026224794 +0000 UTC m=+1313.193221299" watchObservedRunningTime="2025-11-23 15:03:38.030978849 +0000 UTC m=+1313.197975374" Nov 23 15:03:38 crc kubenswrapper[5050]: I1123 15:03:38.772118 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 23 15:03:38 crc kubenswrapper[5050]: I1123 15:03:38.873732 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7jhsd\" (UniqueName: \"kubernetes.io/projected/f4de9500-7d8f-4619-8ae3-579d82bd7f1c-kube-api-access-7jhsd\") pod \"f4de9500-7d8f-4619-8ae3-579d82bd7f1c\" (UID: \"f4de9500-7d8f-4619-8ae3-579d82bd7f1c\") " Nov 23 15:03:38 crc kubenswrapper[5050]: I1123 15:03:38.874255 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f4de9500-7d8f-4619-8ae3-579d82bd7f1c-logs\") pod \"f4de9500-7d8f-4619-8ae3-579d82bd7f1c\" (UID: \"f4de9500-7d8f-4619-8ae3-579d82bd7f1c\") " Nov 23 15:03:38 crc kubenswrapper[5050]: I1123 15:03:38.874294 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f4de9500-7d8f-4619-8ae3-579d82bd7f1c-config-data\") pod \"f4de9500-7d8f-4619-8ae3-579d82bd7f1c\" (UID: \"f4de9500-7d8f-4619-8ae3-579d82bd7f1c\") " Nov 23 15:03:38 crc kubenswrapper[5050]: I1123 15:03:38.874334 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f4de9500-7d8f-4619-8ae3-579d82bd7f1c-combined-ca-bundle\") pod \"f4de9500-7d8f-4619-8ae3-579d82bd7f1c\" (UID: \"f4de9500-7d8f-4619-8ae3-579d82bd7f1c\") " Nov 23 15:03:38 crc kubenswrapper[5050]: I1123 15:03:38.874394 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/f4de9500-7d8f-4619-8ae3-579d82bd7f1c-nova-metadata-tls-certs\") pod \"f4de9500-7d8f-4619-8ae3-579d82bd7f1c\" (UID: \"f4de9500-7d8f-4619-8ae3-579d82bd7f1c\") " Nov 23 15:03:38 crc kubenswrapper[5050]: I1123 15:03:38.876530 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f4de9500-7d8f-4619-8ae3-579d82bd7f1c-logs" (OuterVolumeSpecName: "logs") pod "f4de9500-7d8f-4619-8ae3-579d82bd7f1c" (UID: "f4de9500-7d8f-4619-8ae3-579d82bd7f1c"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 15:03:38 crc kubenswrapper[5050]: I1123 15:03:38.886980 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f4de9500-7d8f-4619-8ae3-579d82bd7f1c-kube-api-access-7jhsd" (OuterVolumeSpecName: "kube-api-access-7jhsd") pod "f4de9500-7d8f-4619-8ae3-579d82bd7f1c" (UID: "f4de9500-7d8f-4619-8ae3-579d82bd7f1c"). InnerVolumeSpecName "kube-api-access-7jhsd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:03:38 crc kubenswrapper[5050]: I1123 15:03:38.913411 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f4de9500-7d8f-4619-8ae3-579d82bd7f1c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f4de9500-7d8f-4619-8ae3-579d82bd7f1c" (UID: "f4de9500-7d8f-4619-8ae3-579d82bd7f1c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:03:38 crc kubenswrapper[5050]: I1123 15:03:38.923241 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f4de9500-7d8f-4619-8ae3-579d82bd7f1c-config-data" (OuterVolumeSpecName: "config-data") pod "f4de9500-7d8f-4619-8ae3-579d82bd7f1c" (UID: "f4de9500-7d8f-4619-8ae3-579d82bd7f1c"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:03:38 crc kubenswrapper[5050]: I1123 15:03:38.938007 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f4de9500-7d8f-4619-8ae3-579d82bd7f1c-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "f4de9500-7d8f-4619-8ae3-579d82bd7f1c" (UID: "f4de9500-7d8f-4619-8ae3-579d82bd7f1c"). InnerVolumeSpecName "nova-metadata-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:03:38 crc kubenswrapper[5050]: I1123 15:03:38.978769 5050 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f4de9500-7d8f-4619-8ae3-579d82bd7f1c-logs\") on node \"crc\" DevicePath \"\"" Nov 23 15:03:38 crc kubenswrapper[5050]: I1123 15:03:38.979180 5050 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f4de9500-7d8f-4619-8ae3-579d82bd7f1c-config-data\") on node \"crc\" DevicePath \"\"" Nov 23 15:03:38 crc kubenswrapper[5050]: I1123 15:03:38.979272 5050 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f4de9500-7d8f-4619-8ae3-579d82bd7f1c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 15:03:38 crc kubenswrapper[5050]: I1123 15:03:38.979355 5050 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/f4de9500-7d8f-4619-8ae3-579d82bd7f1c-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 23 15:03:38 crc kubenswrapper[5050]: I1123 15:03:38.979458 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7jhsd\" (UniqueName: \"kubernetes.io/projected/f4de9500-7d8f-4619-8ae3-579d82bd7f1c-kube-api-access-7jhsd\") on node \"crc\" DevicePath \"\"" Nov 23 15:03:39 crc kubenswrapper[5050]: I1123 15:03:39.024305 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"634f01f5-daae-4e5d-811f-5d406bfee9c1","Type":"ContainerStarted","Data":"2649b8f823f897f77f83477412dd9f8246b7b8e23f72d10846e06d96f7b4bff1"} Nov 23 15:03:39 crc kubenswrapper[5050]: I1123 15:03:39.027984 5050 generic.go:334] "Generic (PLEG): container finished" podID="f4de9500-7d8f-4619-8ae3-579d82bd7f1c" containerID="1ee1ee6664dd9194d64de4206b0ed4e97fb294a0083812d930930b7bf8f4b006" exitCode=0 Nov 23 15:03:39 crc kubenswrapper[5050]: I1123 15:03:39.028063 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"f4de9500-7d8f-4619-8ae3-579d82bd7f1c","Type":"ContainerDied","Data":"1ee1ee6664dd9194d64de4206b0ed4e97fb294a0083812d930930b7bf8f4b006"} Nov 23 15:03:39 crc kubenswrapper[5050]: I1123 15:03:39.028347 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"f4de9500-7d8f-4619-8ae3-579d82bd7f1c","Type":"ContainerDied","Data":"036fdf0e0ab5937434f51543d7c9fa914cdfe5d2773c436a85129647783dbd75"} Nov 23 15:03:39 crc kubenswrapper[5050]: I1123 15:03:39.028376 5050 scope.go:117] "RemoveContainer" containerID="1ee1ee6664dd9194d64de4206b0ed4e97fb294a0083812d930930b7bf8f4b006" Nov 23 15:03:39 crc kubenswrapper[5050]: I1123 15:03:39.028074 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 23 15:03:39 crc kubenswrapper[5050]: I1123 15:03:39.054387 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.054363934 podStartE2EDuration="2.054363934s" podCreationTimestamp="2025-11-23 15:03:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 15:03:39.048189258 +0000 UTC m=+1314.215185743" watchObservedRunningTime="2025-11-23 15:03:39.054363934 +0000 UTC m=+1314.221360419" Nov 23 15:03:39 crc kubenswrapper[5050]: I1123 15:03:39.054937 5050 scope.go:117] "RemoveContainer" containerID="e16dd4c5f277e9f18697d190962419a79baf44289a3f0d3db53a7fee7cd11b17" Nov 23 15:03:39 crc kubenswrapper[5050]: I1123 15:03:39.096514 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 23 15:03:39 crc kubenswrapper[5050]: I1123 15:03:39.105519 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 23 15:03:39 crc kubenswrapper[5050]: I1123 15:03:39.112505 5050 scope.go:117] "RemoveContainer" containerID="1ee1ee6664dd9194d64de4206b0ed4e97fb294a0083812d930930b7bf8f4b006" Nov 23 15:03:39 crc kubenswrapper[5050]: E1123 15:03:39.113090 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1ee1ee6664dd9194d64de4206b0ed4e97fb294a0083812d930930b7bf8f4b006\": container with ID starting with 1ee1ee6664dd9194d64de4206b0ed4e97fb294a0083812d930930b7bf8f4b006 not found: ID does not exist" containerID="1ee1ee6664dd9194d64de4206b0ed4e97fb294a0083812d930930b7bf8f4b006" Nov 23 15:03:39 crc kubenswrapper[5050]: I1123 15:03:39.113126 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1ee1ee6664dd9194d64de4206b0ed4e97fb294a0083812d930930b7bf8f4b006"} err="failed to get container status \"1ee1ee6664dd9194d64de4206b0ed4e97fb294a0083812d930930b7bf8f4b006\": rpc error: code = NotFound desc = could not find container \"1ee1ee6664dd9194d64de4206b0ed4e97fb294a0083812d930930b7bf8f4b006\": container with ID starting with 1ee1ee6664dd9194d64de4206b0ed4e97fb294a0083812d930930b7bf8f4b006 not found: ID does not exist" Nov 23 15:03:39 crc kubenswrapper[5050]: I1123 15:03:39.113165 5050 scope.go:117] "RemoveContainer" containerID="e16dd4c5f277e9f18697d190962419a79baf44289a3f0d3db53a7fee7cd11b17" Nov 23 15:03:39 crc kubenswrapper[5050]: E1123 15:03:39.113532 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e16dd4c5f277e9f18697d190962419a79baf44289a3f0d3db53a7fee7cd11b17\": container with ID starting with e16dd4c5f277e9f18697d190962419a79baf44289a3f0d3db53a7fee7cd11b17 not found: ID does not exist" containerID="e16dd4c5f277e9f18697d190962419a79baf44289a3f0d3db53a7fee7cd11b17" Nov 23 15:03:39 crc kubenswrapper[5050]: I1123 15:03:39.113588 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e16dd4c5f277e9f18697d190962419a79baf44289a3f0d3db53a7fee7cd11b17"} err="failed to get container status \"e16dd4c5f277e9f18697d190962419a79baf44289a3f0d3db53a7fee7cd11b17\": rpc error: code = NotFound desc = could not find container \"e16dd4c5f277e9f18697d190962419a79baf44289a3f0d3db53a7fee7cd11b17\": container with ID starting with e16dd4c5f277e9f18697d190962419a79baf44289a3f0d3db53a7fee7cd11b17 not found: ID 
does not exist" Nov 23 15:03:39 crc kubenswrapper[5050]: I1123 15:03:39.118481 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 23 15:03:39 crc kubenswrapper[5050]: E1123 15:03:39.119009 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4de9500-7d8f-4619-8ae3-579d82bd7f1c" containerName="nova-metadata-log" Nov 23 15:03:39 crc kubenswrapper[5050]: I1123 15:03:39.119024 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4de9500-7d8f-4619-8ae3-579d82bd7f1c" containerName="nova-metadata-log" Nov 23 15:03:39 crc kubenswrapper[5050]: E1123 15:03:39.119075 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4de9500-7d8f-4619-8ae3-579d82bd7f1c" containerName="nova-metadata-metadata" Nov 23 15:03:39 crc kubenswrapper[5050]: I1123 15:03:39.119082 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4de9500-7d8f-4619-8ae3-579d82bd7f1c" containerName="nova-metadata-metadata" Nov 23 15:03:39 crc kubenswrapper[5050]: I1123 15:03:39.119282 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4de9500-7d8f-4619-8ae3-579d82bd7f1c" containerName="nova-metadata-metadata" Nov 23 15:03:39 crc kubenswrapper[5050]: I1123 15:03:39.119305 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4de9500-7d8f-4619-8ae3-579d82bd7f1c" containerName="nova-metadata-log" Nov 23 15:03:39 crc kubenswrapper[5050]: I1123 15:03:39.125039 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 23 15:03:39 crc kubenswrapper[5050]: I1123 15:03:39.125244 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 23 15:03:39 crc kubenswrapper[5050]: I1123 15:03:39.129164 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Nov 23 15:03:39 crc kubenswrapper[5050]: I1123 15:03:39.130239 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 23 15:03:39 crc kubenswrapper[5050]: I1123 15:03:39.183140 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/8e597d4a-3ff4-4b9e-ab6a-c0364f6afb43-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"8e597d4a-3ff4-4b9e-ab6a-c0364f6afb43\") " pod="openstack/nova-metadata-0" Nov 23 15:03:39 crc kubenswrapper[5050]: I1123 15:03:39.183758 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m82zz\" (UniqueName: \"kubernetes.io/projected/8e597d4a-3ff4-4b9e-ab6a-c0364f6afb43-kube-api-access-m82zz\") pod \"nova-metadata-0\" (UID: \"8e597d4a-3ff4-4b9e-ab6a-c0364f6afb43\") " pod="openstack/nova-metadata-0" Nov 23 15:03:39 crc kubenswrapper[5050]: I1123 15:03:39.183828 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8e597d4a-3ff4-4b9e-ab6a-c0364f6afb43-config-data\") pod \"nova-metadata-0\" (UID: \"8e597d4a-3ff4-4b9e-ab6a-c0364f6afb43\") " pod="openstack/nova-metadata-0" Nov 23 15:03:39 crc kubenswrapper[5050]: I1123 15:03:39.184216 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8e597d4a-3ff4-4b9e-ab6a-c0364f6afb43-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: 
\"8e597d4a-3ff4-4b9e-ab6a-c0364f6afb43\") " pod="openstack/nova-metadata-0" Nov 23 15:03:39 crc kubenswrapper[5050]: I1123 15:03:39.184356 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8e597d4a-3ff4-4b9e-ab6a-c0364f6afb43-logs\") pod \"nova-metadata-0\" (UID: \"8e597d4a-3ff4-4b9e-ab6a-c0364f6afb43\") " pod="openstack/nova-metadata-0" Nov 23 15:03:39 crc kubenswrapper[5050]: I1123 15:03:39.286279 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m82zz\" (UniqueName: \"kubernetes.io/projected/8e597d4a-3ff4-4b9e-ab6a-c0364f6afb43-kube-api-access-m82zz\") pod \"nova-metadata-0\" (UID: \"8e597d4a-3ff4-4b9e-ab6a-c0364f6afb43\") " pod="openstack/nova-metadata-0" Nov 23 15:03:39 crc kubenswrapper[5050]: I1123 15:03:39.286338 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8e597d4a-3ff4-4b9e-ab6a-c0364f6afb43-config-data\") pod \"nova-metadata-0\" (UID: \"8e597d4a-3ff4-4b9e-ab6a-c0364f6afb43\") " pod="openstack/nova-metadata-0" Nov 23 15:03:39 crc kubenswrapper[5050]: I1123 15:03:39.286385 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8e597d4a-3ff4-4b9e-ab6a-c0364f6afb43-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"8e597d4a-3ff4-4b9e-ab6a-c0364f6afb43\") " pod="openstack/nova-metadata-0" Nov 23 15:03:39 crc kubenswrapper[5050]: I1123 15:03:39.286430 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8e597d4a-3ff4-4b9e-ab6a-c0364f6afb43-logs\") pod \"nova-metadata-0\" (UID: \"8e597d4a-3ff4-4b9e-ab6a-c0364f6afb43\") " pod="openstack/nova-metadata-0" Nov 23 15:03:39 crc kubenswrapper[5050]: I1123 15:03:39.286474 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/8e597d4a-3ff4-4b9e-ab6a-c0364f6afb43-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"8e597d4a-3ff4-4b9e-ab6a-c0364f6afb43\") " pod="openstack/nova-metadata-0" Nov 23 15:03:39 crc kubenswrapper[5050]: I1123 15:03:39.292929 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8e597d4a-3ff4-4b9e-ab6a-c0364f6afb43-logs\") pod \"nova-metadata-0\" (UID: \"8e597d4a-3ff4-4b9e-ab6a-c0364f6afb43\") " pod="openstack/nova-metadata-0" Nov 23 15:03:39 crc kubenswrapper[5050]: I1123 15:03:39.294857 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8e597d4a-3ff4-4b9e-ab6a-c0364f6afb43-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"8e597d4a-3ff4-4b9e-ab6a-c0364f6afb43\") " pod="openstack/nova-metadata-0" Nov 23 15:03:39 crc kubenswrapper[5050]: I1123 15:03:39.300172 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/8e597d4a-3ff4-4b9e-ab6a-c0364f6afb43-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"8e597d4a-3ff4-4b9e-ab6a-c0364f6afb43\") " pod="openstack/nova-metadata-0" Nov 23 15:03:39 crc kubenswrapper[5050]: I1123 15:03:39.302207 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/8e597d4a-3ff4-4b9e-ab6a-c0364f6afb43-config-data\") pod \"nova-metadata-0\" (UID: \"8e597d4a-3ff4-4b9e-ab6a-c0364f6afb43\") " pod="openstack/nova-metadata-0" Nov 23 15:03:39 crc kubenswrapper[5050]: I1123 15:03:39.309226 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m82zz\" (UniqueName: \"kubernetes.io/projected/8e597d4a-3ff4-4b9e-ab6a-c0364f6afb43-kube-api-access-m82zz\") pod \"nova-metadata-0\" (UID: \"8e597d4a-3ff4-4b9e-ab6a-c0364f6afb43\") " pod="openstack/nova-metadata-0" Nov 23 15:03:39 crc kubenswrapper[5050]: I1123 15:03:39.448436 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 23 15:03:39 crc kubenswrapper[5050]: I1123 15:03:39.596132 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f4de9500-7d8f-4619-8ae3-579d82bd7f1c" path="/var/lib/kubelet/pods/f4de9500-7d8f-4619-8ae3-579d82bd7f1c/volumes" Nov 23 15:03:39 crc kubenswrapper[5050]: I1123 15:03:39.948616 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 23 15:03:40 crc kubenswrapper[5050]: I1123 15:03:40.045362 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"8e597d4a-3ff4-4b9e-ab6a-c0364f6afb43","Type":"ContainerStarted","Data":"871e395b9a5222660104bedabe687ac3c7c8de7bf3d7ac44d35069ee71894d4a"} Nov 23 15:03:41 crc kubenswrapper[5050]: I1123 15:03:41.063215 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"8e597d4a-3ff4-4b9e-ab6a-c0364f6afb43","Type":"ContainerStarted","Data":"6f245a393065748303acdd84cc3863db9cf884ff2abd3737de04d10232960c78"} Nov 23 15:03:41 crc kubenswrapper[5050]: I1123 15:03:41.063945 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"8e597d4a-3ff4-4b9e-ab6a-c0364f6afb43","Type":"ContainerStarted","Data":"bf046462bb12cdebcb5455c61d3b2ad24f5e4aea74396128b7021ca13a57dedf"} Nov 23 15:03:41 crc kubenswrapper[5050]: I1123 15:03:41.110425 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.110387195 podStartE2EDuration="2.110387195s" podCreationTimestamp="2025-11-23 15:03:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 15:03:41.090908851 +0000 UTC m=+1316.257905346" watchObservedRunningTime="2025-11-23 15:03:41.110387195 +0000 UTC m=+1316.277383710" Nov 23 15:03:42 crc kubenswrapper[5050]: I1123 15:03:42.362933 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Nov 23 15:03:43 crc kubenswrapper[5050]: I1123 15:03:43.749660 5050 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="f4de9500-7d8f-4619-8ae3-579d82bd7f1c" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.193:8775/\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Nov 23 15:03:43 crc kubenswrapper[5050]: I1123 15:03:43.749793 5050 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="f4de9500-7d8f-4619-8ae3-579d82bd7f1c" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.193:8775/\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting 
headers)" Nov 23 15:03:44 crc kubenswrapper[5050]: I1123 15:03:44.448841 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 23 15:03:44 crc kubenswrapper[5050]: I1123 15:03:44.449405 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 23 15:03:46 crc kubenswrapper[5050]: I1123 15:03:46.612296 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 23 15:03:46 crc kubenswrapper[5050]: I1123 15:03:46.612886 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 23 15:03:47 crc kubenswrapper[5050]: I1123 15:03:47.362748 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Nov 23 15:03:47 crc kubenswrapper[5050]: I1123 15:03:47.414107 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Nov 23 15:03:47 crc kubenswrapper[5050]: I1123 15:03:47.628628 5050 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="6c3ae457-67ab-4c0b-a17e-a8264215793b" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.0.202:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 23 15:03:47 crc kubenswrapper[5050]: I1123 15:03:47.628683 5050 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="6c3ae457-67ab-4c0b-a17e-a8264215793b" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.0.202:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 23 15:03:48 crc kubenswrapper[5050]: I1123 15:03:48.246068 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Nov 23 15:03:49 crc kubenswrapper[5050]: I1123 15:03:49.449857 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 23 15:03:49 crc kubenswrapper[5050]: I1123 15:03:49.449977 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 23 15:03:50 crc kubenswrapper[5050]: I1123 15:03:50.467774 5050 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="8e597d4a-3ff4-4b9e-ab6a-c0364f6afb43" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.204:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 23 15:03:50 crc kubenswrapper[5050]: I1123 15:03:50.467753 5050 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="8e597d4a-3ff4-4b9e-ab6a-c0364f6afb43" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.204:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 23 15:03:55 crc kubenswrapper[5050]: I1123 15:03:55.119114 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Nov 23 15:03:56 crc kubenswrapper[5050]: I1123 15:03:56.620856 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 23 15:03:56 crc kubenswrapper[5050]: I1123 15:03:56.622965 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 23 15:03:56 crc kubenswrapper[5050]: I1123 
15:03:56.624350 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 23 15:03:56 crc kubenswrapper[5050]: I1123 15:03:56.624401 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 23 15:03:56 crc kubenswrapper[5050]: I1123 15:03:56.631612 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 23 15:03:56 crc kubenswrapper[5050]: I1123 15:03:56.632983 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 23 15:03:59 crc kubenswrapper[5050]: I1123 15:03:59.464010 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 23 15:03:59 crc kubenswrapper[5050]: I1123 15:03:59.468423 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 23 15:03:59 crc kubenswrapper[5050]: I1123 15:03:59.474434 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 23 15:04:00 crc kubenswrapper[5050]: I1123 15:04:00.369963 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 23 15:04:20 crc kubenswrapper[5050]: I1123 15:04:20.700531 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstackclient"] Nov 23 15:04:20 crc kubenswrapper[5050]: I1123 15:04:20.701464 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/openstackclient" podUID="014d6c50-fe8c-4b39-bccd-531037f1ff10" containerName="openstackclient" containerID="cri-o://af27c7343928c8d4c4bff7ed205f0bb16b5e7302d0aa9e3deefadfcb0edf736c" gracePeriod=2 Nov 23 15:04:20 crc kubenswrapper[5050]: I1123 15:04:20.712682 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/openstackclient"] Nov 23 15:04:21 crc kubenswrapper[5050]: I1123 15:04:21.080285 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 23 15:04:21 crc kubenswrapper[5050]: I1123 15:04:21.194756 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder255c-account-delete-4znlv"] Nov 23 15:04:21 crc kubenswrapper[5050]: E1123 15:04:21.195211 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="014d6c50-fe8c-4b39-bccd-531037f1ff10" containerName="openstackclient" Nov 23 15:04:21 crc kubenswrapper[5050]: I1123 15:04:21.195226 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="014d6c50-fe8c-4b39-bccd-531037f1ff10" containerName="openstackclient" Nov 23 15:04:21 crc kubenswrapper[5050]: I1123 15:04:21.195492 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="014d6c50-fe8c-4b39-bccd-531037f1ff10" containerName="openstackclient" Nov 23 15:04:21 crc kubenswrapper[5050]: I1123 15:04:21.196153 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder255c-account-delete-4znlv" Nov 23 15:04:21 crc kubenswrapper[5050]: I1123 15:04:21.257837 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder255c-account-delete-4znlv"] Nov 23 15:04:21 crc kubenswrapper[5050]: E1123 15:04:21.262073 5050 configmap.go:193] Couldn't get configMap openstack/rabbitmq-config-data: configmap "rabbitmq-config-data" not found Nov 23 15:04:21 crc kubenswrapper[5050]: E1123 15:04:21.262144 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5eff539e-c641-4873-aeae-450aaf0b4ac8-config-data podName:5eff539e-c641-4873-aeae-450aaf0b4ac8 nodeName:}" failed. No retries permitted until 2025-11-23 15:04:21.762123964 +0000 UTC m=+1356.929120449 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/5eff539e-c641-4873-aeae-450aaf0b4ac8-config-data") pod "rabbitmq-server-0" (UID: "5eff539e-c641-4873-aeae-450aaf0b4ac8") : configmap "rabbitmq-config-data" not found Nov 23 15:04:21 crc kubenswrapper[5050]: I1123 15:04:21.322919 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 23 15:04:21 crc kubenswrapper[5050]: I1123 15:04:21.323843 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovsdbserver-sb-0" podUID="f81d4e06-4245-4355-89d6-ddc438c06f29" containerName="openstack-network-exporter" containerID="cri-o://40c87c219c9a5646179505e0d69951602c9a0948f6df53d02f15fd03bcb735c9" gracePeriod=300 Nov 23 15:04:21 crc kubenswrapper[5050]: I1123 15:04:21.365742 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e78eb875-17e5-431f-82b5-14a1190488b0-operator-scripts\") pod \"cinder255c-account-delete-4znlv\" (UID: \"e78eb875-17e5-431f-82b5-14a1190488b0\") " pod="openstack/cinder255c-account-delete-4znlv" Nov 23 15:04:21 crc kubenswrapper[5050]: I1123 15:04:21.365871 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7lbw7\" (UniqueName: \"kubernetes.io/projected/e78eb875-17e5-431f-82b5-14a1190488b0-kube-api-access-7lbw7\") pod \"cinder255c-account-delete-4znlv\" (UID: \"e78eb875-17e5-431f-82b5-14a1190488b0\") " pod="openstack/cinder255c-account-delete-4znlv" Nov 23 15:04:21 crc kubenswrapper[5050]: I1123 15:04:21.436049 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican6ffa-account-delete-7wsfp"] Nov 23 15:04:21 crc kubenswrapper[5050]: I1123 15:04:21.437855 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican6ffa-account-delete-7wsfp" Nov 23 15:04:21 crc kubenswrapper[5050]: I1123 15:04:21.459095 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican6ffa-account-delete-7wsfp"] Nov 23 15:04:21 crc kubenswrapper[5050]: I1123 15:04:21.467488 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4b570632-90a0-4fcd-a067-9913b51ad711-operator-scripts\") pod \"barbican6ffa-account-delete-7wsfp\" (UID: \"4b570632-90a0-4fcd-a067-9913b51ad711\") " pod="openstack/barbican6ffa-account-delete-7wsfp" Nov 23 15:04:21 crc kubenswrapper[5050]: I1123 15:04:21.467587 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e78eb875-17e5-431f-82b5-14a1190488b0-operator-scripts\") pod \"cinder255c-account-delete-4znlv\" (UID: \"e78eb875-17e5-431f-82b5-14a1190488b0\") " pod="openstack/cinder255c-account-delete-4znlv" Nov 23 15:04:21 crc kubenswrapper[5050]: I1123 15:04:21.467652 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pnf5n\" (UniqueName: \"kubernetes.io/projected/4b570632-90a0-4fcd-a067-9913b51ad711-kube-api-access-pnf5n\") pod \"barbican6ffa-account-delete-7wsfp\" (UID: \"4b570632-90a0-4fcd-a067-9913b51ad711\") " pod="openstack/barbican6ffa-account-delete-7wsfp" Nov 23 15:04:21 crc kubenswrapper[5050]: I1123 15:04:21.467694 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7lbw7\" (UniqueName: \"kubernetes.io/projected/e78eb875-17e5-431f-82b5-14a1190488b0-kube-api-access-7lbw7\") pod \"cinder255c-account-delete-4znlv\" (UID: \"e78eb875-17e5-431f-82b5-14a1190488b0\") " pod="openstack/cinder255c-account-delete-4znlv" Nov 23 15:04:21 crc kubenswrapper[5050]: I1123 15:04:21.468892 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e78eb875-17e5-431f-82b5-14a1190488b0-operator-scripts\") pod \"cinder255c-account-delete-4znlv\" (UID: \"e78eb875-17e5-431f-82b5-14a1190488b0\") " pod="openstack/cinder255c-account-delete-4znlv" Nov 23 15:04:21 crc kubenswrapper[5050]: I1123 15:04:21.538477 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7lbw7\" (UniqueName: \"kubernetes.io/projected/e78eb875-17e5-431f-82b5-14a1190488b0-kube-api-access-7lbw7\") pod \"cinder255c-account-delete-4znlv\" (UID: \"e78eb875-17e5-431f-82b5-14a1190488b0\") " pod="openstack/cinder255c-account-delete-4znlv" Nov 23 15:04:21 crc kubenswrapper[5050]: I1123 15:04:21.599318 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pnf5n\" (UniqueName: \"kubernetes.io/projected/4b570632-90a0-4fcd-a067-9913b51ad711-kube-api-access-pnf5n\") pod \"barbican6ffa-account-delete-7wsfp\" (UID: \"4b570632-90a0-4fcd-a067-9913b51ad711\") " pod="openstack/barbican6ffa-account-delete-7wsfp" Nov 23 15:04:21 crc kubenswrapper[5050]: I1123 15:04:21.599607 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4b570632-90a0-4fcd-a067-9913b51ad711-operator-scripts\") pod \"barbican6ffa-account-delete-7wsfp\" (UID: \"4b570632-90a0-4fcd-a067-9913b51ad711\") " pod="openstack/barbican6ffa-account-delete-7wsfp" Nov 23 15:04:21 crc kubenswrapper[5050]: 
I1123 15:04:21.600743 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4b570632-90a0-4fcd-a067-9913b51ad711-operator-scripts\") pod \"barbican6ffa-account-delete-7wsfp\" (UID: \"4b570632-90a0-4fcd-a067-9913b51ad711\") " pod="openstack/barbican6ffa-account-delete-7wsfp" Nov 23 15:04:21 crc kubenswrapper[5050]: I1123 15:04:21.707193 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pnf5n\" (UniqueName: \"kubernetes.io/projected/4b570632-90a0-4fcd-a067-9913b51ad711-kube-api-access-pnf5n\") pod \"barbican6ffa-account-delete-7wsfp\" (UID: \"4b570632-90a0-4fcd-a067-9913b51ad711\") " pod="openstack/barbican6ffa-account-delete-7wsfp" Nov 23 15:04:21 crc kubenswrapper[5050]: I1123 15:04:21.737845 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"f81d4e06-4245-4355-89d6-ddc438c06f29","Type":"ContainerDied","Data":"40c87c219c9a5646179505e0d69951602c9a0948f6df53d02f15fd03bcb735c9"} Nov 23 15:04:21 crc kubenswrapper[5050]: I1123 15:04:21.737439 5050 generic.go:334] "Generic (PLEG): container finished" podID="f81d4e06-4245-4355-89d6-ddc438c06f29" containerID="40c87c219c9a5646179505e0d69951602c9a0948f6df53d02f15fd03bcb735c9" exitCode=2 Nov 23 15:04:21 crc kubenswrapper[5050]: I1123 15:04:21.749131 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron45e2-account-delete-8sq6l"] Nov 23 15:04:21 crc kubenswrapper[5050]: I1123 15:04:21.790229 5050 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/openstack-cell1-galera-0" podUID="85ca597a-bf71-43bb-b57c-f840b37f196f" containerName="galera" probeResult="failure" output="command timed out" Nov 23 15:04:21 crc kubenswrapper[5050]: I1123 15:04:21.805482 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron45e2-account-delete-8sq6l" Nov 23 15:04:21 crc kubenswrapper[5050]: I1123 15:04:21.807138 5050 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/openstack-cell1-galera-0" podUID="85ca597a-bf71-43bb-b57c-f840b37f196f" containerName="galera" probeResult="failure" output="command timed out" Nov 23 15:04:21 crc kubenswrapper[5050]: I1123 15:04:21.808068 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron45e2-account-delete-8sq6l"] Nov 23 15:04:21 crc kubenswrapper[5050]: I1123 15:04:21.833006 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder255c-account-delete-4znlv" Nov 23 15:04:21 crc kubenswrapper[5050]: I1123 15:04:21.842183 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qkqpp\" (UniqueName: \"kubernetes.io/projected/9ea4c6e2-e26b-454f-a6e1-b8fd7df8a931-kube-api-access-qkqpp\") pod \"neutron45e2-account-delete-8sq6l\" (UID: \"9ea4c6e2-e26b-454f-a6e1-b8fd7df8a931\") " pod="openstack/neutron45e2-account-delete-8sq6l" Nov 23 15:04:21 crc kubenswrapper[5050]: I1123 15:04:21.842312 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9ea4c6e2-e26b-454f-a6e1-b8fd7df8a931-operator-scripts\") pod \"neutron45e2-account-delete-8sq6l\" (UID: \"9ea4c6e2-e26b-454f-a6e1-b8fd7df8a931\") " pod="openstack/neutron45e2-account-delete-8sq6l" Nov 23 15:04:21 crc kubenswrapper[5050]: E1123 15:04:21.843581 5050 configmap.go:193] Couldn't get configMap openstack/rabbitmq-config-data: configmap "rabbitmq-config-data" not found Nov 23 15:04:21 crc kubenswrapper[5050]: E1123 15:04:21.843671 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5eff539e-c641-4873-aeae-450aaf0b4ac8-config-data podName:5eff539e-c641-4873-aeae-450aaf0b4ac8 nodeName:}" failed. No retries permitted until 2025-11-23 15:04:22.843649271 +0000 UTC m=+1358.010645756 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/5eff539e-c641-4873-aeae-450aaf0b4ac8-config-data") pod "rabbitmq-server-0" (UID: "5eff539e-c641-4873-aeae-450aaf0b4ac8") : configmap "rabbitmq-config-data" not found Nov 23 15:04:21 crc kubenswrapper[5050]: I1123 15:04:21.887063 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-sync-82ms7"] Nov 23 15:04:21 crc kubenswrapper[5050]: I1123 15:04:21.905974 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican6ffa-account-delete-7wsfp" Nov 23 15:04:21 crc kubenswrapper[5050]: I1123 15:04:21.909392 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-sync-82ms7"] Nov 23 15:04:21 crc kubenswrapper[5050]: I1123 15:04:21.932898 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-sync-ftmg2"] Nov 23 15:04:21 crc kubenswrapper[5050]: I1123 15:04:21.947168 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qkqpp\" (UniqueName: \"kubernetes.io/projected/9ea4c6e2-e26b-454f-a6e1-b8fd7df8a931-kube-api-access-qkqpp\") pod \"neutron45e2-account-delete-8sq6l\" (UID: \"9ea4c6e2-e26b-454f-a6e1-b8fd7df8a931\") " pod="openstack/neutron45e2-account-delete-8sq6l" Nov 23 15:04:21 crc kubenswrapper[5050]: I1123 15:04:21.947272 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9ea4c6e2-e26b-454f-a6e1-b8fd7df8a931-operator-scripts\") pod \"neutron45e2-account-delete-8sq6l\" (UID: \"9ea4c6e2-e26b-454f-a6e1-b8fd7df8a931\") " pod="openstack/neutron45e2-account-delete-8sq6l" Nov 23 15:04:21 crc kubenswrapper[5050]: I1123 15:04:21.948349 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9ea4c6e2-e26b-454f-a6e1-b8fd7df8a931-operator-scripts\") pod \"neutron45e2-account-delete-8sq6l\" (UID: \"9ea4c6e2-e26b-454f-a6e1-b8fd7df8a931\") " pod="openstack/neutron45e2-account-delete-8sq6l" Nov 23 15:04:21 crc kubenswrapper[5050]: I1123 15:04:21.962554 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-sync-ftmg2"] Nov 23 15:04:21 crc kubenswrapper[5050]: I1123 15:04:21.981624 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-metrics-22cnp"] Nov 23 15:04:21 crc kubenswrapper[5050]: I1123 15:04:21.981936 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-controller-metrics-22cnp" podUID="4a7ed50c-3990-423d-9fd6-1165da59c5c9" containerName="openstack-network-exporter" containerID="cri-o://7a08082700302fc03c17eee134d553b46f2157cc92e9951f31627a535a42e5af" gracePeriod=30 Nov 23 15:04:22 crc kubenswrapper[5050]: I1123 15:04:22.009021 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-sync-sdvpr"] Nov 23 15:04:22 crc kubenswrapper[5050]: I1123 15:04:22.032041 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-sync-sdvpr"] Nov 23 15:04:22 crc kubenswrapper[5050]: I1123 15:04:22.035589 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-ovs-n9v8v"] Nov 23 15:04:22 crc kubenswrapper[5050]: I1123 15:04:22.060032 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qkqpp\" (UniqueName: \"kubernetes.io/projected/9ea4c6e2-e26b-454f-a6e1-b8fd7df8a931-kube-api-access-qkqpp\") pod \"neutron45e2-account-delete-8sq6l\" (UID: \"9ea4c6e2-e26b-454f-a6e1-b8fd7df8a931\") " pod="openstack/neutron45e2-account-delete-8sq6l" Nov 23 15:04:22 crc kubenswrapper[5050]: I1123 15:04:22.112540 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-tbww8"] Nov 23 15:04:22 crc kubenswrapper[5050]: I1123 15:04:22.167288 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron45e2-account-delete-8sq6l" Nov 23 15:04:22 crc kubenswrapper[5050]: I1123 15:04:22.187174 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement734b-account-delete-zt8zp"] Nov 23 15:04:22 crc kubenswrapper[5050]: I1123 15:04:22.194761 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement734b-account-delete-zt8zp" Nov 23 15:04:22 crc kubenswrapper[5050]: I1123 15:04:22.221632 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovsdbserver-sb-0" podUID="f81d4e06-4245-4355-89d6-ddc438c06f29" containerName="ovsdbserver-sb" containerID="cri-o://c0069a68c89482704d61e8c58556300339031d7c4accb976799bb1d562fe9ac9" gracePeriod=300 Nov 23 15:04:22 crc kubenswrapper[5050]: I1123 15:04:22.257893 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-sync-j7p9t"] Nov 23 15:04:22 crc kubenswrapper[5050]: I1123 15:04:22.268861 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g4lns\" (UniqueName: \"kubernetes.io/projected/3d3a2397-697f-4d1b-a5ed-0aaa1b202a12-kube-api-access-g4lns\") pod \"placement734b-account-delete-zt8zp\" (UID: \"3d3a2397-697f-4d1b-a5ed-0aaa1b202a12\") " pod="openstack/placement734b-account-delete-zt8zp" Nov 23 15:04:22 crc kubenswrapper[5050]: I1123 15:04:22.268962 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3d3a2397-697f-4d1b-a5ed-0aaa1b202a12-operator-scripts\") pod \"placement734b-account-delete-zt8zp\" (UID: \"3d3a2397-697f-4d1b-a5ed-0aaa1b202a12\") " pod="openstack/placement734b-account-delete-zt8zp" Nov 23 15:04:22 crc kubenswrapper[5050]: I1123 15:04:22.279417 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-northd-0"] Nov 23 15:04:22 crc kubenswrapper[5050]: I1123 15:04:22.279781 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-northd-0" podUID="27e26dea-41e9-4d40-9adc-f902e5f4791f" containerName="ovn-northd" containerID="cri-o://deafd7234d6f0ca5e0fe921dff3e5ef022a4b569f1d6016eec6fa8e609b595e2" gracePeriod=30 Nov 23 15:04:22 crc kubenswrapper[5050]: I1123 15:04:22.280360 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-northd-0" podUID="27e26dea-41e9-4d40-9adc-f902e5f4791f" containerName="openstack-network-exporter" containerID="cri-o://8b448c0c28f62c7ec1d3e7f3dd39d921c151ee6325f2379cf0312b266ebedf37" gracePeriod=30 Nov 23 15:04:22 crc kubenswrapper[5050]: I1123 15:04:22.305711 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-sync-j7p9t"] Nov 23 15:04:22 crc kubenswrapper[5050]: I1123 15:04:22.324335 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 23 15:04:22 crc kubenswrapper[5050]: I1123 15:04:22.374145 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g4lns\" (UniqueName: \"kubernetes.io/projected/3d3a2397-697f-4d1b-a5ed-0aaa1b202a12-kube-api-access-g4lns\") pod \"placement734b-account-delete-zt8zp\" (UID: \"3d3a2397-697f-4d1b-a5ed-0aaa1b202a12\") " pod="openstack/placement734b-account-delete-zt8zp" Nov 23 15:04:22 crc kubenswrapper[5050]: I1123 15:04:22.374241 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" 
(UniqueName: \"kubernetes.io/configmap/3d3a2397-697f-4d1b-a5ed-0aaa1b202a12-operator-scripts\") pod \"placement734b-account-delete-zt8zp\" (UID: \"3d3a2397-697f-4d1b-a5ed-0aaa1b202a12\") " pod="openstack/placement734b-account-delete-zt8zp" Nov 23 15:04:22 crc kubenswrapper[5050]: E1123 15:04:22.375389 5050 configmap.go:193] Couldn't get configMap openstack/rabbitmq-cell1-config-data: configmap "rabbitmq-cell1-config-data" not found Nov 23 15:04:22 crc kubenswrapper[5050]: E1123 15:04:22.375481 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/f92353db-5352-4216-ad2d-50242e58dfb7-config-data podName:f92353db-5352-4216-ad2d-50242e58dfb7 nodeName:}" failed. No retries permitted until 2025-11-23 15:04:22.875459175 +0000 UTC m=+1358.042455650 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/f92353db-5352-4216-ad2d-50242e58dfb7-config-data") pod "rabbitmq-cell1-server-0" (UID: "f92353db-5352-4216-ad2d-50242e58dfb7") : configmap "rabbitmq-cell1-config-data" not found Nov 23 15:04:22 crc kubenswrapper[5050]: I1123 15:04:22.376989 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3d3a2397-697f-4d1b-a5ed-0aaa1b202a12-operator-scripts\") pod \"placement734b-account-delete-zt8zp\" (UID: \"3d3a2397-697f-4d1b-a5ed-0aaa1b202a12\") " pod="openstack/placement734b-account-delete-zt8zp" Nov 23 15:04:22 crc kubenswrapper[5050]: I1123 15:04:22.392750 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g4lns\" (UniqueName: \"kubernetes.io/projected/3d3a2397-697f-4d1b-a5ed-0aaa1b202a12-kube-api-access-g4lns\") pod \"placement734b-account-delete-zt8zp\" (UID: \"3d3a2397-697f-4d1b-a5ed-0aaa1b202a12\") " pod="openstack/placement734b-account-delete-zt8zp" Nov 23 15:04:22 crc kubenswrapper[5050]: I1123 15:04:22.392824 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement734b-account-delete-zt8zp"] Nov 23 15:04:22 crc kubenswrapper[5050]: I1123 15:04:22.404141 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glancebb96-account-delete-qls8c"] Nov 23 15:04:22 crc kubenswrapper[5050]: I1123 15:04:22.406207 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glancebb96-account-delete-qls8c" Nov 23 15:04:22 crc kubenswrapper[5050]: I1123 15:04:22.420774 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-sync-lzdd7"] Nov 23 15:04:22 crc kubenswrapper[5050]: I1123 15:04:22.431622 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-sync-lzdd7"] Nov 23 15:04:22 crc kubenswrapper[5050]: I1123 15:04:22.442110 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glancebb96-account-delete-qls8c"] Nov 23 15:04:22 crc kubenswrapper[5050]: I1123 15:04:22.488165 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cgsql\" (UniqueName: \"kubernetes.io/projected/13ecb6d2-cd50-4239-8939-6465176afd8e-kube-api-access-cgsql\") pod \"glancebb96-account-delete-qls8c\" (UID: \"13ecb6d2-cd50-4239-8939-6465176afd8e\") " pod="openstack/glancebb96-account-delete-qls8c" Nov 23 15:04:22 crc kubenswrapper[5050]: I1123 15:04:22.488727 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/13ecb6d2-cd50-4239-8939-6465176afd8e-operator-scripts\") pod \"glancebb96-account-delete-qls8c\" (UID: \"13ecb6d2-cd50-4239-8939-6465176afd8e\") " pod="openstack/glancebb96-account-delete-qls8c" Nov 23 15:04:22 crc kubenswrapper[5050]: I1123 15:04:22.501729 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 23 15:04:22 crc kubenswrapper[5050]: I1123 15:04:22.502719 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovsdbserver-nb-0" podUID="716a14cd-88b9-4e18-a781-6dbfaad7634c" containerName="openstack-network-exporter" containerID="cri-o://e0aa36f5771c43f6dd0d81d07cdda1b7e03ac841535ddf7564847c9249d6a707" gracePeriod=300 Nov 23 15:04:22 crc kubenswrapper[5050]: I1123 15:04:22.539159 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-ring-rebalance-99xwj"] Nov 23 15:04:22 crc kubenswrapper[5050]: I1123 15:04:22.599513 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cgsql\" (UniqueName: \"kubernetes.io/projected/13ecb6d2-cd50-4239-8939-6465176afd8e-kube-api-access-cgsql\") pod \"glancebb96-account-delete-qls8c\" (UID: \"13ecb6d2-cd50-4239-8939-6465176afd8e\") " pod="openstack/glancebb96-account-delete-qls8c" Nov 23 15:04:22 crc kubenswrapper[5050]: I1123 15:04:22.599714 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/13ecb6d2-cd50-4239-8939-6465176afd8e-operator-scripts\") pod \"glancebb96-account-delete-qls8c\" (UID: \"13ecb6d2-cd50-4239-8939-6465176afd8e\") " pod="openstack/glancebb96-account-delete-qls8c" Nov 23 15:04:22 crc kubenswrapper[5050]: I1123 15:04:22.600870 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/13ecb6d2-cd50-4239-8939-6465176afd8e-operator-scripts\") pod \"glancebb96-account-delete-qls8c\" (UID: \"13ecb6d2-cd50-4239-8939-6465176afd8e\") " pod="openstack/glancebb96-account-delete-qls8c" Nov 23 15:04:22 crc kubenswrapper[5050]: I1123 15:04:22.601978 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-cd5cbd7b9-vptxh"] Nov 23 15:04:22 crc kubenswrapper[5050]: I1123 15:04:22.602304 5050 kuberuntime_container.go:808] "Killing container 
with a grace period" pod="openstack/dnsmasq-dns-cd5cbd7b9-vptxh" podUID="e7a4774e-f304-40ca-8595-bbe9c381f466" containerName="dnsmasq-dns" containerID="cri-o://063525e2dec5a92b0de43140afc955072b70de2dd16ff0f882b43cc83a5c5cb3" gracePeriod=10 Nov 23 15:04:22 crc kubenswrapper[5050]: I1123 15:04:22.614208 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement734b-account-delete-zt8zp" Nov 23 15:04:22 crc kubenswrapper[5050]: I1123 15:04:22.637411 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/swift-ring-rebalance-99xwj"] Nov 23 15:04:22 crc kubenswrapper[5050]: I1123 15:04:22.656796 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovsdbserver-nb-0" podUID="716a14cd-88b9-4e18-a781-6dbfaad7634c" containerName="ovsdbserver-nb" containerID="cri-o://3249cf8628f145ad70288b99dd53e1eb58e6ee9953f4d2ed143ddefe3d8d6d1c" gracePeriod=300 Nov 23 15:04:22 crc kubenswrapper[5050]: I1123 15:04:22.680227 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cgsql\" (UniqueName: \"kubernetes.io/projected/13ecb6d2-cd50-4239-8939-6465176afd8e-kube-api-access-cgsql\") pod \"glancebb96-account-delete-qls8c\" (UID: \"13ecb6d2-cd50-4239-8939-6465176afd8e\") " pod="openstack/glancebb96-account-delete-qls8c" Nov 23 15:04:22 crc kubenswrapper[5050]: I1123 15:04:22.704039 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Nov 23 15:04:22 crc kubenswrapper[5050]: I1123 15:04:22.704501 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="ba1bb9f6-5ca2-4986-9473-62d50d9bebf0" containerName="cinder-api-log" containerID="cri-o://878b763faeb721f2171524629a6e423a6579824e5ba28a08437820030804f60f" gracePeriod=30 Nov 23 15:04:22 crc kubenswrapper[5050]: I1123 15:04:22.704653 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="ba1bb9f6-5ca2-4986-9473-62d50d9bebf0" containerName="cinder-api" containerID="cri-o://08fc1f6beb7a82ec24f421d1df48b2d8f792f3248ae507344624f3d012bb743a" gracePeriod=30 Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:22.775834 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:22.776266 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="ba4a4a57-47b0-423d-8bb1-76953fb3a37b" containerName="cinder-scheduler" containerID="cri-o://88de6b6d747657765b520632673420c57f5942e93770f67efd5b9461639f7cdd" gracePeriod=30 Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:22.777546 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="ba4a4a57-47b0-423d-8bb1-76953fb3a37b" containerName="probe" containerID="cri-o://ff0e97bc91a901d22909a988b2e36c290f89a2317abfa813abba814502aa0edc" gracePeriod=30 Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:22.813967 5050 generic.go:334] "Generic (PLEG): container finished" podID="27e26dea-41e9-4d40-9adc-f902e5f4791f" containerID="8b448c0c28f62c7ec1d3e7f3dd39d921c151ee6325f2379cf0312b266ebedf37" exitCode=2 Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:22.814034 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" 
event={"ID":"27e26dea-41e9-4d40-9adc-f902e5f4791f","Type":"ContainerDied","Data":"8b448c0c28f62c7ec1d3e7f3dd39d921c151ee6325f2379cf0312b266ebedf37"} Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:22.820563 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/novacell03052-account-delete-kz7k7"] Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:22.824869 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/novacell03052-account-delete-kz7k7" Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:22.837930 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-22cnp_4a7ed50c-3990-423d-9fd6-1165da59c5c9/openstack-network-exporter/0.log" Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:22.837980 5050 generic.go:334] "Generic (PLEG): container finished" podID="4a7ed50c-3990-423d-9fd6-1165da59c5c9" containerID="7a08082700302fc03c17eee134d553b46f2157cc92e9951f31627a535a42e5af" exitCode=2 Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:22.838045 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-22cnp" event={"ID":"4a7ed50c-3990-423d-9fd6-1165da59c5c9","Type":"ContainerDied","Data":"7a08082700302fc03c17eee134d553b46f2157cc92e9951f31627a535a42e5af"} Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:22.853566 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/novacell03052-account-delete-kz7k7"] Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:22.879400 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_716a14cd-88b9-4e18-a781-6dbfaad7634c/ovsdbserver-nb/0.log" Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:22.879534 5050 generic.go:334] "Generic (PLEG): container finished" podID="716a14cd-88b9-4e18-a781-6dbfaad7634c" containerID="e0aa36f5771c43f6dd0d81d07cdda1b7e03ac841535ddf7564847c9249d6a707" exitCode=2 Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:22.879659 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"716a14cd-88b9-4e18-a781-6dbfaad7634c","Type":"ContainerDied","Data":"e0aa36f5771c43f6dd0d81d07cdda1b7e03ac841535ddf7564847c9249d6a707"} Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:22.910125 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_f81d4e06-4245-4355-89d6-ddc438c06f29/ovsdbserver-sb/0.log" Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:22.910173 5050 generic.go:334] "Generic (PLEG): container finished" podID="f81d4e06-4245-4355-89d6-ddc438c06f29" containerID="c0069a68c89482704d61e8c58556300339031d7c4accb976799bb1d562fe9ac9" exitCode=143 Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:22.910209 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"f81d4e06-4245-4355-89d6-ddc438c06f29","Type":"ContainerDied","Data":"c0069a68c89482704d61e8c58556300339031d7c4accb976799bb1d562fe9ac9"} Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:22.910739 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-747lj\" (UniqueName: \"kubernetes.io/projected/3d6e4b67-5d33-49cd-8a2a-69c3fc2c9d89-kube-api-access-747lj\") pod \"novacell03052-account-delete-kz7k7\" (UID: \"3d6e4b67-5d33-49cd-8a2a-69c3fc2c9d89\") " pod="openstack/novacell03052-account-delete-kz7k7" Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:22.910793 5050 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3d6e4b67-5d33-49cd-8a2a-69c3fc2c9d89-operator-scripts\") pod \"novacell03052-account-delete-kz7k7\" (UID: \"3d6e4b67-5d33-49cd-8a2a-69c3fc2c9d89\") " pod="openstack/novacell03052-account-delete-kz7k7" Nov 23 15:04:23 crc kubenswrapper[5050]: E1123 15:04:22.911042 5050 configmap.go:193] Couldn't get configMap openstack/rabbitmq-cell1-config-data: configmap "rabbitmq-cell1-config-data" not found Nov 23 15:04:23 crc kubenswrapper[5050]: E1123 15:04:22.911098 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/f92353db-5352-4216-ad2d-50242e58dfb7-config-data podName:f92353db-5352-4216-ad2d-50242e58dfb7 nodeName:}" failed. No retries permitted until 2025-11-23 15:04:23.911079067 +0000 UTC m=+1359.078075552 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/f92353db-5352-4216-ad2d-50242e58dfb7-config-data") pod "rabbitmq-cell1-server-0" (UID: "f92353db-5352-4216-ad2d-50242e58dfb7") : configmap "rabbitmq-cell1-config-data" not found Nov 23 15:04:23 crc kubenswrapper[5050]: E1123 15:04:22.924822 5050 configmap.go:193] Couldn't get configMap openstack/rabbitmq-config-data: configmap "rabbitmq-config-data" not found Nov 23 15:04:23 crc kubenswrapper[5050]: E1123 15:04:22.924909 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5eff539e-c641-4873-aeae-450aaf0b4ac8-config-data podName:5eff539e-c641-4873-aeae-450aaf0b4ac8 nodeName:}" failed. No retries permitted until 2025-11-23 15:04:24.92487583 +0000 UTC m=+1360.091872315 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/5eff539e-c641-4873-aeae-450aaf0b4ac8-config-data") pod "rabbitmq-server-0" (UID: "5eff539e-c641-4873-aeae-450aaf0b4ac8") : configmap "rabbitmq-config-data" not found Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:22.941978 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-storage-0"] Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:22.943111 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="4947b94e-805e-457e-bb17-f7faea3b5fca" containerName="account-server" containerID="cri-o://0001c3500cc85fc0a925e13354d27829e719cc2b2c22f8ba291e2016b3e3b365" gracePeriod=30 Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:22.943283 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="4947b94e-805e-457e-bb17-f7faea3b5fca" containerName="container-updater" containerID="cri-o://f9d2cfcc4ee11e5c2631dbc7f3cfcfc91553670b329dfc79c70030ffc3518f25" gracePeriod=30 Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:22.943389 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="4947b94e-805e-457e-bb17-f7faea3b5fca" containerName="container-auditor" containerID="cri-o://9d6c10414c5bea42690cbd8da8cba7bb0cf669191242a30c4e88b58e7464d151" gracePeriod=30 Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:22.943338 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="4947b94e-805e-457e-bb17-f7faea3b5fca" containerName="object-server" containerID="cri-o://53cc065da29b9c9511ae76a606c6be3934715316c98087e1bceb4aedfb391ba5" gracePeriod=30 Nov 23 
15:04:23 crc kubenswrapper[5050]: I1123 15:04:22.943791 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="4947b94e-805e-457e-bb17-f7faea3b5fca" containerName="container-replicator" containerID="cri-o://b4f43cb835f2029a79a141c233333ef0e2ea5ee6f6d05bd0c246b7323bb38310" gracePeriod=30 Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:22.943975 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="4947b94e-805e-457e-bb17-f7faea3b5fca" containerName="container-server" containerID="cri-o://f9e7a939140c48c7a80ae5b9b609b9b098e6c60e33ce09927c76177950647981" gracePeriod=30 Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:22.944101 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="4947b94e-805e-457e-bb17-f7faea3b5fca" containerName="account-reaper" containerID="cri-o://1cec0ebe1304d052bb25cc3f36043c71b0777d6f8cbf3b9d8a22f1e057b1fddb" gracePeriod=30 Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:22.944236 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="4947b94e-805e-457e-bb17-f7faea3b5fca" containerName="account-auditor" containerID="cri-o://9a3ab1117cae597494105d7c65afeef7161729ed4e56b6205f5b1b5b9c2c0980" gracePeriod=30 Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:22.944367 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="4947b94e-805e-457e-bb17-f7faea3b5fca" containerName="rsync" containerID="cri-o://b22acf79827fd3ac0a34b514be2b00ed37d6b8a1dac44f85032fc97fb4bd5540" gracePeriod=30 Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:22.944482 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="4947b94e-805e-457e-bb17-f7faea3b5fca" containerName="account-replicator" containerID="cri-o://4211198ac477c9fe45e3ef0bd5dffc47dd9911807782e0d46558842b2d2b48fd" gracePeriod=30 Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:22.946154 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="4947b94e-805e-457e-bb17-f7faea3b5fca" containerName="object-updater" containerID="cri-o://e0f8af78c1a802c881b2c04585b63daab17211922673261ca7523f7d84b8e187" gracePeriod=30 Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:22.943355 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="4947b94e-805e-457e-bb17-f7faea3b5fca" containerName="object-expirer" containerID="cri-o://0e36c69aa375c539b26f58a19aaffeb696f806ace3d6046abb778218a238b336" gracePeriod=30 Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:22.944424 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="4947b94e-805e-457e-bb17-f7faea3b5fca" containerName="swift-recon-cron" containerID="cri-o://61d7218bc7db3ee0defb5601931d237c30251b28aeaf13b1fc22ffcbd83b4e84" gracePeriod=30 Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:22.948075 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="4947b94e-805e-457e-bb17-f7faea3b5fca" containerName="object-auditor" containerID="cri-o://8dae616c0d8bc5a23b5cfb9e119b706796daaa7ba5419e533d3fb7176e7bf222" gracePeriod=30 Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:22.945166 
5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="4947b94e-805e-457e-bb17-f7faea3b5fca" containerName="object-replicator" containerID="cri-o://1fcdee9bec472e24e3ee2fa6e158b77914f69e25bb807a1be9ca536d728a1ddf" gracePeriod=30 Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:23.018502 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-747lj\" (UniqueName: \"kubernetes.io/projected/3d6e4b67-5d33-49cd-8a2a-69c3fc2c9d89-kube-api-access-747lj\") pod \"novacell03052-account-delete-kz7k7\" (UID: \"3d6e4b67-5d33-49cd-8a2a-69c3fc2c9d89\") " pod="openstack/novacell03052-account-delete-kz7k7" Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:23.018576 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3d6e4b67-5d33-49cd-8a2a-69c3fc2c9d89-operator-scripts\") pod \"novacell03052-account-delete-kz7k7\" (UID: \"3d6e4b67-5d33-49cd-8a2a-69c3fc2c9d89\") " pod="openstack/novacell03052-account-delete-kz7k7" Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:23.020127 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3d6e4b67-5d33-49cd-8a2a-69c3fc2c9d89-operator-scripts\") pod \"novacell03052-account-delete-kz7k7\" (UID: \"3d6e4b67-5d33-49cd-8a2a-69c3fc2c9d89\") " pod="openstack/novacell03052-account-delete-kz7k7" Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:23.050531 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-7cb7995f89-k8h9t"] Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:23.051723 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-7cb7995f89-k8h9t" podUID="1b3a1560-8762-47b9-8d1c-fe94eb46bec2" containerName="neutron-api" containerID="cri-o://c8744bf42aa8122e20bb79dbbcfbe437abb2154864233d844f2967643789a84f" gracePeriod=30 Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:23.052213 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-7cb7995f89-k8h9t" podUID="1b3a1560-8762-47b9-8d1c-fe94eb46bec2" containerName="neutron-httpd" containerID="cri-o://8b5d3ae6f2b8800aa1cfd0b5a953b873c14fdd59195485538c97159fa050f837" gracePeriod=30 Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:23.054100 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-747lj\" (UniqueName: \"kubernetes.io/projected/3d6e4b67-5d33-49cd-8a2a-69c3fc2c9d89-kube-api-access-747lj\") pod \"novacell03052-account-delete-kz7k7\" (UID: \"3d6e4b67-5d33-49cd-8a2a-69c3fc2c9d89\") " pod="openstack/novacell03052-account-delete-kz7k7" Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:23.071522 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-748fb7ccf6-s9qnq"] Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:23.072140 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/placement-748fb7ccf6-s9qnq" podUID="add2a2e2-5553-4e43-8ddd-b8719949d531" containerName="placement-api" containerID="cri-o://ce2fe500a1bdad923f79c0d2d37ae5d7c546350b70f8148bfd61c5c68475e3f7" gracePeriod=30 Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:23.072490 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/placement-748fb7ccf6-s9qnq" podUID="add2a2e2-5553-4e43-8ddd-b8719949d531" 
containerName="placement-log" containerID="cri-o://e29903a818a042f29b3e4e52f5b9a3d3f2db969791fe5adce7bb96fbe4dca897" gracePeriod=30 Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:23.101589 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/novaapif81d-account-delete-xfdwn"] Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:23.103530 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/novaapif81d-account-delete-xfdwn" Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:23.113506 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/novaapif81d-account-delete-xfdwn"] Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:23.123754 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1bda1a38-71dd-4de4-8e16-b5159a08fdfa-operator-scripts\") pod \"novaapif81d-account-delete-xfdwn\" (UID: \"1bda1a38-71dd-4de4-8e16-b5159a08fdfa\") " pod="openstack/novaapif81d-account-delete-xfdwn" Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:23.123834 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vlrvp\" (UniqueName: \"kubernetes.io/projected/1bda1a38-71dd-4de4-8e16-b5159a08fdfa-kube-api-access-vlrvp\") pod \"novaapif81d-account-delete-xfdwn\" (UID: \"1bda1a38-71dd-4de4-8e16-b5159a08fdfa\") " pod="openstack/novaapif81d-account-delete-xfdwn" Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:23.131232 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-cell-mapping-j52wg"] Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:23.153685 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-cell-mapping-j52wg"] Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:23.166498 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-cell-mapping-q7jct"] Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:23.181840 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-cell-mapping-q7jct"] Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:23.229423 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:23.229803 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="4d3c8093-cbaa-4414-9f1d-c7ae7a2b9322" containerName="glance-log" containerID="cri-o://9f6dbbcaa59ff563c11d0cdfecbaa890da1c52e228d3d8a74b64182f665c6508" gracePeriod=30 Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:23.230466 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="4d3c8093-cbaa-4414-9f1d-c7ae7a2b9322" containerName="glance-httpd" containerID="cri-o://63cf2a00b105f3566525e540e376dbc5f791f9441863630deaff0d4a49ab2c58" gracePeriod=30 Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:23.278518 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:23.278880 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="6c5e9b39-91fa-4f4e-9d95-0599bc22472d" containerName="glance-log" 
containerID="cri-o://29d8b6a8f6bcb0d52d1fb31bd328b23184faf043db74c7e0325581a2ae92bc41" gracePeriod=30 Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:23.279399 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vlrvp\" (UniqueName: \"kubernetes.io/projected/1bda1a38-71dd-4de4-8e16-b5159a08fdfa-kube-api-access-vlrvp\") pod \"novaapif81d-account-delete-xfdwn\" (UID: \"1bda1a38-71dd-4de4-8e16-b5159a08fdfa\") " pod="openstack/novaapif81d-account-delete-xfdwn" Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:23.279566 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="6c5e9b39-91fa-4f4e-9d95-0599bc22472d" containerName="glance-httpd" containerID="cri-o://afbf2c9723a3c51e2ef343739812462a5a83e6a2cd33bd51d95de96ab628994d" gracePeriod=30 Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:23.279630 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1bda1a38-71dd-4de4-8e16-b5159a08fdfa-operator-scripts\") pod \"novaapif81d-account-delete-xfdwn\" (UID: \"1bda1a38-71dd-4de4-8e16-b5159a08fdfa\") " pod="openstack/novaapif81d-account-delete-xfdwn" Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:23.291613 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1bda1a38-71dd-4de4-8e16-b5159a08fdfa-operator-scripts\") pod \"novaapif81d-account-delete-xfdwn\" (UID: \"1bda1a38-71dd-4de4-8e16-b5159a08fdfa\") " pod="openstack/novaapif81d-account-delete-xfdwn" Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:23.307530 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-worker-864fb4756c-xzxnp"] Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:23.307861 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-worker-864fb4756c-xzxnp" podUID="042c506a-b65f-4cbd-9ca7-3df0ec55ffa0" containerName="barbican-worker-log" containerID="cri-o://4cbde859414d5230c51f3a896d2c7f2eb514382d4930ae12ebe388430ccf7b95" gracePeriod=30 Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:23.308408 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-worker-864fb4756c-xzxnp" podUID="042c506a-b65f-4cbd-9ca7-3df0ec55ffa0" containerName="barbican-worker" containerID="cri-o://085d654f5685c959e6314e3ffe048bec6afd8a89e45c228a241cbd713de9226c" gracePeriod=30 Nov 23 15:04:23 crc kubenswrapper[5050]: E1123 15:04:23.322565 5050 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="deafd7234d6f0ca5e0fe921dff3e5ef022a4b569f1d6016eec6fa8e609b595e2" cmd=["/usr/local/bin/container-scripts/status_check.sh"] Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:23.324078 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vlrvp\" (UniqueName: \"kubernetes.io/projected/1bda1a38-71dd-4de4-8e16-b5159a08fdfa-kube-api-access-vlrvp\") pod \"novaapif81d-account-delete-xfdwn\" (UID: \"1bda1a38-71dd-4de4-8e16-b5159a08fdfa\") " pod="openstack/novaapif81d-account-delete-xfdwn" Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:23.327654 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-77dc855c68-z488p"] Nov 23 15:04:23 
crc kubenswrapper[5050]: I1123 15:04:23.327970 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-77dc855c68-z488p" podUID="f85034c8-6b79-42e1-849d-646a2ead1a93" containerName="barbican-api-log" containerID="cri-o://ec06abb2a42fb30f410f9e5ebcf42f255dbea4392fc4eb791ab3e885ee8c6c15" gracePeriod=30 Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:23.328269 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-77dc855c68-z488p" podUID="f85034c8-6b79-42e1-849d-646a2ead1a93" containerName="barbican-api" containerID="cri-o://ba962ea3ca9a2e41f992ef21c4cd5215480c219a40218ff8e9e9408b4eeec97b" gracePeriod=30 Nov 23 15:04:23 crc kubenswrapper[5050]: E1123 15:04:23.338712 5050 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="deafd7234d6f0ca5e0fe921dff3e5ef022a4b569f1d6016eec6fa8e609b595e2" cmd=["/usr/local/bin/container-scripts/status_check.sh"] Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:23.342317 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 23 15:04:23 crc kubenswrapper[5050]: E1123 15:04:23.349014 5050 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="deafd7234d6f0ca5e0fe921dff3e5ef022a4b569f1d6016eec6fa8e609b595e2" cmd=["/usr/local/bin/container-scripts/status_check.sh"] Nov 23 15:04:23 crc kubenswrapper[5050]: E1123 15:04:23.349064 5050 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-northd-0" podUID="27e26dea-41e9-4d40-9adc-f902e5f4791f" containerName="ovn-northd" Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:23.366145 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-keystone-listener-586bfdfdd8-99vrd"] Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:23.366476 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-keystone-listener-586bfdfdd8-99vrd" podUID="9d53c4b5-d0a1-4191-9239-8c7b6806f860" containerName="barbican-keystone-listener-log" containerID="cri-o://973cb826038668efa7e6c38aa0a38cd28308df3422e590a77aa204d04d4cf352" gracePeriod=30 Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:23.367028 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-keystone-listener-586bfdfdd8-99vrd" podUID="9d53c4b5-d0a1-4191-9239-8c7b6806f860" containerName="barbican-keystone-listener" containerID="cri-o://561dbaa7317e13693d05c4f79f3b0d5138d3e140a5300d28891e26017d5bd81a" gracePeriod=30 Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:23.382023 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:23.394235 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:23.399355 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="6c3ae457-67ab-4c0b-a17e-a8264215793b" containerName="nova-api-log" 
containerID="cri-o://b0a2c2bac24d49f1139383209407c61098dfcccf1627c8ba79ec2b65ec146335" gracePeriod=30 Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:23.401465 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="6c3ae457-67ab-4c0b-a17e-a8264215793b" containerName="nova-api-api" containerID="cri-o://11eb99c3ddd7ac1add778be8c07cea9de7ac571edec1bae0cd0e936ceb624706" gracePeriod=30 Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:23.412239 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-db-create-hxh4h"] Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:23.437492 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-db-create-hxh4h"] Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:23.466558 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-94ab-account-create-dt6k2"] Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:23.501642 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-controller-ovs-n9v8v" podUID="c21366ac-a7da-4cac-8332-753659210595" containerName="ovs-vswitchd" containerID="cri-o://a0820261b299cc33db42946f12f1f4467f892f1598cff4f933ab45def65d8af9" gracePeriod=29 Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:23.502719 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:23.503068 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="8e597d4a-3ff4-4b9e-ab6a-c0364f6afb43" containerName="nova-metadata-log" containerID="cri-o://bf046462bb12cdebcb5455c61d3b2ad24f5e4aea74396128b7021ca13a57dedf" gracePeriod=30 Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:23.506403 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="8e597d4a-3ff4-4b9e-ab6a-c0364f6afb43" containerName="nova-metadata-metadata" containerID="cri-o://6f245a393065748303acdd84cc3863db9cf884ff2abd3737de04d10232960c78" gracePeriod=30 Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:23.513610 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-94ab-account-create-dt6k2"] Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:23.546562 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:23.547211 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-novncproxy-0" podUID="d3d96734-832d-489e-9fed-a4eb705f41d7" containerName="nova-cell1-novncproxy-novncproxy" containerID="cri-o://dc0413953c30be6f1a27357706432a328f29ebeedb2176f058654bf6f70fcf23" gracePeriod=30 Nov 23 15:04:23 crc kubenswrapper[5050]: E1123 15:04:23.595477 5050 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of c0069a68c89482704d61e8c58556300339031d7c4accb976799bb1d562fe9ac9 is running failed: container process not found" containerID="c0069a68c89482704d61e8c58556300339031d7c4accb976799bb1d562fe9ac9" cmd=["/usr/bin/pidof","ovsdb-server"] Nov 23 15:04:23 crc kubenswrapper[5050]: E1123 15:04:23.595698 5050 handlers.go:78] "Exec lifecycle hook for Container in Pod failed" err=< Nov 23 15:04:23 crc kubenswrapper[5050]: command '/usr/local/bin/container-scripts/stop-ovsdb-server.sh' 
exited with 137: ++ dirname /usr/local/bin/container-scripts/stop-ovsdb-server.sh Nov 23 15:04:23 crc kubenswrapper[5050]: + source /usr/local/bin/container-scripts/functions Nov 23 15:04:23 crc kubenswrapper[5050]: ++ OVNBridge=br-int Nov 23 15:04:23 crc kubenswrapper[5050]: ++ OVNRemote=tcp:localhost:6642 Nov 23 15:04:23 crc kubenswrapper[5050]: ++ OVNEncapType=geneve Nov 23 15:04:23 crc kubenswrapper[5050]: ++ OVNAvailabilityZones= Nov 23 15:04:23 crc kubenswrapper[5050]: ++ EnableChassisAsGateway=true Nov 23 15:04:23 crc kubenswrapper[5050]: ++ PhysicalNetworks= Nov 23 15:04:23 crc kubenswrapper[5050]: ++ OVNHostName= Nov 23 15:04:23 crc kubenswrapper[5050]: ++ DB_FILE=/etc/openvswitch/conf.db Nov 23 15:04:23 crc kubenswrapper[5050]: ++ ovs_dir=/var/lib/openvswitch Nov 23 15:04:23 crc kubenswrapper[5050]: ++ FLOWS_RESTORE_SCRIPT=/var/lib/openvswitch/flows-script Nov 23 15:04:23 crc kubenswrapper[5050]: ++ FLOWS_RESTORE_DIR=/var/lib/openvswitch/saved-flows Nov 23 15:04:23 crc kubenswrapper[5050]: ++ SAFE_TO_STOP_OVSDB_SERVER_SEMAPHORE=/var/lib/openvswitch/is_safe_to_stop_ovsdb_server Nov 23 15:04:23 crc kubenswrapper[5050]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']' Nov 23 15:04:23 crc kubenswrapper[5050]: + sleep 0.5 Nov 23 15:04:23 crc kubenswrapper[5050]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']' Nov 23 15:04:23 crc kubenswrapper[5050]: + sleep 0.5 Nov 23 15:04:23 crc kubenswrapper[5050]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']' Nov 23 15:04:23 crc kubenswrapper[5050]: + cleanup_ovsdb_server_semaphore Nov 23 15:04:23 crc kubenswrapper[5050]: + rm -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server Nov 23 15:04:23 crc kubenswrapper[5050]: + /usr/share/openvswitch/scripts/ovs-ctl stop --no-ovs-vswitchd Nov 23 15:04:23 crc kubenswrapper[5050]: > execCommand=["/usr/local/bin/container-scripts/stop-ovsdb-server.sh"] containerName="ovsdb-server" pod="openstack/ovn-controller-ovs-n9v8v" message=< Nov 23 15:04:23 crc kubenswrapper[5050]: Exiting ovsdb-server (5) [ OK ] Nov 23 15:04:23 crc kubenswrapper[5050]: ++ dirname /usr/local/bin/container-scripts/stop-ovsdb-server.sh Nov 23 15:04:23 crc kubenswrapper[5050]: + source /usr/local/bin/container-scripts/functions Nov 23 15:04:23 crc kubenswrapper[5050]: ++ OVNBridge=br-int Nov 23 15:04:23 crc kubenswrapper[5050]: ++ OVNRemote=tcp:localhost:6642 Nov 23 15:04:23 crc kubenswrapper[5050]: ++ OVNEncapType=geneve Nov 23 15:04:23 crc kubenswrapper[5050]: ++ OVNAvailabilityZones= Nov 23 15:04:23 crc kubenswrapper[5050]: ++ EnableChassisAsGateway=true Nov 23 15:04:23 crc kubenswrapper[5050]: ++ PhysicalNetworks= Nov 23 15:04:23 crc kubenswrapper[5050]: ++ OVNHostName= Nov 23 15:04:23 crc kubenswrapper[5050]: ++ DB_FILE=/etc/openvswitch/conf.db Nov 23 15:04:23 crc kubenswrapper[5050]: ++ ovs_dir=/var/lib/openvswitch Nov 23 15:04:23 crc kubenswrapper[5050]: ++ FLOWS_RESTORE_SCRIPT=/var/lib/openvswitch/flows-script Nov 23 15:04:23 crc kubenswrapper[5050]: ++ FLOWS_RESTORE_DIR=/var/lib/openvswitch/saved-flows Nov 23 15:04:23 crc kubenswrapper[5050]: ++ SAFE_TO_STOP_OVSDB_SERVER_SEMAPHORE=/var/lib/openvswitch/is_safe_to_stop_ovsdb_server Nov 23 15:04:23 crc kubenswrapper[5050]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']' Nov 23 15:04:23 crc kubenswrapper[5050]: + sleep 0.5 Nov 23 15:04:23 crc kubenswrapper[5050]: + '[' '!' 
-f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']' Nov 23 15:04:23 crc kubenswrapper[5050]: + sleep 0.5 Nov 23 15:04:23 crc kubenswrapper[5050]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']' Nov 23 15:04:23 crc kubenswrapper[5050]: + cleanup_ovsdb_server_semaphore Nov 23 15:04:23 crc kubenswrapper[5050]: + rm -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server Nov 23 15:04:23 crc kubenswrapper[5050]: + /usr/share/openvswitch/scripts/ovs-ctl stop --no-ovs-vswitchd Nov 23 15:04:23 crc kubenswrapper[5050]: > Nov 23 15:04:23 crc kubenswrapper[5050]: E1123 15:04:23.595729 5050 kuberuntime_container.go:691] "PreStop hook failed" err=< Nov 23 15:04:23 crc kubenswrapper[5050]: command '/usr/local/bin/container-scripts/stop-ovsdb-server.sh' exited with 137: ++ dirname /usr/local/bin/container-scripts/stop-ovsdb-server.sh Nov 23 15:04:23 crc kubenswrapper[5050]: + source /usr/local/bin/container-scripts/functions Nov 23 15:04:23 crc kubenswrapper[5050]: ++ OVNBridge=br-int Nov 23 15:04:23 crc kubenswrapper[5050]: ++ OVNRemote=tcp:localhost:6642 Nov 23 15:04:23 crc kubenswrapper[5050]: ++ OVNEncapType=geneve Nov 23 15:04:23 crc kubenswrapper[5050]: ++ OVNAvailabilityZones= Nov 23 15:04:23 crc kubenswrapper[5050]: ++ EnableChassisAsGateway=true Nov 23 15:04:23 crc kubenswrapper[5050]: ++ PhysicalNetworks= Nov 23 15:04:23 crc kubenswrapper[5050]: ++ OVNHostName= Nov 23 15:04:23 crc kubenswrapper[5050]: ++ DB_FILE=/etc/openvswitch/conf.db Nov 23 15:04:23 crc kubenswrapper[5050]: ++ ovs_dir=/var/lib/openvswitch Nov 23 15:04:23 crc kubenswrapper[5050]: ++ FLOWS_RESTORE_SCRIPT=/var/lib/openvswitch/flows-script Nov 23 15:04:23 crc kubenswrapper[5050]: ++ FLOWS_RESTORE_DIR=/var/lib/openvswitch/saved-flows Nov 23 15:04:23 crc kubenswrapper[5050]: ++ SAFE_TO_STOP_OVSDB_SERVER_SEMAPHORE=/var/lib/openvswitch/is_safe_to_stop_ovsdb_server Nov 23 15:04:23 crc kubenswrapper[5050]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']' Nov 23 15:04:23 crc kubenswrapper[5050]: + sleep 0.5 Nov 23 15:04:23 crc kubenswrapper[5050]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']' Nov 23 15:04:23 crc kubenswrapper[5050]: + sleep 0.5 Nov 23 15:04:23 crc kubenswrapper[5050]: + '[' '!' 
-f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']' Nov 23 15:04:23 crc kubenswrapper[5050]: + cleanup_ovsdb_server_semaphore Nov 23 15:04:23 crc kubenswrapper[5050]: + rm -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server Nov 23 15:04:23 crc kubenswrapper[5050]: + /usr/share/openvswitch/scripts/ovs-ctl stop --no-ovs-vswitchd Nov 23 15:04:23 crc kubenswrapper[5050]: > pod="openstack/ovn-controller-ovs-n9v8v" podUID="c21366ac-a7da-4cac-8332-753659210595" containerName="ovsdb-server" containerID="cri-o://1b3d79783367fa4b7fae45d92ec2a13146512723daef532131bce7af8af900e8" Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:23.595767 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-controller-ovs-n9v8v" podUID="c21366ac-a7da-4cac-8332-753659210595" containerName="ovsdb-server" containerID="cri-o://1b3d79783367fa4b7fae45d92ec2a13146512723daef532131bce7af8af900e8" gracePeriod=29 Nov 23 15:04:23 crc kubenswrapper[5050]: E1123 15:04:23.602916 5050 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of c0069a68c89482704d61e8c58556300339031d7c4accb976799bb1d562fe9ac9 is running failed: container process not found" containerID="c0069a68c89482704d61e8c58556300339031d7c4accb976799bb1d562fe9ac9" cmd=["/usr/bin/pidof","ovsdb-server"] Nov 23 15:04:23 crc kubenswrapper[5050]: E1123 15:04:23.604967 5050 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of c0069a68c89482704d61e8c58556300339031d7c4accb976799bb1d562fe9ac9 is running failed: container process not found" containerID="c0069a68c89482704d61e8c58556300339031d7c4accb976799bb1d562fe9ac9" cmd=["/usr/bin/pidof","ovsdb-server"] Nov 23 15:04:23 crc kubenswrapper[5050]: E1123 15:04:23.605053 5050 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of c0069a68c89482704d61e8c58556300339031d7c4accb976799bb1d562fe9ac9 is running failed: container process not found" probeType="Readiness" pod="openstack/ovsdbserver-sb-0" podUID="f81d4e06-4245-4355-89d6-ddc438c06f29" containerName="ovsdbserver-sb" Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:23.678975 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-server-0" podUID="5eff539e-c641-4873-aeae-450aaf0b4ac8" containerName="rabbitmq" containerID="cri-o://a9dbba1cde13781acfcbcff23b41231ce6338e634ff0c22cf2de26d62d3a3f34" gracePeriod=604800 Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:23.687829 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3291f014-f18b-4142-a770-d9c33b141d66" path="/var/lib/kubelet/pods/3291f014-f18b-4142-a770-d9c33b141d66/volumes" Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:23.689126 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="344e218a-8f67-40b6-90e6-6970ca14cda3" path="/var/lib/kubelet/pods/344e218a-8f67-40b6-90e6-6970ca14cda3/volumes" Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:23.689712 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49ce1b12-d28e-481e-ad05-68355446dd4c" path="/var/lib/kubelet/pods/49ce1b12-d28e-481e-ad05-68355446dd4c/volumes" Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:23.690459 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="93d8639a-cdc9-4e9f-a17f-883debe12333" 
path="/var/lib/kubelet/pods/93d8639a-cdc9-4e9f-a17f-883debe12333/volumes" Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:23.691593 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="948389bb-db2c-40c2-a458-b54cb0fa94ee" path="/var/lib/kubelet/pods/948389bb-db2c-40c2-a458-b54cb0fa94ee/volumes" Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:23.692632 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b347d677-8b98-4402-b7b8-88fe0b080ac0" path="/var/lib/kubelet/pods/b347d677-8b98-4402-b7b8-88fe0b080ac0/volumes" Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:23.694314 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cff4dbdd-ad13-4f74-beb1-9d9658c4b25e" path="/var/lib/kubelet/pods/cff4dbdd-ad13-4f74-beb1-9d9658c4b25e/volumes" Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:23.694925 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e8601165-154f-4167-a470-e06481c1944e" path="/var/lib/kubelet/pods/e8601165-154f-4167-a470-e06481c1944e/volumes" Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:23.695654 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f4e77f81-6a4b-4c66-8e27-891159e3da45" path="/var/lib/kubelet/pods/f4e77f81-6a4b-4c66-8e27-891159e3da45/volumes" Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:23.696833 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f53cab34-8b0f-45cf-8ef9-a524aa3578b4" path="/var/lib/kubelet/pods/f53cab34-8b0f-45cf-8ef9-a524aa3578b4/volumes" Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:23.697835 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:23.698000 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:23.698017 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-lwv2w"] Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:23.698030 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:23.698047 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-lwv2w"] Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:23.698063 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:23.698346 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell0-conductor-0" podUID="4d1fe4ad-9245-4af5-b378-c908ce72f08c" containerName="nova-cell0-conductor-conductor" containerID="cri-o://42a4a3972d0dfdbfa5a41878495e90a1790378398c3c410fbb952adc25a032b1" gracePeriod=30 Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:23.698813 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="634f01f5-daae-4e5d-811f-5d406bfee9c1" containerName="nova-scheduler-scheduler" containerID="cri-o://2649b8f823f897f77f83477412dd9f8246b7b8e23f72d10846e06d96f7b4bff1" gracePeriod=30 Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:23.699464 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-conductor-0" podUID="75e360ca-c84a-4806-a86a-86924a639cfc" containerName="nova-cell1-conductor-conductor" 
containerID="cri-o://dc18dcf10f3de24a9d22f69583ddef25bdb552099564e7a29e69ffc7a506a441" gracePeriod=30 Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:23.699586 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-dpm95"] Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:23.723406 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-dpm95"] Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:23.793064 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-cell1-server-0" podUID="f92353db-5352-4216-ad2d-50242e58dfb7" containerName="rabbitmq" containerID="cri-o://fbb98bc62f9119abace6a66eef302292172343ff03acadf76beb838abacc4705" gracePeriod=604800 Nov 23 15:04:23 crc kubenswrapper[5050]: E1123 15:04:23.934532 5050 configmap.go:193] Couldn't get configMap openstack/rabbitmq-cell1-config-data: configmap "rabbitmq-cell1-config-data" not found Nov 23 15:04:23 crc kubenswrapper[5050]: E1123 15:04:23.934688 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/f92353db-5352-4216-ad2d-50242e58dfb7-config-data podName:f92353db-5352-4216-ad2d-50242e58dfb7 nodeName:}" failed. No retries permitted until 2025-11-23 15:04:25.934672497 +0000 UTC m=+1361.101668982 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/f92353db-5352-4216-ad2d-50242e58dfb7-config-data") pod "rabbitmq-cell1-server-0" (UID: "f92353db-5352-4216-ad2d-50242e58dfb7") : configmap "rabbitmq-cell1-config-data" not found Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:23.939611 5050 generic.go:334] "Generic (PLEG): container finished" podID="6c5e9b39-91fa-4f4e-9d95-0599bc22472d" containerID="29d8b6a8f6bcb0d52d1fb31bd328b23184faf043db74c7e0325581a2ae92bc41" exitCode=143 Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:23.939934 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"6c5e9b39-91fa-4f4e-9d95-0599bc22472d","Type":"ContainerDied","Data":"29d8b6a8f6bcb0d52d1fb31bd328b23184faf043db74c7e0325581a2ae92bc41"} Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:23.965626 5050 generic.go:334] "Generic (PLEG): container finished" podID="add2a2e2-5553-4e43-8ddd-b8719949d531" containerID="e29903a818a042f29b3e4e52f5b9a3d3f2db969791fe5adce7bb96fbe4dca897" exitCode=143 Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:23.965754 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-748fb7ccf6-s9qnq" event={"ID":"add2a2e2-5553-4e43-8ddd-b8719949d531","Type":"ContainerDied","Data":"e29903a818a042f29b3e4e52f5b9a3d3f2db969791fe5adce7bb96fbe4dca897"} Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:23.980987 5050 generic.go:334] "Generic (PLEG): container finished" podID="e7a4774e-f304-40ca-8595-bbe9c381f466" containerID="063525e2dec5a92b0de43140afc955072b70de2dd16ff0f882b43cc83a5c5cb3" exitCode=0 Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:23.981126 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cd5cbd7b9-vptxh" event={"ID":"e7a4774e-f304-40ca-8595-bbe9c381f466","Type":"ContainerDied","Data":"063525e2dec5a92b0de43140afc955072b70de2dd16ff0f882b43cc83a5c5cb3"} Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:23.982943 5050 generic.go:334] "Generic (PLEG): container finished" podID="042c506a-b65f-4cbd-9ca7-3df0ec55ffa0" 
containerID="4cbde859414d5230c51f3a896d2c7f2eb514382d4930ae12ebe388430ccf7b95" exitCode=143 Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:23.982984 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-864fb4756c-xzxnp" event={"ID":"042c506a-b65f-4cbd-9ca7-3df0ec55ffa0","Type":"ContainerDied","Data":"4cbde859414d5230c51f3a896d2c7f2eb514382d4930ae12ebe388430ccf7b95"} Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:23.985893 5050 generic.go:334] "Generic (PLEG): container finished" podID="6c3ae457-67ab-4c0b-a17e-a8264215793b" containerID="b0a2c2bac24d49f1139383209407c61098dfcccf1627c8ba79ec2b65ec146335" exitCode=143 Nov 23 15:04:23 crc kubenswrapper[5050]: I1123 15:04:23.985934 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"6c3ae457-67ab-4c0b-a17e-a8264215793b","Type":"ContainerDied","Data":"b0a2c2bac24d49f1139383209407c61098dfcccf1627c8ba79ec2b65ec146335"} Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.004599 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_716a14cd-88b9-4e18-a781-6dbfaad7634c/ovsdbserver-nb/0.log" Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.004987 5050 generic.go:334] "Generic (PLEG): container finished" podID="716a14cd-88b9-4e18-a781-6dbfaad7634c" containerID="3249cf8628f145ad70288b99dd53e1eb58e6ee9953f4d2ed143ddefe3d8d6d1c" exitCode=143 Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.005124 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"716a14cd-88b9-4e18-a781-6dbfaad7634c","Type":"ContainerDied","Data":"3249cf8628f145ad70288b99dd53e1eb58e6ee9953f4d2ed143ddefe3d8d6d1c"} Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.005221 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"716a14cd-88b9-4e18-a781-6dbfaad7634c","Type":"ContainerDied","Data":"1eaf8c3d075287d38dfeeb9be67a8d31d35dfb63a94883c205131fdaa82c8b4d"} Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.005237 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1eaf8c3d075287d38dfeeb9be67a8d31d35dfb63a94883c205131fdaa82c8b4d" Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.010173 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/openstack-cell1-galera-0" podUID="85ca597a-bf71-43bb-b57c-f840b37f196f" containerName="galera" containerID="cri-o://838adec05740ac7159beefeb2323ec2c83ab99b474e2e4e94e47e803accce920" gracePeriod=30 Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.016338 5050 generic.go:334] "Generic (PLEG): container finished" podID="014d6c50-fe8c-4b39-bccd-531037f1ff10" containerID="af27c7343928c8d4c4bff7ed205f0bb16b5e7302d0aa9e3deefadfcb0edf736c" exitCode=137 Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.045832 5050 generic.go:334] "Generic (PLEG): container finished" podID="4947b94e-805e-457e-bb17-f7faea3b5fca" containerID="b22acf79827fd3ac0a34b514be2b00ed37d6b8a1dac44f85032fc97fb4bd5540" exitCode=0 Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.046065 5050 generic.go:334] "Generic (PLEG): container finished" podID="4947b94e-805e-457e-bb17-f7faea3b5fca" containerID="0e36c69aa375c539b26f58a19aaffeb696f806ace3d6046abb778218a238b336" exitCode=0 Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.046142 5050 generic.go:334] "Generic (PLEG): container finished" podID="4947b94e-805e-457e-bb17-f7faea3b5fca" 
containerID="e0f8af78c1a802c881b2c04585b63daab17211922673261ca7523f7d84b8e187" exitCode=0 Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.046202 5050 generic.go:334] "Generic (PLEG): container finished" podID="4947b94e-805e-457e-bb17-f7faea3b5fca" containerID="8dae616c0d8bc5a23b5cfb9e119b706796daaa7ba5419e533d3fb7176e7bf222" exitCode=0 Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.046266 5050 generic.go:334] "Generic (PLEG): container finished" podID="4947b94e-805e-457e-bb17-f7faea3b5fca" containerID="1fcdee9bec472e24e3ee2fa6e158b77914f69e25bb807a1be9ca536d728a1ddf" exitCode=0 Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.046318 5050 generic.go:334] "Generic (PLEG): container finished" podID="4947b94e-805e-457e-bb17-f7faea3b5fca" containerID="53cc065da29b9c9511ae76a606c6be3934715316c98087e1bceb4aedfb391ba5" exitCode=0 Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.046368 5050 generic.go:334] "Generic (PLEG): container finished" podID="4947b94e-805e-457e-bb17-f7faea3b5fca" containerID="f9d2cfcc4ee11e5c2631dbc7f3cfcfc91553670b329dfc79c70030ffc3518f25" exitCode=0 Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.046417 5050 generic.go:334] "Generic (PLEG): container finished" podID="4947b94e-805e-457e-bb17-f7faea3b5fca" containerID="9d6c10414c5bea42690cbd8da8cba7bb0cf669191242a30c4e88b58e7464d151" exitCode=0 Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.046493 5050 generic.go:334] "Generic (PLEG): container finished" podID="4947b94e-805e-457e-bb17-f7faea3b5fca" containerID="b4f43cb835f2029a79a141c233333ef0e2ea5ee6f6d05bd0c246b7323bb38310" exitCode=0 Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.046553 5050 generic.go:334] "Generic (PLEG): container finished" podID="4947b94e-805e-457e-bb17-f7faea3b5fca" containerID="f9e7a939140c48c7a80ae5b9b609b9b098e6c60e33ce09927c76177950647981" exitCode=0 Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.046612 5050 generic.go:334] "Generic (PLEG): container finished" podID="4947b94e-805e-457e-bb17-f7faea3b5fca" containerID="1cec0ebe1304d052bb25cc3f36043c71b0777d6f8cbf3b9d8a22f1e057b1fddb" exitCode=0 Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.046670 5050 generic.go:334] "Generic (PLEG): container finished" podID="4947b94e-805e-457e-bb17-f7faea3b5fca" containerID="9a3ab1117cae597494105d7c65afeef7161729ed4e56b6205f5b1b5b9c2c0980" exitCode=0 Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.046726 5050 generic.go:334] "Generic (PLEG): container finished" podID="4947b94e-805e-457e-bb17-f7faea3b5fca" containerID="4211198ac477c9fe45e3ef0bd5dffc47dd9911807782e0d46558842b2d2b48fd" exitCode=0 Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.046783 5050 generic.go:334] "Generic (PLEG): container finished" podID="4947b94e-805e-457e-bb17-f7faea3b5fca" containerID="0001c3500cc85fc0a925e13354d27829e719cc2b2c22f8ba291e2016b3e3b365" exitCode=0 Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.046885 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"4947b94e-805e-457e-bb17-f7faea3b5fca","Type":"ContainerDied","Data":"b22acf79827fd3ac0a34b514be2b00ed37d6b8a1dac44f85032fc97fb4bd5540"} Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.046979 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"4947b94e-805e-457e-bb17-f7faea3b5fca","Type":"ContainerDied","Data":"0e36c69aa375c539b26f58a19aaffeb696f806ace3d6046abb778218a238b336"} Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 
15:04:24.047046 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"4947b94e-805e-457e-bb17-f7faea3b5fca","Type":"ContainerDied","Data":"e0f8af78c1a802c881b2c04585b63daab17211922673261ca7523f7d84b8e187"} Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.047161 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"4947b94e-805e-457e-bb17-f7faea3b5fca","Type":"ContainerDied","Data":"8dae616c0d8bc5a23b5cfb9e119b706796daaa7ba5419e533d3fb7176e7bf222"} Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.047235 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"4947b94e-805e-457e-bb17-f7faea3b5fca","Type":"ContainerDied","Data":"1fcdee9bec472e24e3ee2fa6e158b77914f69e25bb807a1be9ca536d728a1ddf"} Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.047306 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"4947b94e-805e-457e-bb17-f7faea3b5fca","Type":"ContainerDied","Data":"53cc065da29b9c9511ae76a606c6be3934715316c98087e1bceb4aedfb391ba5"} Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.047401 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"4947b94e-805e-457e-bb17-f7faea3b5fca","Type":"ContainerDied","Data":"f9d2cfcc4ee11e5c2631dbc7f3cfcfc91553670b329dfc79c70030ffc3518f25"} Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.047479 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"4947b94e-805e-457e-bb17-f7faea3b5fca","Type":"ContainerDied","Data":"9d6c10414c5bea42690cbd8da8cba7bb0cf669191242a30c4e88b58e7464d151"} Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.047539 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"4947b94e-805e-457e-bb17-f7faea3b5fca","Type":"ContainerDied","Data":"b4f43cb835f2029a79a141c233333ef0e2ea5ee6f6d05bd0c246b7323bb38310"} Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.047614 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"4947b94e-805e-457e-bb17-f7faea3b5fca","Type":"ContainerDied","Data":"f9e7a939140c48c7a80ae5b9b609b9b098e6c60e33ce09927c76177950647981"} Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.047672 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"4947b94e-805e-457e-bb17-f7faea3b5fca","Type":"ContainerDied","Data":"1cec0ebe1304d052bb25cc3f36043c71b0777d6f8cbf3b9d8a22f1e057b1fddb"} Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.047739 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"4947b94e-805e-457e-bb17-f7faea3b5fca","Type":"ContainerDied","Data":"9a3ab1117cae597494105d7c65afeef7161729ed4e56b6205f5b1b5b9c2c0980"} Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.047809 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"4947b94e-805e-457e-bb17-f7faea3b5fca","Type":"ContainerDied","Data":"4211198ac477c9fe45e3ef0bd5dffc47dd9911807782e0d46558842b2d2b48fd"} Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.047873 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"4947b94e-805e-457e-bb17-f7faea3b5fca","Type":"ContainerDied","Data":"0001c3500cc85fc0a925e13354d27829e719cc2b2c22f8ba291e2016b3e3b365"} Nov 23 
15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.056311 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-22cnp_4a7ed50c-3990-423d-9fd6-1165da59c5c9/openstack-network-exporter/0.log" Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.056421 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-22cnp" event={"ID":"4a7ed50c-3990-423d-9fd6-1165da59c5c9","Type":"ContainerDied","Data":"429b35bcdb10b05c74108505605b277102d3dfbca26dcd7d7d00ef19354c7d9b"} Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.056488 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="429b35bcdb10b05c74108505605b277102d3dfbca26dcd7d7d00ef19354c7d9b" Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.061061 5050 generic.go:334] "Generic (PLEG): container finished" podID="1b3a1560-8762-47b9-8d1c-fe94eb46bec2" containerID="8b5d3ae6f2b8800aa1cfd0b5a953b873c14fdd59195485538c97159fa050f837" exitCode=0 Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.061121 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7cb7995f89-k8h9t" event={"ID":"1b3a1560-8762-47b9-8d1c-fe94eb46bec2","Type":"ContainerDied","Data":"8b5d3ae6f2b8800aa1cfd0b5a953b873c14fdd59195485538c97159fa050f837"} Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.064286 5050 generic.go:334] "Generic (PLEG): container finished" podID="4d3c8093-cbaa-4414-9f1d-c7ae7a2b9322" containerID="9f6dbbcaa59ff563c11d0cdfecbaa890da1c52e228d3d8a74b64182f665c6508" exitCode=143 Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.064518 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"4d3c8093-cbaa-4414-9f1d-c7ae7a2b9322","Type":"ContainerDied","Data":"9f6dbbcaa59ff563c11d0cdfecbaa890da1c52e228d3d8a74b64182f665c6508"} Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.077594 5050 generic.go:334] "Generic (PLEG): container finished" podID="8e597d4a-3ff4-4b9e-ab6a-c0364f6afb43" containerID="bf046462bb12cdebcb5455c61d3b2ad24f5e4aea74396128b7021ca13a57dedf" exitCode=143 Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.077672 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"8e597d4a-3ff4-4b9e-ab6a-c0364f6afb43","Type":"ContainerDied","Data":"bf046462bb12cdebcb5455c61d3b2ad24f5e4aea74396128b7021ca13a57dedf"} Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.082140 5050 generic.go:334] "Generic (PLEG): container finished" podID="f85034c8-6b79-42e1-849d-646a2ead1a93" containerID="ec06abb2a42fb30f410f9e5ebcf42f255dbea4392fc4eb791ab3e885ee8c6c15" exitCode=143 Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.082209 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-77dc855c68-z488p" event={"ID":"f85034c8-6b79-42e1-849d-646a2ead1a93","Type":"ContainerDied","Data":"ec06abb2a42fb30f410f9e5ebcf42f255dbea4392fc4eb791ab3e885ee8c6c15"} Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.092375 5050 generic.go:334] "Generic (PLEG): container finished" podID="c21366ac-a7da-4cac-8332-753659210595" containerID="1b3d79783367fa4b7fae45d92ec2a13146512723daef532131bce7af8af900e8" exitCode=0 Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.092566 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-n9v8v" 
event={"ID":"c21366ac-a7da-4cac-8332-753659210595","Type":"ContainerDied","Data":"1b3d79783367fa4b7fae45d92ec2a13146512723daef532131bce7af8af900e8"} Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.100741 5050 generic.go:334] "Generic (PLEG): container finished" podID="ba1bb9f6-5ca2-4986-9473-62d50d9bebf0" containerID="878b763faeb721f2171524629a6e423a6579824e5ba28a08437820030804f60f" exitCode=143 Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.100838 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"ba1bb9f6-5ca2-4986-9473-62d50d9bebf0","Type":"ContainerDied","Data":"878b763faeb721f2171524629a6e423a6579824e5ba28a08437820030804f60f"} Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.112818 5050 generic.go:334] "Generic (PLEG): container finished" podID="9d53c4b5-d0a1-4191-9239-8c7b6806f860" containerID="973cb826038668efa7e6c38aa0a38cd28308df3422e590a77aa204d04d4cf352" exitCode=143 Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.112895 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-586bfdfdd8-99vrd" event={"ID":"9d53c4b5-d0a1-4191-9239-8c7b6806f860","Type":"ContainerDied","Data":"973cb826038668efa7e6c38aa0a38cd28308df3422e590a77aa204d04d4cf352"} Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.168810 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican6ffa-account-delete-7wsfp"] Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.250329 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glancebb96-account-delete-qls8c" Nov 23 15:04:24 crc kubenswrapper[5050]: W1123 15:04:24.263928 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9ea4c6e2_e26b_454f_a6e1_b8fd7df8a931.slice/crio-da2d7fab422dd01a4c5dd295b8674310420b84ecfd81a0d1cdb28d3cbddda867 WatchSource:0}: Error finding container da2d7fab422dd01a4c5dd295b8674310420b84ecfd81a0d1cdb28d3cbddda867: Status 404 returned error can't find the container with id da2d7fab422dd01a4c5dd295b8674310420b84ecfd81a0d1cdb28d3cbddda867 Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.280485 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder255c-account-delete-4znlv"] Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.287324 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron45e2-account-delete-8sq6l"] Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.305082 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement734b-account-delete-zt8zp"] Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.403413 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/novacell03052-account-delete-kz7k7" Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.418437 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-proxy-6b854f87-drlhc"] Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.418555 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/novaapif81d-account-delete-xfdwn" Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.418813 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-proxy-6b854f87-drlhc" podUID="c1187c9d-5557-496e-be1d-8df301d6daa6" containerName="proxy-httpd" containerID="cri-o://15153a6ddd26bb52ffe675fc4511dfe431c750717734e0d53c6df64c01657ed6" gracePeriod=30 Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.418975 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-proxy-6b854f87-drlhc" podUID="c1187c9d-5557-496e-be1d-8df301d6daa6" containerName="proxy-server" containerID="cri-o://ddcc4b1b60ec1caba00533bfdcca9aabeb296bd155561a69a9313a277fd7f548" gracePeriod=30 Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.437122 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-22cnp_4a7ed50c-3990-423d-9fd6-1165da59c5c9/openstack-network-exporter/0.log" Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.437210 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-22cnp" Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.462527 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_716a14cd-88b9-4e18-a781-6dbfaad7634c/ovsdbserver-nb/0.log" Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.462632 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0" Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.520094 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.566340 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/716a14cd-88b9-4e18-a781-6dbfaad7634c-metrics-certs-tls-certs\") pod \"716a14cd-88b9-4e18-a781-6dbfaad7634c\" (UID: \"716a14cd-88b9-4e18-a781-6dbfaad7634c\") " Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.566586 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c6t6p\" (UniqueName: \"kubernetes.io/projected/716a14cd-88b9-4e18-a781-6dbfaad7634c-kube-api-access-c6t6p\") pod \"716a14cd-88b9-4e18-a781-6dbfaad7634c\" (UID: \"716a14cd-88b9-4e18-a781-6dbfaad7634c\") " Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.566759 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/4a7ed50c-3990-423d-9fd6-1165da59c5c9-ovs-rundir\") pod \"4a7ed50c-3990-423d-9fd6-1165da59c5c9\" (UID: \"4a7ed50c-3990-423d-9fd6-1165da59c5c9\") " Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.566908 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/4a7ed50c-3990-423d-9fd6-1165da59c5c9-ovn-rundir\") pod \"4a7ed50c-3990-423d-9fd6-1165da59c5c9\" (UID: \"4a7ed50c-3990-423d-9fd6-1165da59c5c9\") " Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.567054 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4a7ed50c-3990-423d-9fd6-1165da59c5c9-combined-ca-bundle\") pod \"4a7ed50c-3990-423d-9fd6-1165da59c5c9\" (UID: 
\"4a7ed50c-3990-423d-9fd6-1165da59c5c9\") " Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.567264 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/716a14cd-88b9-4e18-a781-6dbfaad7634c-combined-ca-bundle\") pod \"716a14cd-88b9-4e18-a781-6dbfaad7634c\" (UID: \"716a14cd-88b9-4e18-a781-6dbfaad7634c\") " Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.567433 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4a7ed50c-3990-423d-9fd6-1165da59c5c9-config\") pod \"4a7ed50c-3990-423d-9fd6-1165da59c5c9\" (UID: \"4a7ed50c-3990-423d-9fd6-1165da59c5c9\") " Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.567615 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/716a14cd-88b9-4e18-a781-6dbfaad7634c-config\") pod \"716a14cd-88b9-4e18-a781-6dbfaad7634c\" (UID: \"716a14cd-88b9-4e18-a781-6dbfaad7634c\") " Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.567798 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndbcluster-nb-etc-ovn\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"716a14cd-88b9-4e18-a781-6dbfaad7634c\" (UID: \"716a14cd-88b9-4e18-a781-6dbfaad7634c\") " Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.567966 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/716a14cd-88b9-4e18-a781-6dbfaad7634c-scripts\") pod \"716a14cd-88b9-4e18-a781-6dbfaad7634c\" (UID: \"716a14cd-88b9-4e18-a781-6dbfaad7634c\") " Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.568101 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/716a14cd-88b9-4e18-a781-6dbfaad7634c-ovsdb-rundir\") pod \"716a14cd-88b9-4e18-a781-6dbfaad7634c\" (UID: \"716a14cd-88b9-4e18-a781-6dbfaad7634c\") " Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.568187 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/4a7ed50c-3990-423d-9fd6-1165da59c5c9-metrics-certs-tls-certs\") pod \"4a7ed50c-3990-423d-9fd6-1165da59c5c9\" (UID: \"4a7ed50c-3990-423d-9fd6-1165da59c5c9\") " Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.568304 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/716a14cd-88b9-4e18-a781-6dbfaad7634c-ovsdbserver-nb-tls-certs\") pod \"716a14cd-88b9-4e18-a781-6dbfaad7634c\" (UID: \"716a14cd-88b9-4e18-a781-6dbfaad7634c\") " Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.568402 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-np29s\" (UniqueName: \"kubernetes.io/projected/4a7ed50c-3990-423d-9fd6-1165da59c5c9-kube-api-access-np29s\") pod \"4a7ed50c-3990-423d-9fd6-1165da59c5c9\" (UID: \"4a7ed50c-3990-423d-9fd6-1165da59c5c9\") " Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.570614 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/4a7ed50c-3990-423d-9fd6-1165da59c5c9-ovn-rundir" (OuterVolumeSpecName: "ovn-rundir") pod "4a7ed50c-3990-423d-9fd6-1165da59c5c9" (UID: "4a7ed50c-3990-423d-9fd6-1165da59c5c9"). 
InnerVolumeSpecName "ovn-rundir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.570703 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/4a7ed50c-3990-423d-9fd6-1165da59c5c9-ovs-rundir" (OuterVolumeSpecName: "ovs-rundir") pod "4a7ed50c-3990-423d-9fd6-1165da59c5c9" (UID: "4a7ed50c-3990-423d-9fd6-1165da59c5c9"). InnerVolumeSpecName "ovs-rundir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.570978 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4a7ed50c-3990-423d-9fd6-1165da59c5c9-config" (OuterVolumeSpecName: "config") pod "4a7ed50c-3990-423d-9fd6-1165da59c5c9" (UID: "4a7ed50c-3990-423d-9fd6-1165da59c5c9"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.571325 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/716a14cd-88b9-4e18-a781-6dbfaad7634c-config" (OuterVolumeSpecName: "config") pod "716a14cd-88b9-4e18-a781-6dbfaad7634c" (UID: "716a14cd-88b9-4e18-a781-6dbfaad7634c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.575125 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/716a14cd-88b9-4e18-a781-6dbfaad7634c-scripts" (OuterVolumeSpecName: "scripts") pod "716a14cd-88b9-4e18-a781-6dbfaad7634c" (UID: "716a14cd-88b9-4e18-a781-6dbfaad7634c"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.575229 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/716a14cd-88b9-4e18-a781-6dbfaad7634c-ovsdb-rundir" (OuterVolumeSpecName: "ovsdb-rundir") pod "716a14cd-88b9-4e18-a781-6dbfaad7634c" (UID: "716a14cd-88b9-4e18-a781-6dbfaad7634c"). InnerVolumeSpecName "ovsdb-rundir". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.585489 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4a7ed50c-3990-423d-9fd6-1165da59c5c9-kube-api-access-np29s" (OuterVolumeSpecName: "kube-api-access-np29s") pod "4a7ed50c-3990-423d-9fd6-1165da59c5c9" (UID: "4a7ed50c-3990-423d-9fd6-1165da59c5c9"). InnerVolumeSpecName "kube-api-access-np29s". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.593052 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage05-crc" (OuterVolumeSpecName: "ovndbcluster-nb-etc-ovn") pod "716a14cd-88b9-4e18-a781-6dbfaad7634c" (UID: "716a14cd-88b9-4e18-a781-6dbfaad7634c"). InnerVolumeSpecName "local-storage05-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.607137 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/716a14cd-88b9-4e18-a781-6dbfaad7634c-kube-api-access-c6t6p" (OuterVolumeSpecName: "kube-api-access-c6t6p") pod "716a14cd-88b9-4e18-a781-6dbfaad7634c" (UID: "716a14cd-88b9-4e18-a781-6dbfaad7634c"). InnerVolumeSpecName "kube-api-access-c6t6p". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.610160 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-cd5cbd7b9-vptxh" Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.648840 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_f81d4e06-4245-4355-89d6-ddc438c06f29/ovsdbserver-sb/0.log" Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.650132 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0" Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.656879 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/716a14cd-88b9-4e18-a781-6dbfaad7634c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "716a14cd-88b9-4e18-a781-6dbfaad7634c" (UID: "716a14cd-88b9-4e18-a781-6dbfaad7634c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.671631 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/014d6c50-fe8c-4b39-bccd-531037f1ff10-combined-ca-bundle\") pod \"014d6c50-fe8c-4b39-bccd-531037f1ff10\" (UID: \"014d6c50-fe8c-4b39-bccd-531037f1ff10\") " Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.671690 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/014d6c50-fe8c-4b39-bccd-531037f1ff10-openstack-config-secret\") pod \"014d6c50-fe8c-4b39-bccd-531037f1ff10\" (UID: \"014d6c50-fe8c-4b39-bccd-531037f1ff10\") " Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.671852 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-srl5p\" (UniqueName: \"kubernetes.io/projected/014d6c50-fe8c-4b39-bccd-531037f1ff10-kube-api-access-srl5p\") pod \"014d6c50-fe8c-4b39-bccd-531037f1ff10\" (UID: \"014d6c50-fe8c-4b39-bccd-531037f1ff10\") " Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.672000 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/014d6c50-fe8c-4b39-bccd-531037f1ff10-openstack-config\") pod \"014d6c50-fe8c-4b39-bccd-531037f1ff10\" (UID: \"014d6c50-fe8c-4b39-bccd-531037f1ff10\") " Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.678781 5050 reconciler_common.go:293] "Volume detached for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/4a7ed50c-3990-423d-9fd6-1165da59c5c9-ovs-rundir\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.678820 5050 reconciler_common.go:293] "Volume detached for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/4a7ed50c-3990-423d-9fd6-1165da59c5c9-ovn-rundir\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.678831 5050 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/716a14cd-88b9-4e18-a781-6dbfaad7634c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.678843 5050 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4a7ed50c-3990-423d-9fd6-1165da59c5c9-config\") on 
node \"crc\" DevicePath \"\"" Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.678854 5050 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/716a14cd-88b9-4e18-a781-6dbfaad7634c-config\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.678881 5050 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") on node \"crc\" " Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.678892 5050 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/716a14cd-88b9-4e18-a781-6dbfaad7634c-scripts\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.678901 5050 reconciler_common.go:293] "Volume detached for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/716a14cd-88b9-4e18-a781-6dbfaad7634c-ovsdb-rundir\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.678913 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-np29s\" (UniqueName: \"kubernetes.io/projected/4a7ed50c-3990-423d-9fd6-1165da59c5c9-kube-api-access-np29s\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.678923 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c6t6p\" (UniqueName: \"kubernetes.io/projected/716a14cd-88b9-4e18-a781-6dbfaad7634c-kube-api-access-c6t6p\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.728259 5050 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage05-crc" (UniqueName: "kubernetes.io/local-volume/local-storage05-crc") on node "crc" Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.747383 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/014d6c50-fe8c-4b39-bccd-531037f1ff10-kube-api-access-srl5p" (OuterVolumeSpecName: "kube-api-access-srl5p") pod "014d6c50-fe8c-4b39-bccd-531037f1ff10" (UID: "014d6c50-fe8c-4b39-bccd-531037f1ff10"). InnerVolumeSpecName "kube-api-access-srl5p". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.799329 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/014d6c50-fe8c-4b39-bccd-531037f1ff10-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "014d6c50-fe8c-4b39-bccd-531037f1ff10" (UID: "014d6c50-fe8c-4b39-bccd-531037f1ff10"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.799590 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4a7ed50c-3990-423d-9fd6-1165da59c5c9-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4a7ed50c-3990-423d-9fd6-1165da59c5c9" (UID: "4a7ed50c-3990-423d-9fd6-1165da59c5c9"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.800400 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e7a4774e-f304-40ca-8595-bbe9c381f466-dns-swift-storage-0\") pod \"e7a4774e-f304-40ca-8595-bbe9c381f466\" (UID: \"e7a4774e-f304-40ca-8595-bbe9c381f466\") " Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.800530 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6n2xs\" (UniqueName: \"kubernetes.io/projected/f81d4e06-4245-4355-89d6-ddc438c06f29-kube-api-access-6n2xs\") pod \"f81d4e06-4245-4355-89d6-ddc438c06f29\" (UID: \"f81d4e06-4245-4355-89d6-ddc438c06f29\") " Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.800594 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e7a4774e-f304-40ca-8595-bbe9c381f466-ovsdbserver-nb\") pod \"e7a4774e-f304-40ca-8595-bbe9c381f466\" (UID: \"e7a4774e-f304-40ca-8595-bbe9c381f466\") " Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.825002 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/014d6c50-fe8c-4b39-bccd-531037f1ff10-openstack-config" (OuterVolumeSpecName: "openstack-config") pod "014d6c50-fe8c-4b39-bccd-531037f1ff10" (UID: "014d6c50-fe8c-4b39-bccd-531037f1ff10"). InnerVolumeSpecName "openstack-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.831274 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f81d4e06-4245-4355-89d6-ddc438c06f29-kube-api-access-6n2xs" (OuterVolumeSpecName: "kube-api-access-6n2xs") pod "f81d4e06-4245-4355-89d6-ddc438c06f29" (UID: "f81d4e06-4245-4355-89d6-ddc438c06f29"). InnerVolumeSpecName "kube-api-access-6n2xs". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.834762 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f81d4e06-4245-4355-89d6-ddc438c06f29-scripts\") pod \"f81d4e06-4245-4355-89d6-ddc438c06f29\" (UID: \"f81d4e06-4245-4355-89d6-ddc438c06f29\") " Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.834816 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e7a4774e-f304-40ca-8595-bbe9c381f466-dns-svc\") pod \"e7a4774e-f304-40ca-8595-bbe9c381f466\" (UID: \"e7a4774e-f304-40ca-8595-bbe9c381f466\") " Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.834842 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f81d4e06-4245-4355-89d6-ddc438c06f29-combined-ca-bundle\") pod \"f81d4e06-4245-4355-89d6-ddc438c06f29\" (UID: \"f81d4e06-4245-4355-89d6-ddc438c06f29\") " Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.834891 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e7a4774e-f304-40ca-8595-bbe9c381f466-ovsdbserver-sb\") pod \"e7a4774e-f304-40ca-8595-bbe9c381f466\" (UID: \"e7a4774e-f304-40ca-8595-bbe9c381f466\") " Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.842809 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f81d4e06-4245-4355-89d6-ddc438c06f29-scripts" (OuterVolumeSpecName: "scripts") pod "f81d4e06-4245-4355-89d6-ddc438c06f29" (UID: "f81d4e06-4245-4355-89d6-ddc438c06f29"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.843619 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndbcluster-sb-etc-ovn\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"f81d4e06-4245-4355-89d6-ddc438c06f29\" (UID: \"f81d4e06-4245-4355-89d6-ddc438c06f29\") " Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.843686 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/f81d4e06-4245-4355-89d6-ddc438c06f29-metrics-certs-tls-certs\") pod \"f81d4e06-4245-4355-89d6-ddc438c06f29\" (UID: \"f81d4e06-4245-4355-89d6-ddc438c06f29\") " Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.843725 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/f81d4e06-4245-4355-89d6-ddc438c06f29-ovsdb-rundir\") pod \"f81d4e06-4245-4355-89d6-ddc438c06f29\" (UID: \"f81d4e06-4245-4355-89d6-ddc438c06f29\") " Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.843840 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nxqvj\" (UniqueName: \"kubernetes.io/projected/e7a4774e-f304-40ca-8595-bbe9c381f466-kube-api-access-nxqvj\") pod \"e7a4774e-f304-40ca-8595-bbe9c381f466\" (UID: \"e7a4774e-f304-40ca-8595-bbe9c381f466\") " Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.843909 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/f81d4e06-4245-4355-89d6-ddc438c06f29-ovsdbserver-sb-tls-certs\") pod \"f81d4e06-4245-4355-89d6-ddc438c06f29\" (UID: \"f81d4e06-4245-4355-89d6-ddc438c06f29\") " Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.843957 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4a7ed50c-3990-423d-9fd6-1165da59c5c9-combined-ca-bundle\") pod \"4a7ed50c-3990-423d-9fd6-1165da59c5c9\" (UID: \"4a7ed50c-3990-423d-9fd6-1165da59c5c9\") " Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.843993 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f81d4e06-4245-4355-89d6-ddc438c06f29-config\") pod \"f81d4e06-4245-4355-89d6-ddc438c06f29\" (UID: \"f81d4e06-4245-4355-89d6-ddc438c06f29\") " Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.844049 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/014d6c50-fe8c-4b39-bccd-531037f1ff10-openstack-config\") pod \"014d6c50-fe8c-4b39-bccd-531037f1ff10\" (UID: \"014d6c50-fe8c-4b39-bccd-531037f1ff10\") " Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.844116 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7a4774e-f304-40ca-8595-bbe9c381f466-config\") pod \"e7a4774e-f304-40ca-8595-bbe9c381f466\" (UID: \"e7a4774e-f304-40ca-8595-bbe9c381f466\") " Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.845326 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6n2xs\" (UniqueName: \"kubernetes.io/projected/f81d4e06-4245-4355-89d6-ddc438c06f29-kube-api-access-6n2xs\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:24 crc 
kubenswrapper[5050]: I1123 15:04:24.845363 5050 reconciler_common.go:293] "Volume detached for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.845380 5050 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/014d6c50-fe8c-4b39-bccd-531037f1ff10-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.845394 5050 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f81d4e06-4245-4355-89d6-ddc438c06f29-scripts\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.845408 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-srl5p\" (UniqueName: \"kubernetes.io/projected/014d6c50-fe8c-4b39-bccd-531037f1ff10-kube-api-access-srl5p\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:24 crc kubenswrapper[5050]: W1123 15:04:24.853885 5050 empty_dir.go:500] Warning: Unmount skipped because path does not exist: /var/lib/kubelet/pods/014d6c50-fe8c-4b39-bccd-531037f1ff10/volumes/kubernetes.io~configmap/openstack-config Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.853944 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/014d6c50-fe8c-4b39-bccd-531037f1ff10-openstack-config" (OuterVolumeSpecName: "openstack-config") pod "014d6c50-fe8c-4b39-bccd-531037f1ff10" (UID: "014d6c50-fe8c-4b39-bccd-531037f1ff10"). InnerVolumeSpecName "openstack-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.858548 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f81d4e06-4245-4355-89d6-ddc438c06f29-ovsdb-rundir" (OuterVolumeSpecName: "ovsdb-rundir") pod "f81d4e06-4245-4355-89d6-ddc438c06f29" (UID: "f81d4e06-4245-4355-89d6-ddc438c06f29"). InnerVolumeSpecName "ovsdb-rundir". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 15:04:24 crc kubenswrapper[5050]: W1123 15:04:24.858654 5050 empty_dir.go:500] Warning: Unmount skipped because path does not exist: /var/lib/kubelet/pods/4a7ed50c-3990-423d-9fd6-1165da59c5c9/volumes/kubernetes.io~secret/combined-ca-bundle Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.858662 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4a7ed50c-3990-423d-9fd6-1165da59c5c9-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4a7ed50c-3990-423d-9fd6-1165da59c5c9" (UID: "4a7ed50c-3990-423d-9fd6-1165da59c5c9"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.862728 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f81d4e06-4245-4355-89d6-ddc438c06f29-config" (OuterVolumeSpecName: "config") pod "f81d4e06-4245-4355-89d6-ddc438c06f29" (UID: "f81d4e06-4245-4355-89d6-ddc438c06f29"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.877863 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage02-crc" (OuterVolumeSpecName: "ovndbcluster-sb-etc-ovn") pod "f81d4e06-4245-4355-89d6-ddc438c06f29" (UID: "f81d4e06-4245-4355-89d6-ddc438c06f29"). InnerVolumeSpecName "local-storage02-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.881854 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e7a4774e-f304-40ca-8595-bbe9c381f466-kube-api-access-nxqvj" (OuterVolumeSpecName: "kube-api-access-nxqvj") pod "e7a4774e-f304-40ca-8595-bbe9c381f466" (UID: "e7a4774e-f304-40ca-8595-bbe9c381f466"). InnerVolumeSpecName "kube-api-access-nxqvj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.950632 5050 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" " Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.951107 5050 reconciler_common.go:293] "Volume detached for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/f81d4e06-4245-4355-89d6-ddc438c06f29-ovsdb-rundir\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.951119 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nxqvj\" (UniqueName: \"kubernetes.io/projected/e7a4774e-f304-40ca-8595-bbe9c381f466-kube-api-access-nxqvj\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.951130 5050 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4a7ed50c-3990-423d-9fd6-1165da59c5c9-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.951139 5050 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f81d4e06-4245-4355-89d6-ddc438c06f29-config\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.951148 5050 reconciler_common.go:293] "Volume detached for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/014d6c50-fe8c-4b39-bccd-531037f1ff10-openstack-config\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:24 crc kubenswrapper[5050]: E1123 15:04:24.951618 5050 configmap.go:193] Couldn't get configMap openstack/rabbitmq-config-data: configmap "rabbitmq-config-data" not found Nov 23 15:04:24 crc kubenswrapper[5050]: E1123 15:04:24.951715 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5eff539e-c641-4873-aeae-450aaf0b4ac8-config-data podName:5eff539e-c641-4873-aeae-450aaf0b4ac8 nodeName:}" failed. No retries permitted until 2025-11-23 15:04:28.951686161 +0000 UTC m=+1364.118682646 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/5eff539e-c641-4873-aeae-450aaf0b4ac8-config-data") pod "rabbitmq-server-0" (UID: "5eff539e-c641-4873-aeae-450aaf0b4ac8") : configmap "rabbitmq-config-data" not found Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.991759 5050 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage02-crc" (UniqueName: "kubernetes.io/local-volume/local-storage02-crc") on node "crc" Nov 23 15:04:24 crc kubenswrapper[5050]: I1123 15:04:24.991876 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4a7ed50c-3990-423d-9fd6-1165da59c5c9-metrics-certs-tls-certs" (OuterVolumeSpecName: "metrics-certs-tls-certs") pod "4a7ed50c-3990-423d-9fd6-1165da59c5c9" (UID: "4a7ed50c-3990-423d-9fd6-1165da59c5c9"). InnerVolumeSpecName "metrics-certs-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:04:25 crc kubenswrapper[5050]: I1123 15:04:25.054771 5050 reconciler_common.go:293] "Volume detached for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/4a7ed50c-3990-423d-9fd6-1165da59c5c9-metrics-certs-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:25 crc kubenswrapper[5050]: I1123 15:04:25.054823 5050 reconciler_common.go:293] "Volume detached for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:25 crc kubenswrapper[5050]: I1123 15:04:25.088250 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e7a4774e-f304-40ca-8595-bbe9c381f466-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "e7a4774e-f304-40ca-8595-bbe9c381f466" (UID: "e7a4774e-f304-40ca-8595-bbe9c381f466"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 15:04:25 crc kubenswrapper[5050]: I1123 15:04:25.097741 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/014d6c50-fe8c-4b39-bccd-531037f1ff10-openstack-config-secret" (OuterVolumeSpecName: "openstack-config-secret") pod "014d6c50-fe8c-4b39-bccd-531037f1ff10" (UID: "014d6c50-fe8c-4b39-bccd-531037f1ff10"). InnerVolumeSpecName "openstack-config-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:04:25 crc kubenswrapper[5050]: I1123 15:04:25.101202 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e7a4774e-f304-40ca-8595-bbe9c381f466-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "e7a4774e-f304-40ca-8595-bbe9c381f466" (UID: "e7a4774e-f304-40ca-8595-bbe9c381f466"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 15:04:25 crc kubenswrapper[5050]: I1123 15:04:25.135334 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/716a14cd-88b9-4e18-a781-6dbfaad7634c-metrics-certs-tls-certs" (OuterVolumeSpecName: "metrics-certs-tls-certs") pod "716a14cd-88b9-4e18-a781-6dbfaad7634c" (UID: "716a14cd-88b9-4e18-a781-6dbfaad7634c"). InnerVolumeSpecName "metrics-certs-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:04:25 crc kubenswrapper[5050]: I1123 15:04:25.137025 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f81d4e06-4245-4355-89d6-ddc438c06f29-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f81d4e06-4245-4355-89d6-ddc438c06f29" (UID: "f81d4e06-4245-4355-89d6-ddc438c06f29"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:04:25 crc kubenswrapper[5050]: I1123 15:04:25.139114 5050 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-cell1-novncproxy-0" podUID="d3d96734-832d-489e-9fed-a4eb705f41d7" containerName="nova-cell1-novncproxy-novncproxy" probeResult="failure" output="Get \"https://10.217.0.197:6080/vnc_lite.html\": dial tcp 10.217.0.197:6080: connect: connection refused" Nov 23 15:04:25 crc kubenswrapper[5050]: I1123 15:04:25.143490 5050 generic.go:334] "Generic (PLEG): container finished" podID="d3d96734-832d-489e-9fed-a4eb705f41d7" containerID="dc0413953c30be6f1a27357706432a328f29ebeedb2176f058654bf6f70fcf23" exitCode=0 Nov 23 15:04:25 crc kubenswrapper[5050]: I1123 15:04:25.143571 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"d3d96734-832d-489e-9fed-a4eb705f41d7","Type":"ContainerDied","Data":"dc0413953c30be6f1a27357706432a328f29ebeedb2176f058654bf6f70fcf23"} Nov 23 15:04:25 crc kubenswrapper[5050]: I1123 15:04:25.146096 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Nov 23 15:04:25 crc kubenswrapper[5050]: I1123 15:04:25.150778 5050 scope.go:117] "RemoveContainer" containerID="af27c7343928c8d4c4bff7ed205f0bb16b5e7302d0aa9e3deefadfcb0edf736c" Nov 23 15:04:25 crc kubenswrapper[5050]: I1123 15:04:25.160026 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cd5cbd7b9-vptxh" event={"ID":"e7a4774e-f304-40ca-8595-bbe9c381f466","Type":"ContainerDied","Data":"7ed36c96e2576c13dab1973b1bc4f2e61749170e1aa055bd02a656954ba7520c"} Nov 23 15:04:25 crc kubenswrapper[5050]: I1123 15:04:25.160113 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-cd5cbd7b9-vptxh" Nov 23 15:04:25 crc kubenswrapper[5050]: I1123 15:04:25.165517 5050 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e7a4774e-f304-40ca-8595-bbe9c381f466-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:25 crc kubenswrapper[5050]: I1123 15:04:25.165572 5050 reconciler_common.go:293] "Volume detached for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/014d6c50-fe8c-4b39-bccd-531037f1ff10-openstack-config-secret\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:25 crc kubenswrapper[5050]: I1123 15:04:25.165589 5050 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f81d4e06-4245-4355-89d6-ddc438c06f29-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:25 crc kubenswrapper[5050]: I1123 15:04:25.165604 5050 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e7a4774e-f304-40ca-8595-bbe9c381f466-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:25 crc kubenswrapper[5050]: I1123 15:04:25.165616 5050 reconciler_common.go:293] "Volume detached for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/716a14cd-88b9-4e18-a781-6dbfaad7634c-metrics-certs-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:25 crc kubenswrapper[5050]: I1123 15:04:25.179888 5050 generic.go:334] "Generic (PLEG): container finished" podID="c1187c9d-5557-496e-be1d-8df301d6daa6" containerID="15153a6ddd26bb52ffe675fc4511dfe431c750717734e0d53c6df64c01657ed6" exitCode=0 Nov 23 15:04:25 crc kubenswrapper[5050]: I1123 15:04:25.180169 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-6b854f87-drlhc" event={"ID":"c1187c9d-5557-496e-be1d-8df301d6daa6","Type":"ContainerDied","Data":"15153a6ddd26bb52ffe675fc4511dfe431c750717734e0d53c6df64c01657ed6"} Nov 23 15:04:25 crc kubenswrapper[5050]: I1123 15:04:25.194809 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_f81d4e06-4245-4355-89d6-ddc438c06f29/ovsdbserver-sb/0.log" Nov 23 15:04:25 crc kubenswrapper[5050]: I1123 15:04:25.195021 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"f81d4e06-4245-4355-89d6-ddc438c06f29","Type":"ContainerDied","Data":"26dfadc19806d4c9b9a23efca729410607729d3d5613fa898d8137bbcd345102"} Nov 23 15:04:25 crc kubenswrapper[5050]: I1123 15:04:25.195120 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0" Nov 23 15:04:25 crc kubenswrapper[5050]: I1123 15:04:25.199973 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder255c-account-delete-4znlv" event={"ID":"e78eb875-17e5-431f-82b5-14a1190488b0","Type":"ContainerStarted","Data":"a376d0ead1564a8504ad61643e1d45fd22031515ddaddc6c2c82994faf9e51b4"} Nov 23 15:04:25 crc kubenswrapper[5050]: I1123 15:04:25.209103 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/716a14cd-88b9-4e18-a781-6dbfaad7634c-ovsdbserver-nb-tls-certs" (OuterVolumeSpecName: "ovsdbserver-nb-tls-certs") pod "716a14cd-88b9-4e18-a781-6dbfaad7634c" (UID: "716a14cd-88b9-4e18-a781-6dbfaad7634c"). InnerVolumeSpecName "ovsdbserver-nb-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:04:25 crc kubenswrapper[5050]: I1123 15:04:25.211042 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e7a4774e-f304-40ca-8595-bbe9c381f466-config" (OuterVolumeSpecName: "config") pod "e7a4774e-f304-40ca-8595-bbe9c381f466" (UID: "e7a4774e-f304-40ca-8595-bbe9c381f466"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 15:04:25 crc kubenswrapper[5050]: I1123 15:04:25.216210 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican6ffa-account-delete-7wsfp" event={"ID":"4b570632-90a0-4fcd-a067-9913b51ad711","Type":"ContainerStarted","Data":"5ab05a12fa19db392ca4e60409469274cc565dcc9c3b508b3ef8ad77d4e5f63b"} Nov 23 15:04:25 crc kubenswrapper[5050]: I1123 15:04:25.216277 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican6ffa-account-delete-7wsfp" event={"ID":"4b570632-90a0-4fcd-a067-9913b51ad711","Type":"ContainerStarted","Data":"b90b14193dc4172d78e6891a4096b09ad12467704d70cbc1bef0055816bb921e"} Nov 23 15:04:25 crc kubenswrapper[5050]: I1123 15:04:25.226880 5050 generic.go:334] "Generic (PLEG): container finished" podID="85ca597a-bf71-43bb-b57c-f840b37f196f" containerID="838adec05740ac7159beefeb2323ec2c83ab99b474e2e4e94e47e803accce920" exitCode=0 Nov 23 15:04:25 crc kubenswrapper[5050]: I1123 15:04:25.226958 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"85ca597a-bf71-43bb-b57c-f840b37f196f","Type":"ContainerDied","Data":"838adec05740ac7159beefeb2323ec2c83ab99b474e2e4e94e47e803accce920"} Nov 23 15:04:25 crc kubenswrapper[5050]: I1123 15:04:25.232827 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron45e2-account-delete-8sq6l" event={"ID":"9ea4c6e2-e26b-454f-a6e1-b8fd7df8a931","Type":"ContainerStarted","Data":"da2d7fab422dd01a4c5dd295b8674310420b84ecfd81a0d1cdb28d3cbddda867"} Nov 23 15:04:25 crc kubenswrapper[5050]: I1123 15:04:25.254064 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glancebb96-account-delete-qls8c"] Nov 23 15:04:25 crc kubenswrapper[5050]: I1123 15:04:25.254227 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement734b-account-delete-zt8zp" event={"ID":"3d3a2397-697f-4d1b-a5ed-0aaa1b202a12","Type":"ContainerStarted","Data":"bf90f8d814c7f6ab836037c9af40b1c9c08c6873fea4b44fead56814d8fa63e2"} Nov 23 15:04:25 crc kubenswrapper[5050]: I1123 15:04:25.254583 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican6ffa-account-delete-7wsfp" podStartSLOduration=4.254555069 podStartE2EDuration="4.254555069s" podCreationTimestamp="2025-11-23 15:04:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 15:04:25.247533102 +0000 UTC m=+1360.414529587" watchObservedRunningTime="2025-11-23 15:04:25.254555069 +0000 UTC m=+1360.421551564" Nov 23 15:04:25 crc kubenswrapper[5050]: I1123 15:04:25.260283 5050 generic.go:334] "Generic (PLEG): container finished" podID="9d53c4b5-d0a1-4191-9239-8c7b6806f860" containerID="561dbaa7317e13693d05c4f79f3b0d5138d3e140a5300d28891e26017d5bd81a" exitCode=0 Nov 23 15:04:25 crc kubenswrapper[5050]: I1123 15:04:25.260418 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-586bfdfdd8-99vrd" 
event={"ID":"9d53c4b5-d0a1-4191-9239-8c7b6806f860","Type":"ContainerDied","Data":"561dbaa7317e13693d05c4f79f3b0d5138d3e140a5300d28891e26017d5bd81a"} Nov 23 15:04:25 crc kubenswrapper[5050]: I1123 15:04:25.270056 5050 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/716a14cd-88b9-4e18-a781-6dbfaad7634c-ovsdbserver-nb-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:25 crc kubenswrapper[5050]: I1123 15:04:25.270091 5050 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7a4774e-f304-40ca-8595-bbe9c381f466-config\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:25 crc kubenswrapper[5050]: I1123 15:04:25.278647 5050 generic.go:334] "Generic (PLEG): container finished" podID="ba4a4a57-47b0-423d-8bb1-76953fb3a37b" containerID="ff0e97bc91a901d22909a988b2e36c290f89a2317abfa813abba814502aa0edc" exitCode=0 Nov 23 15:04:25 crc kubenswrapper[5050]: I1123 15:04:25.278766 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0" Nov 23 15:04:25 crc kubenswrapper[5050]: I1123 15:04:25.278756 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"ba4a4a57-47b0-423d-8bb1-76953fb3a37b","Type":"ContainerDied","Data":"ff0e97bc91a901d22909a988b2e36c290f89a2317abfa813abba814502aa0edc"} Nov 23 15:04:25 crc kubenswrapper[5050]: I1123 15:04:25.278832 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-22cnp" Nov 23 15:04:25 crc kubenswrapper[5050]: I1123 15:04:25.279381 5050 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/neutron-7cb7995f89-k8h9t" podUID="1b3a1560-8762-47b9-8d1c-fe94eb46bec2" containerName="neutron-httpd" probeResult="failure" output="Get \"https://10.217.0.154:9696/\": dial tcp 10.217.0.154:9696: connect: connection refused" Nov 23 15:04:25 crc kubenswrapper[5050]: I1123 15:04:25.314909 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-586bfdfdd8-99vrd" Nov 23 15:04:25 crc kubenswrapper[5050]: I1123 15:04:25.347592 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-metrics-22cnp"] Nov 23 15:04:25 crc kubenswrapper[5050]: I1123 15:04:25.368990 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e7a4774e-f304-40ca-8595-bbe9c381f466-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "e7a4774e-f304-40ca-8595-bbe9c381f466" (UID: "e7a4774e-f304-40ca-8595-bbe9c381f466"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 15:04:25 crc kubenswrapper[5050]: I1123 15:04:25.369908 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-metrics-22cnp"] Nov 23 15:04:25 crc kubenswrapper[5050]: I1123 15:04:25.372371 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9d53c4b5-d0a1-4191-9239-8c7b6806f860-config-data\") pod \"9d53c4b5-d0a1-4191-9239-8c7b6806f860\" (UID: \"9d53c4b5-d0a1-4191-9239-8c7b6806f860\") " Nov 23 15:04:25 crc kubenswrapper[5050]: I1123 15:04:25.372496 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9d53c4b5-d0a1-4191-9239-8c7b6806f860-config-data-custom\") pod \"9d53c4b5-d0a1-4191-9239-8c7b6806f860\" (UID: \"9d53c4b5-d0a1-4191-9239-8c7b6806f860\") " Nov 23 15:04:25 crc kubenswrapper[5050]: I1123 15:04:25.372703 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9d53c4b5-d0a1-4191-9239-8c7b6806f860-logs\") pod \"9d53c4b5-d0a1-4191-9239-8c7b6806f860\" (UID: \"9d53c4b5-d0a1-4191-9239-8c7b6806f860\") " Nov 23 15:04:25 crc kubenswrapper[5050]: I1123 15:04:25.372869 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d53c4b5-d0a1-4191-9239-8c7b6806f860-combined-ca-bundle\") pod \"9d53c4b5-d0a1-4191-9239-8c7b6806f860\" (UID: \"9d53c4b5-d0a1-4191-9239-8c7b6806f860\") " Nov 23 15:04:25 crc kubenswrapper[5050]: I1123 15:04:25.372963 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-87nhx\" (UniqueName: \"kubernetes.io/projected/9d53c4b5-d0a1-4191-9239-8c7b6806f860-kube-api-access-87nhx\") pod \"9d53c4b5-d0a1-4191-9239-8c7b6806f860\" (UID: \"9d53c4b5-d0a1-4191-9239-8c7b6806f860\") " Nov 23 15:04:25 crc kubenswrapper[5050]: I1123 15:04:25.377729 5050 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e7a4774e-f304-40ca-8595-bbe9c381f466-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:25 crc kubenswrapper[5050]: I1123 15:04:25.379030 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9d53c4b5-d0a1-4191-9239-8c7b6806f860-logs" (OuterVolumeSpecName: "logs") pod "9d53c4b5-d0a1-4191-9239-8c7b6806f860" (UID: "9d53c4b5-d0a1-4191-9239-8c7b6806f860"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 15:04:25 crc kubenswrapper[5050]: I1123 15:04:25.405720 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d53c4b5-d0a1-4191-9239-8c7b6806f860-kube-api-access-87nhx" (OuterVolumeSpecName: "kube-api-access-87nhx") pod "9d53c4b5-d0a1-4191-9239-8c7b6806f860" (UID: "9d53c4b5-d0a1-4191-9239-8c7b6806f860"). InnerVolumeSpecName "kube-api-access-87nhx". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:04:25 crc kubenswrapper[5050]: I1123 15:04:25.425188 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 23 15:04:25 crc kubenswrapper[5050]: I1123 15:04:25.432876 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 23 15:04:25 crc kubenswrapper[5050]: I1123 15:04:25.443898 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d53c4b5-d0a1-4191-9239-8c7b6806f860-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "9d53c4b5-d0a1-4191-9239-8c7b6806f860" (UID: "9d53c4b5-d0a1-4191-9239-8c7b6806f860"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:04:25 crc kubenswrapper[5050]: I1123 15:04:25.470130 5050 scope.go:117] "RemoveContainer" containerID="063525e2dec5a92b0de43140afc955072b70de2dd16ff0f882b43cc83a5c5cb3" Nov 23 15:04:25 crc kubenswrapper[5050]: I1123 15:04:25.480854 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 23 15:04:25 crc kubenswrapper[5050]: I1123 15:04:25.502572 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-87nhx\" (UniqueName: \"kubernetes.io/projected/9d53c4b5-d0a1-4191-9239-8c7b6806f860-kube-api-access-87nhx\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:25 crc kubenswrapper[5050]: I1123 15:04:25.502634 5050 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9d53c4b5-d0a1-4191-9239-8c7b6806f860-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:25 crc kubenswrapper[5050]: I1123 15:04:25.502648 5050 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9d53c4b5-d0a1-4191-9239-8c7b6806f860-logs\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:25 crc kubenswrapper[5050]: I1123 15:04:25.518153 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f81d4e06-4245-4355-89d6-ddc438c06f29-ovsdbserver-sb-tls-certs" (OuterVolumeSpecName: "ovsdbserver-sb-tls-certs") pod "f81d4e06-4245-4355-89d6-ddc438c06f29" (UID: "f81d4e06-4245-4355-89d6-ddc438c06f29"). InnerVolumeSpecName "ovsdbserver-sb-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:04:25 crc kubenswrapper[5050]: I1123 15:04:25.540724 5050 scope.go:117] "RemoveContainer" containerID="ff35e18b1c173ef8289abf48d1745ad3d991c922a44ba70cf39ceac46dec1cc2" Nov 23 15:04:25 crc kubenswrapper[5050]: I1123 15:04:25.587365 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="014d6c50-fe8c-4b39-bccd-531037f1ff10" path="/var/lib/kubelet/pods/014d6c50-fe8c-4b39-bccd-531037f1ff10/volumes" Nov 23 15:04:25 crc kubenswrapper[5050]: I1123 15:04:25.602419 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4a7ed50c-3990-423d-9fd6-1165da59c5c9" path="/var/lib/kubelet/pods/4a7ed50c-3990-423d-9fd6-1165da59c5c9/volumes" Nov 23 15:04:25 crc kubenswrapper[5050]: I1123 15:04:25.605539 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d3d96734-832d-489e-9fed-a4eb705f41d7-combined-ca-bundle\") pod \"d3d96734-832d-489e-9fed-a4eb705f41d7\" (UID: \"d3d96734-832d-489e-9fed-a4eb705f41d7\") " Nov 23 15:04:25 crc kubenswrapper[5050]: I1123 15:04:25.605837 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/d3d96734-832d-489e-9fed-a4eb705f41d7-vencrypt-tls-certs\") pod \"d3d96734-832d-489e-9fed-a4eb705f41d7\" (UID: \"d3d96734-832d-489e-9fed-a4eb705f41d7\") " Nov 23 15:04:25 crc kubenswrapper[5050]: I1123 15:04:25.606863 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/d3d96734-832d-489e-9fed-a4eb705f41d7-nova-novncproxy-tls-certs\") pod \"d3d96734-832d-489e-9fed-a4eb705f41d7\" (UID: \"d3d96734-832d-489e-9fed-a4eb705f41d7\") " Nov 23 15:04:25 crc kubenswrapper[5050]: I1123 15:04:25.606942 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n7k9k\" (UniqueName: \"kubernetes.io/projected/d3d96734-832d-489e-9fed-a4eb705f41d7-kube-api-access-n7k9k\") pod \"d3d96734-832d-489e-9fed-a4eb705f41d7\" (UID: \"d3d96734-832d-489e-9fed-a4eb705f41d7\") " Nov 23 15:04:25 crc kubenswrapper[5050]: I1123 15:04:25.606994 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d3d96734-832d-489e-9fed-a4eb705f41d7-config-data\") pod \"d3d96734-832d-489e-9fed-a4eb705f41d7\" (UID: \"d3d96734-832d-489e-9fed-a4eb705f41d7\") " Nov 23 15:04:25 crc kubenswrapper[5050]: I1123 15:04:25.610044 5050 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/f81d4e06-4245-4355-89d6-ddc438c06f29-ovsdbserver-sb-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:25 crc kubenswrapper[5050]: I1123 15:04:25.611729 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d53c4b5-d0a1-4191-9239-8c7b6806f860-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9d53c4b5-d0a1-4191-9239-8c7b6806f860" (UID: "9d53c4b5-d0a1-4191-9239-8c7b6806f860"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:04:25 crc kubenswrapper[5050]: I1123 15:04:25.619125 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="716a14cd-88b9-4e18-a781-6dbfaad7634c" path="/var/lib/kubelet/pods/716a14cd-88b9-4e18-a781-6dbfaad7634c/volumes" Nov 23 15:04:25 crc kubenswrapper[5050]: I1123 15:04:25.620034 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8d6abc54-c88f-403a-b9ca-8645646c6356" path="/var/lib/kubelet/pods/8d6abc54-c88f-403a-b9ca-8645646c6356/volumes" Nov 23 15:04:25 crc kubenswrapper[5050]: I1123 15:04:25.623103 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8d6f5af2-c95a-47c1-b9c3-2d13eea56e9c" path="/var/lib/kubelet/pods/8d6f5af2-c95a-47c1-b9c3-2d13eea56e9c/volumes" Nov 23 15:04:25 crc kubenswrapper[5050]: I1123 15:04:25.626029 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d3d96734-832d-489e-9fed-a4eb705f41d7-kube-api-access-n7k9k" (OuterVolumeSpecName: "kube-api-access-n7k9k") pod "d3d96734-832d-489e-9fed-a4eb705f41d7" (UID: "d3d96734-832d-489e-9fed-a4eb705f41d7"). InnerVolumeSpecName "kube-api-access-n7k9k". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:04:25 crc kubenswrapper[5050]: I1123 15:04:25.651553 5050 scope.go:117] "RemoveContainer" containerID="40c87c219c9a5646179505e0d69951602c9a0948f6df53d02f15fd03bcb735c9" Nov 23 15:04:25 crc kubenswrapper[5050]: I1123 15:04:25.717290 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n7k9k\" (UniqueName: \"kubernetes.io/projected/d3d96734-832d-489e-9fed-a4eb705f41d7-kube-api-access-n7k9k\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:25 crc kubenswrapper[5050]: I1123 15:04:25.717333 5050 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d53c4b5-d0a1-4191-9239-8c7b6806f860-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:25 crc kubenswrapper[5050]: I1123 15:04:25.875735 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d3d96734-832d-489e-9fed-a4eb705f41d7-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d3d96734-832d-489e-9fed-a4eb705f41d7" (UID: "d3d96734-832d-489e-9fed-a4eb705f41d7"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:04:25 crc kubenswrapper[5050]: I1123 15:04:25.902116 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d53c4b5-d0a1-4191-9239-8c7b6806f860-config-data" (OuterVolumeSpecName: "config-data") pod "9d53c4b5-d0a1-4191-9239-8c7b6806f860" (UID: "9d53c4b5-d0a1-4191-9239-8c7b6806f860"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:04:25 crc kubenswrapper[5050]: I1123 15:04:25.918810 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e7a4774e-f304-40ca-8595-bbe9c381f466-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "e7a4774e-f304-40ca-8595-bbe9c381f466" (UID: "e7a4774e-f304-40ca-8595-bbe9c381f466"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 15:04:25 crc kubenswrapper[5050]: I1123 15:04:25.961627 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d3d96734-832d-489e-9fed-a4eb705f41d7-config-data" (OuterVolumeSpecName: "config-data") pod "d3d96734-832d-489e-9fed-a4eb705f41d7" (UID: "d3d96734-832d-489e-9fed-a4eb705f41d7"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:04:25 crc kubenswrapper[5050]: I1123 15:04:25.962731 5050 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e7a4774e-f304-40ca-8595-bbe9c381f466-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:25 crc kubenswrapper[5050]: I1123 15:04:25.962776 5050 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9d53c4b5-d0a1-4191-9239-8c7b6806f860-config-data\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:25 crc kubenswrapper[5050]: I1123 15:04:25.962804 5050 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d3d96734-832d-489e-9fed-a4eb705f41d7-config-data\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:25 crc kubenswrapper[5050]: I1123 15:04:25.962819 5050 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d3d96734-832d-489e-9fed-a4eb705f41d7-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:25 crc kubenswrapper[5050]: E1123 15:04:25.962739 5050 configmap.go:193] Couldn't get configMap openstack/rabbitmq-cell1-config-data: configmap "rabbitmq-cell1-config-data" not found Nov 23 15:04:25 crc kubenswrapper[5050]: E1123 15:04:25.962912 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/f92353db-5352-4216-ad2d-50242e58dfb7-config-data podName:f92353db-5352-4216-ad2d-50242e58dfb7 nodeName:}" failed. No retries permitted until 2025-11-23 15:04:29.962892481 +0000 UTC m=+1365.129888966 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/f92353db-5352-4216-ad2d-50242e58dfb7-config-data") pod "rabbitmq-cell1-server-0" (UID: "f92353db-5352-4216-ad2d-50242e58dfb7") : configmap "rabbitmq-cell1-config-data" not found Nov 23 15:04:25 crc kubenswrapper[5050]: I1123 15:04:25.965342 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f81d4e06-4245-4355-89d6-ddc438c06f29-metrics-certs-tls-certs" (OuterVolumeSpecName: "metrics-certs-tls-certs") pod "f81d4e06-4245-4355-89d6-ddc438c06f29" (UID: "f81d4e06-4245-4355-89d6-ddc438c06f29"). InnerVolumeSpecName "metrics-certs-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:04:26 crc kubenswrapper[5050]: I1123 15:04:26.007479 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d3d96734-832d-489e-9fed-a4eb705f41d7-nova-novncproxy-tls-certs" (OuterVolumeSpecName: "nova-novncproxy-tls-certs") pod "d3d96734-832d-489e-9fed-a4eb705f41d7" (UID: "d3d96734-832d-489e-9fed-a4eb705f41d7"). InnerVolumeSpecName "nova-novncproxy-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:04:26 crc kubenswrapper[5050]: I1123 15:04:26.102892 5050 reconciler_common.go:293] "Volume detached for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/f81d4e06-4245-4355-89d6-ddc438c06f29-metrics-certs-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:26 crc kubenswrapper[5050]: I1123 15:04:26.102927 5050 reconciler_common.go:293] "Volume detached for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/d3d96734-832d-489e-9fed-a4eb705f41d7-nova-novncproxy-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:26 crc kubenswrapper[5050]: I1123 15:04:26.158702 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d3d96734-832d-489e-9fed-a4eb705f41d7-vencrypt-tls-certs" (OuterVolumeSpecName: "vencrypt-tls-certs") pod "d3d96734-832d-489e-9fed-a4eb705f41d7" (UID: "d3d96734-832d-489e-9fed-a4eb705f41d7"). InnerVolumeSpecName "vencrypt-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:04:26 crc kubenswrapper[5050]: I1123 15:04:26.212993 5050 reconciler_common.go:293] "Volume detached for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/d3d96734-832d-489e-9fed-a4eb705f41d7-vencrypt-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:26 crc kubenswrapper[5050]: I1123 15:04:26.336721 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/novacell03052-account-delete-kz7k7"] Nov 23 15:04:26 crc kubenswrapper[5050]: I1123 15:04:26.336768 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/novaapif81d-account-delete-xfdwn"] Nov 23 15:04:26 crc kubenswrapper[5050]: I1123 15:04:26.336780 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 23 15:04:26 crc kubenswrapper[5050]: I1123 15:04:26.336795 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 23 15:04:26 crc kubenswrapper[5050]: I1123 15:04:26.336811 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/memcached-0"] Nov 23 15:04:26 crc kubenswrapper[5050]: I1123 15:04:26.337060 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/memcached-0" podUID="d1ce97bd-1ff7-400e-a741-7e757fbd7e17" containerName="memcached" containerID="cri-o://10d8f0ce4d54a26bf81a8c27901423a54c9049d1e6deff9e87bd78ed1978474b" gracePeriod=30 Nov 23 15:04:26 crc kubenswrapper[5050]: I1123 15:04:26.337818 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="528697e8-1ddc-4ab0-aa0e-008631905a4c" containerName="proxy-httpd" containerID="cri-o://5f913c69d91ed75b757a4d06691b6d5cb9dec9b9a7273d12971e80ccd91082db" gracePeriod=30 Nov 23 15:04:26 crc kubenswrapper[5050]: I1123 15:04:26.337842 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="528697e8-1ddc-4ab0-aa0e-008631905a4c" containerName="sg-core" containerID="cri-o://0db0ab1375fb7487d196ec345c0738e0de0a29049ea856246e4379363ccc3e46" gracePeriod=30 Nov 23 15:04:26 crc kubenswrapper[5050]: I1123 15:04:26.337881 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="528697e8-1ddc-4ab0-aa0e-008631905a4c" containerName="ceilometer-notification-agent" containerID="cri-o://888990a4380d2a527f44498ff0a147a611909bb72bf4f2fad48c2ae5096b8882" gracePeriod=30 Nov 23 15:04:26 crc 
Nov 23 15:04:26 crc kubenswrapper[5050]: I1123 15:04:26.337843 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/kube-state-metrics-0" podUID="94bbca67-2998-4727-bbe1-95d54277f4aa" containerName="kube-state-metrics" containerID="cri-o://7ddde7be0ab80cfcd7eacb38e126a697a0d2f4d855ad70062b839e6d933396d5" gracePeriod=30
Nov 23 15:04:26 crc kubenswrapper[5050]: I1123 15:04:26.347037 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="528697e8-1ddc-4ab0-aa0e-008631905a4c" containerName="ceilometer-central-agent" containerID="cri-o://54bb6bc8b8fe2be064d6b1b7c3753c2cce5da608c4510b93b32b5cb3298cf7f7" gracePeriod=30
Nov 23 15:04:26 crc kubenswrapper[5050]: I1123 15:04:26.356550 5050 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/cinder-api-0" podUID="ba1bb9f6-5ca2-4986-9473-62d50d9bebf0" containerName="cinder-api" probeResult="failure" output="Get \"https://10.217.0.165:8776/healthcheck\": read tcp 10.217.0.2:49178->10.217.0.165:8776: read: connection reset by peer"
Nov 23 15:04:26 crc kubenswrapper[5050]: I1123 15:04:26.359275 5050 generic.go:334] "Generic (PLEG): container finished" podID="e78eb875-17e5-431f-82b5-14a1190488b0" containerID="b0249ea7f2b3e47427ff24b071e6deac824d08d94f85a1a6b454cedaf35fdcfe" exitCode=0
Nov 23 15:04:26 crc kubenswrapper[5050]: I1123 15:04:26.359408 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder255c-account-delete-4znlv" event={"ID":"e78eb875-17e5-431f-82b5-14a1190488b0","Type":"ContainerDied","Data":"b0249ea7f2b3e47427ff24b071e6deac824d08d94f85a1a6b454cedaf35fdcfe"}
Nov 23 15:04:26 crc kubenswrapper[5050]: I1123 15:04:26.433500 5050 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/kube-state-metrics-0" podUID="94bbca67-2998-4727-bbe1-95d54277f4aa" containerName="kube-state-metrics" probeResult="failure" output="Get \"https://10.217.0.190:8080/livez\": unexpected EOF"
Nov 23 15:04:26 crc kubenswrapper[5050]: I1123 15:04:26.434176 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glancebb96-account-delete-qls8c" event={"ID":"13ecb6d2-cd50-4239-8939-6465176afd8e","Type":"ContainerStarted","Data":"06f7ec8eaee623028a2fb5166e2c0a70be842f5548766d5f65f74ceabacb80f5"}
Nov 23 15:04:26 crc kubenswrapper[5050]: I1123 15:04:26.475518 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/novacell03052-account-delete-kz7k7" event={"ID":"3d6e4b67-5d33-49cd-8a2a-69c3fc2c9d89","Type":"ContainerStarted","Data":"e5e99474b6c49167c6c2d8f690d0cc421ed1b7ed5f2c94f6c9ff2d4ecfc7bbe1"}
Nov 23 15:04:26 crc kubenswrapper[5050]: I1123 15:04:26.475707 5050 scope.go:117] "RemoveContainer" containerID="c0069a68c89482704d61e8c58556300339031d7c4accb976799bb1d562fe9ac9"
Nov 23 15:04:26 crc kubenswrapper[5050]: I1123 15:04:26.488961 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0"
Nov 23 15:04:26 crc kubenswrapper[5050]: I1123 15:04:26.512750 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-zskrr"]
Nov 23 15:04:26 crc kubenswrapper[5050]: I1123 15:04:26.522916 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0"
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 23 15:04:26 crc kubenswrapper[5050]: I1123 15:04:26.523605 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"d3d96734-832d-489e-9fed-a4eb705f41d7","Type":"ContainerDied","Data":"c3ccda55e8a7cc4faf0cb8b489ad3654e6ccbae866c0291dff60a6ad34b673e9"} Nov 23 15:04:26 crc kubenswrapper[5050]: I1123 15:04:26.563973 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/novaapif81d-account-delete-xfdwn" event={"ID":"1bda1a38-71dd-4de4-8e16-b5159a08fdfa","Type":"ContainerStarted","Data":"7fd4f46c7d4a6b10b51c4634653ef5eea9723e27b89cb170f3b6260e7ce0f0fa"} Nov 23 15:04:26 crc kubenswrapper[5050]: I1123 15:04:26.568631 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-sync-mzzv7"] Nov 23 15:04:26 crc kubenswrapper[5050]: I1123 15:04:26.591214 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"85ca597a-bf71-43bb-b57c-f840b37f196f","Type":"ContainerDied","Data":"4b00faf0a87aa75a385c4aba5cd2b739db65e2441c80af82db041006c53e48cf"} Nov 23 15:04:26 crc kubenswrapper[5050]: I1123 15:04:26.591702 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Nov 23 15:04:26 crc kubenswrapper[5050]: I1123 15:04:26.594477 5050 generic.go:334] "Generic (PLEG): container finished" podID="c1187c9d-5557-496e-be1d-8df301d6daa6" containerID="ddcc4b1b60ec1caba00533bfdcca9aabeb296bd155561a69a9313a277fd7f548" exitCode=0 Nov 23 15:04:26 crc kubenswrapper[5050]: I1123 15:04:26.594572 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-6b854f87-drlhc" event={"ID":"c1187c9d-5557-496e-be1d-8df301d6daa6","Type":"ContainerDied","Data":"ddcc4b1b60ec1caba00533bfdcca9aabeb296bd155561a69a9313a277fd7f548"} Nov 23 15:04:26 crc kubenswrapper[5050]: I1123 15:04:26.594611 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-6b854f87-drlhc" event={"ID":"c1187c9d-5557-496e-be1d-8df301d6daa6","Type":"ContainerDied","Data":"b1c52e489d41dc167d934414599ef28dff555b6b9b1022ec9897bbd738edc1ae"} Nov 23 15:04:26 crc kubenswrapper[5050]: I1123 15:04:26.594626 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b1c52e489d41dc167d934414599ef28dff555b6b9b1022ec9897bbd738edc1ae" Nov 23 15:04:26 crc kubenswrapper[5050]: I1123 15:04:26.601550 5050 generic.go:334] "Generic (PLEG): container finished" podID="9ea4c6e2-e26b-454f-a6e1-b8fd7df8a931" containerID="a5fe601429909f5855737ff265e47e05c7c710743d5e35f8c314bde3530608cb" exitCode=0 Nov 23 15:04:26 crc kubenswrapper[5050]: I1123 15:04:26.601596 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron45e2-account-delete-8sq6l" event={"ID":"9ea4c6e2-e26b-454f-a6e1-b8fd7df8a931","Type":"ContainerDied","Data":"a5fe601429909f5855737ff265e47e05c7c710743d5e35f8c314bde3530608cb"} Nov 23 15:04:26 crc kubenswrapper[5050]: I1123 15:04:26.610271 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-ffbf89f58-t9kvk"] Nov 23 15:04:26 crc kubenswrapper[5050]: I1123 15:04:26.610667 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/keystone-ffbf89f58-t9kvk" podUID="e3fbc320-b4b0-47f2-9fc6-a6efc63a3ee9" containerName="keystone-api" containerID="cri-o://510cddb97927101d8a412932a0b83e3c82d63904833e599f58c30bcc8dd0541d" gracePeriod=30 Nov 23 15:04:26 crc 
Nov 23 15:04:26 crc kubenswrapper[5050]: I1123 15:04:26.615191 5050 generic.go:334] "Generic (PLEG): container finished" podID="4b570632-90a0-4fcd-a067-9913b51ad711" containerID="5ab05a12fa19db392ca4e60409469274cc565dcc9c3b508b3ef8ad77d4e5f63b" exitCode=0
Nov 23 15:04:26 crc kubenswrapper[5050]: I1123 15:04:26.615240 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican6ffa-account-delete-7wsfp" event={"ID":"4b570632-90a0-4fcd-a067-9913b51ad711","Type":"ContainerDied","Data":"5ab05a12fa19db392ca4e60409469274cc565dcc9c3b508b3ef8ad77d4e5f63b"}
Nov 23 15:04:26 crc kubenswrapper[5050]: I1123 15:04:26.626329 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-sync-mzzv7"]
Nov 23 15:04:26 crc kubenswrapper[5050]: I1123 15:04:26.633420 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/85ca597a-bf71-43bb-b57c-f840b37f196f-galera-tls-certs\") pod \"85ca597a-bf71-43bb-b57c-f840b37f196f\" (UID: \"85ca597a-bf71-43bb-b57c-f840b37f196f\") "
Nov 23 15:04:26 crc kubenswrapper[5050]: I1123 15:04:26.636342 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/85ca597a-bf71-43bb-b57c-f840b37f196f-kolla-config\") pod \"85ca597a-bf71-43bb-b57c-f840b37f196f\" (UID: \"85ca597a-bf71-43bb-b57c-f840b37f196f\") "
Nov 23 15:04:26 crc kubenswrapper[5050]: I1123 15:04:26.652735 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/85ca597a-bf71-43bb-b57c-f840b37f196f-config-data-generated\") pod \"85ca597a-bf71-43bb-b57c-f840b37f196f\" (UID: \"85ca597a-bf71-43bb-b57c-f840b37f196f\") "
Nov 23 15:04:26 crc kubenswrapper[5050]: I1123 15:04:26.639501 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/85ca597a-bf71-43bb-b57c-f840b37f196f-kolla-config" (OuterVolumeSpecName: "kolla-config") pod "85ca597a-bf71-43bb-b57c-f840b37f196f" (UID: "85ca597a-bf71-43bb-b57c-f840b37f196f"). InnerVolumeSpecName "kolla-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 23 15:04:26 crc kubenswrapper[5050]: I1123 15:04:26.643778 5050 generic.go:334] "Generic (PLEG): container finished" podID="3d3a2397-697f-4d1b-a5ed-0aaa1b202a12" containerID="5981e02fbf178bbd36c2b4aa0bd3d390cd001f59c62a709e8011df05a496109d" exitCode=0
Nov 23 15:04:26 crc kubenswrapper[5050]: I1123 15:04:26.640584 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement734b-account-delete-zt8zp" event={"ID":"3d3a2397-697f-4d1b-a5ed-0aaa1b202a12","Type":"ContainerDied","Data":"5981e02fbf178bbd36c2b4aa0bd3d390cd001f59c62a709e8011df05a496109d"}
Nov 23 15:04:26 crc kubenswrapper[5050]: I1123 15:04:26.654064 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/85ca597a-bf71-43bb-b57c-f840b37f196f-config-data-default" (OuterVolumeSpecName: "config-data-default") pod "85ca597a-bf71-43bb-b57c-f840b37f196f" (UID: "85ca597a-bf71-43bb-b57c-f840b37f196f"). InnerVolumeSpecName "config-data-default". PluginName "kubernetes.io/configmap", VolumeGidValue ""
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 15:04:26 crc kubenswrapper[5050]: I1123 15:04:26.654183 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/85ca597a-bf71-43bb-b57c-f840b37f196f-config-data-default\") pod \"85ca597a-bf71-43bb-b57c-f840b37f196f\" (UID: \"85ca597a-bf71-43bb-b57c-f840b37f196f\") " Nov 23 15:04:26 crc kubenswrapper[5050]: I1123 15:04:26.654318 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pzhml\" (UniqueName: \"kubernetes.io/projected/85ca597a-bf71-43bb-b57c-f840b37f196f-kube-api-access-pzhml\") pod \"85ca597a-bf71-43bb-b57c-f840b37f196f\" (UID: \"85ca597a-bf71-43bb-b57c-f840b37f196f\") " Nov 23 15:04:26 crc kubenswrapper[5050]: I1123 15:04:26.654400 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/85ca597a-bf71-43bb-b57c-f840b37f196f-combined-ca-bundle\") pod \"85ca597a-bf71-43bb-b57c-f840b37f196f\" (UID: \"85ca597a-bf71-43bb-b57c-f840b37f196f\") " Nov 23 15:04:26 crc kubenswrapper[5050]: I1123 15:04:26.654490 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mysql-db\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"85ca597a-bf71-43bb-b57c-f840b37f196f\" (UID: \"85ca597a-bf71-43bb-b57c-f840b37f196f\") " Nov 23 15:04:26 crc kubenswrapper[5050]: I1123 15:04:26.654610 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/85ca597a-bf71-43bb-b57c-f840b37f196f-operator-scripts\") pod \"85ca597a-bf71-43bb-b57c-f840b37f196f\" (UID: \"85ca597a-bf71-43bb-b57c-f840b37f196f\") " Nov 23 15:04:26 crc kubenswrapper[5050]: I1123 15:04:26.656167 5050 reconciler_common.go:293] "Volume detached for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/85ca597a-bf71-43bb-b57c-f840b37f196f-kolla-config\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:26 crc kubenswrapper[5050]: I1123 15:04:26.656300 5050 reconciler_common.go:293] "Volume detached for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/85ca597a-bf71-43bb-b57c-f840b37f196f-config-data-default\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:26 crc kubenswrapper[5050]: I1123 15:04:26.659881 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-zskrr"] Nov 23 15:04:26 crc kubenswrapper[5050]: I1123 15:04:26.661610 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/85ca597a-bf71-43bb-b57c-f840b37f196f-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "85ca597a-bf71-43bb-b57c-f840b37f196f" (UID: "85ca597a-bf71-43bb-b57c-f840b37f196f"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 15:04:26 crc kubenswrapper[5050]: I1123 15:04:26.664241 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/85ca597a-bf71-43bb-b57c-f840b37f196f-config-data-generated" (OuterVolumeSpecName: "config-data-generated") pod "85ca597a-bf71-43bb-b57c-f840b37f196f" (UID: "85ca597a-bf71-43bb-b57c-f840b37f196f"). InnerVolumeSpecName "config-data-generated". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 15:04:26 crc kubenswrapper[5050]: I1123 15:04:26.676029 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/85ca597a-bf71-43bb-b57c-f840b37f196f-kube-api-access-pzhml" (OuterVolumeSpecName: "kube-api-access-pzhml") pod "85ca597a-bf71-43bb-b57c-f840b37f196f" (UID: "85ca597a-bf71-43bb-b57c-f840b37f196f"). InnerVolumeSpecName "kube-api-access-pzhml". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:04:26 crc kubenswrapper[5050]: I1123 15:04:26.689360 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstack-galera-0"] Nov 23 15:04:26 crc kubenswrapper[5050]: I1123 15:04:26.711434 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage07-crc" (OuterVolumeSpecName: "mysql-db") pod "85ca597a-bf71-43bb-b57c-f840b37f196f" (UID: "85ca597a-bf71-43bb-b57c-f840b37f196f"). InnerVolumeSpecName "local-storage07-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 23 15:04:26 crc kubenswrapper[5050]: I1123 15:04:26.733774 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-586bfdfdd8-99vrd" event={"ID":"9d53c4b5-d0a1-4191-9239-8c7b6806f860","Type":"ContainerDied","Data":"04599c1ee457198a051927d61d801c2c251b476f97e5c92bf4c2314b51b24b38"} Nov 23 15:04:26 crc kubenswrapper[5050]: I1123 15:04:26.733908 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-586bfdfdd8-99vrd" Nov 23 15:04:26 crc kubenswrapper[5050]: I1123 15:04:26.759690 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/85ca597a-bf71-43bb-b57c-f840b37f196f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "85ca597a-bf71-43bb-b57c-f840b37f196f" (UID: "85ca597a-bf71-43bb-b57c-f840b37f196f"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:04:26 crc kubenswrapper[5050]: I1123 15:04:26.765797 5050 reconciler_common.go:293] "Volume detached for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/85ca597a-bf71-43bb-b57c-f840b37f196f-config-data-generated\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:26 crc kubenswrapper[5050]: I1123 15:04:26.765826 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pzhml\" (UniqueName: \"kubernetes.io/projected/85ca597a-bf71-43bb-b57c-f840b37f196f-kube-api-access-pzhml\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:26 crc kubenswrapper[5050]: I1123 15:04:26.765853 5050 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") on node \"crc\" " Nov 23 15:04:26 crc kubenswrapper[5050]: I1123 15:04:26.765867 5050 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/85ca597a-bf71-43bb-b57c-f840b37f196f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:26 crc kubenswrapper[5050]: I1123 15:04:26.765879 5050 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/85ca597a-bf71-43bb-b57c-f840b37f196f-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:26 crc kubenswrapper[5050]: I1123 15:04:26.773674 5050 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="8e597d4a-3ff4-4b9e-ab6a-c0364f6afb43" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.204:8775/\": read tcp 10.217.0.2:39514->10.217.0.204:8775: read: connection reset by peer" Nov 23 15:04:26 crc kubenswrapper[5050]: I1123 15:04:26.774169 5050 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="8e597d4a-3ff4-4b9e-ab6a-c0364f6afb43" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.204:8775/\": read tcp 10.217.0.2:39508->10.217.0.204:8775: read: connection reset by peer" Nov 23 15:04:26 crc kubenswrapper[5050]: I1123 15:04:26.774223 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-create-rzl6b"] Nov 23 15:04:26 crc kubenswrapper[5050]: I1123 15:04:26.787245 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-create-rzl6b"] Nov 23 15:04:26 crc kubenswrapper[5050]: I1123 15:04:26.797078 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-6ffa-account-create-pf9hk"] Nov 23 15:04:26 crc kubenswrapper[5050]: I1123 15:04:26.808854 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-6ffa-account-create-pf9hk"] Nov 23 15:04:26 crc kubenswrapper[5050]: I1123 15:04:26.863262 5050 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-77dc855c68-z488p" podUID="f85034c8-6b79-42e1-849d-646a2ead1a93" containerName="barbican-api" probeResult="failure" output="Get \"https://10.217.0.161:9311/healthcheck\": read tcp 10.217.0.2:42292->10.217.0.161:9311: read: connection reset by peer" Nov 23 15:04:26 crc kubenswrapper[5050]: I1123 15:04:26.863939 5050 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-77dc855c68-z488p" podUID="f85034c8-6b79-42e1-849d-646a2ead1a93" containerName="barbican-api-log" probeResult="failure" output="Get \"https://10.217.0.161:9311/healthcheck\": read 
tcp 10.217.0.2:42308->10.217.0.161:9311: read: connection reset by peer" Nov 23 15:04:26 crc kubenswrapper[5050]: E1123 15:04:26.877186 5050 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1b3d79783367fa4b7fae45d92ec2a13146512723daef532131bce7af8af900e8 is running failed: container process not found" containerID="1b3d79783367fa4b7fae45d92ec2a13146512723daef532131bce7af8af900e8" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 23 15:04:26 crc kubenswrapper[5050]: E1123 15:04:26.885733 5050 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1b3d79783367fa4b7fae45d92ec2a13146512723daef532131bce7af8af900e8 is running failed: container process not found" containerID="1b3d79783367fa4b7fae45d92ec2a13146512723daef532131bce7af8af900e8" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 23 15:04:26 crc kubenswrapper[5050]: E1123 15:04:26.886120 5050 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1b3d79783367fa4b7fae45d92ec2a13146512723daef532131bce7af8af900e8 is running failed: container process not found" containerID="1b3d79783367fa4b7fae45d92ec2a13146512723daef532131bce7af8af900e8" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 23 15:04:26 crc kubenswrapper[5050]: E1123 15:04:26.886149 5050 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1b3d79783367fa4b7fae45d92ec2a13146512723daef532131bce7af8af900e8 is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-n9v8v" podUID="c21366ac-a7da-4cac-8332-753659210595" containerName="ovsdb-server" Nov 23 15:04:26 crc kubenswrapper[5050]: I1123 15:04:26.919235 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican6ffa-account-delete-7wsfp"] Nov 23 15:04:26 crc kubenswrapper[5050]: I1123 15:04:26.978193 5050 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage07-crc" (UniqueName: "kubernetes.io/local-volume/local-storage07-crc") on node "crc" Nov 23 15:04:27 crc kubenswrapper[5050]: I1123 15:04:27.004549 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-create-d6xrh"] Nov 23 15:04:27 crc kubenswrapper[5050]: E1123 15:04:27.008504 5050 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="a0820261b299cc33db42946f12f1f4467f892f1598cff4f933ab45def65d8af9" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 23 15:04:27 crc kubenswrapper[5050]: E1123 15:04:27.008511 5050 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="42a4a3972d0dfdbfa5a41878495e90a1790378398c3c410fbb952adc25a032b1" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Nov 23 15:04:27 crc kubenswrapper[5050]: I1123 15:04:27.028172 5050 reconciler_common.go:293] "Volume detached for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") on node \"crc\" DevicePath \"\"" Nov 23 
Nov 23 15:04:27 crc kubenswrapper[5050]: E1123 15:04:27.053987 5050 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="a0820261b299cc33db42946f12f1f4467f892f1598cff4f933ab45def65d8af9" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"]
Nov 23 15:04:27 crc kubenswrapper[5050]: E1123 15:04:27.090802 5050 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="42a4a3972d0dfdbfa5a41878495e90a1790378398c3c410fbb952adc25a032b1" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"]
Nov 23 15:04:27 crc kubenswrapper[5050]: I1123 15:04:27.101320 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-create-d6xrh"]
Nov 23 15:04:27 crc kubenswrapper[5050]: I1123 15:04:27.113790 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/85ca597a-bf71-43bb-b57c-f840b37f196f-galera-tls-certs" (OuterVolumeSpecName: "galera-tls-certs") pod "85ca597a-bf71-43bb-b57c-f840b37f196f" (UID: "85ca597a-bf71-43bb-b57c-f840b37f196f"). InnerVolumeSpecName "galera-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 23 15:04:27 crc kubenswrapper[5050]: I1123 15:04:27.142810 5050 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-tbww8" podUID="33af3c2e-dea7-4448-8b28-b579d77490b9" containerName="ovn-controller" probeResult="failure" output=<
Nov 23 15:04:27 crc kubenswrapper[5050]: ERROR - Failed to get connection status from ovn-controller, ovn-appctl exit status: 0
Nov 23 15:04:27 crc kubenswrapper[5050]: >
Nov 23 15:04:27 crc kubenswrapper[5050]: E1123 15:04:27.143331 5050 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="42a4a3972d0dfdbfa5a41878495e90a1790378398c3c410fbb952adc25a032b1" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"]
Nov 23 15:04:27 crc kubenswrapper[5050]: E1123 15:04:27.143369 5050 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-cell0-conductor-0" podUID="4d1fe4ad-9245-4af5-b378-c908ce72f08c" containerName="nova-cell0-conductor-conductor"
Nov 23 15:04:27 crc kubenswrapper[5050]: E1123 15:04:27.143403 5050 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="a0820261b299cc33db42946f12f1f4467f892f1598cff4f933ab45def65d8af9" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"]
Nov 23 15:04:27 crc kubenswrapper[5050]: E1123 15:04:27.143422 5050 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-n9v8v" podUID="c21366ac-a7da-4cac-8332-753659210595" containerName="ovs-vswitchd"
Nov 23 15:04:27 crc kubenswrapper[5050]: I1123 15:04:27.145171 5050 reconciler_common.go:293] "Volume detached for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/85ca597a-bf71-43bb-b57c-f840b37f196f-galera-tls-certs\") on node \"crc\" DevicePath \"\""
\"kubernetes.io/secret/85ca597a-bf71-43bb-b57c-f840b37f196f-galera-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:27 crc kubenswrapper[5050]: I1123 15:04:27.162915 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-c056-account-create-tc55d"] Nov 23 15:04:27 crc kubenswrapper[5050]: I1123 15:04:27.170096 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-c056-account-create-tc55d"] Nov 23 15:04:27 crc kubenswrapper[5050]: I1123 15:04:27.180726 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-create-sdk6z"] Nov 23 15:04:27 crc kubenswrapper[5050]: I1123 15:04:27.187974 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-create-sdk6z"] Nov 23 15:04:27 crc kubenswrapper[5050]: I1123 15:04:27.197765 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-45e2-account-create-hj4jm"] Nov 23 15:04:27 crc kubenswrapper[5050]: I1123 15:04:27.212575 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-45e2-account-create-hj4jm"] Nov 23 15:04:27 crc kubenswrapper[5050]: I1123 15:04:27.224228 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron45e2-account-delete-8sq6l"] Nov 23 15:04:27 crc kubenswrapper[5050]: I1123 15:04:27.257190 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-6b854f87-drlhc" Nov 23 15:04:27 crc kubenswrapper[5050]: I1123 15:04:27.289826 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-create-875vf"] Nov 23 15:04:27 crc kubenswrapper[5050]: I1123 15:04:27.302134 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-create-875vf"] Nov 23 15:04:27 crc kubenswrapper[5050]: I1123 15:04:27.314717 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-734b-account-create-8vfn6"] Nov 23 15:04:27 crc kubenswrapper[5050]: I1123 15:04:27.325033 5050 scope.go:117] "RemoveContainer" containerID="dc0413953c30be6f1a27357706432a328f29ebeedb2176f058654bf6f70fcf23" Nov 23 15:04:27 crc kubenswrapper[5050]: I1123 15:04:27.331299 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement734b-account-delete-zt8zp"] Nov 23 15:04:27 crc kubenswrapper[5050]: I1123 15:04:27.348912 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-734b-account-create-8vfn6"] Nov 23 15:04:27 crc kubenswrapper[5050]: I1123 15:04:27.350593 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wx6m8\" (UniqueName: \"kubernetes.io/projected/c1187c9d-5557-496e-be1d-8df301d6daa6-kube-api-access-wx6m8\") pod \"c1187c9d-5557-496e-be1d-8df301d6daa6\" (UID: \"c1187c9d-5557-496e-be1d-8df301d6daa6\") " Nov 23 15:04:27 crc kubenswrapper[5050]: I1123 15:04:27.352149 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c1187c9d-5557-496e-be1d-8df301d6daa6-run-httpd\") pod \"c1187c9d-5557-496e-be1d-8df301d6daa6\" (UID: \"c1187c9d-5557-496e-be1d-8df301d6daa6\") " Nov 23 15:04:27 crc kubenswrapper[5050]: I1123 15:04:27.352192 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c1187c9d-5557-496e-be1d-8df301d6daa6-combined-ca-bundle\") pod \"c1187c9d-5557-496e-be1d-8df301d6daa6\" (UID: \"c1187c9d-5557-496e-be1d-8df301d6daa6\") " Nov 
Nov 23 15:04:27 crc kubenswrapper[5050]: I1123 15:04:27.352316 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c1187c9d-5557-496e-be1d-8df301d6daa6-config-data\") pod \"c1187c9d-5557-496e-be1d-8df301d6daa6\" (UID: \"c1187c9d-5557-496e-be1d-8df301d6daa6\") "
Nov 23 15:04:27 crc kubenswrapper[5050]: I1123 15:04:27.352366 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c1187c9d-5557-496e-be1d-8df301d6daa6-internal-tls-certs\") pod \"c1187c9d-5557-496e-be1d-8df301d6daa6\" (UID: \"c1187c9d-5557-496e-be1d-8df301d6daa6\") "
Nov 23 15:04:27 crc kubenswrapper[5050]: I1123 15:04:27.352843 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c1187c9d-5557-496e-be1d-8df301d6daa6-log-httpd\") pod \"c1187c9d-5557-496e-be1d-8df301d6daa6\" (UID: \"c1187c9d-5557-496e-be1d-8df301d6daa6\") "
Nov 23 15:04:27 crc kubenswrapper[5050]: I1123 15:04:27.352882 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c1187c9d-5557-496e-be1d-8df301d6daa6-public-tls-certs\") pod \"c1187c9d-5557-496e-be1d-8df301d6daa6\" (UID: \"c1187c9d-5557-496e-be1d-8df301d6daa6\") "
Nov 23 15:04:27 crc kubenswrapper[5050]: I1123 15:04:27.352944 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/c1187c9d-5557-496e-be1d-8df301d6daa6-etc-swift\") pod \"c1187c9d-5557-496e-be1d-8df301d6daa6\" (UID: \"c1187c9d-5557-496e-be1d-8df301d6daa6\") "
Nov 23 15:04:27 crc kubenswrapper[5050]: I1123 15:04:27.354634 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c1187c9d-5557-496e-be1d-8df301d6daa6-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "c1187c9d-5557-496e-be1d-8df301d6daa6" (UID: "c1187c9d-5557-496e-be1d-8df301d6daa6"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 23 15:04:27 crc kubenswrapper[5050]: I1123 15:04:27.354727 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c1187c9d-5557-496e-be1d-8df301d6daa6-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "c1187c9d-5557-496e-be1d-8df301d6daa6" (UID: "c1187c9d-5557-496e-be1d-8df301d6daa6"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 23 15:04:27 crc kubenswrapper[5050]: I1123 15:04:27.360714 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c1187c9d-5557-496e-be1d-8df301d6daa6-kube-api-access-wx6m8" (OuterVolumeSpecName: "kube-api-access-wx6m8") pod "c1187c9d-5557-496e-be1d-8df301d6daa6" (UID: "c1187c9d-5557-496e-be1d-8df301d6daa6"). InnerVolumeSpecName "kube-api-access-wx6m8". PluginName "kubernetes.io/projected", VolumeGidValue ""
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:04:27 crc kubenswrapper[5050]: I1123 15:04:27.366247 5050 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c1187c9d-5557-496e-be1d-8df301d6daa6-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:27 crc kubenswrapper[5050]: I1123 15:04:27.366279 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wx6m8\" (UniqueName: \"kubernetes.io/projected/c1187c9d-5557-496e-be1d-8df301d6daa6-kube-api-access-wx6m8\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:27 crc kubenswrapper[5050]: I1123 15:04:27.366292 5050 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c1187c9d-5557-496e-be1d-8df301d6daa6-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:27 crc kubenswrapper[5050]: I1123 15:04:27.379760 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-create-ktwq8"] Nov 23 15:04:27 crc kubenswrapper[5050]: I1123 15:04:27.387319 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/openstack-galera-0" podUID="0a256986-024f-45e6-9b86-b5a724ab5f6e" containerName="galera" containerID="cri-o://e05743a215c5ada706341f4409a1a724e3949e0173a53be6eeb47dacec86519e" gracePeriod=30 Nov 23 15:04:27 crc kubenswrapper[5050]: E1123 15:04:27.395530 5050 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="dc18dcf10f3de24a9d22f69583ddef25bdb552099564e7a29e69ffc7a506a441" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Nov 23 15:04:27 crc kubenswrapper[5050]: E1123 15:04:27.399514 5050 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="2649b8f823f897f77f83477412dd9f8246b7b8e23f72d10846e06d96f7b4bff1" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 23 15:04:27 crc kubenswrapper[5050]: I1123 15:04:27.401853 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-create-ktwq8"] Nov 23 15:04:27 crc kubenswrapper[5050]: E1123 15:04:27.402552 5050 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="2649b8f823f897f77f83477412dd9f8246b7b8e23f72d10846e06d96f7b4bff1" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 23 15:04:27 crc kubenswrapper[5050]: E1123 15:04:27.407823 5050 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="2649b8f823f897f77f83477412dd9f8246b7b8e23f72d10846e06d96f7b4bff1" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 23 15:04:27 crc kubenswrapper[5050]: E1123 15:04:27.407874 5050 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="634f01f5-daae-4e5d-811f-5d406bfee9c1" containerName="nova-scheduler-scheduler" Nov 23 15:04:27 crc kubenswrapper[5050]: I1123 15:04:27.413075 5050 
Nov 23 15:04:27 crc kubenswrapper[5050]: I1123 15:04:27.413075 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c1187c9d-5557-496e-be1d-8df301d6daa6-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "c1187c9d-5557-496e-be1d-8df301d6daa6" (UID: "c1187c9d-5557-496e-be1d-8df301d6daa6"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 23 15:04:27 crc kubenswrapper[5050]: I1123 15:04:27.414781 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glancebb96-account-delete-qls8c"]
Nov 23 15:04:27 crc kubenswrapper[5050]: E1123 15:04:27.415390 5050 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="dc18dcf10f3de24a9d22f69583ddef25bdb552099564e7a29e69ffc7a506a441" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"]
Nov 23 15:04:27 crc kubenswrapper[5050]: E1123 15:04:27.422091 5050 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="dc18dcf10f3de24a9d22f69583ddef25bdb552099564e7a29e69ffc7a506a441" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"]
Nov 23 15:04:27 crc kubenswrapper[5050]: E1123 15:04:27.422158 5050 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-cell1-conductor-0" podUID="75e360ca-c84a-4806-a86a-86924a639cfc" containerName="nova-cell1-conductor-conductor"
Nov 23 15:04:27 crc kubenswrapper[5050]: I1123 15:04:27.429296 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-bb96-account-create-mhdc2"]
Nov 23 15:04:27 crc kubenswrapper[5050]: I1123 15:04:27.439375 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-bb96-account-create-mhdc2"]
Nov 23 15:04:27 crc kubenswrapper[5050]: I1123 15:04:27.470450 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovsdbserver-sb-0"]
Nov 23 15:04:27 crc kubenswrapper[5050]: I1123 15:04:27.473627 5050 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/c1187c9d-5557-496e-be1d-8df301d6daa6-etc-swift\") on node \"crc\" DevicePath \"\""
Nov 23 15:04:27 crc kubenswrapper[5050]: I1123 15:04:27.477929 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovsdbserver-sb-0"]
Nov 23 15:04:27 crc kubenswrapper[5050]: I1123 15:04:27.484181 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-db-create-mwptv"]
Nov 23 15:04:27 crc kubenswrapper[5050]: I1123 15:04:27.490338 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-db-create-mwptv"]
Nov 23 15:04:27 crc kubenswrapper[5050]: I1123 15:04:27.497487 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-cd5cbd7b9-vptxh"]
Nov 23 15:04:27 crc kubenswrapper[5050]: I1123 15:04:27.503425 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-cd5cbd7b9-vptxh"]
Nov 23 15:04:27 crc kubenswrapper[5050]: I1123 15:04:27.509058 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c1187c9d-5557-496e-be1d-8df301d6daa6-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c1187c9d-5557-496e-be1d-8df301d6daa6" (UID: "c1187c9d-5557-496e-be1d-8df301d6daa6"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
"c1187c9d-5557-496e-be1d-8df301d6daa6" (UID: "c1187c9d-5557-496e-be1d-8df301d6daa6"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:04:27 crc kubenswrapper[5050]: I1123 15:04:27.583305 5050 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c1187c9d-5557-496e-be1d-8df301d6daa6-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:27 crc kubenswrapper[5050]: I1123 15:04:27.589249 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c1187c9d-5557-496e-be1d-8df301d6daa6-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "c1187c9d-5557-496e-be1d-8df301d6daa6" (UID: "c1187c9d-5557-496e-be1d-8df301d6daa6"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:04:27 crc kubenswrapper[5050]: I1123 15:04:27.601351 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c1187c9d-5557-496e-be1d-8df301d6daa6-config-data" (OuterVolumeSpecName: "config-data") pod "c1187c9d-5557-496e-be1d-8df301d6daa6" (UID: "c1187c9d-5557-496e-be1d-8df301d6daa6"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:04:27 crc kubenswrapper[5050]: I1123 15:04:27.677747 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="241ba8d0-d451-4df3-888a-ff7df9129456" path="/var/lib/kubelet/pods/241ba8d0-d451-4df3-888a-ff7df9129456/volumes" Nov 23 15:04:27 crc kubenswrapper[5050]: I1123 15:04:27.678749 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="27cbd9dd-3372-49ca-9321-421a05e76922" path="/var/lib/kubelet/pods/27cbd9dd-3372-49ca-9321-421a05e76922/volumes" Nov 23 15:04:27 crc kubenswrapper[5050]: I1123 15:04:27.679317 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4a2308e0-8430-418a-b943-0f6bc4e22904" path="/var/lib/kubelet/pods/4a2308e0-8430-418a-b943-0f6bc4e22904/volumes" Nov 23 15:04:27 crc kubenswrapper[5050]: I1123 15:04:27.679873 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4bf9e433-c2f5-434e-8f06-56813a38e287" path="/var/lib/kubelet/pods/4bf9e433-c2f5-434e-8f06-56813a38e287/volumes" Nov 23 15:04:27 crc kubenswrapper[5050]: I1123 15:04:27.681588 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="889f2b40-a1f4-45e2-b8e1-0fc72a7b55ca" path="/var/lib/kubelet/pods/889f2b40-a1f4-45e2-b8e1-0fc72a7b55ca/volumes" Nov 23 15:04:27 crc kubenswrapper[5050]: I1123 15:04:27.682540 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="99652fba-7c5d-4aaa-8de4-e29e38027171" path="/var/lib/kubelet/pods/99652fba-7c5d-4aaa-8de4-e29e38027171/volumes" Nov 23 15:04:27 crc kubenswrapper[5050]: I1123 15:04:27.683288 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd7c0f96-8702-4589-b3f5-a6a164a38d4e" path="/var/lib/kubelet/pods/bd7c0f96-8702-4589-b3f5-a6a164a38d4e/volumes" Nov 23 15:04:27 crc kubenswrapper[5050]: I1123 15:04:27.685098 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c6c16397-0bd7-4c64-bbd4-0a8f5ce2ef6a" path="/var/lib/kubelet/pods/c6c16397-0bd7-4c64-bbd4-0a8f5ce2ef6a/volumes" Nov 23 15:04:27 crc kubenswrapper[5050]: I1123 15:04:27.685665 5050 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c1187c9d-5557-496e-be1d-8df301d6daa6-config-data\") 
on node \"crc\" DevicePath \"\"" Nov 23 15:04:27 crc kubenswrapper[5050]: I1123 15:04:27.685697 5050 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c1187c9d-5557-496e-be1d-8df301d6daa6-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:27 crc kubenswrapper[5050]: I1123 15:04:27.685888 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d1c0ea55-9eb9-4e1c-bc1f-80ccf4c9d2cd" path="/var/lib/kubelet/pods/d1c0ea55-9eb9-4e1c-bc1f-80ccf4c9d2cd/volumes" Nov 23 15:04:27 crc kubenswrapper[5050]: I1123 15:04:27.698348 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d52e824b-4a80-4af9-961a-42652860b157" path="/var/lib/kubelet/pods/d52e824b-4a80-4af9-961a-42652860b157/volumes" Nov 23 15:04:27 crc kubenswrapper[5050]: I1123 15:04:27.699064 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dc6bbfa7-abb2-4676-814f-7a65c33b2e61" path="/var/lib/kubelet/pods/dc6bbfa7-abb2-4676-814f-7a65c33b2e61/volumes" Nov 23 15:04:27 crc kubenswrapper[5050]: I1123 15:04:27.699647 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e3ace8e3-1201-45e0-b9f7-7a8e272cef48" path="/var/lib/kubelet/pods/e3ace8e3-1201-45e0-b9f7-7a8e272cef48/volumes" Nov 23 15:04:27 crc kubenswrapper[5050]: I1123 15:04:27.700668 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e6916e1b-5874-4095-8306-0af203ba7f7d" path="/var/lib/kubelet/pods/e6916e1b-5874-4095-8306-0af203ba7f7d/volumes" Nov 23 15:04:27 crc kubenswrapper[5050]: I1123 15:04:27.701156 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e7a4774e-f304-40ca-8595-bbe9c381f466" path="/var/lib/kubelet/pods/e7a4774e-f304-40ca-8595-bbe9c381f466/volumes" Nov 23 15:04:27 crc kubenswrapper[5050]: I1123 15:04:27.701976 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f81d4e06-4245-4355-89d6-ddc438c06f29" path="/var/lib/kubelet/pods/f81d4e06-4245-4355-89d6-ddc438c06f29/volumes" Nov 23 15:04:27 crc kubenswrapper[5050]: I1123 15:04:27.715956 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c1187c9d-5557-496e-be1d-8df301d6daa6-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "c1187c9d-5557-496e-be1d-8df301d6daa6" (UID: "c1187c9d-5557-496e-be1d-8df301d6daa6"). InnerVolumeSpecName "public-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:04:27 crc kubenswrapper[5050]: E1123 15:04:27.768098 5050 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3d3a2397_697f_4d1b_a5ed_0aaa1b202a12.slice/crio-5981e02fbf178bbd36c2b4aa0bd3d390cd001f59c62a709e8011df05a496109d.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3d3a2397_697f_4d1b_a5ed_0aaa1b202a12.slice/crio-conmon-5981e02fbf178bbd36c2b4aa0bd3d390cd001f59c62a709e8011df05a496109d.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9ea4c6e2_e26b_454f_a6e1_b8fd7df8a931.slice/crio-a5fe601429909f5855737ff265e47e05c7c710743d5e35f8c314bde3530608cb.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podba1bb9f6_5ca2_4986_9473_62d50d9bebf0.slice/crio-conmon-08fc1f6beb7a82ec24f421d1df48b2d8f792f3248ae507344624f3d012bb743a.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode78eb875_17e5_431f_82b5_14a1190488b0.slice/crio-b0249ea7f2b3e47427ff24b071e6deac824d08d94f85a1a6b454cedaf35fdcfe.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podadd2a2e2_5553_4e43_8ddd_b8719949d531.slice/crio-ce2fe500a1bdad923f79c0d2d37ae5d7c546350b70f8148bfd61c5c68475e3f7.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9ea4c6e2_e26b_454f_a6e1_b8fd7df8a931.slice/crio-conmon-a5fe601429909f5855737ff265e47e05c7c710743d5e35f8c314bde3530608cb.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode78eb875_17e5_431f_82b5_14a1190488b0.slice/crio-conmon-b0249ea7f2b3e47427ff24b071e6deac824d08d94f85a1a6b454cedaf35fdcfe.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode7a4774e_f304_40ca_8595_bbe9c381f466.slice/crio-7ed36c96e2576c13dab1973b1bc4f2e61749170e1aa055bd02a656954ba7520c\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod528697e8_1ddc_4ab0_aa0e_008631905a4c.slice/crio-conmon-5f913c69d91ed75b757a4d06691b6d5cb9dec9b9a7273d12971e80ccd91082db.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf81d4e06_4245_4355_89d6_ddc438c06f29.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode7a4774e_f304_40ca_8595_bbe9c381f466.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6c5e9b39_91fa_4f4e_9d95_0599bc22472d.slice/crio-conmon-afbf2c9723a3c51e2ef343739812462a5a83e6a2cd33bd51d95de96ab628994d.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd3d96734_832d_489e_9fed_a4eb705f41d7.slice/crio-c3ccda55e8a7cc4faf0cb8b489ad3654e6ccbae866c0291dff60a6ad34b673e9\": RecentStats: unable to find data in memory cache], 
[\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod528697e8_1ddc_4ab0_aa0e_008631905a4c.slice/crio-5f913c69d91ed75b757a4d06691b6d5cb9dec9b9a7273d12971e80ccd91082db.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8e597d4a_3ff4_4b9e_ab6a_c0364f6afb43.slice/crio-6f245a393065748303acdd84cc3863db9cf884ff2abd3737de04d10232960c78.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podba1bb9f6_5ca2_4986_9473_62d50d9bebf0.slice/crio-08fc1f6beb7a82ec24f421d1df48b2d8f792f3248ae507344624f3d012bb743a.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6c3ae457_67ab_4c0b_a17e_a8264215793b.slice/crio-conmon-11eb99c3ddd7ac1add778be8c07cea9de7ac571edec1bae0cd0e936ceb624706.scope\": RecentStats: unable to find data in memory cache]" Nov 23 15:04:27 crc kubenswrapper[5050]: I1123 15:04:27.801956 5050 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c1187c9d-5557-496e-be1d-8df301d6daa6-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:27 crc kubenswrapper[5050]: I1123 15:04:27.824518 5050 generic.go:334] "Generic (PLEG): container finished" podID="f85034c8-6b79-42e1-849d-646a2ead1a93" containerID="ba962ea3ca9a2e41f992ef21c4cd5215480c219a40218ff8e9e9408b4eeec97b" exitCode=0 Nov 23 15:04:27 crc kubenswrapper[5050]: I1123 15:04:27.836789 5050 generic.go:334] "Generic (PLEG): container finished" podID="94bbca67-2998-4727-bbe1-95d54277f4aa" containerID="7ddde7be0ab80cfcd7eacb38e126a697a0d2f4d855ad70062b839e6d933396d5" exitCode=2 Nov 23 15:04:27 crc kubenswrapper[5050]: I1123 15:04:27.878262 5050 generic.go:334] "Generic (PLEG): container finished" podID="8e597d4a-3ff4-4b9e-ab6a-c0364f6afb43" containerID="6f245a393065748303acdd84cc3863db9cf884ff2abd3737de04d10232960c78" exitCode=0 Nov 23 15:04:27 crc kubenswrapper[5050]: I1123 15:04:27.881615 5050 generic.go:334] "Generic (PLEG): container finished" podID="d1ce97bd-1ff7-400e-a741-7e757fbd7e17" containerID="10d8f0ce4d54a26bf81a8c27901423a54c9049d1e6deff9e87bd78ed1978474b" exitCode=0 Nov 23 15:04:27 crc kubenswrapper[5050]: I1123 15:04:27.886030 5050 generic.go:334] "Generic (PLEG): container finished" podID="042c506a-b65f-4cbd-9ca7-3df0ec55ffa0" containerID="085d654f5685c959e6314e3ffe048bec6afd8a89e45c228a241cbd713de9226c" exitCode=0 Nov 23 15:04:27 crc kubenswrapper[5050]: I1123 15:04:27.889587 5050 generic.go:334] "Generic (PLEG): container finished" podID="6c3ae457-67ab-4c0b-a17e-a8264215793b" containerID="11eb99c3ddd7ac1add778be8c07cea9de7ac571edec1bae0cd0e936ceb624706" exitCode=0 Nov 23 15:04:27 crc kubenswrapper[5050]: I1123 15:04:27.894351 5050 generic.go:334] "Generic (PLEG): container finished" podID="4d1fe4ad-9245-4af5-b378-c908ce72f08c" containerID="42a4a3972d0dfdbfa5a41878495e90a1790378398c3c410fbb952adc25a032b1" exitCode=0 Nov 23 15:04:27 crc kubenswrapper[5050]: I1123 15:04:27.898602 5050 generic.go:334] "Generic (PLEG): container finished" podID="528697e8-1ddc-4ab0-aa0e-008631905a4c" containerID="5f913c69d91ed75b757a4d06691b6d5cb9dec9b9a7273d12971e80ccd91082db" exitCode=0 Nov 23 15:04:27 crc kubenswrapper[5050]: I1123 15:04:27.898628 5050 generic.go:334] "Generic (PLEG): container finished" podID="528697e8-1ddc-4ab0-aa0e-008631905a4c" containerID="0db0ab1375fb7487d196ec345c0738e0de0a29049ea856246e4379363ccc3e46" exitCode=2 Nov 23 
15:04:27 crc kubenswrapper[5050]: I1123 15:04:27.898642 5050 generic.go:334] "Generic (PLEG): container finished" podID="528697e8-1ddc-4ab0-aa0e-008631905a4c" containerID="54bb6bc8b8fe2be064d6b1b7c3753c2cce5da608c4510b93b32b5cb3298cf7f7" exitCode=0 Nov 23 15:04:27 crc kubenswrapper[5050]: I1123 15:04:27.910380 5050 generic.go:334] "Generic (PLEG): container finished" podID="4d3c8093-cbaa-4414-9f1d-c7ae7a2b9322" containerID="63cf2a00b105f3566525e540e376dbc5f791f9441863630deaff0d4a49ab2c58" exitCode=0 Nov 23 15:04:27 crc kubenswrapper[5050]: I1123 15:04:27.919729 5050 generic.go:334] "Generic (PLEG): container finished" podID="6c5e9b39-91fa-4f4e-9d95-0599bc22472d" containerID="afbf2c9723a3c51e2ef343739812462a5a83e6a2cd33bd51d95de96ab628994d" exitCode=0 Nov 23 15:04:27 crc kubenswrapper[5050]: I1123 15:04:27.925693 5050 generic.go:334] "Generic (PLEG): container finished" podID="ba1bb9f6-5ca2-4986-9473-62d50d9bebf0" containerID="08fc1f6beb7a82ec24f421d1df48b2d8f792f3248ae507344624f3d012bb743a" exitCode=0 Nov 23 15:04:27 crc kubenswrapper[5050]: I1123 15:04:27.935963 5050 generic.go:334] "Generic (PLEG): container finished" podID="add2a2e2-5553-4e43-8ddd-b8719949d531" containerID="ce2fe500a1bdad923f79c0d2d37ae5d7c546350b70f8148bfd61c5c68475e3f7" exitCode=0 Nov 23 15:04:27 crc kubenswrapper[5050]: I1123 15:04:27.936266 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-6b854f87-drlhc" Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.158617 5050 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="f92353db-5352-4216-ad2d-50242e58dfb7" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.103:5671: connect: connection refused" Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.265199 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-77dc855c68-z488p" event={"ID":"f85034c8-6b79-42e1-849d-646a2ead1a93","Type":"ContainerDied","Data":"ba962ea3ca9a2e41f992ef21c4cd5215480c219a40218ff8e9e9408b4eeec97b"} Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.265249 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-3052-account-create-ztg8b"] Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.265270 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/novacell03052-account-delete-kz7k7"] Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.265285 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"94bbca67-2998-4727-bbe1-95d54277f4aa","Type":"ContainerDied","Data":"7ddde7be0ab80cfcd7eacb38e126a697a0d2f4d855ad70062b839e6d933396d5"} Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.265298 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"94bbca67-2998-4727-bbe1-95d54277f4aa","Type":"ContainerDied","Data":"2adb3c2f6378a26e445982e9e114aa13e08fbdc11745d0270575d6708316c8c9"} Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.265309 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2adb3c2f6378a26e445982e9e114aa13e08fbdc11745d0270575d6708316c8c9" Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.265320 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-3052-account-create-ztg8b"] Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.265341 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/nova-metadata-0" event={"ID":"8e597d4a-3ff4-4b9e-ab6a-c0364f6afb43","Type":"ContainerDied","Data":"6f245a393065748303acdd84cc3863db9cf884ff2abd3737de04d10232960c78"} Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.265356 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"d1ce97bd-1ff7-400e-a741-7e757fbd7e17","Type":"ContainerDied","Data":"10d8f0ce4d54a26bf81a8c27901423a54c9049d1e6deff9e87bd78ed1978474b"} Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.265368 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-db-create-rl7fq"] Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.265380 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-db-create-rl7fq"] Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.265391 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-864fb4756c-xzxnp" event={"ID":"042c506a-b65f-4cbd-9ca7-3df0ec55ffa0","Type":"ContainerDied","Data":"085d654f5685c959e6314e3ffe048bec6afd8a89e45c228a241cbd713de9226c"} Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.265404 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/novaapif81d-account-delete-xfdwn"] Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.265418 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"6c3ae457-67ab-4c0b-a17e-a8264215793b","Type":"ContainerDied","Data":"11eb99c3ddd7ac1add778be8c07cea9de7ac571edec1bae0cd0e936ceb624706"} Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.265430 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-f81d-account-create-5bgqw"] Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.265460 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-f81d-account-create-5bgqw"] Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.265474 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"4d1fe4ad-9245-4af5-b378-c908ce72f08c","Type":"ContainerDied","Data":"42a4a3972d0dfdbfa5a41878495e90a1790378398c3c410fbb952adc25a032b1"} Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.265488 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"528697e8-1ddc-4ab0-aa0e-008631905a4c","Type":"ContainerDied","Data":"5f913c69d91ed75b757a4d06691b6d5cb9dec9b9a7273d12971e80ccd91082db"} Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.265501 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"528697e8-1ddc-4ab0-aa0e-008631905a4c","Type":"ContainerDied","Data":"0db0ab1375fb7487d196ec345c0738e0de0a29049ea856246e4379363ccc3e46"} Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.265511 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"528697e8-1ddc-4ab0-aa0e-008631905a4c","Type":"ContainerDied","Data":"54bb6bc8b8fe2be064d6b1b7c3753c2cce5da608c4510b93b32b5cb3298cf7f7"} Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.265522 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"4d3c8093-cbaa-4414-9f1d-c7ae7a2b9322","Type":"ContainerDied","Data":"63cf2a00b105f3566525e540e376dbc5f791f9441863630deaff0d4a49ab2c58"} Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.265535 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/glance-default-internal-api-0" event={"ID":"6c5e9b39-91fa-4f4e-9d95-0599bc22472d","Type":"ContainerDied","Data":"afbf2c9723a3c51e2ef343739812462a5a83e6a2cd33bd51d95de96ab628994d"} Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.265548 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"ba1bb9f6-5ca2-4986-9473-62d50d9bebf0","Type":"ContainerDied","Data":"08fc1f6beb7a82ec24f421d1df48b2d8f792f3248ae507344624f3d012bb743a"} Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.265561 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-748fb7ccf6-s9qnq" event={"ID":"add2a2e2-5553-4e43-8ddd-b8719949d531","Type":"ContainerDied","Data":"ce2fe500a1bdad923f79c0d2d37ae5d7c546350b70f8148bfd61c5c68475e3f7"} Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.284215 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.284287 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.308888 5050 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" podUID="5eff539e-c641-4873-aeae-450aaf0b4ac8" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.104:5671: connect: connection refused" Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.309154 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 23 15:04:28 crc kubenswrapper[5050]: E1123 15:04:28.309572 5050 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="deafd7234d6f0ca5e0fe921dff3e5ef022a4b569f1d6016eec6fa8e609b595e2" cmd=["/usr/local/bin/container-scripts/status_check.sh"] Nov 23 15:04:28 crc kubenswrapper[5050]: E1123 15:04:28.311592 5050 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="deafd7234d6f0ca5e0fe921dff3e5ef022a4b569f1d6016eec6fa8e609b595e2" cmd=["/usr/local/bin/container-scripts/status_check.sh"] Nov 23 15:04:28 crc kubenswrapper[5050]: E1123 15:04:28.320794 5050 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="deafd7234d6f0ca5e0fe921dff3e5ef022a4b569f1d6016eec6fa8e609b595e2" cmd=["/usr/local/bin/container-scripts/status_check.sh"] Nov 23 15:04:28 crc kubenswrapper[5050]: E1123 15:04:28.320852 5050 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-northd-0" podUID="27e26dea-41e9-4d40-9adc-f902e5f4791f" containerName="ovn-northd" Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.326822 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-keystone-listener-586bfdfdd8-99vrd"] Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.334557 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.347846 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-748fb7ccf6-s9qnq" Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.349310 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-keystone-listener-586bfdfdd8-99vrd"] Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.369884 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-proxy-6b854f87-drlhc"] Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.381637 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/swift-proxy-6b854f87-drlhc"] Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.388933 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.394416 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.394552 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.412479 5050 scope.go:117] "RemoveContainer" containerID="838adec05740ac7159beefeb2323ec2c83ab99b474e2e4e94e47e803accce920" Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.417704 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.418416 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/94bbca67-2998-4727-bbe1-95d54277f4aa-kube-state-metrics-tls-config\") pod \"94bbca67-2998-4727-bbe1-95d54277f4aa\" (UID: \"94bbca67-2998-4727-bbe1-95d54277f4aa\") " Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.418511 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/94bbca67-2998-4727-bbe1-95d54277f4aa-combined-ca-bundle\") pod \"94bbca67-2998-4727-bbe1-95d54277f4aa\" (UID: \"94bbca67-2998-4727-bbe1-95d54277f4aa\") " Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.418640 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/94bbca67-2998-4727-bbe1-95d54277f4aa-kube-state-metrics-tls-certs\") pod \"94bbca67-2998-4727-bbe1-95d54277f4aa\" (UID: \"94bbca67-2998-4727-bbe1-95d54277f4aa\") " Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.418689 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7mrbk\" (UniqueName: \"kubernetes.io/projected/94bbca67-2998-4727-bbe1-95d54277f4aa-kube-api-access-7mrbk\") pod \"94bbca67-2998-4727-bbe1-95d54277f4aa\" (UID: \"94bbca67-2998-4727-bbe1-95d54277f4aa\") " Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.430048 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/memcached-0" Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.430509 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/94bbca67-2998-4727-bbe1-95d54277f4aa-kube-api-access-7mrbk" (OuterVolumeSpecName: "kube-api-access-7mrbk") pod "94bbca67-2998-4727-bbe1-95d54277f4aa" (UID: "94bbca67-2998-4727-bbe1-95d54277f4aa"). InnerVolumeSpecName "kube-api-access-7mrbk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.461597 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-77dc855c68-z488p" Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.474486 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/94bbca67-2998-4727-bbe1-95d54277f4aa-kube-state-metrics-tls-config" (OuterVolumeSpecName: "kube-state-metrics-tls-config") pod "94bbca67-2998-4727-bbe1-95d54277f4aa" (UID: "94bbca67-2998-4727-bbe1-95d54277f4aa"). InnerVolumeSpecName "kube-state-metrics-tls-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.494886 5050 scope.go:117] "RemoveContainer" containerID="a2b90cc293f875a53ea5d05893281bf2cf0a6970ffc907d52d3260fc55c43311" Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.503036 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/94bbca67-2998-4727-bbe1-95d54277f4aa-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "94bbca67-2998-4727-bbe1-95d54277f4aa" (UID: "94bbca67-2998-4727-bbe1-95d54277f4aa"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.520156 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/add2a2e2-5553-4e43-8ddd-b8719949d531-internal-tls-certs\") pod \"add2a2e2-5553-4e43-8ddd-b8719949d531\" (UID: \"add2a2e2-5553-4e43-8ddd-b8719949d531\") " Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.520215 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c5e9b39-91fa-4f4e-9d95-0599bc22472d-combined-ca-bundle\") pod \"6c5e9b39-91fa-4f4e-9d95-0599bc22472d\" (UID: \"6c5e9b39-91fa-4f4e-9d95-0599bc22472d\") " Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.520263 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6c5e9b39-91fa-4f4e-9d95-0599bc22472d-internal-tls-certs\") pod \"6c5e9b39-91fa-4f4e-9d95-0599bc22472d\" (UID: \"6c5e9b39-91fa-4f4e-9d95-0599bc22472d\") " Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.520295 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ba1bb9f6-5ca2-4986-9473-62d50d9bebf0-config-data-custom\") pod \"ba1bb9f6-5ca2-4986-9473-62d50d9bebf0\" (UID: \"ba1bb9f6-5ca2-4986-9473-62d50d9bebf0\") " Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.520327 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/ba1bb9f6-5ca2-4986-9473-62d50d9bebf0-etc-machine-id\") pod \"ba1bb9f6-5ca2-4986-9473-62d50d9bebf0\" 
(UID: \"ba1bb9f6-5ca2-4986-9473-62d50d9bebf0\") " Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.520378 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d1ce97bd-1ff7-400e-a741-7e757fbd7e17-config-data\") pod \"d1ce97bd-1ff7-400e-a741-7e757fbd7e17\" (UID: \"d1ce97bd-1ff7-400e-a741-7e757fbd7e17\") " Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.520402 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ba1bb9f6-5ca2-4986-9473-62d50d9bebf0-internal-tls-certs\") pod \"ba1bb9f6-5ca2-4986-9473-62d50d9bebf0\" (UID: \"ba1bb9f6-5ca2-4986-9473-62d50d9bebf0\") " Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.520458 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/add2a2e2-5553-4e43-8ddd-b8719949d531-logs\") pod \"add2a2e2-5553-4e43-8ddd-b8719949d531\" (UID: \"add2a2e2-5553-4e43-8ddd-b8719949d531\") " Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.520500 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4d3c8093-cbaa-4414-9f1d-c7ae7a2b9322-config-data\") pod \"4d3c8093-cbaa-4414-9f1d-c7ae7a2b9322\" (UID: \"4d3c8093-cbaa-4414-9f1d-c7ae7a2b9322\") " Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.520555 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ba1bb9f6-5ca2-4986-9473-62d50d9bebf0-config-data\") pod \"ba1bb9f6-5ca2-4986-9473-62d50d9bebf0\" (UID: \"ba1bb9f6-5ca2-4986-9473-62d50d9bebf0\") " Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.520584 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-srcwm\" (UniqueName: \"kubernetes.io/projected/add2a2e2-5553-4e43-8ddd-b8719949d531-kube-api-access-srcwm\") pod \"add2a2e2-5553-4e43-8ddd-b8719949d531\" (UID: \"add2a2e2-5553-4e43-8ddd-b8719949d531\") " Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.520616 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/d1ce97bd-1ff7-400e-a741-7e757fbd7e17-kolla-config\") pod \"d1ce97bd-1ff7-400e-a741-7e757fbd7e17\" (UID: \"d1ce97bd-1ff7-400e-a741-7e757fbd7e17\") " Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.520655 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c3ae457-67ab-4c0b-a17e-a8264215793b-config-data\") pod \"6c3ae457-67ab-4c0b-a17e-a8264215793b\" (UID: \"6c3ae457-67ab-4c0b-a17e-a8264215793b\") " Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.520686 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8e597d4a-3ff4-4b9e-ab6a-c0364f6afb43-config-data\") pod \"8e597d4a-3ff4-4b9e-ab6a-c0364f6afb43\" (UID: \"8e597d4a-3ff4-4b9e-ab6a-c0364f6afb43\") " Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.520714 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x24jh\" (UniqueName: \"kubernetes.io/projected/6c5e9b39-91fa-4f4e-9d95-0599bc22472d-kube-api-access-x24jh\") pod \"6c5e9b39-91fa-4f4e-9d95-0599bc22472d\" (UID: \"6c5e9b39-91fa-4f4e-9d95-0599bc22472d\") 
" Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.520762 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4q8sq\" (UniqueName: \"kubernetes.io/projected/6c3ae457-67ab-4c0b-a17e-a8264215793b-kube-api-access-4q8sq\") pod \"6c3ae457-67ab-4c0b-a17e-a8264215793b\" (UID: \"6c3ae457-67ab-4c0b-a17e-a8264215793b\") " Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.520793 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6c5e9b39-91fa-4f4e-9d95-0599bc22472d-scripts\") pod \"6c5e9b39-91fa-4f4e-9d95-0599bc22472d\" (UID: \"6c5e9b39-91fa-4f4e-9d95-0599bc22472d\") " Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.520829 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/add2a2e2-5553-4e43-8ddd-b8719949d531-combined-ca-bundle\") pod \"add2a2e2-5553-4e43-8ddd-b8719949d531\" (UID: \"add2a2e2-5553-4e43-8ddd-b8719949d531\") " Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.520854 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6c3ae457-67ab-4c0b-a17e-a8264215793b-logs\") pod \"6c3ae457-67ab-4c0b-a17e-a8264215793b\" (UID: \"6c3ae457-67ab-4c0b-a17e-a8264215793b\") " Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.520882 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ba1bb9f6-5ca2-4986-9473-62d50d9bebf0-scripts\") pod \"ba1bb9f6-5ca2-4986-9473-62d50d9bebf0\" (UID: \"ba1bb9f6-5ca2-4986-9473-62d50d9bebf0\") " Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.520922 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/add2a2e2-5553-4e43-8ddd-b8719949d531-scripts\") pod \"add2a2e2-5553-4e43-8ddd-b8719949d531\" (UID: \"add2a2e2-5553-4e43-8ddd-b8719949d531\") " Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.520965 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/6c3ae457-67ab-4c0b-a17e-a8264215793b-public-tls-certs\") pod \"6c3ae457-67ab-4c0b-a17e-a8264215793b\" (UID: \"6c3ae457-67ab-4c0b-a17e-a8264215793b\") " Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.521016 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rg5xn\" (UniqueName: \"kubernetes.io/projected/d1ce97bd-1ff7-400e-a741-7e757fbd7e17-kube-api-access-rg5xn\") pod \"d1ce97bd-1ff7-400e-a741-7e757fbd7e17\" (UID: \"d1ce97bd-1ff7-400e-a741-7e757fbd7e17\") " Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.521071 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fr79l\" (UniqueName: \"kubernetes.io/projected/ba1bb9f6-5ca2-4986-9473-62d50d9bebf0-kube-api-access-fr79l\") pod \"ba1bb9f6-5ca2-4986-9473-62d50d9bebf0\" (UID: \"ba1bb9f6-5ca2-4986-9473-62d50d9bebf0\") " Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.521098 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/add2a2e2-5553-4e43-8ddd-b8719949d531-config-data\") pod \"add2a2e2-5553-4e43-8ddd-b8719949d531\" (UID: \"add2a2e2-5553-4e43-8ddd-b8719949d531\") " Nov 23 15:04:28 crc kubenswrapper[5050]: 
I1123 15:04:28.521128 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d1ce97bd-1ff7-400e-a741-7e757fbd7e17-combined-ca-bundle\") pod \"d1ce97bd-1ff7-400e-a741-7e757fbd7e17\" (UID: \"d1ce97bd-1ff7-400e-a741-7e757fbd7e17\") " Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.521172 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4d3c8093-cbaa-4414-9f1d-c7ae7a2b9322-scripts\") pod \"4d3c8093-cbaa-4414-9f1d-c7ae7a2b9322\" (UID: \"4d3c8093-cbaa-4414-9f1d-c7ae7a2b9322\") " Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.521196 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/add2a2e2-5553-4e43-8ddd-b8719949d531-public-tls-certs\") pod \"add2a2e2-5553-4e43-8ddd-b8719949d531\" (UID: \"add2a2e2-5553-4e43-8ddd-b8719949d531\") " Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.521225 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8e597d4a-3ff4-4b9e-ab6a-c0364f6afb43-combined-ca-bundle\") pod \"8e597d4a-3ff4-4b9e-ab6a-c0364f6afb43\" (UID: \"8e597d4a-3ff4-4b9e-ab6a-c0364f6afb43\") " Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.521253 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m82zz\" (UniqueName: \"kubernetes.io/projected/8e597d4a-3ff4-4b9e-ab6a-c0364f6afb43-kube-api-access-m82zz\") pod \"8e597d4a-3ff4-4b9e-ab6a-c0364f6afb43\" (UID: \"8e597d4a-3ff4-4b9e-ab6a-c0364f6afb43\") " Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.521276 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4d3c8093-cbaa-4414-9f1d-c7ae7a2b9322-logs\") pod \"4d3c8093-cbaa-4414-9f1d-c7ae7a2b9322\" (UID: \"4d3c8093-cbaa-4414-9f1d-c7ae7a2b9322\") " Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.521304 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4d3c8093-cbaa-4414-9f1d-c7ae7a2b9322-public-tls-certs\") pod \"4d3c8093-cbaa-4414-9f1d-c7ae7a2b9322\" (UID: \"4d3c8093-cbaa-4414-9f1d-c7ae7a2b9322\") " Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.521334 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ba1bb9f6-5ca2-4986-9473-62d50d9bebf0-logs\") pod \"ba1bb9f6-5ca2-4986-9473-62d50d9bebf0\" (UID: \"ba1bb9f6-5ca2-4986-9473-62d50d9bebf0\") " Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.521361 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4d3c8093-cbaa-4414-9f1d-c7ae7a2b9322-combined-ca-bundle\") pod \"4d3c8093-cbaa-4414-9f1d-c7ae7a2b9322\" (UID: \"4d3c8093-cbaa-4414-9f1d-c7ae7a2b9322\") " Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.521385 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/d1ce97bd-1ff7-400e-a741-7e757fbd7e17-memcached-tls-certs\") pod \"d1ce97bd-1ff7-400e-a741-7e757fbd7e17\" (UID: \"d1ce97bd-1ff7-400e-a741-7e757fbd7e17\") " Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.521415 5050 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c5e9b39-91fa-4f4e-9d95-0599bc22472d-config-data\") pod \"6c5e9b39-91fa-4f4e-9d95-0599bc22472d\" (UID: \"6c5e9b39-91fa-4f4e-9d95-0599bc22472d\") " Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.521458 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8e597d4a-3ff4-4b9e-ab6a-c0364f6afb43-logs\") pod \"8e597d4a-3ff4-4b9e-ab6a-c0364f6afb43\" (UID: \"8e597d4a-3ff4-4b9e-ab6a-c0364f6afb43\") " Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.521491 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/4d3c8093-cbaa-4414-9f1d-c7ae7a2b9322-httpd-run\") pod \"4d3c8093-cbaa-4414-9f1d-c7ae7a2b9322\" (UID: \"4d3c8093-cbaa-4414-9f1d-c7ae7a2b9322\") " Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.521522 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/8e597d4a-3ff4-4b9e-ab6a-c0364f6afb43-nova-metadata-tls-certs\") pod \"8e597d4a-3ff4-4b9e-ab6a-c0364f6afb43\" (UID: \"8e597d4a-3ff4-4b9e-ab6a-c0364f6afb43\") " Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.521548 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"6c5e9b39-91fa-4f4e-9d95-0599bc22472d\" (UID: \"6c5e9b39-91fa-4f4e-9d95-0599bc22472d\") " Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.521576 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ba1bb9f6-5ca2-4986-9473-62d50d9bebf0-public-tls-certs\") pod \"ba1bb9f6-5ca2-4986-9473-62d50d9bebf0\" (UID: \"ba1bb9f6-5ca2-4986-9473-62d50d9bebf0\") " Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.521606 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ba1bb9f6-5ca2-4986-9473-62d50d9bebf0-combined-ca-bundle\") pod \"ba1bb9f6-5ca2-4986-9473-62d50d9bebf0\" (UID: \"ba1bb9f6-5ca2-4986-9473-62d50d9bebf0\") " Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.521633 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6c5e9b39-91fa-4f4e-9d95-0599bc22472d-logs\") pod \"6c5e9b39-91fa-4f4e-9d95-0599bc22472d\" (UID: \"6c5e9b39-91fa-4f4e-9d95-0599bc22472d\") " Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.521660 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/6c5e9b39-91fa-4f4e-9d95-0599bc22472d-httpd-run\") pod \"6c5e9b39-91fa-4f4e-9d95-0599bc22472d\" (UID: \"6c5e9b39-91fa-4f4e-9d95-0599bc22472d\") " Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.521717 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"4d3c8093-cbaa-4414-9f1d-c7ae7a2b9322\" (UID: \"4d3c8093-cbaa-4414-9f1d-c7ae7a2b9322\") " Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.521790 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/host-path/ba1bb9f6-5ca2-4986-9473-62d50d9bebf0-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "ba1bb9f6-5ca2-4986-9473-62d50d9bebf0" (UID: "ba1bb9f6-5ca2-4986-9473-62d50d9bebf0"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.521886 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z59db\" (UniqueName: \"kubernetes.io/projected/4d3c8093-cbaa-4414-9f1d-c7ae7a2b9322-kube-api-access-z59db\") pod \"4d3c8093-cbaa-4414-9f1d-c7ae7a2b9322\" (UID: \"4d3c8093-cbaa-4414-9f1d-c7ae7a2b9322\") " Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.521931 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c3ae457-67ab-4c0b-a17e-a8264215793b-combined-ca-bundle\") pod \"6c3ae457-67ab-4c0b-a17e-a8264215793b\" (UID: \"6c3ae457-67ab-4c0b-a17e-a8264215793b\") " Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.521960 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6c3ae457-67ab-4c0b-a17e-a8264215793b-internal-tls-certs\") pod \"6c3ae457-67ab-4c0b-a17e-a8264215793b\" (UID: \"6c3ae457-67ab-4c0b-a17e-a8264215793b\") " Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.522564 5050 reconciler_common.go:293] "Volume detached for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/94bbca67-2998-4727-bbe1-95d54277f4aa-kube-state-metrics-tls-config\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.522584 5050 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/94bbca67-2998-4727-bbe1-95d54277f4aa-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.522599 5050 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/ba1bb9f6-5ca2-4986-9473-62d50d9bebf0-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.522612 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7mrbk\" (UniqueName: \"kubernetes.io/projected/94bbca67-2998-4727-bbe1-95d54277f4aa-kube-api-access-7mrbk\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.527126 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8e597d4a-3ff4-4b9e-ab6a-c0364f6afb43-logs" (OuterVolumeSpecName: "logs") pod "8e597d4a-3ff4-4b9e-ab6a-c0364f6afb43" (UID: "8e597d4a-3ff4-4b9e-ab6a-c0364f6afb43"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.535891 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6c3ae457-67ab-4c0b-a17e-a8264215793b-logs" (OuterVolumeSpecName: "logs") pod "6c3ae457-67ab-4c0b-a17e-a8264215793b" (UID: "6c3ae457-67ab-4c0b-a17e-a8264215793b"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.536155 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4d3c8093-cbaa-4414-9f1d-c7ae7a2b9322-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "4d3c8093-cbaa-4414-9f1d-c7ae7a2b9322" (UID: "4d3c8093-cbaa-4414-9f1d-c7ae7a2b9322"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.537600 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6c5e9b39-91fa-4f4e-9d95-0599bc22472d-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "6c5e9b39-91fa-4f4e-9d95-0599bc22472d" (UID: "6c5e9b39-91fa-4f4e-9d95-0599bc22472d"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.537606 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6c5e9b39-91fa-4f4e-9d95-0599bc22472d-logs" (OuterVolumeSpecName: "logs") pod "6c5e9b39-91fa-4f4e-9d95-0599bc22472d" (UID: "6c5e9b39-91fa-4f4e-9d95-0599bc22472d"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.538547 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/add2a2e2-5553-4e43-8ddd-b8719949d531-logs" (OuterVolumeSpecName: "logs") pod "add2a2e2-5553-4e43-8ddd-b8719949d531" (UID: "add2a2e2-5553-4e43-8ddd-b8719949d531"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.538958 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d1ce97bd-1ff7-400e-a741-7e757fbd7e17-kolla-config" (OuterVolumeSpecName: "kolla-config") pod "d1ce97bd-1ff7-400e-a741-7e757fbd7e17" (UID: "d1ce97bd-1ff7-400e-a741-7e757fbd7e17"). InnerVolumeSpecName "kolla-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.541726 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d1ce97bd-1ff7-400e-a741-7e757fbd7e17-config-data" (OuterVolumeSpecName: "config-data") pod "d1ce97bd-1ff7-400e-a741-7e757fbd7e17" (UID: "d1ce97bd-1ff7-400e-a741-7e757fbd7e17"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.543574 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ba1bb9f6-5ca2-4986-9473-62d50d9bebf0-logs" (OuterVolumeSpecName: "logs") pod "ba1bb9f6-5ca2-4986-9473-62d50d9bebf0" (UID: "ba1bb9f6-5ca2-4986-9473-62d50d9bebf0"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.543723 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4d3c8093-cbaa-4414-9f1d-c7ae7a2b9322-logs" (OuterVolumeSpecName: "logs") pod "4d3c8093-cbaa-4414-9f1d-c7ae7a2b9322" (UID: "4d3c8093-cbaa-4414-9f1d-c7ae7a2b9322"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.566419 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4d3c8093-cbaa-4414-9f1d-c7ae7a2b9322-scripts" (OuterVolumeSpecName: "scripts") pod "4d3c8093-cbaa-4414-9f1d-c7ae7a2b9322" (UID: "4d3c8093-cbaa-4414-9f1d-c7ae7a2b9322"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.568329 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage09-crc" (OuterVolumeSpecName: "glance") pod "4d3c8093-cbaa-4414-9f1d-c7ae7a2b9322" (UID: "4d3c8093-cbaa-4414-9f1d-c7ae7a2b9322"). InnerVolumeSpecName "local-storage09-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.568370 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ba1bb9f6-5ca2-4986-9473-62d50d9bebf0-scripts" (OuterVolumeSpecName: "scripts") pod "ba1bb9f6-5ca2-4986-9473-62d50d9bebf0" (UID: "ba1bb9f6-5ca2-4986-9473-62d50d9bebf0"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.568521 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ba1bb9f6-5ca2-4986-9473-62d50d9bebf0-kube-api-access-fr79l" (OuterVolumeSpecName: "kube-api-access-fr79l") pod "ba1bb9f6-5ca2-4986-9473-62d50d9bebf0" (UID: "ba1bb9f6-5ca2-4986-9473-62d50d9bebf0"). InnerVolumeSpecName "kube-api-access-fr79l". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.569917 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ba1bb9f6-5ca2-4986-9473-62d50d9bebf0-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "ba1bb9f6-5ca2-4986-9473-62d50d9bebf0" (UID: "ba1bb9f6-5ca2-4986-9473-62d50d9bebf0"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.570183 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage11-crc" (OuterVolumeSpecName: "glance") pod "6c5e9b39-91fa-4f4e-9d95-0599bc22472d" (UID: "6c5e9b39-91fa-4f4e-9d95-0599bc22472d"). InnerVolumeSpecName "local-storage11-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.574696 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4d3c8093-cbaa-4414-9f1d-c7ae7a2b9322-kube-api-access-z59db" (OuterVolumeSpecName: "kube-api-access-z59db") pod "4d3c8093-cbaa-4414-9f1d-c7ae7a2b9322" (UID: "4d3c8093-cbaa-4414-9f1d-c7ae7a2b9322"). InnerVolumeSpecName "kube-api-access-z59db". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.575698 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6c5e9b39-91fa-4f4e-9d95-0599bc22472d-kube-api-access-x24jh" (OuterVolumeSpecName: "kube-api-access-x24jh") pod "6c5e9b39-91fa-4f4e-9d95-0599bc22472d" (UID: "6c5e9b39-91fa-4f4e-9d95-0599bc22472d"). InnerVolumeSpecName "kube-api-access-x24jh". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.575721 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/add2a2e2-5553-4e43-8ddd-b8719949d531-scripts" (OuterVolumeSpecName: "scripts") pod "add2a2e2-5553-4e43-8ddd-b8719949d531" (UID: "add2a2e2-5553-4e43-8ddd-b8719949d531"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.575840 5050 scope.go:117] "RemoveContainer" containerID="561dbaa7317e13693d05c4f79f3b0d5138d3e140a5300d28891e26017d5bd81a" Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.575709 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6c5e9b39-91fa-4f4e-9d95-0599bc22472d-scripts" (OuterVolumeSpecName: "scripts") pod "6c5e9b39-91fa-4f4e-9d95-0599bc22472d" (UID: "6c5e9b39-91fa-4f4e-9d95-0599bc22472d"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.581702 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d1ce97bd-1ff7-400e-a741-7e757fbd7e17-kube-api-access-rg5xn" (OuterVolumeSpecName: "kube-api-access-rg5xn") pod "d1ce97bd-1ff7-400e-a741-7e757fbd7e17" (UID: "d1ce97bd-1ff7-400e-a741-7e757fbd7e17"). InnerVolumeSpecName "kube-api-access-rg5xn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.606429 5050 scope.go:117] "RemoveContainer" containerID="973cb826038668efa7e6c38aa0a38cd28308df3422e590a77aa204d04d4cf352" Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.607340 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8e597d4a-3ff4-4b9e-ab6a-c0364f6afb43-kube-api-access-m82zz" (OuterVolumeSpecName: "kube-api-access-m82zz") pod "8e597d4a-3ff4-4b9e-ab6a-c0364f6afb43" (UID: "8e597d4a-3ff4-4b9e-ab6a-c0364f6afb43"). InnerVolumeSpecName "kube-api-access-m82zz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.608311 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/add2a2e2-5553-4e43-8ddd-b8719949d531-kube-api-access-srcwm" (OuterVolumeSpecName: "kube-api-access-srcwm") pod "add2a2e2-5553-4e43-8ddd-b8719949d531" (UID: "add2a2e2-5553-4e43-8ddd-b8719949d531"). InnerVolumeSpecName "kube-api-access-srcwm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.609772 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6c3ae457-67ab-4c0b-a17e-a8264215793b-kube-api-access-4q8sq" (OuterVolumeSpecName: "kube-api-access-4q8sq") pod "6c3ae457-67ab-4c0b-a17e-a8264215793b" (UID: "6c3ae457-67ab-4c0b-a17e-a8264215793b"). InnerVolumeSpecName "kube-api-access-4q8sq". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.623595 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f85034c8-6b79-42e1-849d-646a2ead1a93-logs\") pod \"f85034c8-6b79-42e1-849d-646a2ead1a93\" (UID: \"f85034c8-6b79-42e1-849d-646a2ead1a93\") " Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.623913 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f85034c8-6b79-42e1-849d-646a2ead1a93-internal-tls-certs\") pod \"f85034c8-6b79-42e1-849d-646a2ead1a93\" (UID: \"f85034c8-6b79-42e1-849d-646a2ead1a93\") " Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.624050 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/f85034c8-6b79-42e1-849d-646a2ead1a93-public-tls-certs\") pod \"f85034c8-6b79-42e1-849d-646a2ead1a93\" (UID: \"f85034c8-6b79-42e1-849d-646a2ead1a93\") " Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.624127 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f85034c8-6b79-42e1-849d-646a2ead1a93-config-data\") pod \"f85034c8-6b79-42e1-849d-646a2ead1a93\" (UID: \"f85034c8-6b79-42e1-849d-646a2ead1a93\") " Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.624331 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6v4d8\" (UniqueName: \"kubernetes.io/projected/f85034c8-6b79-42e1-849d-646a2ead1a93-kube-api-access-6v4d8\") pod \"f85034c8-6b79-42e1-849d-646a2ead1a93\" (UID: \"f85034c8-6b79-42e1-849d-646a2ead1a93\") " Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.624379 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f85034c8-6b79-42e1-849d-646a2ead1a93-config-data-custom\") pod \"f85034c8-6b79-42e1-849d-646a2ead1a93\" (UID: \"f85034c8-6b79-42e1-849d-646a2ead1a93\") " Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.624414 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f85034c8-6b79-42e1-849d-646a2ead1a93-combined-ca-bundle\") pod \"f85034c8-6b79-42e1-849d-646a2ead1a93\" (UID: \"f85034c8-6b79-42e1-849d-646a2ead1a93\") " Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.625913 5050 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ba1bb9f6-5ca2-4986-9473-62d50d9bebf0-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.625938 5050 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d1ce97bd-1ff7-400e-a741-7e757fbd7e17-config-data\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.625951 5050 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/add2a2e2-5553-4e43-8ddd-b8719949d531-logs\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.625964 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-srcwm\" (UniqueName: 
\"kubernetes.io/projected/add2a2e2-5553-4e43-8ddd-b8719949d531-kube-api-access-srcwm\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.625976 5050 reconciler_common.go:293] "Volume detached for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/d1ce97bd-1ff7-400e-a741-7e757fbd7e17-kolla-config\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.625986 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x24jh\" (UniqueName: \"kubernetes.io/projected/6c5e9b39-91fa-4f4e-9d95-0599bc22472d-kube-api-access-x24jh\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.625996 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4q8sq\" (UniqueName: \"kubernetes.io/projected/6c3ae457-67ab-4c0b-a17e-a8264215793b-kube-api-access-4q8sq\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.626006 5050 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6c5e9b39-91fa-4f4e-9d95-0599bc22472d-scripts\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.626016 5050 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6c3ae457-67ab-4c0b-a17e-a8264215793b-logs\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.626026 5050 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ba1bb9f6-5ca2-4986-9473-62d50d9bebf0-scripts\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.626038 5050 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/add2a2e2-5553-4e43-8ddd-b8719949d531-scripts\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.626047 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rg5xn\" (UniqueName: \"kubernetes.io/projected/d1ce97bd-1ff7-400e-a741-7e757fbd7e17-kube-api-access-rg5xn\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.626059 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fr79l\" (UniqueName: \"kubernetes.io/projected/ba1bb9f6-5ca2-4986-9473-62d50d9bebf0-kube-api-access-fr79l\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.626070 5050 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4d3c8093-cbaa-4414-9f1d-c7ae7a2b9322-scripts\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.626083 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m82zz\" (UniqueName: \"kubernetes.io/projected/8e597d4a-3ff4-4b9e-ab6a-c0364f6afb43-kube-api-access-m82zz\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.626092 5050 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4d3c8093-cbaa-4414-9f1d-c7ae7a2b9322-logs\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.626100 5050 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ba1bb9f6-5ca2-4986-9473-62d50d9bebf0-logs\") on node \"crc\" DevicePath \"\"" Nov 23 
15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.626250 5050 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8e597d4a-3ff4-4b9e-ab6a-c0364f6afb43-logs\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.626264 5050 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/4d3c8093-cbaa-4414-9f1d-c7ae7a2b9322-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.626287 5050 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") on node \"crc\" " Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.626297 5050 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6c5e9b39-91fa-4f4e-9d95-0599bc22472d-logs\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.626306 5050 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/6c5e9b39-91fa-4f4e-9d95-0599bc22472d-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.626324 5050 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") on node \"crc\" " Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.626333 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z59db\" (UniqueName: \"kubernetes.io/projected/4d3c8093-cbaa-4414-9f1d-c7ae7a2b9322-kube-api-access-z59db\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.628116 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f85034c8-6b79-42e1-849d-646a2ead1a93-logs" (OuterVolumeSpecName: "logs") pod "f85034c8-6b79-42e1-849d-646a2ead1a93" (UID: "f85034c8-6b79-42e1-849d-646a2ead1a93"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.663732 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f85034c8-6b79-42e1-849d-646a2ead1a93-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "f85034c8-6b79-42e1-849d-646a2ead1a93" (UID: "f85034c8-6b79-42e1-849d-646a2ead1a93"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.675864 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f85034c8-6b79-42e1-849d-646a2ead1a93-kube-api-access-6v4d8" (OuterVolumeSpecName: "kube-api-access-6v4d8") pod "f85034c8-6b79-42e1-849d-646a2ead1a93" (UID: "f85034c8-6b79-42e1-849d-646a2ead1a93"). InnerVolumeSpecName "kube-api-access-6v4d8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.695418 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4d3c8093-cbaa-4414-9f1d-c7ae7a2b9322-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4d3c8093-cbaa-4414-9f1d-c7ae7a2b9322" (UID: "4d3c8093-cbaa-4414-9f1d-c7ae7a2b9322"). InnerVolumeSpecName "combined-ca-bundle". 
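The `UnmountDevice started` records above mark the second phase of teardown: per-pod `UnmountVolume.TearDown` runs first, and only once no pod still mounts local-storage09-crc or local-storage11-crc does the node-level unmount begin (the matching `UnmountDevice succeeded` records follow below). A toy model of that ordering, assuming a single-pod mount — this is not kubelet code:

```go
// Per-pod tear-down first; device-level unmount once nothing is mounted.
package main

import "fmt"

type volumeState struct {
	name      string
	mountedBy map[string]bool // pod UID -> still mounted
}

func (v *volumeState) tearDown(podUID string) {
	delete(v.mountedBy, podUID)
	fmt.Printf("UnmountVolume.TearDown succeeded for %q (pod %s)\n", v.name, podUID)
	if len(v.mountedBy) == 0 {
		fmt.Printf("UnmountDevice succeeded for volume %q\n", v.name)
	}
}

func main() {
	v := &volumeState{
		name:      "local-storage09-crc",
		mountedBy: map[string]bool{"4d3c8093-cbaa-4414-9f1d-c7ae7a2b9322": true},
	}
	v.tearDown("4d3c8093-cbaa-4414-9f1d-c7ae7a2b9322")
}
```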
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.713068 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6c3ae457-67ab-4c0b-a17e-a8264215793b-config-data" (OuterVolumeSpecName: "config-data") pod "6c3ae457-67ab-4c0b-a17e-a8264215793b" (UID: "6c3ae457-67ab-4c0b-a17e-a8264215793b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.729797 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6v4d8\" (UniqueName: \"kubernetes.io/projected/f85034c8-6b79-42e1-849d-646a2ead1a93-kube-api-access-6v4d8\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.729826 5050 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f85034c8-6b79-42e1-849d-646a2ead1a93-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.729836 5050 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c3ae457-67ab-4c0b-a17e-a8264215793b-config-data\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.729855 5050 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f85034c8-6b79-42e1-849d-646a2ead1a93-logs\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.729867 5050 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4d3c8093-cbaa-4414-9f1d-c7ae7a2b9322-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.836400 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6c5e9b39-91fa-4f4e-9d95-0599bc22472d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6c5e9b39-91fa-4f4e-9d95-0599bc22472d" (UID: "6c5e9b39-91fa-4f4e-9d95-0599bc22472d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.855086 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6c3ae457-67ab-4c0b-a17e-a8264215793b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6c3ae457-67ab-4c0b-a17e-a8264215793b" (UID: "6c3ae457-67ab-4c0b-a17e-a8264215793b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.865556 5050 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage09-crc" (UniqueName: "kubernetes.io/local-volume/local-storage09-crc") on node "crc" Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.868713 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6c3ae457-67ab-4c0b-a17e-a8264215793b-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "6c3ae457-67ab-4c0b-a17e-a8264215793b" (UID: "6c3ae457-67ab-4c0b-a17e-a8264215793b"). InnerVolumeSpecName "internal-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.871975 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8e597d4a-3ff4-4b9e-ab6a-c0364f6afb43-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8e597d4a-3ff4-4b9e-ab6a-c0364f6afb43" (UID: "8e597d4a-3ff4-4b9e-ab6a-c0364f6afb43"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.874341 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ba1bb9f6-5ca2-4986-9473-62d50d9bebf0-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "ba1bb9f6-5ca2-4986-9473-62d50d9bebf0" (UID: "ba1bb9f6-5ca2-4986-9473-62d50d9bebf0"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.883018 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f85034c8-6b79-42e1-849d-646a2ead1a93-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f85034c8-6b79-42e1-849d-646a2ead1a93" (UID: "f85034c8-6b79-42e1-849d-646a2ead1a93"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.884553 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ba1bb9f6-5ca2-4986-9473-62d50d9bebf0-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "ba1bb9f6-5ca2-4986-9473-62d50d9bebf0" (UID: "ba1bb9f6-5ca2-4986-9473-62d50d9bebf0"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.905949 5050 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage11-crc" (UniqueName: "kubernetes.io/local-volume/local-storage11-crc") on node "crc" Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.933304 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d1ce97bd-1ff7-400e-a741-7e757fbd7e17-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d1ce97bd-1ff7-400e-a741-7e757fbd7e17" (UID: "d1ce97bd-1ff7-400e-a741-7e757fbd7e17"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.935937 5050 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d1ce97bd-1ff7-400e-a741-7e757fbd7e17-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.935972 5050 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8e597d4a-3ff4-4b9e-ab6a-c0364f6afb43-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.935984 5050 reconciler_common.go:293] "Volume detached for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.935997 5050 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ba1bb9f6-5ca2-4986-9473-62d50d9bebf0-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.936010 5050 reconciler_common.go:293] "Volume detached for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.936021 5050 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c3ae457-67ab-4c0b-a17e-a8264215793b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.936032 5050 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6c3ae457-67ab-4c0b-a17e-a8264215793b-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.936043 5050 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c5e9b39-91fa-4f4e-9d95-0599bc22472d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.936055 5050 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ba1bb9f6-5ca2-4986-9473-62d50d9bebf0-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.936067 5050 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f85034c8-6b79-42e1-849d-646a2ead1a93-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.938378 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4d3c8093-cbaa-4414-9f1d-c7ae7a2b9322-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "4d3c8093-cbaa-4414-9f1d-c7ae7a2b9322" (UID: "4d3c8093-cbaa-4414-9f1d-c7ae7a2b9322"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.939071 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ba1bb9f6-5ca2-4986-9473-62d50d9bebf0-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ba1bb9f6-5ca2-4986-9473-62d50d9bebf0" (UID: "ba1bb9f6-5ca2-4986-9473-62d50d9bebf0"). 
InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.948784 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/94bbca67-2998-4727-bbe1-95d54277f4aa-kube-state-metrics-tls-certs" (OuterVolumeSpecName: "kube-state-metrics-tls-certs") pod "94bbca67-2998-4727-bbe1-95d54277f4aa" (UID: "94bbca67-2998-4727-bbe1-95d54277f4aa"). InnerVolumeSpecName "kube-state-metrics-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.952810 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/novaapif81d-account-delete-xfdwn" event={"ID":"1bda1a38-71dd-4de4-8e16-b5159a08fdfa","Type":"ContainerStarted","Data":"dae7cb177dedffc9bba415c55b29716daa7fad33f3294e8262f68961a6e59713"} Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.954430 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/add2a2e2-5553-4e43-8ddd-b8719949d531-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "add2a2e2-5553-4e43-8ddd-b8719949d531" (UID: "add2a2e2-5553-4e43-8ddd-b8719949d531"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.955920 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"ba1bb9f6-5ca2-4986-9473-62d50d9bebf0","Type":"ContainerDied","Data":"995a0dabf2d59b6a031197958997eafb205618dab2b1d1f6ccf0bf53bf2ddab8"} Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.956041 5050 scope.go:117] "RemoveContainer" containerID="08fc1f6beb7a82ec24f421d1df48b2d8f792f3248ae507344624f3d012bb743a" Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.956245 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.960703 5050 kubelet_pods.go:1007] "Unable to retrieve pull secret, the image pull may not succeed." 
pod="openstack/novaapif81d-account-delete-xfdwn" secret="" err="secret \"galera-openstack-dockercfg-lrxnn\" not found" Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.960737 5050 generic.go:334] "Generic (PLEG): container finished" podID="634f01f5-daae-4e5d-811f-5d406bfee9c1" containerID="2649b8f823f897f77f83477412dd9f8246b7b8e23f72d10846e06d96f7b4bff1" exitCode=0 Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.960813 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"634f01f5-daae-4e5d-811f-5d406bfee9c1","Type":"ContainerDied","Data":"2649b8f823f897f77f83477412dd9f8246b7b8e23f72d10846e06d96f7b4bff1"} Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.969879 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glancebb96-account-delete-qls8c" event={"ID":"13ecb6d2-cd50-4239-8939-6465176afd8e","Type":"ContainerStarted","Data":"f061470e15b4db94da38ba3328655821452437dec9ef88351bce72f0bc801736"} Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.969931 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glancebb96-account-delete-qls8c" podUID="13ecb6d2-cd50-4239-8939-6465176afd8e" containerName="mariadb-account-delete" containerID="cri-o://f061470e15b4db94da38ba3328655821452437dec9ef88351bce72f0bc801736" gracePeriod=30 Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.978584 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"6c3ae457-67ab-4c0b-a17e-a8264215793b","Type":"ContainerDied","Data":"b9fab5ec2e83c0b4f3126e5998e522d339d6d799423ac29756bb22b80703041c"} Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.978602 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.980338 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/novaapif81d-account-delete-xfdwn" podStartSLOduration=6.980325894 podStartE2EDuration="6.980325894s" podCreationTimestamp="2025-11-23 15:04:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 15:04:28.968702697 +0000 UTC m=+1364.135699192" watchObservedRunningTime="2025-11-23 15:04:28.980325894 +0000 UTC m=+1364.147322379" Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.982052 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.982679 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"d1ce97bd-1ff7-400e-a741-7e757fbd7e17","Type":"ContainerDied","Data":"bdf4f973d1ec653e97e4cd376f6610129add9f44484993c9530763194b55afac"} Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.989142 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-748fb7ccf6-s9qnq" event={"ID":"add2a2e2-5553-4e43-8ddd-b8719949d531","Type":"ContainerDied","Data":"e90aeb6bca5150b94a5a513a2cc4e2b3291efa5c98df658f3c65b5db5139a3c0"} Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.989222 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-748fb7ccf6-s9qnq" Nov 23 15:04:28 crc kubenswrapper[5050]: I1123 15:04:28.990183 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glancebb96-account-delete-qls8c" podStartSLOduration=7.99016916 podStartE2EDuration="7.99016916s" podCreationTimestamp="2025-11-23 15:04:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 15:04:28.985825588 +0000 UTC m=+1364.152822073" watchObservedRunningTime="2025-11-23 15:04:28.99016916 +0000 UTC m=+1364.157165635" Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:28.999998 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6c5e9b39-91fa-4f4e-9d95-0599bc22472d-config-data" (OuterVolumeSpecName: "config-data") pod "6c5e9b39-91fa-4f4e-9d95-0599bc22472d" (UID: "6c5e9b39-91fa-4f4e-9d95-0599bc22472d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.002629 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-864fb4756c-xzxnp" event={"ID":"042c506a-b65f-4cbd-9ca7-3df0ec55ffa0","Type":"ContainerDied","Data":"f28cd14e6a182d41c41c1de00a64ae8d66357f165c8c6bc2270a9f9a8b59604b"} Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.002727 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f28cd14e6a182d41c41c1de00a64ae8d66357f165c8c6bc2270a9f9a8b59604b" Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.003573 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8e597d4a-3ff4-4b9e-ab6a-c0364f6afb43-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "8e597d4a-3ff4-4b9e-ab6a-c0364f6afb43" (UID: "8e597d4a-3ff4-4b9e-ab6a-c0364f6afb43"). InnerVolumeSpecName "nova-metadata-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.006883 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"4d3c8093-cbaa-4414-9f1d-c7ae7a2b9322","Type":"ContainerDied","Data":"695d9f05d4e86563b322744876bb859b745d0d12fe1faf2a90694532e039fc7f"} Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.006995 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.019998 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"8e597d4a-3ff4-4b9e-ab6a-c0364f6afb43","Type":"ContainerDied","Data":"871e395b9a5222660104bedabe687ac3c7c8de7bf3d7ac44d35069ee71894d4a"} Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.020154 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.024388 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f85034c8-6b79-42e1-849d-646a2ead1a93-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "f85034c8-6b79-42e1-849d-646a2ead1a93" (UID: "f85034c8-6b79-42e1-849d-646a2ead1a93"). InnerVolumeSpecName "internal-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.024537 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"6c5e9b39-91fa-4f4e-9d95-0599bc22472d","Type":"ContainerDied","Data":"ead3684cbd6eabb3234f8cced02e8a5b37dce04bd21bcbc733c85ff1de0b6f1c"} Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.024567 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.038041 5050 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4d3c8093-cbaa-4414-9f1d-c7ae7a2b9322-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.038072 5050 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c5e9b39-91fa-4f4e-9d95-0599bc22472d-config-data\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.038085 5050 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/8e597d4a-3ff4-4b9e-ab6a-c0364f6afb43-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.038099 5050 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ba1bb9f6-5ca2-4986-9473-62d50d9bebf0-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.038110 5050 reconciler_common.go:293] "Volume detached for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/94bbca67-2998-4727-bbe1-95d54277f4aa-kube-state-metrics-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.038121 5050 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/add2a2e2-5553-4e43-8ddd-b8719949d531-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.038130 5050 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f85034c8-6b79-42e1-849d-646a2ead1a93-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:29 crc kubenswrapper[5050]: E1123 15:04:29.038786 5050 configmap.go:193] Couldn't get configMap openstack/rabbitmq-config-data: configmap "rabbitmq-config-data" not found Nov 23 15:04:29 crc kubenswrapper[5050]: E1123 15:04:29.038850 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5eff539e-c641-4873-aeae-450aaf0b4ac8-config-data podName:5eff539e-c641-4873-aeae-450aaf0b4ac8 nodeName:}" failed. No retries permitted until 2025-11-23 15:04:37.038828096 +0000 UTC m=+1372.205824581 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/5eff539e-c641-4873-aeae-450aaf0b4ac8-config-data") pod "rabbitmq-server-0" (UID: "5eff539e-c641-4873-aeae-450aaf0b4ac8") : configmap "rabbitmq-config-data" not found Nov 23 15:04:29 crc kubenswrapper[5050]: E1123 15:04:29.039329 5050 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Nov 23 15:04:29 crc kubenswrapper[5050]: E1123 15:04:29.039421 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/1bda1a38-71dd-4de4-8e16-b5159a08fdfa-operator-scripts podName:1bda1a38-71dd-4de4-8e16-b5159a08fdfa nodeName:}" failed. No retries permitted until 2025-11-23 15:04:29.539411712 +0000 UTC m=+1364.706408197 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/1bda1a38-71dd-4de4-8e16-b5159a08fdfa-operator-scripts") pod "novaapif81d-account-delete-xfdwn" (UID: "1bda1a38-71dd-4de4-8e16-b5159a08fdfa") : configmap "openstack-scripts" not found Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.040779 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"4d1fe4ad-9245-4af5-b378-c908ce72f08c","Type":"ContainerDied","Data":"62b396d3a144a1d490a54faf3ddd19396d3c2fc2bd4890fdb04f6dad65320ebe"} Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.040816 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="62b396d3a144a1d490a54faf3ddd19396d3c2fc2bd4890fdb04f6dad65320ebe" Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.047588 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-77dc855c68-z488p" event={"ID":"f85034c8-6b79-42e1-849d-646a2ead1a93","Type":"ContainerDied","Data":"6a9518a45edf6d685eb5bb4729345f700a5d088b5886213d0be380bab0e90058"} Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.047638 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-77dc855c68-z488p" Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.047866 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8e597d4a-3ff4-4b9e-ab6a-c0364f6afb43-config-data" (OuterVolumeSpecName: "config-data") pod "8e597d4a-3ff4-4b9e-ab6a-c0364f6afb43" (UID: "8e597d4a-3ff4-4b9e-ab6a-c0364f6afb43"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.049397 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f85034c8-6b79-42e1-849d-646a2ead1a93-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "f85034c8-6b79-42e1-849d-646a2ead1a93" (UID: "f85034c8-6b79-42e1-849d-646a2ead1a93"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.050850 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4d3c8093-cbaa-4414-9f1d-c7ae7a2b9322-config-data" (OuterVolumeSpecName: "config-data") pod "4d3c8093-cbaa-4414-9f1d-c7ae7a2b9322" (UID: "4d3c8093-cbaa-4414-9f1d-c7ae7a2b9322"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.051415 5050 generic.go:334] "Generic (PLEG): container finished" podID="75e360ca-c84a-4806-a86a-86924a639cfc" containerID="dc18dcf10f3de24a9d22f69583ddef25bdb552099564e7a29e69ffc7a506a441" exitCode=0 Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.051477 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"75e360ca-c84a-4806-a86a-86924a639cfc","Type":"ContainerDied","Data":"dc18dcf10f3de24a9d22f69583ddef25bdb552099564e7a29e69ffc7a506a441"} Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.054846 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.054885 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/novacell03052-account-delete-kz7k7" event={"ID":"3d6e4b67-5d33-49cd-8a2a-69c3fc2c9d89","Type":"ContainerStarted","Data":"f88295c1f1bb5ed5c59a421aaeb3b1e96a228d0eedd67e330915a293e3607e55"} Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.056368 5050 kubelet_pods.go:1007] "Unable to retrieve pull secret, the image pull may not succeed." pod="openstack/novacell03052-account-delete-kz7k7" secret="" err="secret \"galera-openstack-dockercfg-lrxnn\" not found" Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.072391 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6c3ae457-67ab-4c0b-a17e-a8264215793b-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "6c3ae457-67ab-4c0b-a17e-a8264215793b" (UID: "6c3ae457-67ab-4c0b-a17e-a8264215793b"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.086820 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/novacell03052-account-delete-kz7k7" podStartSLOduration=7.086800512 podStartE2EDuration="7.086800512s" podCreationTimestamp="2025-11-23 15:04:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 15:04:29.085911157 +0000 UTC m=+1364.252907642" watchObservedRunningTime="2025-11-23 15:04:29.086800512 +0000 UTC m=+1364.253796997" Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.090373 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d1ce97bd-1ff7-400e-a741-7e757fbd7e17-memcached-tls-certs" (OuterVolumeSpecName: "memcached-tls-certs") pod "d1ce97bd-1ff7-400e-a741-7e757fbd7e17" (UID: "d1ce97bd-1ff7-400e-a741-7e757fbd7e17"). InnerVolumeSpecName "memcached-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.090526 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/add2a2e2-5553-4e43-8ddd-b8719949d531-config-data" (OuterVolumeSpecName: "config-data") pod "add2a2e2-5553-4e43-8ddd-b8719949d531" (UID: "add2a2e2-5553-4e43-8ddd-b8719949d531"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.101978 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6c5e9b39-91fa-4f4e-9d95-0599bc22472d-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "6c5e9b39-91fa-4f4e-9d95-0599bc22472d" (UID: "6c5e9b39-91fa-4f4e-9d95-0599bc22472d"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.102821 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f85034c8-6b79-42e1-849d-646a2ead1a93-config-data" (OuterVolumeSpecName: "config-data") pod "f85034c8-6b79-42e1-849d-646a2ead1a93" (UID: "f85034c8-6b79-42e1-849d-646a2ead1a93"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.122520 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ba1bb9f6-5ca2-4986-9473-62d50d9bebf0-config-data" (OuterVolumeSpecName: "config-data") pod "ba1bb9f6-5ca2-4986-9473-62d50d9bebf0" (UID: "ba1bb9f6-5ca2-4986-9473-62d50d9bebf0"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.134757 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.140035 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-864fb4756c-xzxnp" Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.142120 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/add2a2e2-5553-4e43-8ddd-b8719949d531-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "add2a2e2-5553-4e43-8ddd-b8719949d531" (UID: "add2a2e2-5553-4e43-8ddd-b8719949d531"). InnerVolumeSpecName "internal-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.144303 5050 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/6c3ae457-67ab-4c0b-a17e-a8264215793b-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.144494 5050 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/add2a2e2-5553-4e43-8ddd-b8719949d531-config-data\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.144698 5050 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/f85034c8-6b79-42e1-849d-646a2ead1a93-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.144753 5050 reconciler_common.go:293] "Volume detached for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/d1ce97bd-1ff7-400e-a741-7e757fbd7e17-memcached-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.144800 5050 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f85034c8-6b79-42e1-849d-646a2ead1a93-config-data\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.144871 5050 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/add2a2e2-5553-4e43-8ddd-b8719949d531-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.144942 5050 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6c5e9b39-91fa-4f4e-9d95-0599bc22472d-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.145013 5050 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4d3c8093-cbaa-4414-9f1d-c7ae7a2b9322-config-data\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.145093 5050 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ba1bb9f6-5ca2-4986-9473-62d50d9bebf0-config-data\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.145164 5050 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8e597d4a-3ff4-4b9e-ab6a-c0364f6afb43-config-data\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:29 crc kubenswrapper[5050]: E1123 15:04:29.145323 5050 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Nov 23 15:04:29 crc kubenswrapper[5050]: E1123 15:04:29.145504 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/3d6e4b67-5d33-49cd-8a2a-69c3fc2c9d89-operator-scripts podName:3d6e4b67-5d33-49cd-8a2a-69c3fc2c9d89 nodeName:}" failed. No retries permitted until 2025-11-23 15:04:29.645415717 +0000 UTC m=+1364.812412202 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/3d6e4b67-5d33-49cd-8a2a-69c3fc2c9d89-operator-scripts") pod "novacell03052-account-delete-kz7k7" (UID: "3d6e4b67-5d33-49cd-8a2a-69c3fc2c9d89") : configmap "openstack-scripts" not found Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.147938 5050 scope.go:117] "RemoveContainer" containerID="878b763faeb721f2171524629a6e423a6579824e5ba28a08437820030804f60f" Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.159746 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.170762 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/add2a2e2-5553-4e43-8ddd-b8719949d531-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "add2a2e2-5553-4e43-8ddd-b8719949d531" (UID: "add2a2e2-5553-4e43-8ddd-b8719949d531"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.170822 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.198872 5050 scope.go:117] "RemoveContainer" containerID="11eb99c3ddd7ac1add778be8c07cea9de7ac571edec1bae0cd0e936ceb624706" Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.248093 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/042c506a-b65f-4cbd-9ca7-3df0ec55ffa0-combined-ca-bundle\") pod \"042c506a-b65f-4cbd-9ca7-3df0ec55ffa0\" (UID: \"042c506a-b65f-4cbd-9ca7-3df0ec55ffa0\") " Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.248240 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4d1fe4ad-9245-4af5-b378-c908ce72f08c-config-data\") pod \"4d1fe4ad-9245-4af5-b378-c908ce72f08c\" (UID: \"4d1fe4ad-9245-4af5-b378-c908ce72f08c\") " Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.248263 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gjxsg\" (UniqueName: \"kubernetes.io/projected/4d1fe4ad-9245-4af5-b378-c908ce72f08c-kube-api-access-gjxsg\") pod \"4d1fe4ad-9245-4af5-b378-c908ce72f08c\" (UID: \"4d1fe4ad-9245-4af5-b378-c908ce72f08c\") " Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.248301 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/042c506a-b65f-4cbd-9ca7-3df0ec55ffa0-config-data-custom\") pod \"042c506a-b65f-4cbd-9ca7-3df0ec55ffa0\" (UID: \"042c506a-b65f-4cbd-9ca7-3df0ec55ffa0\") " Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.248350 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/042c506a-b65f-4cbd-9ca7-3df0ec55ffa0-logs\") pod \"042c506a-b65f-4cbd-9ca7-3df0ec55ffa0\" (UID: \"042c506a-b65f-4cbd-9ca7-3df0ec55ffa0\") " Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.248408 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/042c506a-b65f-4cbd-9ca7-3df0ec55ffa0-config-data\") pod \"042c506a-b65f-4cbd-9ca7-3df0ec55ffa0\" (UID: \"042c506a-b65f-4cbd-9ca7-3df0ec55ffa0\") " Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 
15:04:29.248454 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4d1fe4ad-9245-4af5-b378-c908ce72f08c-combined-ca-bundle\") pod \"4d1fe4ad-9245-4af5-b378-c908ce72f08c\" (UID: \"4d1fe4ad-9245-4af5-b378-c908ce72f08c\") " Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.248553 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4q4gk\" (UniqueName: \"kubernetes.io/projected/042c506a-b65f-4cbd-9ca7-3df0ec55ffa0-kube-api-access-4q4gk\") pod \"042c506a-b65f-4cbd-9ca7-3df0ec55ffa0\" (UID: \"042c506a-b65f-4cbd-9ca7-3df0ec55ffa0\") " Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.249305 5050 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/add2a2e2-5553-4e43-8ddd-b8719949d531-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.253496 5050 scope.go:117] "RemoveContainer" containerID="b0a2c2bac24d49f1139383209407c61098dfcccf1627c8ba79ec2b65ec146335" Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.259495 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/042c506a-b65f-4cbd-9ca7-3df0ec55ffa0-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "042c506a-b65f-4cbd-9ca7-3df0ec55ffa0" (UID: "042c506a-b65f-4cbd-9ca7-3df0ec55ffa0"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.268658 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/042c506a-b65f-4cbd-9ca7-3df0ec55ffa0-kube-api-access-4q4gk" (OuterVolumeSpecName: "kube-api-access-4q4gk") pod "042c506a-b65f-4cbd-9ca7-3df0ec55ffa0" (UID: "042c506a-b65f-4cbd-9ca7-3df0ec55ffa0"). InnerVolumeSpecName "kube-api-access-4q4gk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.280738 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4d1fe4ad-9245-4af5-b378-c908ce72f08c-kube-api-access-gjxsg" (OuterVolumeSpecName: "kube-api-access-gjxsg") pod "4d1fe4ad-9245-4af5-b378-c908ce72f08c" (UID: "4d1fe4ad-9245-4af5-b378-c908ce72f08c"). InnerVolumeSpecName "kube-api-access-gjxsg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.283015 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/042c506a-b65f-4cbd-9ca7-3df0ec55ffa0-logs" (OuterVolumeSpecName: "logs") pod "042c506a-b65f-4cbd-9ca7-3df0ec55ffa0" (UID: "042c506a-b65f-4cbd-9ca7-3df0ec55ffa0"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.323152 5050 scope.go:117] "RemoveContainer" containerID="10d8f0ce4d54a26bf81a8c27901423a54c9049d1e6deff9e87bd78ed1978474b" Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.374103 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.374280 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gjxsg\" (UniqueName: \"kubernetes.io/projected/4d1fe4ad-9245-4af5-b378-c908ce72f08c-kube-api-access-gjxsg\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.374658 5050 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/042c506a-b65f-4cbd-9ca7-3df0ec55ffa0-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.374677 5050 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/042c506a-b65f-4cbd-9ca7-3df0ec55ffa0-logs\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.374688 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4q4gk\" (UniqueName: \"kubernetes.io/projected/042c506a-b65f-4cbd-9ca7-3df0ec55ffa0-kube-api-access-4q4gk\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.374427 5050 scope.go:117] "RemoveContainer" containerID="ce2fe500a1bdad923f79c0d2d37ae5d7c546350b70f8148bfd61c5c68475e3f7" Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.379211 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-api-0"] Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.380425 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4d1fe4ad-9245-4af5-b378-c908ce72f08c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4d1fe4ad-9245-4af5-b378-c908ce72f08c" (UID: "4d1fe4ad-9245-4af5-b378-c908ce72f08c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.390697 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.406570 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-748fb7ccf6-s9qnq"] Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.417292 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/042c506a-b65f-4cbd-9ca7-3df0ec55ffa0-config-data" (OuterVolumeSpecName: "config-data") pod "042c506a-b65f-4cbd-9ca7-3df0ec55ffa0" (UID: "042c506a-b65f-4cbd-9ca7-3df0ec55ffa0"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.417537 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4d1fe4ad-9245-4af5-b378-c908ce72f08c-config-data" (OuterVolumeSpecName: "config-data") pod "4d1fe4ad-9245-4af5-b378-c908ce72f08c" (UID: "4d1fe4ad-9245-4af5-b378-c908ce72f08c"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.445462 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-748fb7ccf6-s9qnq"] Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.447989 5050 scope.go:117] "RemoveContainer" containerID="e29903a818a042f29b3e4e52f5b9a3d3f2db969791fe5adce7bb96fbe4dca897" Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.448173 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/042c506a-b65f-4cbd-9ca7-3df0ec55ffa0-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "042c506a-b65f-4cbd-9ca7-3df0ec55ffa0" (UID: "042c506a-b65f-4cbd-9ca7-3df0ec55ffa0"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.449061 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder255c-account-delete-4znlv" Nov 23 15:04:29 crc kubenswrapper[5050]: E1123 15:04:29.478264 5050 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of e05743a215c5ada706341f4409a1a724e3949e0173a53be6eeb47dacec86519e is running failed: container process not found" containerID="e05743a215c5ada706341f4409a1a724e3949e0173a53be6eeb47dacec86519e" cmd=["/bin/bash","/var/lib/operator-scripts/mysql_probe.sh","readiness"] Nov 23 15:04:29 crc kubenswrapper[5050]: E1123 15:04:29.478829 5050 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of e05743a215c5ada706341f4409a1a724e3949e0173a53be6eeb47dacec86519e is running failed: container process not found" containerID="e05743a215c5ada706341f4409a1a724e3949e0173a53be6eeb47dacec86519e" cmd=["/bin/bash","/var/lib/operator-scripts/mysql_probe.sh","readiness"] Nov 23 15:04:29 crc kubenswrapper[5050]: E1123 15:04:29.479523 5050 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of e05743a215c5ada706341f4409a1a724e3949e0173a53be6eeb47dacec86519e is running failed: container process not found" containerID="e05743a215c5ada706341f4409a1a724e3949e0173a53be6eeb47dacec86519e" cmd=["/bin/bash","/var/lib/operator-scripts/mysql_probe.sh","readiness"] Nov 23 15:04:29 crc kubenswrapper[5050]: E1123 15:04:29.479560 5050 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of e05743a215c5ada706341f4409a1a724e3949e0173a53be6eeb47dacec86519e is running failed: container process not found" probeType="Readiness" pod="openstack/openstack-galera-0" podUID="0a256986-024f-45e6-9b86-b5a724ab5f6e" containerName="galera" Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.479633 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/634f01f5-daae-4e5d-811f-5d406bfee9c1-config-data\") pod \"634f01f5-daae-4e5d-811f-5d406bfee9c1\" (UID: \"634f01f5-daae-4e5d-811f-5d406bfee9c1\") " Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.479694 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/634f01f5-daae-4e5d-811f-5d406bfee9c1-combined-ca-bundle\") pod 
\"634f01f5-daae-4e5d-811f-5d406bfee9c1\" (UID: \"634f01f5-daae-4e5d-811f-5d406bfee9c1\") " Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.479763 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-242qc\" (UniqueName: \"kubernetes.io/projected/634f01f5-daae-4e5d-811f-5d406bfee9c1-kube-api-access-242qc\") pod \"634f01f5-daae-4e5d-811f-5d406bfee9c1\" (UID: \"634f01f5-daae-4e5d-811f-5d406bfee9c1\") " Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.483668 5050 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/042c506a-b65f-4cbd-9ca7-3df0ec55ffa0-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.483701 5050 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4d1fe4ad-9245-4af5-b378-c908ce72f08c-config-data\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.483712 5050 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/042c506a-b65f-4cbd-9ca7-3df0ec55ffa0-config-data\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.483724 5050 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4d1fe4ad-9245-4af5-b378-c908ce72f08c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.488605 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/634f01f5-daae-4e5d-811f-5d406bfee9c1-kube-api-access-242qc" (OuterVolumeSpecName: "kube-api-access-242qc") pod "634f01f5-daae-4e5d-811f-5d406bfee9c1" (UID: "634f01f5-daae-4e5d-811f-5d406bfee9c1"). InnerVolumeSpecName "kube-api-access-242qc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.519811 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/634f01f5-daae-4e5d-811f-5d406bfee9c1-config-data" (OuterVolumeSpecName: "config-data") pod "634f01f5-daae-4e5d-811f-5d406bfee9c1" (UID: "634f01f5-daae-4e5d-811f-5d406bfee9c1"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.529718 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/634f01f5-daae-4e5d-811f-5d406bfee9c1-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "634f01f5-daae-4e5d-811f-5d406bfee9c1" (UID: "634f01f5-daae-4e5d-811f-5d406bfee9c1"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.569813 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement734b-account-delete-zt8zp" Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.571045 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.581647 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican6ffa-account-delete-7wsfp" Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.587790 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7lbw7\" (UniqueName: \"kubernetes.io/projected/e78eb875-17e5-431f-82b5-14a1190488b0-kube-api-access-7lbw7\") pod \"e78eb875-17e5-431f-82b5-14a1190488b0\" (UID: \"e78eb875-17e5-431f-82b5-14a1190488b0\") " Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.587879 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e78eb875-17e5-431f-82b5-14a1190488b0-operator-scripts\") pod \"e78eb875-17e5-431f-82b5-14a1190488b0\" (UID: \"e78eb875-17e5-431f-82b5-14a1190488b0\") " Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.588601 5050 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/634f01f5-daae-4e5d-811f-5d406bfee9c1-config-data\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.588617 5050 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/634f01f5-daae-4e5d-811f-5d406bfee9c1-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.588630 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-242qc\" (UniqueName: \"kubernetes.io/projected/634f01f5-daae-4e5d-811f-5d406bfee9c1-kube-api-access-242qc\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:29 crc kubenswrapper[5050]: E1123 15:04:29.588708 5050 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Nov 23 15:04:29 crc kubenswrapper[5050]: E1123 15:04:29.588768 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/1bda1a38-71dd-4de4-8e16-b5159a08fdfa-operator-scripts podName:1bda1a38-71dd-4de4-8e16-b5159a08fdfa nodeName:}" failed. No retries permitted until 2025-11-23 15:04:30.588748171 +0000 UTC m=+1365.755744656 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/1bda1a38-71dd-4de4-8e16-b5159a08fdfa-operator-scripts") pod "novaapif81d-account-delete-xfdwn" (UID: "1bda1a38-71dd-4de4-8e16-b5159a08fdfa") : configmap "openstack-scripts" not found Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.589149 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e78eb875-17e5-431f-82b5-14a1190488b0-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "e78eb875-17e5-431f-82b5-14a1190488b0" (UID: "e78eb875-17e5-431f-82b5-14a1190488b0"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.603810 5050 scope.go:117] "RemoveContainer" containerID="63cf2a00b105f3566525e540e376dbc5f791f9441863630deaff0d4a49ab2c58" Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.605335 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e78eb875-17e5-431f-82b5-14a1190488b0-kube-api-access-7lbw7" (OuterVolumeSpecName: "kube-api-access-7lbw7") pod "e78eb875-17e5-431f-82b5-14a1190488b0" (UID: "e78eb875-17e5-431f-82b5-14a1190488b0"). InnerVolumeSpecName "kube-api-access-7lbw7". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.615397 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0dc7ce44-5e6c-4971-b829-54700cb6334f" path="/var/lib/kubelet/pods/0dc7ce44-5e6c-4971-b829-54700cb6334f/volumes" Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.616038 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="35ca6cdf-0126-49e9-a731-efa32a5e7edf" path="/var/lib/kubelet/pods/35ca6cdf-0126-49e9-a731-efa32a5e7edf/volumes" Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.616579 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="94bbca67-2998-4727-bbe1-95d54277f4aa" path="/var/lib/kubelet/pods/94bbca67-2998-4727-bbe1-95d54277f4aa/volumes" Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.617665 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d53c4b5-d0a1-4191-9239-8c7b6806f860" path="/var/lib/kubelet/pods/9d53c4b5-d0a1-4191-9239-8c7b6806f860/volumes" Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.618282 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="add2a2e2-5553-4e43-8ddd-b8719949d531" path="/var/lib/kubelet/pods/add2a2e2-5553-4e43-8ddd-b8719949d531/volumes" Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.623782 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ba1bb9f6-5ca2-4986-9473-62d50d9bebf0" path="/var/lib/kubelet/pods/ba1bb9f6-5ca2-4986-9473-62d50d9bebf0/volumes" Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.624414 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c1187c9d-5557-496e-be1d-8df301d6daa6" path="/var/lib/kubelet/pods/c1187c9d-5557-496e-be1d-8df301d6daa6/volumes" Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.625707 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d3d96734-832d-489e-9fed-a4eb705f41d7" path="/var/lib/kubelet/pods/d3d96734-832d-489e-9fed-a4eb705f41d7/volumes" Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.626246 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e7a21645-b633-4073-a971-c8896b6dbc06" path="/var/lib/kubelet/pods/e7a21645-b633-4073-a971-c8896b6dbc06/volumes" Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.629690 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron45e2-account-delete-8sq6l" Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.632345 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/memcached-0"] Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.633541 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/memcached-0"] Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.662770 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.670757 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.682012 5050 scope.go:117] "RemoveContainer" containerID="9f6dbbcaa59ff563c11d0cdfecbaa890da1c52e228d3d8a74b64182f665c6508" Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.690241 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3d3a2397-697f-4d1b-a5ed-0aaa1b202a12-operator-scripts\") pod \"3d3a2397-697f-4d1b-a5ed-0aaa1b202a12\" (UID: \"3d3a2397-697f-4d1b-a5ed-0aaa1b202a12\") " Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.690370 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6v5qt\" (UniqueName: \"kubernetes.io/projected/75e360ca-c84a-4806-a86a-86924a639cfc-kube-api-access-6v5qt\") pod \"75e360ca-c84a-4806-a86a-86924a639cfc\" (UID: \"75e360ca-c84a-4806-a86a-86924a639cfc\") " Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.691542 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4b570632-90a0-4fcd-a067-9913b51ad711-operator-scripts\") pod \"4b570632-90a0-4fcd-a067-9913b51ad711\" (UID: \"4b570632-90a0-4fcd-a067-9913b51ad711\") " Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.691753 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/75e360ca-c84a-4806-a86a-86924a639cfc-combined-ca-bundle\") pod \"75e360ca-c84a-4806-a86a-86924a639cfc\" (UID: \"75e360ca-c84a-4806-a86a-86924a639cfc\") " Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.691827 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/75e360ca-c84a-4806-a86a-86924a639cfc-config-data\") pod \"75e360ca-c84a-4806-a86a-86924a639cfc\" (UID: \"75e360ca-c84a-4806-a86a-86924a639cfc\") " Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.691861 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pnf5n\" (UniqueName: \"kubernetes.io/projected/4b570632-90a0-4fcd-a067-9913b51ad711-kube-api-access-pnf5n\") pod \"4b570632-90a0-4fcd-a067-9913b51ad711\" (UID: \"4b570632-90a0-4fcd-a067-9913b51ad711\") " Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.691929 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g4lns\" (UniqueName: \"kubernetes.io/projected/3d3a2397-697f-4d1b-a5ed-0aaa1b202a12-kube-api-access-g4lns\") pod \"3d3a2397-697f-4d1b-a5ed-0aaa1b202a12\" (UID: \"3d3a2397-697f-4d1b-a5ed-0aaa1b202a12\") " Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.694397 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7lbw7\" (UniqueName: 
\"kubernetes.io/projected/e78eb875-17e5-431f-82b5-14a1190488b0-kube-api-access-7lbw7\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.694458 5050 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e78eb875-17e5-431f-82b5-14a1190488b0-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.695255 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3d3a2397-697f-4d1b-a5ed-0aaa1b202a12-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "3d3a2397-697f-4d1b-a5ed-0aaa1b202a12" (UID: "3d3a2397-697f-4d1b-a5ed-0aaa1b202a12"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.695755 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4b570632-90a0-4fcd-a067-9913b51ad711-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "4b570632-90a0-4fcd-a067-9913b51ad711" (UID: "4b570632-90a0-4fcd-a067-9913b51ad711"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 15:04:29 crc kubenswrapper[5050]: E1123 15:04:29.695885 5050 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Nov 23 15:04:29 crc kubenswrapper[5050]: E1123 15:04:29.695970 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/3d6e4b67-5d33-49cd-8a2a-69c3fc2c9d89-operator-scripts podName:3d6e4b67-5d33-49cd-8a2a-69c3fc2c9d89 nodeName:}" failed. No retries permitted until 2025-11-23 15:04:30.69594115 +0000 UTC m=+1365.862937835 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/3d6e4b67-5d33-49cd-8a2a-69c3fc2c9d89-operator-scripts") pod "novacell03052-account-delete-kz7k7" (UID: "3d6e4b67-5d33-49cd-8a2a-69c3fc2c9d89") : configmap "openstack-scripts" not found Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.702702 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/75e360ca-c84a-4806-a86a-86924a639cfc-kube-api-access-6v5qt" (OuterVolumeSpecName: "kube-api-access-6v5qt") pod "75e360ca-c84a-4806-a86a-86924a639cfc" (UID: "75e360ca-c84a-4806-a86a-86924a639cfc"). InnerVolumeSpecName "kube-api-access-6v5qt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.704069 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.719001 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.722491 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4b570632-90a0-4fcd-a067-9913b51ad711-kube-api-access-pnf5n" (OuterVolumeSpecName: "kube-api-access-pnf5n") pod "4b570632-90a0-4fcd-a067-9913b51ad711" (UID: "4b570632-90a0-4fcd-a067-9913b51ad711"). InnerVolumeSpecName "kube-api-access-pnf5n". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.723930 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-77dc855c68-z488p"] Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.732378 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-api-77dc855c68-z488p"] Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.749684 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.752663 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/75e360ca-c84a-4806-a86a-86924a639cfc-config-data" (OuterVolumeSpecName: "config-data") pod "75e360ca-c84a-4806-a86a-86924a639cfc" (UID: "75e360ca-c84a-4806-a86a-86924a639cfc"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.755724 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3d3a2397-697f-4d1b-a5ed-0aaa1b202a12-kube-api-access-g4lns" (OuterVolumeSpecName: "kube-api-access-g4lns") pod "3d3a2397-697f-4d1b-a5ed-0aaa1b202a12" (UID: "3d3a2397-697f-4d1b-a5ed-0aaa1b202a12"). InnerVolumeSpecName "kube-api-access-g4lns". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.761526 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.777466 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/75e360ca-c84a-4806-a86a-86924a639cfc-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "75e360ca-c84a-4806-a86a-86924a639cfc" (UID: "75e360ca-c84a-4806-a86a-86924a639cfc"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.779611 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.788637 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.796115 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qkqpp\" (UniqueName: \"kubernetes.io/projected/9ea4c6e2-e26b-454f-a6e1-b8fd7df8a931-kube-api-access-qkqpp\") pod \"9ea4c6e2-e26b-454f-a6e1-b8fd7df8a931\" (UID: \"9ea4c6e2-e26b-454f-a6e1-b8fd7df8a931\") " Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.796253 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9ea4c6e2-e26b-454f-a6e1-b8fd7df8a931-operator-scripts\") pod \"9ea4c6e2-e26b-454f-a6e1-b8fd7df8a931\" (UID: \"9ea4c6e2-e26b-454f-a6e1-b8fd7df8a931\") " Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.797002 5050 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4b570632-90a0-4fcd-a067-9913b51ad711-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.797032 5050 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/75e360ca-c84a-4806-a86a-86924a639cfc-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.797044 5050 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/75e360ca-c84a-4806-a86a-86924a639cfc-config-data\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.797057 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pnf5n\" (UniqueName: \"kubernetes.io/projected/4b570632-90a0-4fcd-a067-9913b51ad711-kube-api-access-pnf5n\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.797076 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g4lns\" (UniqueName: \"kubernetes.io/projected/3d3a2397-697f-4d1b-a5ed-0aaa1b202a12-kube-api-access-g4lns\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.797090 5050 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3d3a2397-697f-4d1b-a5ed-0aaa1b202a12-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.797101 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6v5qt\" (UniqueName: \"kubernetes.io/projected/75e360ca-c84a-4806-a86a-86924a639cfc-kube-api-access-6v5qt\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.797185 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9ea4c6e2-e26b-454f-a6e1-b8fd7df8a931-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "9ea4c6e2-e26b-454f-a6e1-b8fd7df8a931" (UID: "9ea4c6e2-e26b-454f-a6e1-b8fd7df8a931"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.802996 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9ea4c6e2-e26b-454f-a6e1-b8fd7df8a931-kube-api-access-qkqpp" (OuterVolumeSpecName: "kube-api-access-qkqpp") pod "9ea4c6e2-e26b-454f-a6e1-b8fd7df8a931" (UID: "9ea4c6e2-e26b-454f-a6e1-b8fd7df8a931"). InnerVolumeSpecName "kube-api-access-qkqpp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.900141 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qkqpp\" (UniqueName: \"kubernetes.io/projected/9ea4c6e2-e26b-454f-a6e1-b8fd7df8a931-kube-api-access-qkqpp\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:29 crc kubenswrapper[5050]: I1123 15:04:29.900624 5050 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9ea4c6e2-e26b-454f-a6e1-b8fd7df8a931-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:30 crc kubenswrapper[5050]: E1123 15:04:30.002855 5050 configmap.go:193] Couldn't get configMap openstack/rabbitmq-cell1-config-data: configmap "rabbitmq-cell1-config-data" not found Nov 23 15:04:30 crc kubenswrapper[5050]: E1123 15:04:30.002927 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/f92353db-5352-4216-ad2d-50242e58dfb7-config-data podName:f92353db-5352-4216-ad2d-50242e58dfb7 nodeName:}" failed. No retries permitted until 2025-11-23 15:04:38.002911156 +0000 UTC m=+1373.169907641 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/f92353db-5352-4216-ad2d-50242e58dfb7-config-data") pod "rabbitmq-cell1-server-0" (UID: "f92353db-5352-4216-ad2d-50242e58dfb7") : configmap "rabbitmq-cell1-config-data" not found Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.073627 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"75e360ca-c84a-4806-a86a-86924a639cfc","Type":"ContainerDied","Data":"db7d68dc7333fc40bc147bb79c9d5959119d3d682ba64ee4ad8ec86aa29faffc"} Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.073716 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.080937 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican6ffa-account-delete-7wsfp" event={"ID":"4b570632-90a0-4fcd-a067-9913b51ad711","Type":"ContainerDied","Data":"b90b14193dc4172d78e6891a4096b09ad12467704d70cbc1bef0055816bb921e"} Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.081049 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b90b14193dc4172d78e6891a4096b09ad12467704d70cbc1bef0055816bb921e" Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.081144 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican6ffa-account-delete-7wsfp" Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.099557 5050 generic.go:334] "Generic (PLEG): container finished" podID="0a256986-024f-45e6-9b86-b5a724ab5f6e" containerID="e05743a215c5ada706341f4409a1a724e3949e0173a53be6eeb47dacec86519e" exitCode=0 Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.099666 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"0a256986-024f-45e6-9b86-b5a724ab5f6e","Type":"ContainerDied","Data":"e05743a215c5ada706341f4409a1a724e3949e0173a53be6eeb47dacec86519e"} Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.099697 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"0a256986-024f-45e6-9b86-b5a724ab5f6e","Type":"ContainerDied","Data":"cc769eb507d83ac2fbe0b6d010133c9cf92815a976f80934d722bc938295d965"} Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.099712 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="cc769eb507d83ac2fbe0b6d010133c9cf92815a976f80934d722bc938295d965" Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.102591 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"634f01f5-daae-4e5d-811f-5d406bfee9c1","Type":"ContainerDied","Data":"f3294b8f7f62c5e00c386225e88f7200776a716d7da4fb46d5facdef8781a296"} Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.102618 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.107855 5050 generic.go:334] "Generic (PLEG): container finished" podID="5eff539e-c641-4873-aeae-450aaf0b4ac8" containerID="a9dbba1cde13781acfcbcff23b41231ce6338e634ff0c22cf2de26d62d3a3f34" exitCode=0 Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.107934 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"5eff539e-c641-4873-aeae-450aaf0b4ac8","Type":"ContainerDied","Data":"a9dbba1cde13781acfcbcff23b41231ce6338e634ff0c22cf2de26d62d3a3f34"} Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.121191 5050 generic.go:334] "Generic (PLEG): container finished" podID="e3fbc320-b4b0-47f2-9fc6-a6efc63a3ee9" containerID="510cddb97927101d8a412932a0b83e3c82d63904833e599f58c30bcc8dd0541d" exitCode=0 Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.121336 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-ffbf89f58-t9kvk" event={"ID":"e3fbc320-b4b0-47f2-9fc6-a6efc63a3ee9","Type":"ContainerDied","Data":"510cddb97927101d8a412932a0b83e3c82d63904833e599f58c30bcc8dd0541d"} Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.125783 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder255c-account-delete-4znlv" event={"ID":"e78eb875-17e5-431f-82b5-14a1190488b0","Type":"ContainerDied","Data":"a376d0ead1564a8504ad61643e1d45fd22031515ddaddc6c2c82994faf9e51b4"} Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.125815 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a376d0ead1564a8504ad61643e1d45fd22031515ddaddc6c2c82994faf9e51b4" Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.125875 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder255c-account-delete-4znlv" Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.132691 5050 generic.go:334] "Generic (PLEG): container finished" podID="f92353db-5352-4216-ad2d-50242e58dfb7" containerID="fbb98bc62f9119abace6a66eef302292172343ff03acadf76beb838abacc4705" exitCode=0 Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.132779 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"f92353db-5352-4216-ad2d-50242e58dfb7","Type":"ContainerDied","Data":"fbb98bc62f9119abace6a66eef302292172343ff03acadf76beb838abacc4705"} Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.138671 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron45e2-account-delete-8sq6l" event={"ID":"9ea4c6e2-e26b-454f-a6e1-b8fd7df8a931","Type":"ContainerDied","Data":"da2d7fab422dd01a4c5dd295b8674310420b84ecfd81a0d1cdb28d3cbddda867"} Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.138699 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="da2d7fab422dd01a4c5dd295b8674310420b84ecfd81a0d1cdb28d3cbddda867" Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.138770 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron45e2-account-delete-8sq6l" Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.144120 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement734b-account-delete-zt8zp" event={"ID":"3d3a2397-697f-4d1b-a5ed-0aaa1b202a12","Type":"ContainerDied","Data":"bf90f8d814c7f6ab836037c9af40b1c9c08c6873fea4b44fead56814d8fa63e2"} Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.144159 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bf90f8d814c7f6ab836037c9af40b1c9c08c6873fea4b44fead56814d8fa63e2" Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.144228 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement734b-account-delete-zt8zp" Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.169166 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.170986 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-864fb4756c-xzxnp" Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.171503 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/novacell03052-account-delete-kz7k7" podUID="3d6e4b67-5d33-49cd-8a2a-69c3fc2c9d89" containerName="mariadb-account-delete" containerID="cri-o://f88295c1f1bb5ed5c59a421aaeb3b1e96a228d0eedd67e330915a293e3607e55" gracePeriod=30 Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.171647 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/novaapif81d-account-delete-xfdwn" podUID="1bda1a38-71dd-4de4-8e16-b5159a08fdfa" containerName="mariadb-account-delete" containerID="cri-o://dae7cb177dedffc9bba415c55b29716daa7fad33f3294e8262f68961a6e59713" gracePeriod=30 Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.191802 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-galera-0" Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.218852 5050 scope.go:117] "RemoveContainer" containerID="6f245a393065748303acdd84cc3863db9cf884ff2abd3737de04d10232960c78" Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.264119 5050 scope.go:117] "RemoveContainer" containerID="bf046462bb12cdebcb5455c61d3b2ad24f5e4aea74396128b7021ca13a57dedf" Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.281932 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement734b-account-delete-zt8zp"] Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.311484 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/0a256986-024f-45e6-9b86-b5a724ab5f6e-config-data-default\") pod \"0a256986-024f-45e6-9b86-b5a724ab5f6e\" (UID: \"0a256986-024f-45e6-9b86-b5a724ab5f6e\") " Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.311677 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/0a256986-024f-45e6-9b86-b5a724ab5f6e-galera-tls-certs\") pod \"0a256986-024f-45e6-9b86-b5a724ab5f6e\" (UID: \"0a256986-024f-45e6-9b86-b5a724ab5f6e\") " Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.311754 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/0a256986-024f-45e6-9b86-b5a724ab5f6e-config-data-generated\") pod \"0a256986-024f-45e6-9b86-b5a724ab5f6e\" (UID: \"0a256986-024f-45e6-9b86-b5a724ab5f6e\") " Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.311809 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mysql-db\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"0a256986-024f-45e6-9b86-b5a724ab5f6e\" (UID: \"0a256986-024f-45e6-9b86-b5a724ab5f6e\") " Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.311873 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pmfpm\" (UniqueName: \"kubernetes.io/projected/0a256986-024f-45e6-9b86-b5a724ab5f6e-kube-api-access-pmfpm\") pod \"0a256986-024f-45e6-9b86-b5a724ab5f6e\" (UID: \"0a256986-024f-45e6-9b86-b5a724ab5f6e\") " Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.311942 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0a256986-024f-45e6-9b86-b5a724ab5f6e-combined-ca-bundle\") pod \"0a256986-024f-45e6-9b86-b5a724ab5f6e\" (UID: \"0a256986-024f-45e6-9b86-b5a724ab5f6e\") " Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.311974 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/0a256986-024f-45e6-9b86-b5a724ab5f6e-kolla-config\") pod \"0a256986-024f-45e6-9b86-b5a724ab5f6e\" (UID: \"0a256986-024f-45e6-9b86-b5a724ab5f6e\") " Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.312026 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0a256986-024f-45e6-9b86-b5a724ab5f6e-operator-scripts\") pod \"0a256986-024f-45e6-9b86-b5a724ab5f6e\" (UID: \"0a256986-024f-45e6-9b86-b5a724ab5f6e\") " Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.312265 5050 operation_generator.go:803] UnmountVolume.TearDown 
succeeded for volume "kubernetes.io/configmap/0a256986-024f-45e6-9b86-b5a724ab5f6e-config-data-default" (OuterVolumeSpecName: "config-data-default") pod "0a256986-024f-45e6-9b86-b5a724ab5f6e" (UID: "0a256986-024f-45e6-9b86-b5a724ab5f6e"). InnerVolumeSpecName "config-data-default". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.313172 5050 reconciler_common.go:293] "Volume detached for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/0a256986-024f-45e6-9b86-b5a724ab5f6e-config-data-default\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.314152 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0a256986-024f-45e6-9b86-b5a724ab5f6e-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "0a256986-024f-45e6-9b86-b5a724ab5f6e" (UID: "0a256986-024f-45e6-9b86-b5a724ab5f6e"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.314829 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0a256986-024f-45e6-9b86-b5a724ab5f6e-kolla-config" (OuterVolumeSpecName: "kolla-config") pod "0a256986-024f-45e6-9b86-b5a724ab5f6e" (UID: "0a256986-024f-45e6-9b86-b5a724ab5f6e"). InnerVolumeSpecName "kolla-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.316586 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0a256986-024f-45e6-9b86-b5a724ab5f6e-config-data-generated" (OuterVolumeSpecName: "config-data-generated") pod "0a256986-024f-45e6-9b86-b5a724ab5f6e" (UID: "0a256986-024f-45e6-9b86-b5a724ab5f6e"). InnerVolumeSpecName "config-data-generated". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.325679 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0a256986-024f-45e6-9b86-b5a724ab5f6e-kube-api-access-pmfpm" (OuterVolumeSpecName: "kube-api-access-pmfpm") pod "0a256986-024f-45e6-9b86-b5a724ab5f6e" (UID: "0a256986-024f-45e6-9b86-b5a724ab5f6e"). InnerVolumeSpecName "kube-api-access-pmfpm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.350619 5050 scope.go:117] "RemoveContainer" containerID="afbf2c9723a3c51e2ef343739812462a5a83e6a2cd33bd51d95de96ab628994d" Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.356362 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement734b-account-delete-zt8zp"] Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.360627 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage06-crc" (OuterVolumeSpecName: "mysql-db") pod "0a256986-024f-45e6-9b86-b5a724ab5f6e" (UID: "0a256986-024f-45e6-9b86-b5a724ab5f6e"). InnerVolumeSpecName "local-storage06-crc". 
PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.393115 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.403981 5050 scope.go:117] "RemoveContainer" containerID="29d8b6a8f6bcb0d52d1fb31bd328b23184faf043db74c7e0325581a2ae92bc41" Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.412790 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.416967 5050 reconciler_common.go:293] "Volume detached for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/0a256986-024f-45e6-9b86-b5a724ab5f6e-config-data-generated\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.417046 5050 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" " Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.417067 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pmfpm\" (UniqueName: \"kubernetes.io/projected/0a256986-024f-45e6-9b86-b5a724ab5f6e-kube-api-access-pmfpm\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.417081 5050 reconciler_common.go:293] "Volume detached for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/0a256986-024f-45e6-9b86-b5a724ab5f6e-kolla-config\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.417091 5050 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0a256986-024f-45e6-9b86-b5a724ab5f6e-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.420518 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0a256986-024f-45e6-9b86-b5a724ab5f6e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0a256986-024f-45e6-9b86-b5a724ab5f6e" (UID: "0a256986-024f-45e6-9b86-b5a724ab5f6e"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.427851 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican6ffa-account-delete-7wsfp"] Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.436206 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican6ffa-account-delete-7wsfp"] Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.439221 5050 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage06-crc" (UniqueName: "kubernetes.io/local-volume/local-storage06-crc") on node "crc" Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.442996 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron45e2-account-delete-8sq6l"] Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.445528 5050 scope.go:117] "RemoveContainer" containerID="ba962ea3ca9a2e41f992ef21c4cd5215480c219a40218ff8e9e9408b4eeec97b" Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.449550 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron45e2-account-delete-8sq6l"] Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.492054 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.504546 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.509082 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0a256986-024f-45e6-9b86-b5a724ab5f6e-galera-tls-certs" (OuterVolumeSpecName: "galera-tls-certs") pod "0a256986-024f-45e6-9b86-b5a724ab5f6e" (UID: "0a256986-024f-45e6-9b86-b5a724ab5f6e"). InnerVolumeSpecName "galera-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.511348 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.520718 5050 scope.go:117] "RemoveContainer" containerID="ec06abb2a42fb30f410f9e5ebcf42f255dbea4392fc4eb791ab3e885ee8c6c15" Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.530919 5050 reconciler_common.go:293] "Volume detached for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/0a256986-024f-45e6-9b86-b5a724ab5f6e-galera-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.530966 5050 reconciler_common.go:293] "Volume detached for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.530976 5050 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0a256986-024f-45e6-9b86-b5a724ab5f6e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.531011 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.550738 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.563670 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-worker-864fb4756c-xzxnp"] Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.581810 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-worker-864fb4756c-xzxnp"] Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.607985 5050 scope.go:117] "RemoveContainer" containerID="dc18dcf10f3de24a9d22f69583ddef25bdb552099564e7a29e69ffc7a506a441" Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.623668 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.639412 5050 scope.go:117] "RemoveContainer" containerID="2649b8f823f897f77f83477412dd9f8246b7b8e23f72d10846e06d96f7b4bff1" Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.639431 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/5eff539e-c641-4873-aeae-450aaf0b4ac8-rabbitmq-plugins\") pod \"5eff539e-c641-4873-aeae-450aaf0b4ac8\" (UID: \"5eff539e-c641-4873-aeae-450aaf0b4ac8\") " Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.639793 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/5eff539e-c641-4873-aeae-450aaf0b4ac8-erlang-cookie-secret\") pod \"5eff539e-c641-4873-aeae-450aaf0b4ac8\" (UID: \"5eff539e-c641-4873-aeae-450aaf0b4ac8\") " Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.639838 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/5eff539e-c641-4873-aeae-450aaf0b4ac8-rabbitmq-confd\") pod \"5eff539e-c641-4873-aeae-450aaf0b4ac8\" (UID: \"5eff539e-c641-4873-aeae-450aaf0b4ac8\") " Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.640037 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/5eff539e-c641-4873-aeae-450aaf0b4ac8-pod-info\") pod \"5eff539e-c641-4873-aeae-450aaf0b4ac8\" (UID: \"5eff539e-c641-4873-aeae-450aaf0b4ac8\") " Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.640079 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n788x\" (UniqueName: \"kubernetes.io/projected/5eff539e-c641-4873-aeae-450aaf0b4ac8-kube-api-access-n788x\") pod \"5eff539e-c641-4873-aeae-450aaf0b4ac8\" (UID: \"5eff539e-c641-4873-aeae-450aaf0b4ac8\") " Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.640241 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/5eff539e-c641-4873-aeae-450aaf0b4ac8-plugins-conf\") pod \"5eff539e-c641-4873-aeae-450aaf0b4ac8\" (UID: \"5eff539e-c641-4873-aeae-450aaf0b4ac8\") " Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.640412 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/5eff539e-c641-4873-aeae-450aaf0b4ac8-rabbitmq-tls\") pod \"5eff539e-c641-4873-aeae-450aaf0b4ac8\" (UID: \"5eff539e-c641-4873-aeae-450aaf0b4ac8\") " Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.640475 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/5eff539e-c641-4873-aeae-450aaf0b4ac8-rabbitmq-erlang-cookie\") pod \"5eff539e-c641-4873-aeae-450aaf0b4ac8\" (UID: \"5eff539e-c641-4873-aeae-450aaf0b4ac8\") " Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.640632 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"5eff539e-c641-4873-aeae-450aaf0b4ac8\" (UID: \"5eff539e-c641-4873-aeae-450aaf0b4ac8\") " Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.640773 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume 
started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/5eff539e-c641-4873-aeae-450aaf0b4ac8-config-data\") pod \"5eff539e-c641-4873-aeae-450aaf0b4ac8\" (UID: \"5eff539e-c641-4873-aeae-450aaf0b4ac8\") " Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.640795 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/5eff539e-c641-4873-aeae-450aaf0b4ac8-server-conf\") pod \"5eff539e-c641-4873-aeae-450aaf0b4ac8\" (UID: \"5eff539e-c641-4873-aeae-450aaf0b4ac8\") " Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.641162 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5eff539e-c641-4873-aeae-450aaf0b4ac8-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "5eff539e-c641-4873-aeae-450aaf0b4ac8" (UID: "5eff539e-c641-4873-aeae-450aaf0b4ac8"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.641422 5050 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/5eff539e-c641-4873-aeae-450aaf0b4ac8-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.641437 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5eff539e-c641-4873-aeae-450aaf0b4ac8-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "5eff539e-c641-4873-aeae-450aaf0b4ac8" (UID: "5eff539e-c641-4873-aeae-450aaf0b4ac8"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 15:04:30 crc kubenswrapper[5050]: E1123 15:04:30.641866 5050 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Nov 23 15:04:30 crc kubenswrapper[5050]: E1123 15:04:30.642017 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/1bda1a38-71dd-4de4-8e16-b5159a08fdfa-operator-scripts podName:1bda1a38-71dd-4de4-8e16-b5159a08fdfa nodeName:}" failed. No retries permitted until 2025-11-23 15:04:32.641998974 +0000 UTC m=+1367.808995459 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/1bda1a38-71dd-4de4-8e16-b5159a08fdfa-operator-scripts") pod "novaapif81d-account-delete-xfdwn" (UID: "1bda1a38-71dd-4de4-8e16-b5159a08fdfa") : configmap "openstack-scripts" not found Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.647425 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5eff539e-c641-4873-aeae-450aaf0b4ac8-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "5eff539e-c641-4873-aeae-450aaf0b4ac8" (UID: "5eff539e-c641-4873-aeae-450aaf0b4ac8"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.651528 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-ffbf89f58-t9kvk" Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.653533 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/5eff539e-c641-4873-aeae-450aaf0b4ac8-pod-info" (OuterVolumeSpecName: "pod-info") pod "5eff539e-c641-4873-aeae-450aaf0b4ac8" (UID: "5eff539e-c641-4873-aeae-450aaf0b4ac8"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.654812 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5eff539e-c641-4873-aeae-450aaf0b4ac8-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "5eff539e-c641-4873-aeae-450aaf0b4ac8" (UID: "5eff539e-c641-4873-aeae-450aaf0b4ac8"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.655559 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5eff539e-c641-4873-aeae-450aaf0b4ac8-kube-api-access-n788x" (OuterVolumeSpecName: "kube-api-access-n788x") pod "5eff539e-c641-4873-aeae-450aaf0b4ac8" (UID: "5eff539e-c641-4873-aeae-450aaf0b4ac8"). InnerVolumeSpecName "kube-api-access-n788x". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.656499 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage01-crc" (OuterVolumeSpecName: "persistence") pod "5eff539e-c641-4873-aeae-450aaf0b4ac8" (UID: "5eff539e-c641-4873-aeae-450aaf0b4ac8"). InnerVolumeSpecName "local-storage01-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.672992 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5eff539e-c641-4873-aeae-450aaf0b4ac8-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "5eff539e-c641-4873-aeae-450aaf0b4ac8" (UID: "5eff539e-c641-4873-aeae-450aaf0b4ac8"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.716226 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5eff539e-c641-4873-aeae-450aaf0b4ac8-server-conf" (OuterVolumeSpecName: "server-conf") pod "5eff539e-c641-4873-aeae-450aaf0b4ac8" (UID: "5eff539e-c641-4873-aeae-450aaf0b4ac8"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.723174 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5eff539e-c641-4873-aeae-450aaf0b4ac8-config-data" (OuterVolumeSpecName: "config-data") pod "5eff539e-c641-4873-aeae-450aaf0b4ac8" (UID: "5eff539e-c641-4873-aeae-450aaf0b4ac8"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.743120 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/f92353db-5352-4216-ad2d-50242e58dfb7-erlang-cookie-secret\") pod \"f92353db-5352-4216-ad2d-50242e58dfb7\" (UID: \"f92353db-5352-4216-ad2d-50242e58dfb7\") " Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.743262 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/e3fbc320-b4b0-47f2-9fc6-a6efc63a3ee9-fernet-keys\") pod \"e3fbc320-b4b0-47f2-9fc6-a6efc63a3ee9\" (UID: \"e3fbc320-b4b0-47f2-9fc6-a6efc63a3ee9\") " Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.743307 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f92353db-5352-4216-ad2d-50242e58dfb7-config-data\") pod \"f92353db-5352-4216-ad2d-50242e58dfb7\" (UID: \"f92353db-5352-4216-ad2d-50242e58dfb7\") " Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.743366 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/f92353db-5352-4216-ad2d-50242e58dfb7-server-conf\") pod \"f92353db-5352-4216-ad2d-50242e58dfb7\" (UID: \"f92353db-5352-4216-ad2d-50242e58dfb7\") " Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.743403 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/f92353db-5352-4216-ad2d-50242e58dfb7-rabbitmq-plugins\") pod \"f92353db-5352-4216-ad2d-50242e58dfb7\" (UID: \"f92353db-5352-4216-ad2d-50242e58dfb7\") " Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.743618 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e3fbc320-b4b0-47f2-9fc6-a6efc63a3ee9-config-data\") pod \"e3fbc320-b4b0-47f2-9fc6-a6efc63a3ee9\" (UID: \"e3fbc320-b4b0-47f2-9fc6-a6efc63a3ee9\") " Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.743675 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/f92353db-5352-4216-ad2d-50242e58dfb7-rabbitmq-confd\") pod \"f92353db-5352-4216-ad2d-50242e58dfb7\" (UID: \"f92353db-5352-4216-ad2d-50242e58dfb7\") " Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.743698 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e3fbc320-b4b0-47f2-9fc6-a6efc63a3ee9-scripts\") pod \"e3fbc320-b4b0-47f2-9fc6-a6efc63a3ee9\" (UID: \"e3fbc320-b4b0-47f2-9fc6-a6efc63a3ee9\") " Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.743750 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/f92353db-5352-4216-ad2d-50242e58dfb7-plugins-conf\") pod \"f92353db-5352-4216-ad2d-50242e58dfb7\" (UID: \"f92353db-5352-4216-ad2d-50242e58dfb7\") " Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.743810 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/f92353db-5352-4216-ad2d-50242e58dfb7-rabbitmq-tls\") pod \"f92353db-5352-4216-ad2d-50242e58dfb7\" (UID: \"f92353db-5352-4216-ad2d-50242e58dfb7\") " 
Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.743836 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/e3fbc320-b4b0-47f2-9fc6-a6efc63a3ee9-credential-keys\") pod \"e3fbc320-b4b0-47f2-9fc6-a6efc63a3ee9\" (UID: \"e3fbc320-b4b0-47f2-9fc6-a6efc63a3ee9\") " Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.743877 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e3fbc320-b4b0-47f2-9fc6-a6efc63a3ee9-internal-tls-certs\") pod \"e3fbc320-b4b0-47f2-9fc6-a6efc63a3ee9\" (UID: \"e3fbc320-b4b0-47f2-9fc6-a6efc63a3ee9\") " Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.743902 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/f92353db-5352-4216-ad2d-50242e58dfb7-rabbitmq-erlang-cookie\") pod \"f92353db-5352-4216-ad2d-50242e58dfb7\" (UID: \"f92353db-5352-4216-ad2d-50242e58dfb7\") " Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.743949 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e3fbc320-b4b0-47f2-9fc6-a6efc63a3ee9-public-tls-certs\") pod \"e3fbc320-b4b0-47f2-9fc6-a6efc63a3ee9\" (UID: \"e3fbc320-b4b0-47f2-9fc6-a6efc63a3ee9\") " Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.743981 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p4p5t\" (UniqueName: \"kubernetes.io/projected/e3fbc320-b4b0-47f2-9fc6-a6efc63a3ee9-kube-api-access-p4p5t\") pod \"e3fbc320-b4b0-47f2-9fc6-a6efc63a3ee9\" (UID: \"e3fbc320-b4b0-47f2-9fc6-a6efc63a3ee9\") " Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.744028 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q7vgm\" (UniqueName: \"kubernetes.io/projected/f92353db-5352-4216-ad2d-50242e58dfb7-kube-api-access-q7vgm\") pod \"f92353db-5352-4216-ad2d-50242e58dfb7\" (UID: \"f92353db-5352-4216-ad2d-50242e58dfb7\") " Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.744122 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/f92353db-5352-4216-ad2d-50242e58dfb7-pod-info\") pod \"f92353db-5352-4216-ad2d-50242e58dfb7\" (UID: \"f92353db-5352-4216-ad2d-50242e58dfb7\") " Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.744193 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e3fbc320-b4b0-47f2-9fc6-a6efc63a3ee9-combined-ca-bundle\") pod \"e3fbc320-b4b0-47f2-9fc6-a6efc63a3ee9\" (UID: \"e3fbc320-b4b0-47f2-9fc6-a6efc63a3ee9\") " Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.744223 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"f92353db-5352-4216-ad2d-50242e58dfb7\" (UID: \"f92353db-5352-4216-ad2d-50242e58dfb7\") " Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.744742 5050 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/5eff539e-c641-4873-aeae-450aaf0b4ac8-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.744761 5050 
reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/5eff539e-c641-4873-aeae-450aaf0b4ac8-pod-info\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.744772 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n788x\" (UniqueName: \"kubernetes.io/projected/5eff539e-c641-4873-aeae-450aaf0b4ac8-kube-api-access-n788x\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.744798 5050 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/5eff539e-c641-4873-aeae-450aaf0b4ac8-plugins-conf\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.744808 5050 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/5eff539e-c641-4873-aeae-450aaf0b4ac8-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.744829 5050 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on node \"crc\" " Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.744839 5050 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/5eff539e-c641-4873-aeae-450aaf0b4ac8-config-data\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.744844 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f92353db-5352-4216-ad2d-50242e58dfb7-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "f92353db-5352-4216-ad2d-50242e58dfb7" (UID: "f92353db-5352-4216-ad2d-50242e58dfb7"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.744869 5050 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/5eff539e-c641-4873-aeae-450aaf0b4ac8-server-conf\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.744881 5050 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/5eff539e-c641-4873-aeae-450aaf0b4ac8-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.747564 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f92353db-5352-4216-ad2d-50242e58dfb7-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "f92353db-5352-4216-ad2d-50242e58dfb7" (UID: "f92353db-5352-4216-ad2d-50242e58dfb7"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 15:04:30 crc kubenswrapper[5050]: E1123 15:04:30.749490 5050 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Nov 23 15:04:30 crc kubenswrapper[5050]: E1123 15:04:30.749580 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/3d6e4b67-5d33-49cd-8a2a-69c3fc2c9d89-operator-scripts podName:3d6e4b67-5d33-49cd-8a2a-69c3fc2c9d89 nodeName:}" failed. No retries permitted until 2025-11-23 15:04:32.749557263 +0000 UTC m=+1367.916553738 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/3d6e4b67-5d33-49cd-8a2a-69c3fc2c9d89-operator-scripts") pod "novacell03052-account-delete-kz7k7" (UID: "3d6e4b67-5d33-49cd-8a2a-69c3fc2c9d89") : configmap "openstack-scripts" not found Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.750526 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f92353db-5352-4216-ad2d-50242e58dfb7-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "f92353db-5352-4216-ad2d-50242e58dfb7" (UID: "f92353db-5352-4216-ad2d-50242e58dfb7"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.759524 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e3fbc320-b4b0-47f2-9fc6-a6efc63a3ee9-scripts" (OuterVolumeSpecName: "scripts") pod "e3fbc320-b4b0-47f2-9fc6-a6efc63a3ee9" (UID: "e3fbc320-b4b0-47f2-9fc6-a6efc63a3ee9"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.761292 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f92353db-5352-4216-ad2d-50242e58dfb7-kube-api-access-q7vgm" (OuterVolumeSpecName: "kube-api-access-q7vgm") pod "f92353db-5352-4216-ad2d-50242e58dfb7" (UID: "f92353db-5352-4216-ad2d-50242e58dfb7"). InnerVolumeSpecName "kube-api-access-q7vgm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.761319 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e3fbc320-b4b0-47f2-9fc6-a6efc63a3ee9-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "e3fbc320-b4b0-47f2-9fc6-a6efc63a3ee9" (UID: "e3fbc320-b4b0-47f2-9fc6-a6efc63a3ee9"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.761423 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage12-crc" (OuterVolumeSpecName: "persistence") pod "f92353db-5352-4216-ad2d-50242e58dfb7" (UID: "f92353db-5352-4216-ad2d-50242e58dfb7"). InnerVolumeSpecName "local-storage12-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.761587 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f92353db-5352-4216-ad2d-50242e58dfb7-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "f92353db-5352-4216-ad2d-50242e58dfb7" (UID: "f92353db-5352-4216-ad2d-50242e58dfb7"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.761955 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e3fbc320-b4b0-47f2-9fc6-a6efc63a3ee9-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "e3fbc320-b4b0-47f2-9fc6-a6efc63a3ee9" (UID: "e3fbc320-b4b0-47f2-9fc6-a6efc63a3ee9"). InnerVolumeSpecName "fernet-keys". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.762765 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e3fbc320-b4b0-47f2-9fc6-a6efc63a3ee9-kube-api-access-p4p5t" (OuterVolumeSpecName: "kube-api-access-p4p5t") pod "e3fbc320-b4b0-47f2-9fc6-a6efc63a3ee9" (UID: "e3fbc320-b4b0-47f2-9fc6-a6efc63a3ee9"). InnerVolumeSpecName "kube-api-access-p4p5t". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.767626 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f92353db-5352-4216-ad2d-50242e58dfb7-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "f92353db-5352-4216-ad2d-50242e58dfb7" (UID: "f92353db-5352-4216-ad2d-50242e58dfb7"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.785244 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/f92353db-5352-4216-ad2d-50242e58dfb7-pod-info" (OuterVolumeSpecName: "pod-info") pod "f92353db-5352-4216-ad2d-50242e58dfb7" (UID: "f92353db-5352-4216-ad2d-50242e58dfb7"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.808686 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e3fbc320-b4b0-47f2-9fc6-a6efc63a3ee9-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e3fbc320-b4b0-47f2-9fc6-a6efc63a3ee9" (UID: "e3fbc320-b4b0-47f2-9fc6-a6efc63a3ee9"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.809687 5050 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage01-crc" (UniqueName: "kubernetes.io/local-volume/local-storage01-crc") on node "crc" Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.814082 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5eff539e-c641-4873-aeae-450aaf0b4ac8-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "5eff539e-c641-4873-aeae-450aaf0b4ac8" (UID: "5eff539e-c641-4873-aeae-450aaf0b4ac8"). InnerVolumeSpecName "rabbitmq-confd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.818613 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f92353db-5352-4216-ad2d-50242e58dfb7-config-data" (OuterVolumeSpecName: "config-data") pod "f92353db-5352-4216-ad2d-50242e58dfb7" (UID: "f92353db-5352-4216-ad2d-50242e58dfb7"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.821993 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f92353db-5352-4216-ad2d-50242e58dfb7-server-conf" (OuterVolumeSpecName: "server-conf") pod "f92353db-5352-4216-ad2d-50242e58dfb7" (UID: "f92353db-5352-4216-ad2d-50242e58dfb7"). InnerVolumeSpecName "server-conf". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.831619 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e3fbc320-b4b0-47f2-9fc6-a6efc63a3ee9-config-data" (OuterVolumeSpecName: "config-data") pod "e3fbc320-b4b0-47f2-9fc6-a6efc63a3ee9" (UID: "e3fbc320-b4b0-47f2-9fc6-a6efc63a3ee9"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.832333 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e3fbc320-b4b0-47f2-9fc6-a6efc63a3ee9-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "e3fbc320-b4b0-47f2-9fc6-a6efc63a3ee9" (UID: "e3fbc320-b4b0-47f2-9fc6-a6efc63a3ee9"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.838499 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e3fbc320-b4b0-47f2-9fc6-a6efc63a3ee9-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "e3fbc320-b4b0-47f2-9fc6-a6efc63a3ee9" (UID: "e3fbc320-b4b0-47f2-9fc6-a6efc63a3ee9"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.847810 5050 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e3fbc320-b4b0-47f2-9fc6-a6efc63a3ee9-scripts\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.847842 5050 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/f92353db-5352-4216-ad2d-50242e58dfb7-plugins-conf\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.847858 5050 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/f92353db-5352-4216-ad2d-50242e58dfb7-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.847870 5050 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/e3fbc320-b4b0-47f2-9fc6-a6efc63a3ee9-credential-keys\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.847882 5050 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e3fbc320-b4b0-47f2-9fc6-a6efc63a3ee9-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.847895 5050 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/f92353db-5352-4216-ad2d-50242e58dfb7-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.847909 5050 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e3fbc320-b4b0-47f2-9fc6-a6efc63a3ee9-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.847922 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p4p5t\" (UniqueName: \"kubernetes.io/projected/e3fbc320-b4b0-47f2-9fc6-a6efc63a3ee9-kube-api-access-p4p5t\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:30 crc 
kubenswrapper[5050]: I1123 15:04:30.847936 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q7vgm\" (UniqueName: \"kubernetes.io/projected/f92353db-5352-4216-ad2d-50242e58dfb7-kube-api-access-q7vgm\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.847948 5050 reconciler_common.go:293] "Volume detached for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.847959 5050 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/f92353db-5352-4216-ad2d-50242e58dfb7-pod-info\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.847971 5050 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e3fbc320-b4b0-47f2-9fc6-a6efc63a3ee9-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.848017 5050 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") on node \"crc\" " Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.848350 5050 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/f92353db-5352-4216-ad2d-50242e58dfb7-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.848365 5050 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/e3fbc320-b4b0-47f2-9fc6-a6efc63a3ee9-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.848374 5050 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f92353db-5352-4216-ad2d-50242e58dfb7-config-data\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.848384 5050 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/f92353db-5352-4216-ad2d-50242e58dfb7-server-conf\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.848397 5050 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/f92353db-5352-4216-ad2d-50242e58dfb7-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.848411 5050 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/5eff539e-c641-4873-aeae-450aaf0b4ac8-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.848423 5050 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e3fbc320-b4b0-47f2-9fc6-a6efc63a3ee9-config-data\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.867142 5050 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage12-crc" (UniqueName: "kubernetes.io/local-volume/local-storage12-crc") on node "crc" Nov 23 15:04:30 crc kubenswrapper[5050]: E1123 15:04:30.875422 5050 handlers.go:78] "Exec lifecycle hook for Container in Pod failed" err=< Nov 23 15:04:30 crc 
kubenswrapper[5050]: command '/usr/share/ovn/scripts/ovn-ctl stop_controller' exited with 137: 2025-11-23T15:04:23Z|00001|fatal_signal|WARN|terminating with signal 14 (Alarm clock) Nov 23 15:04:30 crc kubenswrapper[5050]: /etc/init.d/functions: line 589: 414 Alarm clock "$@" Nov 23 15:04:30 crc kubenswrapper[5050]: > execCommand=["/usr/share/ovn/scripts/ovn-ctl","stop_controller"] containerName="ovn-controller" pod="openstack/ovn-controller-tbww8" message=< Nov 23 15:04:30 crc kubenswrapper[5050]: Exiting ovn-controller (1) [FAILED] Nov 23 15:04:30 crc kubenswrapper[5050]: Killing ovn-controller (1) [ OK ] Nov 23 15:04:30 crc kubenswrapper[5050]: Killing ovn-controller (1) with SIGKILL [ OK ] Nov 23 15:04:30 crc kubenswrapper[5050]: 2025-11-23T15:04:23Z|00001|fatal_signal|WARN|terminating with signal 14 (Alarm clock) Nov 23 15:04:30 crc kubenswrapper[5050]: /etc/init.d/functions: line 589: 414 Alarm clock "$@" Nov 23 15:04:30 crc kubenswrapper[5050]: > Nov 23 15:04:30 crc kubenswrapper[5050]: E1123 15:04:30.875511 5050 kuberuntime_container.go:691] "PreStop hook failed" err=< Nov 23 15:04:30 crc kubenswrapper[5050]: command '/usr/share/ovn/scripts/ovn-ctl stop_controller' exited with 137: 2025-11-23T15:04:23Z|00001|fatal_signal|WARN|terminating with signal 14 (Alarm clock) Nov 23 15:04:30 crc kubenswrapper[5050]: /etc/init.d/functions: line 589: 414 Alarm clock "$@" Nov 23 15:04:30 crc kubenswrapper[5050]: > pod="openstack/ovn-controller-tbww8" podUID="33af3c2e-dea7-4448-8b28-b579d77490b9" containerName="ovn-controller" containerID="cri-o://849e41c94f217d7fbbec3b38946489dc46e00bb66fcfcb08151c2e3f0587eb24" Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.875573 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-controller-tbww8" podUID="33af3c2e-dea7-4448-8b28-b579d77490b9" containerName="ovn-controller" containerID="cri-o://849e41c94f217d7fbbec3b38946489dc46e00bb66fcfcb08151c2e3f0587eb24" gracePeriod=22 Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.903229 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f92353db-5352-4216-ad2d-50242e58dfb7-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "f92353db-5352-4216-ad2d-50242e58dfb7" (UID: "f92353db-5352-4216-ad2d-50242e58dfb7"). InnerVolumeSpecName "rabbitmq-confd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.951415 5050 reconciler_common.go:293] "Volume detached for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:30 crc kubenswrapper[5050]: I1123 15:04:30.951479 5050 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/f92353db-5352-4216-ad2d-50242e58dfb7-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:31 crc kubenswrapper[5050]: I1123 15:04:31.195080 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-ffbf89f58-t9kvk" event={"ID":"e3fbc320-b4b0-47f2-9fc6-a6efc63a3ee9","Type":"ContainerDied","Data":"eb57e605f9b80957edce5518a5e62e697e480f0cb1c6f297b41656e2d021e40c"} Nov 23 15:04:31 crc kubenswrapper[5050]: I1123 15:04:31.195132 5050 scope.go:117] "RemoveContainer" containerID="510cddb97927101d8a412932a0b83e3c82d63904833e599f58c30bcc8dd0541d" Nov 23 15:04:31 crc kubenswrapper[5050]: I1123 15:04:31.195230 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-ffbf89f58-t9kvk" Nov 23 15:04:31 crc kubenswrapper[5050]: I1123 15:04:31.234485 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-create-vsf8d"] Nov 23 15:04:31 crc kubenswrapper[5050]: I1123 15:04:31.243729 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-create-vsf8d"] Nov 23 15:04:31 crc kubenswrapper[5050]: I1123 15:04:31.256792 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-ffbf89f58-t9kvk"] Nov 23 15:04:31 crc kubenswrapper[5050]: I1123 15:04:31.264489 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-ffbf89f58-t9kvk"] Nov 23 15:04:31 crc kubenswrapper[5050]: I1123 15:04:31.267007 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"f92353db-5352-4216-ad2d-50242e58dfb7","Type":"ContainerDied","Data":"3acf3f6a01bc622686debb83e7eda59f45ddb4da24b93ae3a1ab1bb2601f52b5"} Nov 23 15:04:31 crc kubenswrapper[5050]: I1123 15:04:31.267114 5050 scope.go:117] "RemoveContainer" containerID="fbb98bc62f9119abace6a66eef302292172343ff03acadf76beb838abacc4705" Nov 23 15:04:31 crc kubenswrapper[5050]: I1123 15:04:31.267942 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 23 15:04:31 crc kubenswrapper[5050]: I1123 15:04:31.277566 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"5eff539e-c641-4873-aeae-450aaf0b4ac8","Type":"ContainerDied","Data":"dde128d8d93aaff6ac1a522f7aa4b2c60615a85aeddbadd7ab21a071bac2480a"} Nov 23 15:04:31 crc kubenswrapper[5050]: I1123 15:04:31.277686 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 23 15:04:31 crc kubenswrapper[5050]: I1123 15:04:31.290046 5050 scope.go:117] "RemoveContainer" containerID="8fa79fa4d3da8bc1f764d8d40be5053a2b08532c54d8bfc67df9434fce12f626" Nov 23 15:04:31 crc kubenswrapper[5050]: I1123 15:04:31.293789 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-tbww8_33af3c2e-dea7-4448-8b28-b579d77490b9/ovn-controller/0.log" Nov 23 15:04:31 crc kubenswrapper[5050]: I1123 15:04:31.293898 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-tbww8" Nov 23 15:04:31 crc kubenswrapper[5050]: I1123 15:04:31.303993 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder255c-account-delete-4znlv"] Nov 23 15:04:31 crc kubenswrapper[5050]: I1123 15:04:31.305209 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-tbww8_33af3c2e-dea7-4448-8b28-b579d77490b9/ovn-controller/0.log" Nov 23 15:04:31 crc kubenswrapper[5050]: I1123 15:04:31.305270 5050 generic.go:334] "Generic (PLEG): container finished" podID="33af3c2e-dea7-4448-8b28-b579d77490b9" containerID="849e41c94f217d7fbbec3b38946489dc46e00bb66fcfcb08151c2e3f0587eb24" exitCode=137 Nov 23 15:04:31 crc kubenswrapper[5050]: I1123 15:04:31.305381 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-galera-0" Nov 23 15:04:31 crc kubenswrapper[5050]: I1123 15:04:31.305356 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-tbww8" event={"ID":"33af3c2e-dea7-4448-8b28-b579d77490b9","Type":"ContainerDied","Data":"849e41c94f217d7fbbec3b38946489dc46e00bb66fcfcb08151c2e3f0587eb24"} Nov 23 15:04:31 crc kubenswrapper[5050]: I1123 15:04:31.322109 5050 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/swift-proxy-6b854f87-drlhc" podUID="c1187c9d-5557-496e-be1d-8df301d6daa6" containerName="proxy-httpd" probeResult="failure" output="Get \"https://10.217.0.168:8080/healthcheck\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Nov 23 15:04:31 crc kubenswrapper[5050]: I1123 15:04:31.322334 5050 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/swift-proxy-6b854f87-drlhc" podUID="c1187c9d-5557-496e-be1d-8df301d6daa6" containerName="proxy-server" probeResult="failure" output="Get \"https://10.217.0.168:8080/healthcheck\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Nov 23 15:04:31 crc kubenswrapper[5050]: I1123 15:04:31.336345 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder255c-account-delete-4znlv"] Nov 23 15:04:31 crc kubenswrapper[5050]: I1123 15:04:31.343176 5050 scope.go:117] "RemoveContainer" containerID="a9dbba1cde13781acfcbcff23b41231ce6338e634ff0c22cf2de26d62d3a3f34" Nov 23 15:04:31 crc kubenswrapper[5050]: I1123 15:04:31.358516 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-255c-account-create-9gdvk"] Nov 23 15:04:31 crc kubenswrapper[5050]: I1123 15:04:31.385945 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-255c-account-create-9gdvk"] Nov 23 15:04:31 crc kubenswrapper[5050]: I1123 15:04:31.390418 5050 scope.go:117] "RemoveContainer" containerID="e92ae2c79d385eac2b74e01fa6c265a4e1892dff4fe0ad87c1b8d47c3cad30bd" Nov 23 15:04:31 crc kubenswrapper[5050]: I1123 15:04:31.404588 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 23 15:04:31 crc kubenswrapper[5050]: I1123 15:04:31.427420 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 23 15:04:31 crc kubenswrapper[5050]: I1123 15:04:31.438873 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 23 15:04:31 crc kubenswrapper[5050]: I1123 15:04:31.449786 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 23 15:04:31 crc kubenswrapper[5050]: I1123 15:04:31.461030 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstack-galera-0"] Nov 23 15:04:31 crc kubenswrapper[5050]: I1123 15:04:31.463969 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2vh8k\" (UniqueName: \"kubernetes.io/projected/33af3c2e-dea7-4448-8b28-b579d77490b9-kube-api-access-2vh8k\") pod \"33af3c2e-dea7-4448-8b28-b579d77490b9\" (UID: \"33af3c2e-dea7-4448-8b28-b579d77490b9\") " Nov 23 15:04:31 crc kubenswrapper[5050]: I1123 15:04:31.464047 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/33af3c2e-dea7-4448-8b28-b579d77490b9-combined-ca-bundle\") pod \"33af3c2e-dea7-4448-8b28-b579d77490b9\" (UID: 
\"33af3c2e-dea7-4448-8b28-b579d77490b9\") " Nov 23 15:04:31 crc kubenswrapper[5050]: I1123 15:04:31.465013 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/33af3c2e-dea7-4448-8b28-b579d77490b9-ovn-controller-tls-certs\") pod \"33af3c2e-dea7-4448-8b28-b579d77490b9\" (UID: \"33af3c2e-dea7-4448-8b28-b579d77490b9\") " Nov 23 15:04:31 crc kubenswrapper[5050]: I1123 15:04:31.465135 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/33af3c2e-dea7-4448-8b28-b579d77490b9-var-log-ovn\") pod \"33af3c2e-dea7-4448-8b28-b579d77490b9\" (UID: \"33af3c2e-dea7-4448-8b28-b579d77490b9\") " Nov 23 15:04:31 crc kubenswrapper[5050]: I1123 15:04:31.465195 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/33af3c2e-dea7-4448-8b28-b579d77490b9-scripts\") pod \"33af3c2e-dea7-4448-8b28-b579d77490b9\" (UID: \"33af3c2e-dea7-4448-8b28-b579d77490b9\") " Nov 23 15:04:31 crc kubenswrapper[5050]: I1123 15:04:31.465867 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/33af3c2e-dea7-4448-8b28-b579d77490b9-var-run-ovn\") pod \"33af3c2e-dea7-4448-8b28-b579d77490b9\" (UID: \"33af3c2e-dea7-4448-8b28-b579d77490b9\") " Nov 23 15:04:31 crc kubenswrapper[5050]: I1123 15:04:31.465904 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/33af3c2e-dea7-4448-8b28-b579d77490b9-var-run\") pod \"33af3c2e-dea7-4448-8b28-b579d77490b9\" (UID: \"33af3c2e-dea7-4448-8b28-b579d77490b9\") " Nov 23 15:04:31 crc kubenswrapper[5050]: I1123 15:04:31.466912 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/33af3c2e-dea7-4448-8b28-b579d77490b9-var-run" (OuterVolumeSpecName: "var-run") pod "33af3c2e-dea7-4448-8b28-b579d77490b9" (UID: "33af3c2e-dea7-4448-8b28-b579d77490b9"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 23 15:04:31 crc kubenswrapper[5050]: I1123 15:04:31.466964 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/33af3c2e-dea7-4448-8b28-b579d77490b9-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "33af3c2e-dea7-4448-8b28-b579d77490b9" (UID: "33af3c2e-dea7-4448-8b28-b579d77490b9"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 23 15:04:31 crc kubenswrapper[5050]: I1123 15:04:31.467629 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/33af3c2e-dea7-4448-8b28-b579d77490b9-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "33af3c2e-dea7-4448-8b28-b579d77490b9" (UID: "33af3c2e-dea7-4448-8b28-b579d77490b9"). InnerVolumeSpecName "var-run-ovn". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 23 15:04:31 crc kubenswrapper[5050]: I1123 15:04:31.467963 5050 scope.go:117] "RemoveContainer" containerID="849e41c94f217d7fbbec3b38946489dc46e00bb66fcfcb08151c2e3f0587eb24" Nov 23 15:04:31 crc kubenswrapper[5050]: I1123 15:04:31.470081 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/33af3c2e-dea7-4448-8b28-b579d77490b9-scripts" (OuterVolumeSpecName: "scripts") pod "33af3c2e-dea7-4448-8b28-b579d77490b9" (UID: "33af3c2e-dea7-4448-8b28-b579d77490b9"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 15:04:31 crc kubenswrapper[5050]: I1123 15:04:31.486877 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/33af3c2e-dea7-4448-8b28-b579d77490b9-kube-api-access-2vh8k" (OuterVolumeSpecName: "kube-api-access-2vh8k") pod "33af3c2e-dea7-4448-8b28-b579d77490b9" (UID: "33af3c2e-dea7-4448-8b28-b579d77490b9"). InnerVolumeSpecName "kube-api-access-2vh8k". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:04:31 crc kubenswrapper[5050]: I1123 15:04:31.490518 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/openstack-galera-0"] Nov 23 15:04:31 crc kubenswrapper[5050]: I1123 15:04:31.526217 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/33af3c2e-dea7-4448-8b28-b579d77490b9-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "33af3c2e-dea7-4448-8b28-b579d77490b9" (UID: "33af3c2e-dea7-4448-8b28-b579d77490b9"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:04:31 crc kubenswrapper[5050]: I1123 15:04:31.562917 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/33af3c2e-dea7-4448-8b28-b579d77490b9-ovn-controller-tls-certs" (OuterVolumeSpecName: "ovn-controller-tls-certs") pod "33af3c2e-dea7-4448-8b28-b579d77490b9" (UID: "33af3c2e-dea7-4448-8b28-b579d77490b9"). InnerVolumeSpecName "ovn-controller-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:04:31 crc kubenswrapper[5050]: I1123 15:04:31.566237 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="042c506a-b65f-4cbd-9ca7-3df0ec55ffa0" path="/var/lib/kubelet/pods/042c506a-b65f-4cbd-9ca7-3df0ec55ffa0/volumes" Nov 23 15:04:31 crc kubenswrapper[5050]: I1123 15:04:31.567893 5050 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/33af3c2e-dea7-4448-8b28-b579d77490b9-var-log-ovn\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:31 crc kubenswrapper[5050]: I1123 15:04:31.567978 5050 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/33af3c2e-dea7-4448-8b28-b579d77490b9-scripts\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:31 crc kubenswrapper[5050]: I1123 15:04:31.568032 5050 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/33af3c2e-dea7-4448-8b28-b579d77490b9-var-run-ovn\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:31 crc kubenswrapper[5050]: I1123 15:04:31.568095 5050 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/33af3c2e-dea7-4448-8b28-b579d77490b9-var-run\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:31 crc kubenswrapper[5050]: I1123 15:04:31.568176 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2vh8k\" (UniqueName: \"kubernetes.io/projected/33af3c2e-dea7-4448-8b28-b579d77490b9-kube-api-access-2vh8k\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:31 crc kubenswrapper[5050]: I1123 15:04:31.568231 5050 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/33af3c2e-dea7-4448-8b28-b579d77490b9-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:31 crc kubenswrapper[5050]: I1123 15:04:31.568284 5050 reconciler_common.go:293] "Volume detached for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/33af3c2e-dea7-4448-8b28-b579d77490b9-ovn-controller-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:31 crc kubenswrapper[5050]: I1123 15:04:31.569001 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0a256986-024f-45e6-9b86-b5a724ab5f6e" path="/var/lib/kubelet/pods/0a256986-024f-45e6-9b86-b5a724ab5f6e/volumes" Nov 23 15:04:31 crc kubenswrapper[5050]: I1123 15:04:31.569772 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3d3a2397-697f-4d1b-a5ed-0aaa1b202a12" path="/var/lib/kubelet/pods/3d3a2397-697f-4d1b-a5ed-0aaa1b202a12/volumes" Nov 23 15:04:31 crc kubenswrapper[5050]: I1123 15:04:31.570419 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4b570632-90a0-4fcd-a067-9913b51ad711" path="/var/lib/kubelet/pods/4b570632-90a0-4fcd-a067-9913b51ad711/volumes" Nov 23 15:04:31 crc kubenswrapper[5050]: I1123 15:04:31.572076 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4d1fe4ad-9245-4af5-b378-c908ce72f08c" path="/var/lib/kubelet/pods/4d1fe4ad-9245-4af5-b378-c908ce72f08c/volumes" Nov 23 15:04:31 crc kubenswrapper[5050]: I1123 15:04:31.572855 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4d3c8093-cbaa-4414-9f1d-c7ae7a2b9322" path="/var/lib/kubelet/pods/4d3c8093-cbaa-4414-9f1d-c7ae7a2b9322/volumes" Nov 23 15:04:31 crc kubenswrapper[5050]: I1123 15:04:31.574215 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="5eff539e-c641-4873-aeae-450aaf0b4ac8" path="/var/lib/kubelet/pods/5eff539e-c641-4873-aeae-450aaf0b4ac8/volumes" Nov 23 15:04:31 crc kubenswrapper[5050]: I1123 15:04:31.575350 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="634f01f5-daae-4e5d-811f-5d406bfee9c1" path="/var/lib/kubelet/pods/634f01f5-daae-4e5d-811f-5d406bfee9c1/volumes" Nov 23 15:04:31 crc kubenswrapper[5050]: I1123 15:04:31.576122 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6c3ae457-67ab-4c0b-a17e-a8264215793b" path="/var/lib/kubelet/pods/6c3ae457-67ab-4c0b-a17e-a8264215793b/volumes" Nov 23 15:04:31 crc kubenswrapper[5050]: I1123 15:04:31.577351 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6c5e9b39-91fa-4f4e-9d95-0599bc22472d" path="/var/lib/kubelet/pods/6c5e9b39-91fa-4f4e-9d95-0599bc22472d/volumes" Nov 23 15:04:31 crc kubenswrapper[5050]: I1123 15:04:31.578146 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="75e360ca-c84a-4806-a86a-86924a639cfc" path="/var/lib/kubelet/pods/75e360ca-c84a-4806-a86a-86924a639cfc/volumes" Nov 23 15:04:31 crc kubenswrapper[5050]: I1123 15:04:31.578861 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7f33bb25-e6da-43a1-8afd-08a47636b172" path="/var/lib/kubelet/pods/7f33bb25-e6da-43a1-8afd-08a47636b172/volumes" Nov 23 15:04:31 crc kubenswrapper[5050]: I1123 15:04:31.579997 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8e597d4a-3ff4-4b9e-ab6a-c0364f6afb43" path="/var/lib/kubelet/pods/8e597d4a-3ff4-4b9e-ab6a-c0364f6afb43/volumes" Nov 23 15:04:31 crc kubenswrapper[5050]: I1123 15:04:31.580660 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9ea4c6e2-e26b-454f-a6e1-b8fd7df8a931" path="/var/lib/kubelet/pods/9ea4c6e2-e26b-454f-a6e1-b8fd7df8a931/volumes" Nov 23 15:04:31 crc kubenswrapper[5050]: I1123 15:04:31.581289 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d1ce97bd-1ff7-400e-a741-7e757fbd7e17" path="/var/lib/kubelet/pods/d1ce97bd-1ff7-400e-a741-7e757fbd7e17/volumes" Nov 23 15:04:31 crc kubenswrapper[5050]: I1123 15:04:31.581854 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e3fbc320-b4b0-47f2-9fc6-a6efc63a3ee9" path="/var/lib/kubelet/pods/e3fbc320-b4b0-47f2-9fc6-a6efc63a3ee9/volumes" Nov 23 15:04:31 crc kubenswrapper[5050]: I1123 15:04:31.582915 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e78eb875-17e5-431f-82b5-14a1190488b0" path="/var/lib/kubelet/pods/e78eb875-17e5-431f-82b5-14a1190488b0/volumes" Nov 23 15:04:31 crc kubenswrapper[5050]: I1123 15:04:31.583514 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e9b2ddd2-95ef-4086-b1dc-b13acef90cb5" path="/var/lib/kubelet/pods/e9b2ddd2-95ef-4086-b1dc-b13acef90cb5/volumes" Nov 23 15:04:31 crc kubenswrapper[5050]: I1123 15:04:31.584735 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f85034c8-6b79-42e1-849d-646a2ead1a93" path="/var/lib/kubelet/pods/f85034c8-6b79-42e1-849d-646a2ead1a93/volumes" Nov 23 15:04:31 crc kubenswrapper[5050]: I1123 15:04:31.586863 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f92353db-5352-4216-ad2d-50242e58dfb7" path="/var/lib/kubelet/pods/f92353db-5352-4216-ad2d-50242e58dfb7/volumes" Nov 23 15:04:31 crc kubenswrapper[5050]: E1123 15:04:31.862997 5050 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not 
created or running: checking if PID of 1b3d79783367fa4b7fae45d92ec2a13146512723daef532131bce7af8af900e8 is running failed: container process not found" containerID="1b3d79783367fa4b7fae45d92ec2a13146512723daef532131bce7af8af900e8" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 23 15:04:31 crc kubenswrapper[5050]: E1123 15:04:31.863564 5050 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1b3d79783367fa4b7fae45d92ec2a13146512723daef532131bce7af8af900e8 is running failed: container process not found" containerID="1b3d79783367fa4b7fae45d92ec2a13146512723daef532131bce7af8af900e8" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 23 15:04:31 crc kubenswrapper[5050]: E1123 15:04:31.863921 5050 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1b3d79783367fa4b7fae45d92ec2a13146512723daef532131bce7af8af900e8 is running failed: container process not found" containerID="1b3d79783367fa4b7fae45d92ec2a13146512723daef532131bce7af8af900e8" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 23 15:04:31 crc kubenswrapper[5050]: E1123 15:04:31.864155 5050 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1b3d79783367fa4b7fae45d92ec2a13146512723daef532131bce7af8af900e8 is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-n9v8v" podUID="c21366ac-a7da-4cac-8332-753659210595" containerName="ovsdb-server" Nov 23 15:04:31 crc kubenswrapper[5050]: E1123 15:04:31.865470 5050 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="a0820261b299cc33db42946f12f1f4467f892f1598cff4f933ab45def65d8af9" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 23 15:04:31 crc kubenswrapper[5050]: E1123 15:04:31.867815 5050 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="a0820261b299cc33db42946f12f1f4467f892f1598cff4f933ab45def65d8af9" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 23 15:04:31 crc kubenswrapper[5050]: E1123 15:04:31.869272 5050 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="a0820261b299cc33db42946f12f1f4467f892f1598cff4f933ab45def65d8af9" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 23 15:04:31 crc kubenswrapper[5050]: E1123 15:04:31.869324 5050 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-n9v8v" podUID="c21366ac-a7da-4cac-8332-753659210595" containerName="ovs-vswitchd" Nov 23 15:04:31 crc kubenswrapper[5050]: I1123 15:04:31.966611 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 23 15:04:32 crc kubenswrapper[5050]: I1123 15:04:32.081610 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ba4a4a57-47b0-423d-8bb1-76953fb3a37b-config-data\") pod \"ba4a4a57-47b0-423d-8bb1-76953fb3a37b\" (UID: \"ba4a4a57-47b0-423d-8bb1-76953fb3a37b\") " Nov 23 15:04:32 crc kubenswrapper[5050]: I1123 15:04:32.081836 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ba4a4a57-47b0-423d-8bb1-76953fb3a37b-combined-ca-bundle\") pod \"ba4a4a57-47b0-423d-8bb1-76953fb3a37b\" (UID: \"ba4a4a57-47b0-423d-8bb1-76953fb3a37b\") " Nov 23 15:04:32 crc kubenswrapper[5050]: I1123 15:04:32.081871 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ba4a4a57-47b0-423d-8bb1-76953fb3a37b-config-data-custom\") pod \"ba4a4a57-47b0-423d-8bb1-76953fb3a37b\" (UID: \"ba4a4a57-47b0-423d-8bb1-76953fb3a37b\") " Nov 23 15:04:32 crc kubenswrapper[5050]: I1123 15:04:32.081906 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p5sqc\" (UniqueName: \"kubernetes.io/projected/ba4a4a57-47b0-423d-8bb1-76953fb3a37b-kube-api-access-p5sqc\") pod \"ba4a4a57-47b0-423d-8bb1-76953fb3a37b\" (UID: \"ba4a4a57-47b0-423d-8bb1-76953fb3a37b\") " Nov 23 15:04:32 crc kubenswrapper[5050]: I1123 15:04:32.081951 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/ba4a4a57-47b0-423d-8bb1-76953fb3a37b-etc-machine-id\") pod \"ba4a4a57-47b0-423d-8bb1-76953fb3a37b\" (UID: \"ba4a4a57-47b0-423d-8bb1-76953fb3a37b\") " Nov 23 15:04:32 crc kubenswrapper[5050]: I1123 15:04:32.082371 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ba4a4a57-47b0-423d-8bb1-76953fb3a37b-scripts\") pod \"ba4a4a57-47b0-423d-8bb1-76953fb3a37b\" (UID: \"ba4a4a57-47b0-423d-8bb1-76953fb3a37b\") " Nov 23 15:04:32 crc kubenswrapper[5050]: I1123 15:04:32.083310 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ba4a4a57-47b0-423d-8bb1-76953fb3a37b-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "ba4a4a57-47b0-423d-8bb1-76953fb3a37b" (UID: "ba4a4a57-47b0-423d-8bb1-76953fb3a37b"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 23 15:04:32 crc kubenswrapper[5050]: I1123 15:04:32.097874 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ba4a4a57-47b0-423d-8bb1-76953fb3a37b-kube-api-access-p5sqc" (OuterVolumeSpecName: "kube-api-access-p5sqc") pod "ba4a4a57-47b0-423d-8bb1-76953fb3a37b" (UID: "ba4a4a57-47b0-423d-8bb1-76953fb3a37b"). InnerVolumeSpecName "kube-api-access-p5sqc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:04:32 crc kubenswrapper[5050]: I1123 15:04:32.120090 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ba4a4a57-47b0-423d-8bb1-76953fb3a37b-scripts" (OuterVolumeSpecName: "scripts") pod "ba4a4a57-47b0-423d-8bb1-76953fb3a37b" (UID: "ba4a4a57-47b0-423d-8bb1-76953fb3a37b"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:04:32 crc kubenswrapper[5050]: I1123 15:04:32.120400 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ba4a4a57-47b0-423d-8bb1-76953fb3a37b-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "ba4a4a57-47b0-423d-8bb1-76953fb3a37b" (UID: "ba4a4a57-47b0-423d-8bb1-76953fb3a37b"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:04:32 crc kubenswrapper[5050]: I1123 15:04:32.160344 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ba4a4a57-47b0-423d-8bb1-76953fb3a37b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ba4a4a57-47b0-423d-8bb1-76953fb3a37b" (UID: "ba4a4a57-47b0-423d-8bb1-76953fb3a37b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:04:32 crc kubenswrapper[5050]: I1123 15:04:32.182880 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ba4a4a57-47b0-423d-8bb1-76953fb3a37b-config-data" (OuterVolumeSpecName: "config-data") pod "ba4a4a57-47b0-423d-8bb1-76953fb3a37b" (UID: "ba4a4a57-47b0-423d-8bb1-76953fb3a37b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:04:32 crc kubenswrapper[5050]: I1123 15:04:32.185178 5050 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ba4a4a57-47b0-423d-8bb1-76953fb3a37b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:32 crc kubenswrapper[5050]: I1123 15:04:32.185218 5050 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ba4a4a57-47b0-423d-8bb1-76953fb3a37b-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:32 crc kubenswrapper[5050]: I1123 15:04:32.185228 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p5sqc\" (UniqueName: \"kubernetes.io/projected/ba4a4a57-47b0-423d-8bb1-76953fb3a37b-kube-api-access-p5sqc\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:32 crc kubenswrapper[5050]: I1123 15:04:32.185242 5050 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/ba4a4a57-47b0-423d-8bb1-76953fb3a37b-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:32 crc kubenswrapper[5050]: I1123 15:04:32.185252 5050 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ba4a4a57-47b0-423d-8bb1-76953fb3a37b-scripts\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:32 crc kubenswrapper[5050]: I1123 15:04:32.185276 5050 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ba4a4a57-47b0-423d-8bb1-76953fb3a37b-config-data\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:32 crc kubenswrapper[5050]: I1123 15:04:32.205175 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_27e26dea-41e9-4d40-9adc-f902e5f4791f/ovn-northd/0.log" Nov 23 15:04:32 crc kubenswrapper[5050]: I1123 15:04:32.205280 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-northd-0" Nov 23 15:04:32 crc kubenswrapper[5050]: I1123 15:04:32.286597 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/27e26dea-41e9-4d40-9adc-f902e5f4791f-scripts\") pod \"27e26dea-41e9-4d40-9adc-f902e5f4791f\" (UID: \"27e26dea-41e9-4d40-9adc-f902e5f4791f\") " Nov 23 15:04:32 crc kubenswrapper[5050]: I1123 15:04:32.287695 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/27e26dea-41e9-4d40-9adc-f902e5f4791f-ovn-rundir\") pod \"27e26dea-41e9-4d40-9adc-f902e5f4791f\" (UID: \"27e26dea-41e9-4d40-9adc-f902e5f4791f\") " Nov 23 15:04:32 crc kubenswrapper[5050]: I1123 15:04:32.287759 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/27e26dea-41e9-4d40-9adc-f902e5f4791f-combined-ca-bundle\") pod \"27e26dea-41e9-4d40-9adc-f902e5f4791f\" (UID: \"27e26dea-41e9-4d40-9adc-f902e5f4791f\") " Nov 23 15:04:32 crc kubenswrapper[5050]: I1123 15:04:32.287792 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/27e26dea-41e9-4d40-9adc-f902e5f4791f-metrics-certs-tls-certs\") pod \"27e26dea-41e9-4d40-9adc-f902e5f4791f\" (UID: \"27e26dea-41e9-4d40-9adc-f902e5f4791f\") " Nov 23 15:04:32 crc kubenswrapper[5050]: I1123 15:04:32.287961 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/27e26dea-41e9-4d40-9adc-f902e5f4791f-ovn-northd-tls-certs\") pod \"27e26dea-41e9-4d40-9adc-f902e5f4791f\" (UID: \"27e26dea-41e9-4d40-9adc-f902e5f4791f\") " Nov 23 15:04:32 crc kubenswrapper[5050]: I1123 15:04:32.288053 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qtd92\" (UniqueName: \"kubernetes.io/projected/27e26dea-41e9-4d40-9adc-f902e5f4791f-kube-api-access-qtd92\") pod \"27e26dea-41e9-4d40-9adc-f902e5f4791f\" (UID: \"27e26dea-41e9-4d40-9adc-f902e5f4791f\") " Nov 23 15:04:32 crc kubenswrapper[5050]: I1123 15:04:32.288075 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/27e26dea-41e9-4d40-9adc-f902e5f4791f-config\") pod \"27e26dea-41e9-4d40-9adc-f902e5f4791f\" (UID: \"27e26dea-41e9-4d40-9adc-f902e5f4791f\") " Nov 23 15:04:32 crc kubenswrapper[5050]: I1123 15:04:32.288286 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/27e26dea-41e9-4d40-9adc-f902e5f4791f-ovn-rundir" (OuterVolumeSpecName: "ovn-rundir") pod "27e26dea-41e9-4d40-9adc-f902e5f4791f" (UID: "27e26dea-41e9-4d40-9adc-f902e5f4791f"). InnerVolumeSpecName "ovn-rundir". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 15:04:32 crc kubenswrapper[5050]: I1123 15:04:32.288829 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/27e26dea-41e9-4d40-9adc-f902e5f4791f-scripts" (OuterVolumeSpecName: "scripts") pod "27e26dea-41e9-4d40-9adc-f902e5f4791f" (UID: "27e26dea-41e9-4d40-9adc-f902e5f4791f"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 15:04:32 crc kubenswrapper[5050]: I1123 15:04:32.289037 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/27e26dea-41e9-4d40-9adc-f902e5f4791f-config" (OuterVolumeSpecName: "config") pod "27e26dea-41e9-4d40-9adc-f902e5f4791f" (UID: "27e26dea-41e9-4d40-9adc-f902e5f4791f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 15:04:32 crc kubenswrapper[5050]: I1123 15:04:32.289246 5050 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/27e26dea-41e9-4d40-9adc-f902e5f4791f-scripts\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:32 crc kubenswrapper[5050]: I1123 15:04:32.289270 5050 reconciler_common.go:293] "Volume detached for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/27e26dea-41e9-4d40-9adc-f902e5f4791f-ovn-rundir\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:32 crc kubenswrapper[5050]: I1123 15:04:32.289286 5050 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/27e26dea-41e9-4d40-9adc-f902e5f4791f-config\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:32 crc kubenswrapper[5050]: I1123 15:04:32.295947 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/27e26dea-41e9-4d40-9adc-f902e5f4791f-kube-api-access-qtd92" (OuterVolumeSpecName: "kube-api-access-qtd92") pod "27e26dea-41e9-4d40-9adc-f902e5f4791f" (UID: "27e26dea-41e9-4d40-9adc-f902e5f4791f"). InnerVolumeSpecName "kube-api-access-qtd92". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:04:32 crc kubenswrapper[5050]: I1123 15:04:32.336066 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_27e26dea-41e9-4d40-9adc-f902e5f4791f/ovn-northd/0.log" Nov 23 15:04:32 crc kubenswrapper[5050]: I1123 15:04:32.336139 5050 generic.go:334] "Generic (PLEG): container finished" podID="27e26dea-41e9-4d40-9adc-f902e5f4791f" containerID="deafd7234d6f0ca5e0fe921dff3e5ef022a4b569f1d6016eec6fa8e609b595e2" exitCode=139 Nov 23 15:04:32 crc kubenswrapper[5050]: I1123 15:04:32.336212 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0" Nov 23 15:04:32 crc kubenswrapper[5050]: I1123 15:04:32.336244 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"27e26dea-41e9-4d40-9adc-f902e5f4791f","Type":"ContainerDied","Data":"deafd7234d6f0ca5e0fe921dff3e5ef022a4b569f1d6016eec6fa8e609b595e2"} Nov 23 15:04:32 crc kubenswrapper[5050]: I1123 15:04:32.336299 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"27e26dea-41e9-4d40-9adc-f902e5f4791f","Type":"ContainerDied","Data":"c792fa7250904d4d39bdbb042f089fe70ac0de73b9dd4b95958a61bca56e3a3c"} Nov 23 15:04:32 crc kubenswrapper[5050]: I1123 15:04:32.336324 5050 scope.go:117] "RemoveContainer" containerID="8b448c0c28f62c7ec1d3e7f3dd39d921c151ee6325f2379cf0312b266ebedf37" Nov 23 15:04:32 crc kubenswrapper[5050]: I1123 15:04:32.338754 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/27e26dea-41e9-4d40-9adc-f902e5f4791f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "27e26dea-41e9-4d40-9adc-f902e5f4791f" (UID: "27e26dea-41e9-4d40-9adc-f902e5f4791f"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:04:32 crc kubenswrapper[5050]: I1123 15:04:32.346203 5050 generic.go:334] "Generic (PLEG): container finished" podID="ba4a4a57-47b0-423d-8bb1-76953fb3a37b" containerID="88de6b6d747657765b520632673420c57f5942e93770f67efd5b9461639f7cdd" exitCode=0 Nov 23 15:04:32 crc kubenswrapper[5050]: I1123 15:04:32.346281 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"ba4a4a57-47b0-423d-8bb1-76953fb3a37b","Type":"ContainerDied","Data":"88de6b6d747657765b520632673420c57f5942e93770f67efd5b9461639f7cdd"} Nov 23 15:04:32 crc kubenswrapper[5050]: I1123 15:04:32.346312 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"ba4a4a57-47b0-423d-8bb1-76953fb3a37b","Type":"ContainerDied","Data":"47e88a52634118d105d529a37a802d99e06eb6cbd871b2344b30d68517e46f9c"} Nov 23 15:04:32 crc kubenswrapper[5050]: I1123 15:04:32.346486 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 23 15:04:32 crc kubenswrapper[5050]: I1123 15:04:32.363735 5050 generic.go:334] "Generic (PLEG): container finished" podID="528697e8-1ddc-4ab0-aa0e-008631905a4c" containerID="888990a4380d2a527f44498ff0a147a611909bb72bf4f2fad48c2ae5096b8882" exitCode=0 Nov 23 15:04:32 crc kubenswrapper[5050]: I1123 15:04:32.363857 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"528697e8-1ddc-4ab0-aa0e-008631905a4c","Type":"ContainerDied","Data":"888990a4380d2a527f44498ff0a147a611909bb72bf4f2fad48c2ae5096b8882"} Nov 23 15:04:32 crc kubenswrapper[5050]: I1123 15:04:32.364963 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-tbww8" event={"ID":"33af3c2e-dea7-4448-8b28-b579d77490b9","Type":"ContainerDied","Data":"c3195f1364743c6f2ef8f040c543b8a404bd64e7d77030ed0e05333791fa6afd"} Nov 23 15:04:32 crc kubenswrapper[5050]: I1123 15:04:32.365134 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-tbww8" Nov 23 15:04:32 crc kubenswrapper[5050]: I1123 15:04:32.391793 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qtd92\" (UniqueName: \"kubernetes.io/projected/27e26dea-41e9-4d40-9adc-f902e5f4791f-kube-api-access-qtd92\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:32 crc kubenswrapper[5050]: I1123 15:04:32.391830 5050 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/27e26dea-41e9-4d40-9adc-f902e5f4791f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:32 crc kubenswrapper[5050]: I1123 15:04:32.395554 5050 scope.go:117] "RemoveContainer" containerID="deafd7234d6f0ca5e0fe921dff3e5ef022a4b569f1d6016eec6fa8e609b595e2" Nov 23 15:04:32 crc kubenswrapper[5050]: I1123 15:04:32.424478 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-tbww8"] Nov 23 15:04:32 crc kubenswrapper[5050]: I1123 15:04:32.424900 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/27e26dea-41e9-4d40-9adc-f902e5f4791f-ovn-northd-tls-certs" (OuterVolumeSpecName: "ovn-northd-tls-certs") pod "27e26dea-41e9-4d40-9adc-f902e5f4791f" (UID: "27e26dea-41e9-4d40-9adc-f902e5f4791f"). InnerVolumeSpecName "ovn-northd-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:04:32 crc kubenswrapper[5050]: I1123 15:04:32.429902 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-tbww8"] Nov 23 15:04:32 crc kubenswrapper[5050]: I1123 15:04:32.430227 5050 scope.go:117] "RemoveContainer" containerID="8b448c0c28f62c7ec1d3e7f3dd39d921c151ee6325f2379cf0312b266ebedf37" Nov 23 15:04:32 crc kubenswrapper[5050]: E1123 15:04:32.430784 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8b448c0c28f62c7ec1d3e7f3dd39d921c151ee6325f2379cf0312b266ebedf37\": container with ID starting with 8b448c0c28f62c7ec1d3e7f3dd39d921c151ee6325f2379cf0312b266ebedf37 not found: ID does not exist" containerID="8b448c0c28f62c7ec1d3e7f3dd39d921c151ee6325f2379cf0312b266ebedf37" Nov 23 15:04:32 crc kubenswrapper[5050]: I1123 15:04:32.430812 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8b448c0c28f62c7ec1d3e7f3dd39d921c151ee6325f2379cf0312b266ebedf37"} err="failed to get container status \"8b448c0c28f62c7ec1d3e7f3dd39d921c151ee6325f2379cf0312b266ebedf37\": rpc error: code = NotFound desc = could not find container \"8b448c0c28f62c7ec1d3e7f3dd39d921c151ee6325f2379cf0312b266ebedf37\": container with ID starting with 8b448c0c28f62c7ec1d3e7f3dd39d921c151ee6325f2379cf0312b266ebedf37 not found: ID does not exist" Nov 23 15:04:32 crc kubenswrapper[5050]: I1123 15:04:32.430839 5050 scope.go:117] "RemoveContainer" containerID="deafd7234d6f0ca5e0fe921dff3e5ef022a4b569f1d6016eec6fa8e609b595e2" Nov 23 15:04:32 crc kubenswrapper[5050]: E1123 15:04:32.431206 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"deafd7234d6f0ca5e0fe921dff3e5ef022a4b569f1d6016eec6fa8e609b595e2\": container with ID starting with deafd7234d6f0ca5e0fe921dff3e5ef022a4b569f1d6016eec6fa8e609b595e2 not found: ID does not exist" containerID="deafd7234d6f0ca5e0fe921dff3e5ef022a4b569f1d6016eec6fa8e609b595e2" Nov 23 15:04:32 crc kubenswrapper[5050]: I1123 15:04:32.431223 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"deafd7234d6f0ca5e0fe921dff3e5ef022a4b569f1d6016eec6fa8e609b595e2"} err="failed to get container status \"deafd7234d6f0ca5e0fe921dff3e5ef022a4b569f1d6016eec6fa8e609b595e2\": rpc error: code = NotFound desc = could not find container \"deafd7234d6f0ca5e0fe921dff3e5ef022a4b569f1d6016eec6fa8e609b595e2\": container with ID starting with deafd7234d6f0ca5e0fe921dff3e5ef022a4b569f1d6016eec6fa8e609b595e2 not found: ID does not exist" Nov 23 15:04:32 crc kubenswrapper[5050]: I1123 15:04:32.431235 5050 scope.go:117] "RemoveContainer" containerID="ff0e97bc91a901d22909a988b2e36c290f89a2317abfa813abba814502aa0edc" Nov 23 15:04:32 crc kubenswrapper[5050]: I1123 15:04:32.435255 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 23 15:04:32 crc kubenswrapper[5050]: I1123 15:04:32.435754 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/27e26dea-41e9-4d40-9adc-f902e5f4791f-metrics-certs-tls-certs" (OuterVolumeSpecName: "metrics-certs-tls-certs") pod "27e26dea-41e9-4d40-9adc-f902e5f4791f" (UID: "27e26dea-41e9-4d40-9adc-f902e5f4791f"). InnerVolumeSpecName "metrics-certs-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:04:32 crc kubenswrapper[5050]: I1123 15:04:32.440461 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 23 15:04:32 crc kubenswrapper[5050]: I1123 15:04:32.458228 5050 scope.go:117] "RemoveContainer" containerID="88de6b6d747657765b520632673420c57f5942e93770f67efd5b9461639f7cdd" Nov 23 15:04:32 crc kubenswrapper[5050]: I1123 15:04:32.485500 5050 scope.go:117] "RemoveContainer" containerID="ff0e97bc91a901d22909a988b2e36c290f89a2317abfa813abba814502aa0edc" Nov 23 15:04:32 crc kubenswrapper[5050]: E1123 15:04:32.486203 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ff0e97bc91a901d22909a988b2e36c290f89a2317abfa813abba814502aa0edc\": container with ID starting with ff0e97bc91a901d22909a988b2e36c290f89a2317abfa813abba814502aa0edc not found: ID does not exist" containerID="ff0e97bc91a901d22909a988b2e36c290f89a2317abfa813abba814502aa0edc" Nov 23 15:04:32 crc kubenswrapper[5050]: I1123 15:04:32.486265 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ff0e97bc91a901d22909a988b2e36c290f89a2317abfa813abba814502aa0edc"} err="failed to get container status \"ff0e97bc91a901d22909a988b2e36c290f89a2317abfa813abba814502aa0edc\": rpc error: code = NotFound desc = could not find container \"ff0e97bc91a901d22909a988b2e36c290f89a2317abfa813abba814502aa0edc\": container with ID starting with ff0e97bc91a901d22909a988b2e36c290f89a2317abfa813abba814502aa0edc not found: ID does not exist" Nov 23 15:04:32 crc kubenswrapper[5050]: I1123 15:04:32.486461 5050 scope.go:117] "RemoveContainer" containerID="88de6b6d747657765b520632673420c57f5942e93770f67efd5b9461639f7cdd" Nov 23 15:04:32 crc kubenswrapper[5050]: E1123 15:04:32.486944 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"88de6b6d747657765b520632673420c57f5942e93770f67efd5b9461639f7cdd\": container with ID starting with 88de6b6d747657765b520632673420c57f5942e93770f67efd5b9461639f7cdd not found: ID does not exist" containerID="88de6b6d747657765b520632673420c57f5942e93770f67efd5b9461639f7cdd" Nov 23 15:04:32 crc kubenswrapper[5050]: I1123 15:04:32.486969 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"88de6b6d747657765b520632673420c57f5942e93770f67efd5b9461639f7cdd"} err="failed to get container status \"88de6b6d747657765b520632673420c57f5942e93770f67efd5b9461639f7cdd\": rpc error: code = NotFound desc = could not find container \"88de6b6d747657765b520632673420c57f5942e93770f67efd5b9461639f7cdd\": container with ID starting with 88de6b6d747657765b520632673420c57f5942e93770f67efd5b9461639f7cdd not found: ID does not exist" Nov 23 15:04:32 crc kubenswrapper[5050]: I1123 15:04:32.494084 5050 reconciler_common.go:293] "Volume detached for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/27e26dea-41e9-4d40-9adc-f902e5f4791f-ovn-northd-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:32 crc kubenswrapper[5050]: I1123 15:04:32.494103 5050 reconciler_common.go:293] "Volume detached for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/27e26dea-41e9-4d40-9adc-f902e5f4791f-metrics-certs-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:32 crc kubenswrapper[5050]: I1123 15:04:32.526005 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 23 15:04:32 crc kubenswrapper[5050]: I1123 15:04:32.675693 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-northd-0"] Nov 23 15:04:32 crc kubenswrapper[5050]: I1123 15:04:32.681576 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-northd-0"] Nov 23 15:04:32 crc kubenswrapper[5050]: I1123 15:04:32.701273 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/528697e8-1ddc-4ab0-aa0e-008631905a4c-scripts\") pod \"528697e8-1ddc-4ab0-aa0e-008631905a4c\" (UID: \"528697e8-1ddc-4ab0-aa0e-008631905a4c\") " Nov 23 15:04:32 crc kubenswrapper[5050]: I1123 15:04:32.701349 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/528697e8-1ddc-4ab0-aa0e-008631905a4c-config-data\") pod \"528697e8-1ddc-4ab0-aa0e-008631905a4c\" (UID: \"528697e8-1ddc-4ab0-aa0e-008631905a4c\") " Nov 23 15:04:32 crc kubenswrapper[5050]: I1123 15:04:32.701421 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/528697e8-1ddc-4ab0-aa0e-008631905a4c-combined-ca-bundle\") pod \"528697e8-1ddc-4ab0-aa0e-008631905a4c\" (UID: \"528697e8-1ddc-4ab0-aa0e-008631905a4c\") " Nov 23 15:04:32 crc kubenswrapper[5050]: I1123 15:04:32.701555 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/528697e8-1ddc-4ab0-aa0e-008631905a4c-run-httpd\") pod \"528697e8-1ddc-4ab0-aa0e-008631905a4c\" (UID: \"528697e8-1ddc-4ab0-aa0e-008631905a4c\") " Nov 23 15:04:32 crc kubenswrapper[5050]: I1123 15:04:32.701600 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/528697e8-1ddc-4ab0-aa0e-008631905a4c-log-httpd\") pod \"528697e8-1ddc-4ab0-aa0e-008631905a4c\" (UID: \"528697e8-1ddc-4ab0-aa0e-008631905a4c\") " Nov 23 15:04:32 crc kubenswrapper[5050]: I1123 15:04:32.701729 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5s6nb\" (UniqueName: \"kubernetes.io/projected/528697e8-1ddc-4ab0-aa0e-008631905a4c-kube-api-access-5s6nb\") pod \"528697e8-1ddc-4ab0-aa0e-008631905a4c\" (UID: \"528697e8-1ddc-4ab0-aa0e-008631905a4c\") " Nov 23 15:04:32 crc kubenswrapper[5050]: I1123 15:04:32.701799 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/528697e8-1ddc-4ab0-aa0e-008631905a4c-sg-core-conf-yaml\") pod \"528697e8-1ddc-4ab0-aa0e-008631905a4c\" (UID: \"528697e8-1ddc-4ab0-aa0e-008631905a4c\") " Nov 23 15:04:32 crc kubenswrapper[5050]: I1123 15:04:32.701858 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/528697e8-1ddc-4ab0-aa0e-008631905a4c-ceilometer-tls-certs\") pod \"528697e8-1ddc-4ab0-aa0e-008631905a4c\" (UID: \"528697e8-1ddc-4ab0-aa0e-008631905a4c\") " Nov 23 15:04:32 crc kubenswrapper[5050]: I1123 15:04:32.704038 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/528697e8-1ddc-4ab0-aa0e-008631905a4c-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "528697e8-1ddc-4ab0-aa0e-008631905a4c" (UID: "528697e8-1ddc-4ab0-aa0e-008631905a4c"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 15:04:32 crc kubenswrapper[5050]: I1123 15:04:32.704402 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/528697e8-1ddc-4ab0-aa0e-008631905a4c-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "528697e8-1ddc-4ab0-aa0e-008631905a4c" (UID: "528697e8-1ddc-4ab0-aa0e-008631905a4c"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 15:04:32 crc kubenswrapper[5050]: E1123 15:04:32.705427 5050 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Nov 23 15:04:32 crc kubenswrapper[5050]: E1123 15:04:32.705596 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/1bda1a38-71dd-4de4-8e16-b5159a08fdfa-operator-scripts podName:1bda1a38-71dd-4de4-8e16-b5159a08fdfa nodeName:}" failed. No retries permitted until 2025-11-23 15:04:36.705562283 +0000 UTC m=+1371.872558948 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/1bda1a38-71dd-4de4-8e16-b5159a08fdfa-operator-scripts") pod "novaapif81d-account-delete-xfdwn" (UID: "1bda1a38-71dd-4de4-8e16-b5159a08fdfa") : configmap "openstack-scripts" not found Nov 23 15:04:32 crc kubenswrapper[5050]: I1123 15:04:32.709398 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/528697e8-1ddc-4ab0-aa0e-008631905a4c-kube-api-access-5s6nb" (OuterVolumeSpecName: "kube-api-access-5s6nb") pod "528697e8-1ddc-4ab0-aa0e-008631905a4c" (UID: "528697e8-1ddc-4ab0-aa0e-008631905a4c"). InnerVolumeSpecName "kube-api-access-5s6nb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:04:32 crc kubenswrapper[5050]: I1123 15:04:32.712164 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/528697e8-1ddc-4ab0-aa0e-008631905a4c-scripts" (OuterVolumeSpecName: "scripts") pod "528697e8-1ddc-4ab0-aa0e-008631905a4c" (UID: "528697e8-1ddc-4ab0-aa0e-008631905a4c"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:04:32 crc kubenswrapper[5050]: I1123 15:04:32.754000 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/528697e8-1ddc-4ab0-aa0e-008631905a4c-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "528697e8-1ddc-4ab0-aa0e-008631905a4c" (UID: "528697e8-1ddc-4ab0-aa0e-008631905a4c"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:04:32 crc kubenswrapper[5050]: I1123 15:04:32.760680 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/528697e8-1ddc-4ab0-aa0e-008631905a4c-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "528697e8-1ddc-4ab0-aa0e-008631905a4c" (UID: "528697e8-1ddc-4ab0-aa0e-008631905a4c"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:04:32 crc kubenswrapper[5050]: I1123 15:04:32.772315 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/528697e8-1ddc-4ab0-aa0e-008631905a4c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "528697e8-1ddc-4ab0-aa0e-008631905a4c" (UID: "528697e8-1ddc-4ab0-aa0e-008631905a4c"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:04:32 crc kubenswrapper[5050]: I1123 15:04:32.803820 5050 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/528697e8-1ddc-4ab0-aa0e-008631905a4c-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:32 crc kubenswrapper[5050]: I1123 15:04:32.803859 5050 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/528697e8-1ddc-4ab0-aa0e-008631905a4c-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:32 crc kubenswrapper[5050]: I1123 15:04:32.803869 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5s6nb\" (UniqueName: \"kubernetes.io/projected/528697e8-1ddc-4ab0-aa0e-008631905a4c-kube-api-access-5s6nb\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:32 crc kubenswrapper[5050]: I1123 15:04:32.803879 5050 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/528697e8-1ddc-4ab0-aa0e-008631905a4c-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:32 crc kubenswrapper[5050]: I1123 15:04:32.803888 5050 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/528697e8-1ddc-4ab0-aa0e-008631905a4c-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:32 crc kubenswrapper[5050]: I1123 15:04:32.803897 5050 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/528697e8-1ddc-4ab0-aa0e-008631905a4c-scripts\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:32 crc kubenswrapper[5050]: I1123 15:04:32.803905 5050 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/528697e8-1ddc-4ab0-aa0e-008631905a4c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:32 crc kubenswrapper[5050]: E1123 15:04:32.803930 5050 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Nov 23 15:04:32 crc kubenswrapper[5050]: E1123 15:04:32.804087 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/3d6e4b67-5d33-49cd-8a2a-69c3fc2c9d89-operator-scripts podName:3d6e4b67-5d33-49cd-8a2a-69c3fc2c9d89 nodeName:}" failed. No retries permitted until 2025-11-23 15:04:36.804037027 +0000 UTC m=+1371.971033702 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/3d6e4b67-5d33-49cd-8a2a-69c3fc2c9d89-operator-scripts") pod "novacell03052-account-delete-kz7k7" (UID: "3d6e4b67-5d33-49cd-8a2a-69c3fc2c9d89") : configmap "openstack-scripts" not found Nov 23 15:04:32 crc kubenswrapper[5050]: I1123 15:04:32.823479 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/528697e8-1ddc-4ab0-aa0e-008631905a4c-config-data" (OuterVolumeSpecName: "config-data") pod "528697e8-1ddc-4ab0-aa0e-008631905a4c" (UID: "528697e8-1ddc-4ab0-aa0e-008631905a4c"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:04:32 crc kubenswrapper[5050]: I1123 15:04:32.906614 5050 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/528697e8-1ddc-4ab0-aa0e-008631905a4c-config-data\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:33 crc kubenswrapper[5050]: I1123 15:04:33.388302 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"528697e8-1ddc-4ab0-aa0e-008631905a4c","Type":"ContainerDied","Data":"0460cd1868cf32b301bc534ae55e7cf03d3645a42adf46dedf5dd733b165c3c2"} Nov 23 15:04:33 crc kubenswrapper[5050]: I1123 15:04:33.388403 5050 scope.go:117] "RemoveContainer" containerID="5f913c69d91ed75b757a4d06691b6d5cb9dec9b9a7273d12971e80ccd91082db" Nov 23 15:04:33 crc kubenswrapper[5050]: I1123 15:04:33.388424 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 23 15:04:33 crc kubenswrapper[5050]: I1123 15:04:33.429830 5050 scope.go:117] "RemoveContainer" containerID="0db0ab1375fb7487d196ec345c0738e0de0a29049ea856246e4379363ccc3e46" Nov 23 15:04:33 crc kubenswrapper[5050]: I1123 15:04:33.435554 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 23 15:04:33 crc kubenswrapper[5050]: I1123 15:04:33.443372 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 23 15:04:33 crc kubenswrapper[5050]: I1123 15:04:33.457054 5050 scope.go:117] "RemoveContainer" containerID="888990a4380d2a527f44498ff0a147a611909bb72bf4f2fad48c2ae5096b8882" Nov 23 15:04:33 crc kubenswrapper[5050]: I1123 15:04:33.493728 5050 scope.go:117] "RemoveContainer" containerID="54bb6bc8b8fe2be064d6b1b7c3753c2cce5da608c4510b93b32b5cb3298cf7f7" Nov 23 15:04:33 crc kubenswrapper[5050]: I1123 15:04:33.568987 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="27e26dea-41e9-4d40-9adc-f902e5f4791f" path="/var/lib/kubelet/pods/27e26dea-41e9-4d40-9adc-f902e5f4791f/volumes" Nov 23 15:04:33 crc kubenswrapper[5050]: I1123 15:04:33.570824 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="33af3c2e-dea7-4448-8b28-b579d77490b9" path="/var/lib/kubelet/pods/33af3c2e-dea7-4448-8b28-b579d77490b9/volumes" Nov 23 15:04:33 crc kubenswrapper[5050]: I1123 15:04:33.572230 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="528697e8-1ddc-4ab0-aa0e-008631905a4c" path="/var/lib/kubelet/pods/528697e8-1ddc-4ab0-aa0e-008631905a4c/volumes" Nov 23 15:04:33 crc kubenswrapper[5050]: I1123 15:04:33.575088 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ba4a4a57-47b0-423d-8bb1-76953fb3a37b" path="/var/lib/kubelet/pods/ba4a4a57-47b0-423d-8bb1-76953fb3a37b/volumes" Nov 23 15:04:36 crc kubenswrapper[5050]: E1123 15:04:36.804903 5050 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Nov 23 15:04:36 crc kubenswrapper[5050]: E1123 15:04:36.805544 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/1bda1a38-71dd-4de4-8e16-b5159a08fdfa-operator-scripts podName:1bda1a38-71dd-4de4-8e16-b5159a08fdfa nodeName:}" failed. No retries permitted until 2025-11-23 15:04:44.805512271 +0000 UTC m=+1379.972508796 (durationBeforeRetry 8s). 
Nov 23 15:04:36 crc kubenswrapper[5050]: E1123 15:04:36.804996 5050 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found
Nov 23 15:04:36 crc kubenswrapper[5050]: E1123 15:04:36.805765 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/3d6e4b67-5d33-49cd-8a2a-69c3fc2c9d89-operator-scripts podName:3d6e4b67-5d33-49cd-8a2a-69c3fc2c9d89 nodeName:}" failed. No retries permitted until 2025-11-23 15:04:44.805707236 +0000 UTC m=+1379.972703761 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/3d6e4b67-5d33-49cd-8a2a-69c3fc2c9d89-operator-scripts") pod "novacell03052-account-delete-kz7k7" (UID: "3d6e4b67-5d33-49cd-8a2a-69c3fc2c9d89") : configmap "openstack-scripts" not found
Nov 23 15:04:36 crc kubenswrapper[5050]: E1123 15:04:36.863963 5050 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1b3d79783367fa4b7fae45d92ec2a13146512723daef532131bce7af8af900e8 is running failed: container process not found" containerID="1b3d79783367fa4b7fae45d92ec2a13146512723daef532131bce7af8af900e8" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"]
Nov 23 15:04:36 crc kubenswrapper[5050]: E1123 15:04:36.864670 5050 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1b3d79783367fa4b7fae45d92ec2a13146512723daef532131bce7af8af900e8 is running failed: container process not found" containerID="1b3d79783367fa4b7fae45d92ec2a13146512723daef532131bce7af8af900e8" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"]
Nov 23 15:04:36 crc kubenswrapper[5050]: E1123 15:04:36.865269 5050 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1b3d79783367fa4b7fae45d92ec2a13146512723daef532131bce7af8af900e8 is running failed: container process not found" containerID="1b3d79783367fa4b7fae45d92ec2a13146512723daef532131bce7af8af900e8" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"]
Nov 23 15:04:36 crc kubenswrapper[5050]: E1123 15:04:36.865356 5050 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1b3d79783367fa4b7fae45d92ec2a13146512723daef532131bce7af8af900e8 is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-n9v8v" podUID="c21366ac-a7da-4cac-8332-753659210595" containerName="ovsdb-server"
Nov 23 15:04:36 crc kubenswrapper[5050]: E1123 15:04:36.867292 5050 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="a0820261b299cc33db42946f12f1f4467f892f1598cff4f933ab45def65d8af9" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"]
Nov 23 15:04:36 crc kubenswrapper[5050]: E1123 15:04:36.869647 5050 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="a0820261b299cc33db42946f12f1f4467f892f1598cff4f933ab45def65d8af9" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"]
Nov 23 15:04:36 crc kubenswrapper[5050]: E1123 15:04:36.871936 5050 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="a0820261b299cc33db42946f12f1f4467f892f1598cff4f933ab45def65d8af9" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"]
Nov 23 15:04:36 crc kubenswrapper[5050]: E1123 15:04:36.871978 5050 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-n9v8v" podUID="c21366ac-a7da-4cac-8332-753659210595" containerName="ovs-vswitchd"
Nov 23 15:04:38 crc kubenswrapper[5050]: E1123 15:04:38.083496 5050 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1b3a1560_8762_47b9_8d1c_fe94eb46bec2.slice/crio-c8744bf42aa8122e20bb79dbbcfbe437abb2154864233d844f2967643789a84f.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1b3a1560_8762_47b9_8d1c_fe94eb46bec2.slice/crio-conmon-c8744bf42aa8122e20bb79dbbcfbe437abb2154864233d844f2967643789a84f.scope\": RecentStats: unable to find data in memory cache]"
Nov 23 15:04:38 crc kubenswrapper[5050]: I1123 15:04:38.470898 5050 generic.go:334] "Generic (PLEG): container finished" podID="1b3a1560-8762-47b9-8d1c-fe94eb46bec2" containerID="c8744bf42aa8122e20bb79dbbcfbe437abb2154864233d844f2967643789a84f" exitCode=0
Nov 23 15:04:38 crc kubenswrapper[5050]: I1123 15:04:38.470986 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7cb7995f89-k8h9t" event={"ID":"1b3a1560-8762-47b9-8d1c-fe94eb46bec2","Type":"ContainerDied","Data":"c8744bf42aa8122e20bb79dbbcfbe437abb2154864233d844f2967643789a84f"}
Nov 23 15:04:38 crc kubenswrapper[5050]: I1123 15:04:38.596561 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-7cb7995f89-k8h9t"
Nov 23 15:04:38 crc kubenswrapper[5050]: I1123 15:04:38.773522 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/1b3a1560-8762-47b9-8d1c-fe94eb46bec2-internal-tls-certs\") pod \"1b3a1560-8762-47b9-8d1c-fe94eb46bec2\" (UID: \"1b3a1560-8762-47b9-8d1c-fe94eb46bec2\") "
Nov 23 15:04:38 crc kubenswrapper[5050]: I1123 15:04:38.773604 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/1b3a1560-8762-47b9-8d1c-fe94eb46bec2-config\") pod \"1b3a1560-8762-47b9-8d1c-fe94eb46bec2\" (UID: \"1b3a1560-8762-47b9-8d1c-fe94eb46bec2\") "
Nov 23 15:04:38 crc kubenswrapper[5050]: I1123 15:04:38.773665 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2q76j\" (UniqueName: \"kubernetes.io/projected/1b3a1560-8762-47b9-8d1c-fe94eb46bec2-kube-api-access-2q76j\") pod \"1b3a1560-8762-47b9-8d1c-fe94eb46bec2\" (UID: \"1b3a1560-8762-47b9-8d1c-fe94eb46bec2\") "
Nov 23 15:04:38 crc kubenswrapper[5050]: I1123 15:04:38.773827 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/1b3a1560-8762-47b9-8d1c-fe94eb46bec2-public-tls-certs\") pod \"1b3a1560-8762-47b9-8d1c-fe94eb46bec2\" (UID: \"1b3a1560-8762-47b9-8d1c-fe94eb46bec2\") "
Nov 23 15:04:38 crc kubenswrapper[5050]: I1123 15:04:38.773875 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1b3a1560-8762-47b9-8d1c-fe94eb46bec2-combined-ca-bundle\") pod \"1b3a1560-8762-47b9-8d1c-fe94eb46bec2\" (UID: \"1b3a1560-8762-47b9-8d1c-fe94eb46bec2\") "
Nov 23 15:04:38 crc kubenswrapper[5050]: I1123 15:04:38.773954 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/1b3a1560-8762-47b9-8d1c-fe94eb46bec2-ovndb-tls-certs\") pod \"1b3a1560-8762-47b9-8d1c-fe94eb46bec2\" (UID: \"1b3a1560-8762-47b9-8d1c-fe94eb46bec2\") "
Nov 23 15:04:38 crc kubenswrapper[5050]: I1123 15:04:38.774009 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/1b3a1560-8762-47b9-8d1c-fe94eb46bec2-httpd-config\") pod \"1b3a1560-8762-47b9-8d1c-fe94eb46bec2\" (UID: \"1b3a1560-8762-47b9-8d1c-fe94eb46bec2\") "
Nov 23 15:04:38 crc kubenswrapper[5050]: I1123 15:04:38.783166 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1b3a1560-8762-47b9-8d1c-fe94eb46bec2-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "1b3a1560-8762-47b9-8d1c-fe94eb46bec2" (UID: "1b3a1560-8762-47b9-8d1c-fe94eb46bec2"). InnerVolumeSpecName "httpd-config". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 23 15:04:38 crc kubenswrapper[5050]: I1123 15:04:38.804352 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1b3a1560-8762-47b9-8d1c-fe94eb46bec2-kube-api-access-2q76j" (OuterVolumeSpecName: "kube-api-access-2q76j") pod "1b3a1560-8762-47b9-8d1c-fe94eb46bec2" (UID: "1b3a1560-8762-47b9-8d1c-fe94eb46bec2"). InnerVolumeSpecName "kube-api-access-2q76j". PluginName "kubernetes.io/projected", VolumeGidValue ""
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:04:38 crc kubenswrapper[5050]: I1123 15:04:38.831902 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1b3a1560-8762-47b9-8d1c-fe94eb46bec2-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "1b3a1560-8762-47b9-8d1c-fe94eb46bec2" (UID: "1b3a1560-8762-47b9-8d1c-fe94eb46bec2"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:04:38 crc kubenswrapper[5050]: I1123 15:04:38.832703 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1b3a1560-8762-47b9-8d1c-fe94eb46bec2-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1b3a1560-8762-47b9-8d1c-fe94eb46bec2" (UID: "1b3a1560-8762-47b9-8d1c-fe94eb46bec2"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:04:38 crc kubenswrapper[5050]: I1123 15:04:38.833087 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1b3a1560-8762-47b9-8d1c-fe94eb46bec2-config" (OuterVolumeSpecName: "config") pod "1b3a1560-8762-47b9-8d1c-fe94eb46bec2" (UID: "1b3a1560-8762-47b9-8d1c-fe94eb46bec2"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:04:38 crc kubenswrapper[5050]: I1123 15:04:38.846121 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1b3a1560-8762-47b9-8d1c-fe94eb46bec2-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "1b3a1560-8762-47b9-8d1c-fe94eb46bec2" (UID: "1b3a1560-8762-47b9-8d1c-fe94eb46bec2"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:04:38 crc kubenswrapper[5050]: I1123 15:04:38.867356 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1b3a1560-8762-47b9-8d1c-fe94eb46bec2-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "1b3a1560-8762-47b9-8d1c-fe94eb46bec2" (UID: "1b3a1560-8762-47b9-8d1c-fe94eb46bec2"). InnerVolumeSpecName "ovndb-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:04:38 crc kubenswrapper[5050]: I1123 15:04:38.877047 5050 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/1b3a1560-8762-47b9-8d1c-fe94eb46bec2-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:38 crc kubenswrapper[5050]: I1123 15:04:38.877096 5050 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1b3a1560-8762-47b9-8d1c-fe94eb46bec2-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:38 crc kubenswrapper[5050]: I1123 15:04:38.877110 5050 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/1b3a1560-8762-47b9-8d1c-fe94eb46bec2-ovndb-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:38 crc kubenswrapper[5050]: I1123 15:04:38.877124 5050 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/1b3a1560-8762-47b9-8d1c-fe94eb46bec2-httpd-config\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:38 crc kubenswrapper[5050]: I1123 15:04:38.877137 5050 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/1b3a1560-8762-47b9-8d1c-fe94eb46bec2-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:38 crc kubenswrapper[5050]: I1123 15:04:38.877153 5050 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/1b3a1560-8762-47b9-8d1c-fe94eb46bec2-config\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:38 crc kubenswrapper[5050]: I1123 15:04:38.877166 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2q76j\" (UniqueName: \"kubernetes.io/projected/1b3a1560-8762-47b9-8d1c-fe94eb46bec2-kube-api-access-2q76j\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:39 crc kubenswrapper[5050]: I1123 15:04:39.487330 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7cb7995f89-k8h9t" event={"ID":"1b3a1560-8762-47b9-8d1c-fe94eb46bec2","Type":"ContainerDied","Data":"382a3ec48599cb331a7724a9e7f942e6c4119e74d5e1b0252f6209ec9449b088"} Nov 23 15:04:39 crc kubenswrapper[5050]: I1123 15:04:39.487474 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-7cb7995f89-k8h9t" Nov 23 15:04:39 crc kubenswrapper[5050]: I1123 15:04:39.487497 5050 scope.go:117] "RemoveContainer" containerID="8b5d3ae6f2b8800aa1cfd0b5a953b873c14fdd59195485538c97159fa050f837" Nov 23 15:04:39 crc kubenswrapper[5050]: I1123 15:04:39.547421 5050 scope.go:117] "RemoveContainer" containerID="c8744bf42aa8122e20bb79dbbcfbe437abb2154864233d844f2967643789a84f" Nov 23 15:04:39 crc kubenswrapper[5050]: I1123 15:04:39.570743 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-7cb7995f89-k8h9t"] Nov 23 15:04:39 crc kubenswrapper[5050]: I1123 15:04:39.570820 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-7cb7995f89-k8h9t"] Nov 23 15:04:41 crc kubenswrapper[5050]: I1123 15:04:41.567718 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1b3a1560-8762-47b9-8d1c-fe94eb46bec2" path="/var/lib/kubelet/pods/1b3a1560-8762-47b9-8d1c-fe94eb46bec2/volumes" Nov 23 15:04:41 crc kubenswrapper[5050]: E1123 15:04:41.863055 5050 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1b3d79783367fa4b7fae45d92ec2a13146512723daef532131bce7af8af900e8 is running failed: container process not found" containerID="1b3d79783367fa4b7fae45d92ec2a13146512723daef532131bce7af8af900e8" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 23 15:04:41 crc kubenswrapper[5050]: E1123 15:04:41.864173 5050 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1b3d79783367fa4b7fae45d92ec2a13146512723daef532131bce7af8af900e8 is running failed: container process not found" containerID="1b3d79783367fa4b7fae45d92ec2a13146512723daef532131bce7af8af900e8" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 23 15:04:41 crc kubenswrapper[5050]: E1123 15:04:41.864876 5050 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1b3d79783367fa4b7fae45d92ec2a13146512723daef532131bce7af8af900e8 is running failed: container process not found" containerID="1b3d79783367fa4b7fae45d92ec2a13146512723daef532131bce7af8af900e8" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 23 15:04:41 crc kubenswrapper[5050]: E1123 15:04:41.865059 5050 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1b3d79783367fa4b7fae45d92ec2a13146512723daef532131bce7af8af900e8 is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-n9v8v" podUID="c21366ac-a7da-4cac-8332-753659210595" containerName="ovsdb-server" Nov 23 15:04:41 crc kubenswrapper[5050]: E1123 15:04:41.865566 5050 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="a0820261b299cc33db42946f12f1f4467f892f1598cff4f933ab45def65d8af9" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 23 15:04:41 crc kubenswrapper[5050]: E1123 15:04:41.867437 5050 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" 
containerID="a0820261b299cc33db42946f12f1f4467f892f1598cff4f933ab45def65d8af9" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 23 15:04:41 crc kubenswrapper[5050]: E1123 15:04:41.870278 5050 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="a0820261b299cc33db42946f12f1f4467f892f1598cff4f933ab45def65d8af9" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 23 15:04:41 crc kubenswrapper[5050]: E1123 15:04:41.870327 5050 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-n9v8v" podUID="c21366ac-a7da-4cac-8332-753659210595" containerName="ovs-vswitchd" Nov 23 15:04:44 crc kubenswrapper[5050]: E1123 15:04:44.817254 5050 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Nov 23 15:04:44 crc kubenswrapper[5050]: E1123 15:04:44.817363 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/3d6e4b67-5d33-49cd-8a2a-69c3fc2c9d89-operator-scripts podName:3d6e4b67-5d33-49cd-8a2a-69c3fc2c9d89 nodeName:}" failed. No retries permitted until 2025-11-23 15:05:00.817340507 +0000 UTC m=+1395.984336992 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/3d6e4b67-5d33-49cd-8a2a-69c3fc2c9d89-operator-scripts") pod "novacell03052-account-delete-kz7k7" (UID: "3d6e4b67-5d33-49cd-8a2a-69c3fc2c9d89") : configmap "openstack-scripts" not found Nov 23 15:04:44 crc kubenswrapper[5050]: E1123 15:04:44.817550 5050 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Nov 23 15:04:44 crc kubenswrapper[5050]: E1123 15:04:44.817746 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/1bda1a38-71dd-4de4-8e16-b5159a08fdfa-operator-scripts podName:1bda1a38-71dd-4de4-8e16-b5159a08fdfa nodeName:}" failed. No retries permitted until 2025-11-23 15:05:00.817697147 +0000 UTC m=+1395.984693752 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/1bda1a38-71dd-4de4-8e16-b5159a08fdfa-operator-scripts") pod "novaapif81d-account-delete-xfdwn" (UID: "1bda1a38-71dd-4de4-8e16-b5159a08fdfa") : configmap "openstack-scripts" not found Nov 23 15:04:46 crc kubenswrapper[5050]: E1123 15:04:46.862727 5050 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1b3d79783367fa4b7fae45d92ec2a13146512723daef532131bce7af8af900e8 is running failed: container process not found" containerID="1b3d79783367fa4b7fae45d92ec2a13146512723daef532131bce7af8af900e8" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 23 15:04:46 crc kubenswrapper[5050]: E1123 15:04:46.863392 5050 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1b3d79783367fa4b7fae45d92ec2a13146512723daef532131bce7af8af900e8 is running failed: container process not found" containerID="1b3d79783367fa4b7fae45d92ec2a13146512723daef532131bce7af8af900e8" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 23 15:04:46 crc kubenswrapper[5050]: E1123 15:04:46.863744 5050 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1b3d79783367fa4b7fae45d92ec2a13146512723daef532131bce7af8af900e8 is running failed: container process not found" containerID="1b3d79783367fa4b7fae45d92ec2a13146512723daef532131bce7af8af900e8" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 23 15:04:46 crc kubenswrapper[5050]: E1123 15:04:46.863775 5050 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1b3d79783367fa4b7fae45d92ec2a13146512723daef532131bce7af8af900e8 is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-n9v8v" podUID="c21366ac-a7da-4cac-8332-753659210595" containerName="ovsdb-server" Nov 23 15:04:46 crc kubenswrapper[5050]: E1123 15:04:46.864837 5050 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="a0820261b299cc33db42946f12f1f4467f892f1598cff4f933ab45def65d8af9" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 23 15:04:46 crc kubenswrapper[5050]: E1123 15:04:46.866600 5050 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="a0820261b299cc33db42946f12f1f4467f892f1598cff4f933ab45def65d8af9" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 23 15:04:46 crc kubenswrapper[5050]: E1123 15:04:46.868109 5050 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="a0820261b299cc33db42946f12f1f4467f892f1598cff4f933ab45def65d8af9" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 23 15:04:46 crc kubenswrapper[5050]: E1123 15:04:46.868150 5050 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: 
container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-n9v8v" podUID="c21366ac-a7da-4cac-8332-753659210595" containerName="ovs-vswitchd" Nov 23 15:04:51 crc kubenswrapper[5050]: E1123 15:04:51.864224 5050 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1b3d79783367fa4b7fae45d92ec2a13146512723daef532131bce7af8af900e8 is running failed: container process not found" containerID="1b3d79783367fa4b7fae45d92ec2a13146512723daef532131bce7af8af900e8" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 23 15:04:51 crc kubenswrapper[5050]: E1123 15:04:51.865674 5050 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1b3d79783367fa4b7fae45d92ec2a13146512723daef532131bce7af8af900e8 is running failed: container process not found" containerID="1b3d79783367fa4b7fae45d92ec2a13146512723daef532131bce7af8af900e8" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 23 15:04:51 crc kubenswrapper[5050]: E1123 15:04:51.865905 5050 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="a0820261b299cc33db42946f12f1f4467f892f1598cff4f933ab45def65d8af9" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 23 15:04:51 crc kubenswrapper[5050]: E1123 15:04:51.866355 5050 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1b3d79783367fa4b7fae45d92ec2a13146512723daef532131bce7af8af900e8 is running failed: container process not found" containerID="1b3d79783367fa4b7fae45d92ec2a13146512723daef532131bce7af8af900e8" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 23 15:04:51 crc kubenswrapper[5050]: E1123 15:04:51.866407 5050 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1b3d79783367fa4b7fae45d92ec2a13146512723daef532131bce7af8af900e8 is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-n9v8v" podUID="c21366ac-a7da-4cac-8332-753659210595" containerName="ovsdb-server" Nov 23 15:04:51 crc kubenswrapper[5050]: E1123 15:04:51.868506 5050 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="a0820261b299cc33db42946f12f1f4467f892f1598cff4f933ab45def65d8af9" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 23 15:04:51 crc kubenswrapper[5050]: E1123 15:04:51.870255 5050 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="a0820261b299cc33db42946f12f1f4467f892f1598cff4f933ab45def65d8af9" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 23 15:04:51 crc kubenswrapper[5050]: E1123 15:04:51.870329 5050 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" 
pod="openstack/ovn-controller-ovs-n9v8v" podUID="c21366ac-a7da-4cac-8332-753659210595" containerName="ovs-vswitchd" Nov 23 15:04:52 crc kubenswrapper[5050]: I1123 15:04:52.647530 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-n9v8v_c21366ac-a7da-4cac-8332-753659210595/ovs-vswitchd/0.log" Nov 23 15:04:52 crc kubenswrapper[5050]: I1123 15:04:52.649046 5050 generic.go:334] "Generic (PLEG): container finished" podID="c21366ac-a7da-4cac-8332-753659210595" containerID="a0820261b299cc33db42946f12f1f4467f892f1598cff4f933ab45def65d8af9" exitCode=137 Nov 23 15:04:52 crc kubenswrapper[5050]: I1123 15:04:52.649110 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-n9v8v" event={"ID":"c21366ac-a7da-4cac-8332-753659210595","Type":"ContainerDied","Data":"a0820261b299cc33db42946f12f1f4467f892f1598cff4f933ab45def65d8af9"} Nov 23 15:04:53 crc kubenswrapper[5050]: I1123 15:04:53.101802 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-n9v8v_c21366ac-a7da-4cac-8332-753659210595/ovs-vswitchd/0.log" Nov 23 15:04:53 crc kubenswrapper[5050]: I1123 15:04:53.103723 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-n9v8v" Nov 23 15:04:53 crc kubenswrapper[5050]: I1123 15:04:53.178663 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/c21366ac-a7da-4cac-8332-753659210595-etc-ovs\") pod \"c21366ac-a7da-4cac-8332-753659210595\" (UID: \"c21366ac-a7da-4cac-8332-753659210595\") " Nov 23 15:04:53 crc kubenswrapper[5050]: I1123 15:04:53.178754 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b7cr7\" (UniqueName: \"kubernetes.io/projected/c21366ac-a7da-4cac-8332-753659210595-kube-api-access-b7cr7\") pod \"c21366ac-a7da-4cac-8332-753659210595\" (UID: \"c21366ac-a7da-4cac-8332-753659210595\") " Nov 23 15:04:53 crc kubenswrapper[5050]: I1123 15:04:53.178888 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c21366ac-a7da-4cac-8332-753659210595-scripts\") pod \"c21366ac-a7da-4cac-8332-753659210595\" (UID: \"c21366ac-a7da-4cac-8332-753659210595\") " Nov 23 15:04:53 crc kubenswrapper[5050]: I1123 15:04:53.178872 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c21366ac-a7da-4cac-8332-753659210595-etc-ovs" (OuterVolumeSpecName: "etc-ovs") pod "c21366ac-a7da-4cac-8332-753659210595" (UID: "c21366ac-a7da-4cac-8332-753659210595"). InnerVolumeSpecName "etc-ovs". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 23 15:04:53 crc kubenswrapper[5050]: I1123 15:04:53.178959 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/c21366ac-a7da-4cac-8332-753659210595-var-run\") pod \"c21366ac-a7da-4cac-8332-753659210595\" (UID: \"c21366ac-a7da-4cac-8332-753659210595\") " Nov 23 15:04:53 crc kubenswrapper[5050]: I1123 15:04:53.179041 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/c21366ac-a7da-4cac-8332-753659210595-var-lib\") pod \"c21366ac-a7da-4cac-8332-753659210595\" (UID: \"c21366ac-a7da-4cac-8332-753659210595\") " Nov 23 15:04:53 crc kubenswrapper[5050]: I1123 15:04:53.179117 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/c21366ac-a7da-4cac-8332-753659210595-var-log\") pod \"c21366ac-a7da-4cac-8332-753659210595\" (UID: \"c21366ac-a7da-4cac-8332-753659210595\") " Nov 23 15:04:53 crc kubenswrapper[5050]: I1123 15:04:53.179685 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c21366ac-a7da-4cac-8332-753659210595-var-run" (OuterVolumeSpecName: "var-run") pod "c21366ac-a7da-4cac-8332-753659210595" (UID: "c21366ac-a7da-4cac-8332-753659210595"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 23 15:04:53 crc kubenswrapper[5050]: I1123 15:04:53.179750 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c21366ac-a7da-4cac-8332-753659210595-var-lib" (OuterVolumeSpecName: "var-lib") pod "c21366ac-a7da-4cac-8332-753659210595" (UID: "c21366ac-a7da-4cac-8332-753659210595"). InnerVolumeSpecName "var-lib". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 23 15:04:53 crc kubenswrapper[5050]: I1123 15:04:53.179858 5050 reconciler_common.go:293] "Volume detached for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/c21366ac-a7da-4cac-8332-753659210595-etc-ovs\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:53 crc kubenswrapper[5050]: I1123 15:04:53.179850 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c21366ac-a7da-4cac-8332-753659210595-var-log" (OuterVolumeSpecName: "var-log") pod "c21366ac-a7da-4cac-8332-753659210595" (UID: "c21366ac-a7da-4cac-8332-753659210595"). InnerVolumeSpecName "var-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 23 15:04:53 crc kubenswrapper[5050]: I1123 15:04:53.180794 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c21366ac-a7da-4cac-8332-753659210595-scripts" (OuterVolumeSpecName: "scripts") pod "c21366ac-a7da-4cac-8332-753659210595" (UID: "c21366ac-a7da-4cac-8332-753659210595"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 15:04:53 crc kubenswrapper[5050]: I1123 15:04:53.186734 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c21366ac-a7da-4cac-8332-753659210595-kube-api-access-b7cr7" (OuterVolumeSpecName: "kube-api-access-b7cr7") pod "c21366ac-a7da-4cac-8332-753659210595" (UID: "c21366ac-a7da-4cac-8332-753659210595"). InnerVolumeSpecName "kube-api-access-b7cr7". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:04:53 crc kubenswrapper[5050]: I1123 15:04:53.281497 5050 reconciler_common.go:293] "Volume detached for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/c21366ac-a7da-4cac-8332-753659210595-var-log\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:53 crc kubenswrapper[5050]: I1123 15:04:53.281563 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b7cr7\" (UniqueName: \"kubernetes.io/projected/c21366ac-a7da-4cac-8332-753659210595-kube-api-access-b7cr7\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:53 crc kubenswrapper[5050]: I1123 15:04:53.281579 5050 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c21366ac-a7da-4cac-8332-753659210595-scripts\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:53 crc kubenswrapper[5050]: I1123 15:04:53.281592 5050 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/c21366ac-a7da-4cac-8332-753659210595-var-run\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:53 crc kubenswrapper[5050]: I1123 15:04:53.281603 5050 reconciler_common.go:293] "Volume detached for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/c21366ac-a7da-4cac-8332-753659210595-var-lib\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:53 crc kubenswrapper[5050]: I1123 15:04:53.510028 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-storage-0" Nov 23 15:04:53 crc kubenswrapper[5050]: I1123 15:04:53.600968 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/4947b94e-805e-457e-bb17-f7faea3b5fca-cache\") pod \"4947b94e-805e-457e-bb17-f7faea3b5fca\" (UID: \"4947b94e-805e-457e-bb17-f7faea3b5fca\") " Nov 23 15:04:53 crc kubenswrapper[5050]: I1123 15:04:53.601361 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/4947b94e-805e-457e-bb17-f7faea3b5fca-lock\") pod \"4947b94e-805e-457e-bb17-f7faea3b5fca\" (UID: \"4947b94e-805e-457e-bb17-f7faea3b5fca\") " Nov 23 15:04:53 crc kubenswrapper[5050]: I1123 15:04:53.601561 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/4947b94e-805e-457e-bb17-f7faea3b5fca-etc-swift\") pod \"4947b94e-805e-457e-bb17-f7faea3b5fca\" (UID: \"4947b94e-805e-457e-bb17-f7faea3b5fca\") " Nov 23 15:04:53 crc kubenswrapper[5050]: I1123 15:04:53.601715 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4947b94e-805e-457e-bb17-f7faea3b5fca-cache" (OuterVolumeSpecName: "cache") pod "4947b94e-805e-457e-bb17-f7faea3b5fca" (UID: "4947b94e-805e-457e-bb17-f7faea3b5fca"). InnerVolumeSpecName "cache". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 15:04:53 crc kubenswrapper[5050]: I1123 15:04:53.601746 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swift\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"4947b94e-805e-457e-bb17-f7faea3b5fca\" (UID: \"4947b94e-805e-457e-bb17-f7faea3b5fca\") " Nov 23 15:04:53 crc kubenswrapper[5050]: I1123 15:04:53.602068 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nmcsm\" (UniqueName: \"kubernetes.io/projected/4947b94e-805e-457e-bb17-f7faea3b5fca-kube-api-access-nmcsm\") pod \"4947b94e-805e-457e-bb17-f7faea3b5fca\" (UID: \"4947b94e-805e-457e-bb17-f7faea3b5fca\") " Nov 23 15:04:53 crc kubenswrapper[5050]: I1123 15:04:53.602719 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4947b94e-805e-457e-bb17-f7faea3b5fca-lock" (OuterVolumeSpecName: "lock") pod "4947b94e-805e-457e-bb17-f7faea3b5fca" (UID: "4947b94e-805e-457e-bb17-f7faea3b5fca"). InnerVolumeSpecName "lock". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 15:04:53 crc kubenswrapper[5050]: I1123 15:04:53.603005 5050 reconciler_common.go:293] "Volume detached for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/4947b94e-805e-457e-bb17-f7faea3b5fca-cache\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:53 crc kubenswrapper[5050]: I1123 15:04:53.603133 5050 reconciler_common.go:293] "Volume detached for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/4947b94e-805e-457e-bb17-f7faea3b5fca-lock\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:53 crc kubenswrapper[5050]: I1123 15:04:53.608167 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage03-crc" (OuterVolumeSpecName: "swift") pod "4947b94e-805e-457e-bb17-f7faea3b5fca" (UID: "4947b94e-805e-457e-bb17-f7faea3b5fca"). InnerVolumeSpecName "local-storage03-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 23 15:04:53 crc kubenswrapper[5050]: I1123 15:04:53.627341 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4947b94e-805e-457e-bb17-f7faea3b5fca-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "4947b94e-805e-457e-bb17-f7faea3b5fca" (UID: "4947b94e-805e-457e-bb17-f7faea3b5fca"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:04:53 crc kubenswrapper[5050]: I1123 15:04:53.644471 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4947b94e-805e-457e-bb17-f7faea3b5fca-kube-api-access-nmcsm" (OuterVolumeSpecName: "kube-api-access-nmcsm") pod "4947b94e-805e-457e-bb17-f7faea3b5fca" (UID: "4947b94e-805e-457e-bb17-f7faea3b5fca"). InnerVolumeSpecName "kube-api-access-nmcsm". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:04:53 crc kubenswrapper[5050]: I1123 15:04:53.670859 5050 generic.go:334] "Generic (PLEG): container finished" podID="4947b94e-805e-457e-bb17-f7faea3b5fca" containerID="61d7218bc7db3ee0defb5601931d237c30251b28aeaf13b1fc22ffcbd83b4e84" exitCode=137 Nov 23 15:04:53 crc kubenswrapper[5050]: I1123 15:04:53.670945 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"4947b94e-805e-457e-bb17-f7faea3b5fca","Type":"ContainerDied","Data":"61d7218bc7db3ee0defb5601931d237c30251b28aeaf13b1fc22ffcbd83b4e84"} Nov 23 15:04:53 crc kubenswrapper[5050]: I1123 15:04:53.670987 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"4947b94e-805e-457e-bb17-f7faea3b5fca","Type":"ContainerDied","Data":"fa7fbbc9ee7bf5c539752bbd4d457f5b3344a2a87f7c2acaa651ed614e052891"} Nov 23 15:04:53 crc kubenswrapper[5050]: I1123 15:04:53.671008 5050 scope.go:117] "RemoveContainer" containerID="61d7218bc7db3ee0defb5601931d237c30251b28aeaf13b1fc22ffcbd83b4e84" Nov 23 15:04:53 crc kubenswrapper[5050]: I1123 15:04:53.671737 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-storage-0" Nov 23 15:04:53 crc kubenswrapper[5050]: I1123 15:04:53.674111 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-n9v8v_c21366ac-a7da-4cac-8332-753659210595/ovs-vswitchd/0.log" Nov 23 15:04:53 crc kubenswrapper[5050]: I1123 15:04:53.674691 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-n9v8v" event={"ID":"c21366ac-a7da-4cac-8332-753659210595","Type":"ContainerDied","Data":"4719503c5841ca5b62f9ea359a8f6b09eedc13f56f362e1c8530c0d6c42b7a46"} Nov 23 15:04:53 crc kubenswrapper[5050]: I1123 15:04:53.674775 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-ovs-n9v8v" Nov 23 15:04:53 crc kubenswrapper[5050]: I1123 15:04:53.711982 5050 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/4947b94e-805e-457e-bb17-f7faea3b5fca-etc-swift\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:53 crc kubenswrapper[5050]: I1123 15:04:53.712057 5050 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") on node \"crc\" " Nov 23 15:04:53 crc kubenswrapper[5050]: I1123 15:04:53.712080 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nmcsm\" (UniqueName: \"kubernetes.io/projected/4947b94e-805e-457e-bb17-f7faea3b5fca-kube-api-access-nmcsm\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:53 crc kubenswrapper[5050]: I1123 15:04:53.717411 5050 scope.go:117] "RemoveContainer" containerID="b22acf79827fd3ac0a34b514be2b00ed37d6b8a1dac44f85032fc97fb4bd5540" Nov 23 15:04:53 crc kubenswrapper[5050]: I1123 15:04:53.723827 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-ovs-n9v8v"] Nov 23 15:04:53 crc kubenswrapper[5050]: I1123 15:04:53.736339 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-ovs-n9v8v"] Nov 23 15:04:53 crc kubenswrapper[5050]: I1123 15:04:53.742272 5050 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage03-crc" (UniqueName: "kubernetes.io/local-volume/local-storage03-crc") on node "crc" Nov 23 15:04:53 crc kubenswrapper[5050]: I1123 15:04:53.747404 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-storage-0"] Nov 23 15:04:53 crc kubenswrapper[5050]: I1123 15:04:53.754254 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/swift-storage-0"] Nov 23 15:04:53 crc kubenswrapper[5050]: I1123 15:04:53.755959 5050 scope.go:117] "RemoveContainer" containerID="0e36c69aa375c539b26f58a19aaffeb696f806ace3d6046abb778218a238b336" Nov 23 15:04:53 crc kubenswrapper[5050]: I1123 15:04:53.781186 5050 scope.go:117] "RemoveContainer" containerID="e0f8af78c1a802c881b2c04585b63daab17211922673261ca7523f7d84b8e187" Nov 23 15:04:53 crc kubenswrapper[5050]: I1123 15:04:53.808993 5050 scope.go:117] "RemoveContainer" containerID="8dae616c0d8bc5a23b5cfb9e119b706796daaa7ba5419e533d3fb7176e7bf222" Nov 23 15:04:53 crc kubenswrapper[5050]: I1123 15:04:53.813967 5050 reconciler_common.go:293] "Volume detached for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:53 crc kubenswrapper[5050]: I1123 15:04:53.833982 5050 scope.go:117] "RemoveContainer" containerID="1fcdee9bec472e24e3ee2fa6e158b77914f69e25bb807a1be9ca536d728a1ddf" Nov 23 15:04:53 crc kubenswrapper[5050]: I1123 15:04:53.863183 5050 scope.go:117] "RemoveContainer" containerID="53cc065da29b9c9511ae76a606c6be3934715316c98087e1bceb4aedfb391ba5" Nov 23 15:04:53 crc kubenswrapper[5050]: I1123 15:04:53.891466 5050 scope.go:117] "RemoveContainer" containerID="f9d2cfcc4ee11e5c2631dbc7f3cfcfc91553670b329dfc79c70030ffc3518f25" Nov 23 15:04:53 crc kubenswrapper[5050]: I1123 15:04:53.925781 5050 scope.go:117] "RemoveContainer" containerID="9d6c10414c5bea42690cbd8da8cba7bb0cf669191242a30c4e88b58e7464d151" Nov 23 15:04:53 crc kubenswrapper[5050]: I1123 15:04:53.979091 5050 scope.go:117] "RemoveContainer" 
containerID="b4f43cb835f2029a79a141c233333ef0e2ea5ee6f6d05bd0c246b7323bb38310" Nov 23 15:04:54 crc kubenswrapper[5050]: I1123 15:04:54.011952 5050 scope.go:117] "RemoveContainer" containerID="f9e7a939140c48c7a80ae5b9b609b9b098e6c60e33ce09927c76177950647981" Nov 23 15:04:54 crc kubenswrapper[5050]: I1123 15:04:54.041172 5050 scope.go:117] "RemoveContainer" containerID="1cec0ebe1304d052bb25cc3f36043c71b0777d6f8cbf3b9d8a22f1e057b1fddb" Nov 23 15:04:54 crc kubenswrapper[5050]: I1123 15:04:54.072506 5050 scope.go:117] "RemoveContainer" containerID="9a3ab1117cae597494105d7c65afeef7161729ed4e56b6205f5b1b5b9c2c0980" Nov 23 15:04:54 crc kubenswrapper[5050]: I1123 15:04:54.107561 5050 scope.go:117] "RemoveContainer" containerID="4211198ac477c9fe45e3ef0bd5dffc47dd9911807782e0d46558842b2d2b48fd" Nov 23 15:04:54 crc kubenswrapper[5050]: I1123 15:04:54.132401 5050 scope.go:117] "RemoveContainer" containerID="0001c3500cc85fc0a925e13354d27829e719cc2b2c22f8ba291e2016b3e3b365" Nov 23 15:04:54 crc kubenswrapper[5050]: I1123 15:04:54.159632 5050 scope.go:117] "RemoveContainer" containerID="61d7218bc7db3ee0defb5601931d237c30251b28aeaf13b1fc22ffcbd83b4e84" Nov 23 15:04:54 crc kubenswrapper[5050]: E1123 15:04:54.161033 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"61d7218bc7db3ee0defb5601931d237c30251b28aeaf13b1fc22ffcbd83b4e84\": container with ID starting with 61d7218bc7db3ee0defb5601931d237c30251b28aeaf13b1fc22ffcbd83b4e84 not found: ID does not exist" containerID="61d7218bc7db3ee0defb5601931d237c30251b28aeaf13b1fc22ffcbd83b4e84" Nov 23 15:04:54 crc kubenswrapper[5050]: I1123 15:04:54.161095 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"61d7218bc7db3ee0defb5601931d237c30251b28aeaf13b1fc22ffcbd83b4e84"} err="failed to get container status \"61d7218bc7db3ee0defb5601931d237c30251b28aeaf13b1fc22ffcbd83b4e84\": rpc error: code = NotFound desc = could not find container \"61d7218bc7db3ee0defb5601931d237c30251b28aeaf13b1fc22ffcbd83b4e84\": container with ID starting with 61d7218bc7db3ee0defb5601931d237c30251b28aeaf13b1fc22ffcbd83b4e84 not found: ID does not exist" Nov 23 15:04:54 crc kubenswrapper[5050]: I1123 15:04:54.161143 5050 scope.go:117] "RemoveContainer" containerID="b22acf79827fd3ac0a34b514be2b00ed37d6b8a1dac44f85032fc97fb4bd5540" Nov 23 15:04:54 crc kubenswrapper[5050]: E1123 15:04:54.162339 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b22acf79827fd3ac0a34b514be2b00ed37d6b8a1dac44f85032fc97fb4bd5540\": container with ID starting with b22acf79827fd3ac0a34b514be2b00ed37d6b8a1dac44f85032fc97fb4bd5540 not found: ID does not exist" containerID="b22acf79827fd3ac0a34b514be2b00ed37d6b8a1dac44f85032fc97fb4bd5540" Nov 23 15:04:54 crc kubenswrapper[5050]: I1123 15:04:54.162371 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b22acf79827fd3ac0a34b514be2b00ed37d6b8a1dac44f85032fc97fb4bd5540"} err="failed to get container status \"b22acf79827fd3ac0a34b514be2b00ed37d6b8a1dac44f85032fc97fb4bd5540\": rpc error: code = NotFound desc = could not find container \"b22acf79827fd3ac0a34b514be2b00ed37d6b8a1dac44f85032fc97fb4bd5540\": container with ID starting with b22acf79827fd3ac0a34b514be2b00ed37d6b8a1dac44f85032fc97fb4bd5540 not found: ID does not exist" Nov 23 15:04:54 crc kubenswrapper[5050]: I1123 15:04:54.162397 5050 scope.go:117] "RemoveContainer" 
containerID="0e36c69aa375c539b26f58a19aaffeb696f806ace3d6046abb778218a238b336" Nov 23 15:04:54 crc kubenswrapper[5050]: E1123 15:04:54.162784 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0e36c69aa375c539b26f58a19aaffeb696f806ace3d6046abb778218a238b336\": container with ID starting with 0e36c69aa375c539b26f58a19aaffeb696f806ace3d6046abb778218a238b336 not found: ID does not exist" containerID="0e36c69aa375c539b26f58a19aaffeb696f806ace3d6046abb778218a238b336" Nov 23 15:04:54 crc kubenswrapper[5050]: I1123 15:04:54.162847 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0e36c69aa375c539b26f58a19aaffeb696f806ace3d6046abb778218a238b336"} err="failed to get container status \"0e36c69aa375c539b26f58a19aaffeb696f806ace3d6046abb778218a238b336\": rpc error: code = NotFound desc = could not find container \"0e36c69aa375c539b26f58a19aaffeb696f806ace3d6046abb778218a238b336\": container with ID starting with 0e36c69aa375c539b26f58a19aaffeb696f806ace3d6046abb778218a238b336 not found: ID does not exist" Nov 23 15:04:54 crc kubenswrapper[5050]: I1123 15:04:54.163019 5050 scope.go:117] "RemoveContainer" containerID="e0f8af78c1a802c881b2c04585b63daab17211922673261ca7523f7d84b8e187" Nov 23 15:04:54 crc kubenswrapper[5050]: E1123 15:04:54.163498 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e0f8af78c1a802c881b2c04585b63daab17211922673261ca7523f7d84b8e187\": container with ID starting with e0f8af78c1a802c881b2c04585b63daab17211922673261ca7523f7d84b8e187 not found: ID does not exist" containerID="e0f8af78c1a802c881b2c04585b63daab17211922673261ca7523f7d84b8e187" Nov 23 15:04:54 crc kubenswrapper[5050]: I1123 15:04:54.163537 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e0f8af78c1a802c881b2c04585b63daab17211922673261ca7523f7d84b8e187"} err="failed to get container status \"e0f8af78c1a802c881b2c04585b63daab17211922673261ca7523f7d84b8e187\": rpc error: code = NotFound desc = could not find container \"e0f8af78c1a802c881b2c04585b63daab17211922673261ca7523f7d84b8e187\": container with ID starting with e0f8af78c1a802c881b2c04585b63daab17211922673261ca7523f7d84b8e187 not found: ID does not exist" Nov 23 15:04:54 crc kubenswrapper[5050]: I1123 15:04:54.163563 5050 scope.go:117] "RemoveContainer" containerID="8dae616c0d8bc5a23b5cfb9e119b706796daaa7ba5419e533d3fb7176e7bf222" Nov 23 15:04:54 crc kubenswrapper[5050]: E1123 15:04:54.163902 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8dae616c0d8bc5a23b5cfb9e119b706796daaa7ba5419e533d3fb7176e7bf222\": container with ID starting with 8dae616c0d8bc5a23b5cfb9e119b706796daaa7ba5419e533d3fb7176e7bf222 not found: ID does not exist" containerID="8dae616c0d8bc5a23b5cfb9e119b706796daaa7ba5419e533d3fb7176e7bf222" Nov 23 15:04:54 crc kubenswrapper[5050]: I1123 15:04:54.163936 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8dae616c0d8bc5a23b5cfb9e119b706796daaa7ba5419e533d3fb7176e7bf222"} err="failed to get container status \"8dae616c0d8bc5a23b5cfb9e119b706796daaa7ba5419e533d3fb7176e7bf222\": rpc error: code = NotFound desc = could not find container \"8dae616c0d8bc5a23b5cfb9e119b706796daaa7ba5419e533d3fb7176e7bf222\": container with ID starting with 
8dae616c0d8bc5a23b5cfb9e119b706796daaa7ba5419e533d3fb7176e7bf222 not found: ID does not exist" Nov 23 15:04:54 crc kubenswrapper[5050]: I1123 15:04:54.163962 5050 scope.go:117] "RemoveContainer" containerID="1fcdee9bec472e24e3ee2fa6e158b77914f69e25bb807a1be9ca536d728a1ddf" Nov 23 15:04:54 crc kubenswrapper[5050]: E1123 15:04:54.164386 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1fcdee9bec472e24e3ee2fa6e158b77914f69e25bb807a1be9ca536d728a1ddf\": container with ID starting with 1fcdee9bec472e24e3ee2fa6e158b77914f69e25bb807a1be9ca536d728a1ddf not found: ID does not exist" containerID="1fcdee9bec472e24e3ee2fa6e158b77914f69e25bb807a1be9ca536d728a1ddf" Nov 23 15:04:54 crc kubenswrapper[5050]: I1123 15:04:54.164417 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1fcdee9bec472e24e3ee2fa6e158b77914f69e25bb807a1be9ca536d728a1ddf"} err="failed to get container status \"1fcdee9bec472e24e3ee2fa6e158b77914f69e25bb807a1be9ca536d728a1ddf\": rpc error: code = NotFound desc = could not find container \"1fcdee9bec472e24e3ee2fa6e158b77914f69e25bb807a1be9ca536d728a1ddf\": container with ID starting with 1fcdee9bec472e24e3ee2fa6e158b77914f69e25bb807a1be9ca536d728a1ddf not found: ID does not exist" Nov 23 15:04:54 crc kubenswrapper[5050]: I1123 15:04:54.164440 5050 scope.go:117] "RemoveContainer" containerID="53cc065da29b9c9511ae76a606c6be3934715316c98087e1bceb4aedfb391ba5" Nov 23 15:04:54 crc kubenswrapper[5050]: E1123 15:04:54.164828 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"53cc065da29b9c9511ae76a606c6be3934715316c98087e1bceb4aedfb391ba5\": container with ID starting with 53cc065da29b9c9511ae76a606c6be3934715316c98087e1bceb4aedfb391ba5 not found: ID does not exist" containerID="53cc065da29b9c9511ae76a606c6be3934715316c98087e1bceb4aedfb391ba5" Nov 23 15:04:54 crc kubenswrapper[5050]: I1123 15:04:54.164861 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"53cc065da29b9c9511ae76a606c6be3934715316c98087e1bceb4aedfb391ba5"} err="failed to get container status \"53cc065da29b9c9511ae76a606c6be3934715316c98087e1bceb4aedfb391ba5\": rpc error: code = NotFound desc = could not find container \"53cc065da29b9c9511ae76a606c6be3934715316c98087e1bceb4aedfb391ba5\": container with ID starting with 53cc065da29b9c9511ae76a606c6be3934715316c98087e1bceb4aedfb391ba5 not found: ID does not exist" Nov 23 15:04:54 crc kubenswrapper[5050]: I1123 15:04:54.164890 5050 scope.go:117] "RemoveContainer" containerID="f9d2cfcc4ee11e5c2631dbc7f3cfcfc91553670b329dfc79c70030ffc3518f25" Nov 23 15:04:54 crc kubenswrapper[5050]: E1123 15:04:54.165307 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f9d2cfcc4ee11e5c2631dbc7f3cfcfc91553670b329dfc79c70030ffc3518f25\": container with ID starting with f9d2cfcc4ee11e5c2631dbc7f3cfcfc91553670b329dfc79c70030ffc3518f25 not found: ID does not exist" containerID="f9d2cfcc4ee11e5c2631dbc7f3cfcfc91553670b329dfc79c70030ffc3518f25" Nov 23 15:04:54 crc kubenswrapper[5050]: I1123 15:04:54.165338 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f9d2cfcc4ee11e5c2631dbc7f3cfcfc91553670b329dfc79c70030ffc3518f25"} err="failed to get container status \"f9d2cfcc4ee11e5c2631dbc7f3cfcfc91553670b329dfc79c70030ffc3518f25\": rpc 
error: code = NotFound desc = could not find container \"f9d2cfcc4ee11e5c2631dbc7f3cfcfc91553670b329dfc79c70030ffc3518f25\": container with ID starting with f9d2cfcc4ee11e5c2631dbc7f3cfcfc91553670b329dfc79c70030ffc3518f25 not found: ID does not exist" Nov 23 15:04:54 crc kubenswrapper[5050]: I1123 15:04:54.165363 5050 scope.go:117] "RemoveContainer" containerID="9d6c10414c5bea42690cbd8da8cba7bb0cf669191242a30c4e88b58e7464d151" Nov 23 15:04:54 crc kubenswrapper[5050]: E1123 15:04:54.165751 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9d6c10414c5bea42690cbd8da8cba7bb0cf669191242a30c4e88b58e7464d151\": container with ID starting with 9d6c10414c5bea42690cbd8da8cba7bb0cf669191242a30c4e88b58e7464d151 not found: ID does not exist" containerID="9d6c10414c5bea42690cbd8da8cba7bb0cf669191242a30c4e88b58e7464d151" Nov 23 15:04:54 crc kubenswrapper[5050]: I1123 15:04:54.165843 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9d6c10414c5bea42690cbd8da8cba7bb0cf669191242a30c4e88b58e7464d151"} err="failed to get container status \"9d6c10414c5bea42690cbd8da8cba7bb0cf669191242a30c4e88b58e7464d151\": rpc error: code = NotFound desc = could not find container \"9d6c10414c5bea42690cbd8da8cba7bb0cf669191242a30c4e88b58e7464d151\": container with ID starting with 9d6c10414c5bea42690cbd8da8cba7bb0cf669191242a30c4e88b58e7464d151 not found: ID does not exist" Nov 23 15:04:54 crc kubenswrapper[5050]: I1123 15:04:54.165908 5050 scope.go:117] "RemoveContainer" containerID="b4f43cb835f2029a79a141c233333ef0e2ea5ee6f6d05bd0c246b7323bb38310" Nov 23 15:04:54 crc kubenswrapper[5050]: E1123 15:04:54.166440 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b4f43cb835f2029a79a141c233333ef0e2ea5ee6f6d05bd0c246b7323bb38310\": container with ID starting with b4f43cb835f2029a79a141c233333ef0e2ea5ee6f6d05bd0c246b7323bb38310 not found: ID does not exist" containerID="b4f43cb835f2029a79a141c233333ef0e2ea5ee6f6d05bd0c246b7323bb38310" Nov 23 15:04:54 crc kubenswrapper[5050]: I1123 15:04:54.166516 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b4f43cb835f2029a79a141c233333ef0e2ea5ee6f6d05bd0c246b7323bb38310"} err="failed to get container status \"b4f43cb835f2029a79a141c233333ef0e2ea5ee6f6d05bd0c246b7323bb38310\": rpc error: code = NotFound desc = could not find container \"b4f43cb835f2029a79a141c233333ef0e2ea5ee6f6d05bd0c246b7323bb38310\": container with ID starting with b4f43cb835f2029a79a141c233333ef0e2ea5ee6f6d05bd0c246b7323bb38310 not found: ID does not exist" Nov 23 15:04:54 crc kubenswrapper[5050]: I1123 15:04:54.166545 5050 scope.go:117] "RemoveContainer" containerID="f9e7a939140c48c7a80ae5b9b609b9b098e6c60e33ce09927c76177950647981" Nov 23 15:04:54 crc kubenswrapper[5050]: E1123 15:04:54.166863 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f9e7a939140c48c7a80ae5b9b609b9b098e6c60e33ce09927c76177950647981\": container with ID starting with f9e7a939140c48c7a80ae5b9b609b9b098e6c60e33ce09927c76177950647981 not found: ID does not exist" containerID="f9e7a939140c48c7a80ae5b9b609b9b098e6c60e33ce09927c76177950647981" Nov 23 15:04:54 crc kubenswrapper[5050]: I1123 15:04:54.166890 5050 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"f9e7a939140c48c7a80ae5b9b609b9b098e6c60e33ce09927c76177950647981"} err="failed to get container status \"f9e7a939140c48c7a80ae5b9b609b9b098e6c60e33ce09927c76177950647981\": rpc error: code = NotFound desc = could not find container \"f9e7a939140c48c7a80ae5b9b609b9b098e6c60e33ce09927c76177950647981\": container with ID starting with f9e7a939140c48c7a80ae5b9b609b9b098e6c60e33ce09927c76177950647981 not found: ID does not exist" Nov 23 15:04:54 crc kubenswrapper[5050]: I1123 15:04:54.166906 5050 scope.go:117] "RemoveContainer" containerID="1cec0ebe1304d052bb25cc3f36043c71b0777d6f8cbf3b9d8a22f1e057b1fddb" Nov 23 15:04:54 crc kubenswrapper[5050]: E1123 15:04:54.167241 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1cec0ebe1304d052bb25cc3f36043c71b0777d6f8cbf3b9d8a22f1e057b1fddb\": container with ID starting with 1cec0ebe1304d052bb25cc3f36043c71b0777d6f8cbf3b9d8a22f1e057b1fddb not found: ID does not exist" containerID="1cec0ebe1304d052bb25cc3f36043c71b0777d6f8cbf3b9d8a22f1e057b1fddb" Nov 23 15:04:54 crc kubenswrapper[5050]: I1123 15:04:54.167262 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1cec0ebe1304d052bb25cc3f36043c71b0777d6f8cbf3b9d8a22f1e057b1fddb"} err="failed to get container status \"1cec0ebe1304d052bb25cc3f36043c71b0777d6f8cbf3b9d8a22f1e057b1fddb\": rpc error: code = NotFound desc = could not find container \"1cec0ebe1304d052bb25cc3f36043c71b0777d6f8cbf3b9d8a22f1e057b1fddb\": container with ID starting with 1cec0ebe1304d052bb25cc3f36043c71b0777d6f8cbf3b9d8a22f1e057b1fddb not found: ID does not exist" Nov 23 15:04:54 crc kubenswrapper[5050]: I1123 15:04:54.167279 5050 scope.go:117] "RemoveContainer" containerID="9a3ab1117cae597494105d7c65afeef7161729ed4e56b6205f5b1b5b9c2c0980" Nov 23 15:04:54 crc kubenswrapper[5050]: E1123 15:04:54.167552 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9a3ab1117cae597494105d7c65afeef7161729ed4e56b6205f5b1b5b9c2c0980\": container with ID starting with 9a3ab1117cae597494105d7c65afeef7161729ed4e56b6205f5b1b5b9c2c0980 not found: ID does not exist" containerID="9a3ab1117cae597494105d7c65afeef7161729ed4e56b6205f5b1b5b9c2c0980" Nov 23 15:04:54 crc kubenswrapper[5050]: I1123 15:04:54.167574 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9a3ab1117cae597494105d7c65afeef7161729ed4e56b6205f5b1b5b9c2c0980"} err="failed to get container status \"9a3ab1117cae597494105d7c65afeef7161729ed4e56b6205f5b1b5b9c2c0980\": rpc error: code = NotFound desc = could not find container \"9a3ab1117cae597494105d7c65afeef7161729ed4e56b6205f5b1b5b9c2c0980\": container with ID starting with 9a3ab1117cae597494105d7c65afeef7161729ed4e56b6205f5b1b5b9c2c0980 not found: ID does not exist" Nov 23 15:04:54 crc kubenswrapper[5050]: I1123 15:04:54.167588 5050 scope.go:117] "RemoveContainer" containerID="4211198ac477c9fe45e3ef0bd5dffc47dd9911807782e0d46558842b2d2b48fd" Nov 23 15:04:54 crc kubenswrapper[5050]: E1123 15:04:54.167927 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4211198ac477c9fe45e3ef0bd5dffc47dd9911807782e0d46558842b2d2b48fd\": container with ID starting with 4211198ac477c9fe45e3ef0bd5dffc47dd9911807782e0d46558842b2d2b48fd not found: ID does not exist" 
containerID="4211198ac477c9fe45e3ef0bd5dffc47dd9911807782e0d46558842b2d2b48fd" Nov 23 15:04:54 crc kubenswrapper[5050]: I1123 15:04:54.167969 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4211198ac477c9fe45e3ef0bd5dffc47dd9911807782e0d46558842b2d2b48fd"} err="failed to get container status \"4211198ac477c9fe45e3ef0bd5dffc47dd9911807782e0d46558842b2d2b48fd\": rpc error: code = NotFound desc = could not find container \"4211198ac477c9fe45e3ef0bd5dffc47dd9911807782e0d46558842b2d2b48fd\": container with ID starting with 4211198ac477c9fe45e3ef0bd5dffc47dd9911807782e0d46558842b2d2b48fd not found: ID does not exist" Nov 23 15:04:54 crc kubenswrapper[5050]: I1123 15:04:54.167999 5050 scope.go:117] "RemoveContainer" containerID="0001c3500cc85fc0a925e13354d27829e719cc2b2c22f8ba291e2016b3e3b365" Nov 23 15:04:54 crc kubenswrapper[5050]: E1123 15:04:54.168309 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0001c3500cc85fc0a925e13354d27829e719cc2b2c22f8ba291e2016b3e3b365\": container with ID starting with 0001c3500cc85fc0a925e13354d27829e719cc2b2c22f8ba291e2016b3e3b365 not found: ID does not exist" containerID="0001c3500cc85fc0a925e13354d27829e719cc2b2c22f8ba291e2016b3e3b365" Nov 23 15:04:54 crc kubenswrapper[5050]: I1123 15:04:54.168346 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0001c3500cc85fc0a925e13354d27829e719cc2b2c22f8ba291e2016b3e3b365"} err="failed to get container status \"0001c3500cc85fc0a925e13354d27829e719cc2b2c22f8ba291e2016b3e3b365\": rpc error: code = NotFound desc = could not find container \"0001c3500cc85fc0a925e13354d27829e719cc2b2c22f8ba291e2016b3e3b365\": container with ID starting with 0001c3500cc85fc0a925e13354d27829e719cc2b2c22f8ba291e2016b3e3b365 not found: ID does not exist" Nov 23 15:04:54 crc kubenswrapper[5050]: I1123 15:04:54.168372 5050 scope.go:117] "RemoveContainer" containerID="a0820261b299cc33db42946f12f1f4467f892f1598cff4f933ab45def65d8af9" Nov 23 15:04:54 crc kubenswrapper[5050]: I1123 15:04:54.197072 5050 scope.go:117] "RemoveContainer" containerID="1b3d79783367fa4b7fae45d92ec2a13146512723daef532131bce7af8af900e8" Nov 23 15:04:54 crc kubenswrapper[5050]: I1123 15:04:54.231720 5050 scope.go:117] "RemoveContainer" containerID="d79191eb437e5f7ec421da5ab0ca602912b38ed265ec750ace6b1b344d1ac58d" Nov 23 15:04:55 crc kubenswrapper[5050]: I1123 15:04:55.565988 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4947b94e-805e-457e-bb17-f7faea3b5fca" path="/var/lib/kubelet/pods/4947b94e-805e-457e-bb17-f7faea3b5fca/volumes" Nov 23 15:04:55 crc kubenswrapper[5050]: I1123 15:04:55.569194 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c21366ac-a7da-4cac-8332-753659210595" path="/var/lib/kubelet/pods/c21366ac-a7da-4cac-8332-753659210595/volumes" Nov 23 15:04:58 crc kubenswrapper[5050]: I1123 15:04:58.263220 5050 pod_container_manager_linux.go:210] "Failed to delete cgroup paths" cgroupName=["kubepods","besteffort","pod85ca597a-bf71-43bb-b57c-f840b37f196f"] err="unable to destroy cgroup paths for cgroup [kubepods besteffort pod85ca597a-bf71-43bb-b57c-f840b37f196f] : Timed out while waiting for systemd to remove kubepods-besteffort-pod85ca597a_bf71_43bb_b57c_f840b37f196f.slice" Nov 23 15:04:58 crc kubenswrapper[5050]: E1123 15:04:58.264698 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to delete cgroup paths for [kubepods 
besteffort pod85ca597a-bf71-43bb-b57c-f840b37f196f] : unable to destroy cgroup paths for cgroup [kubepods besteffort pod85ca597a-bf71-43bb-b57c-f840b37f196f] : Timed out while waiting for systemd to remove kubepods-besteffort-pod85ca597a_bf71_43bb_b57c_f840b37f196f.slice" pod="openstack/openstack-cell1-galera-0" podUID="85ca597a-bf71-43bb-b57c-f840b37f196f" Nov 23 15:04:58 crc kubenswrapper[5050]: I1123 15:04:58.737929 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Nov 23 15:04:58 crc kubenswrapper[5050]: I1123 15:04:58.770480 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 23 15:04:58 crc kubenswrapper[5050]: I1123 15:04:58.780203 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 23 15:04:59 crc kubenswrapper[5050]: I1123 15:04:59.379630 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glancebb96-account-delete-qls8c" Nov 23 15:04:59 crc kubenswrapper[5050]: I1123 15:04:59.534544 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/13ecb6d2-cd50-4239-8939-6465176afd8e-operator-scripts\") pod \"13ecb6d2-cd50-4239-8939-6465176afd8e\" (UID: \"13ecb6d2-cd50-4239-8939-6465176afd8e\") " Nov 23 15:04:59 crc kubenswrapper[5050]: I1123 15:04:59.534846 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cgsql\" (UniqueName: \"kubernetes.io/projected/13ecb6d2-cd50-4239-8939-6465176afd8e-kube-api-access-cgsql\") pod \"13ecb6d2-cd50-4239-8939-6465176afd8e\" (UID: \"13ecb6d2-cd50-4239-8939-6465176afd8e\") " Nov 23 15:04:59 crc kubenswrapper[5050]: I1123 15:04:59.535709 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/13ecb6d2-cd50-4239-8939-6465176afd8e-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "13ecb6d2-cd50-4239-8939-6465176afd8e" (UID: "13ecb6d2-cd50-4239-8939-6465176afd8e"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 15:04:59 crc kubenswrapper[5050]: I1123 15:04:59.542550 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/13ecb6d2-cd50-4239-8939-6465176afd8e-kube-api-access-cgsql" (OuterVolumeSpecName: "kube-api-access-cgsql") pod "13ecb6d2-cd50-4239-8939-6465176afd8e" (UID: "13ecb6d2-cd50-4239-8939-6465176afd8e"). InnerVolumeSpecName "kube-api-access-cgsql". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:04:59 crc kubenswrapper[5050]: I1123 15:04:59.570146 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="85ca597a-bf71-43bb-b57c-f840b37f196f" path="/var/lib/kubelet/pods/85ca597a-bf71-43bb-b57c-f840b37f196f/volumes" Nov 23 15:04:59 crc kubenswrapper[5050]: I1123 15:04:59.637864 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cgsql\" (UniqueName: \"kubernetes.io/projected/13ecb6d2-cd50-4239-8939-6465176afd8e-kube-api-access-cgsql\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:59 crc kubenswrapper[5050]: I1123 15:04:59.638169 5050 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/13ecb6d2-cd50-4239-8939-6465176afd8e-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 23 15:04:59 crc kubenswrapper[5050]: I1123 15:04:59.753087 5050 generic.go:334] "Generic (PLEG): container finished" podID="13ecb6d2-cd50-4239-8939-6465176afd8e" containerID="f061470e15b4db94da38ba3328655821452437dec9ef88351bce72f0bc801736" exitCode=137 Nov 23 15:04:59 crc kubenswrapper[5050]: I1123 15:04:59.753168 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glancebb96-account-delete-qls8c" event={"ID":"13ecb6d2-cd50-4239-8939-6465176afd8e","Type":"ContainerDied","Data":"f061470e15b4db94da38ba3328655821452437dec9ef88351bce72f0bc801736"} Nov 23 15:04:59 crc kubenswrapper[5050]: I1123 15:04:59.753221 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glancebb96-account-delete-qls8c" event={"ID":"13ecb6d2-cd50-4239-8939-6465176afd8e","Type":"ContainerDied","Data":"06f7ec8eaee623028a2fb5166e2c0a70be842f5548766d5f65f74ceabacb80f5"} Nov 23 15:04:59 crc kubenswrapper[5050]: I1123 15:04:59.753257 5050 scope.go:117] "RemoveContainer" containerID="f061470e15b4db94da38ba3328655821452437dec9ef88351bce72f0bc801736" Nov 23 15:04:59 crc kubenswrapper[5050]: I1123 15:04:59.753547 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glancebb96-account-delete-qls8c" Nov 23 15:04:59 crc kubenswrapper[5050]: I1123 15:04:59.784997 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glancebb96-account-delete-qls8c"] Nov 23 15:04:59 crc kubenswrapper[5050]: I1123 15:04:59.791675 5050 scope.go:117] "RemoveContainer" containerID="f061470e15b4db94da38ba3328655821452437dec9ef88351bce72f0bc801736" Nov 23 15:04:59 crc kubenswrapper[5050]: E1123 15:04:59.792408 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f061470e15b4db94da38ba3328655821452437dec9ef88351bce72f0bc801736\": container with ID starting with f061470e15b4db94da38ba3328655821452437dec9ef88351bce72f0bc801736 not found: ID does not exist" containerID="f061470e15b4db94da38ba3328655821452437dec9ef88351bce72f0bc801736" Nov 23 15:04:59 crc kubenswrapper[5050]: I1123 15:04:59.792491 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f061470e15b4db94da38ba3328655821452437dec9ef88351bce72f0bc801736"} err="failed to get container status \"f061470e15b4db94da38ba3328655821452437dec9ef88351bce72f0bc801736\": rpc error: code = NotFound desc = could not find container \"f061470e15b4db94da38ba3328655821452437dec9ef88351bce72f0bc801736\": container with ID starting with f061470e15b4db94da38ba3328655821452437dec9ef88351bce72f0bc801736 not found: ID does not exist" Nov 23 15:04:59 crc kubenswrapper[5050]: I1123 15:04:59.796741 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glancebb96-account-delete-qls8c"] Nov 23 15:05:00 crc kubenswrapper[5050]: I1123 15:05:00.630720 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/novaapif81d-account-delete-xfdwn" Nov 23 15:05:00 crc kubenswrapper[5050]: I1123 15:05:00.642266 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/novacell03052-account-delete-kz7k7" Nov 23 15:05:00 crc kubenswrapper[5050]: I1123 15:05:00.653574 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vlrvp\" (UniqueName: \"kubernetes.io/projected/1bda1a38-71dd-4de4-8e16-b5159a08fdfa-kube-api-access-vlrvp\") pod \"1bda1a38-71dd-4de4-8e16-b5159a08fdfa\" (UID: \"1bda1a38-71dd-4de4-8e16-b5159a08fdfa\") " Nov 23 15:05:00 crc kubenswrapper[5050]: I1123 15:05:00.653622 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-747lj\" (UniqueName: \"kubernetes.io/projected/3d6e4b67-5d33-49cd-8a2a-69c3fc2c9d89-kube-api-access-747lj\") pod \"3d6e4b67-5d33-49cd-8a2a-69c3fc2c9d89\" (UID: \"3d6e4b67-5d33-49cd-8a2a-69c3fc2c9d89\") " Nov 23 15:05:00 crc kubenswrapper[5050]: I1123 15:05:00.653680 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3d6e4b67-5d33-49cd-8a2a-69c3fc2c9d89-operator-scripts\") pod \"3d6e4b67-5d33-49cd-8a2a-69c3fc2c9d89\" (UID: \"3d6e4b67-5d33-49cd-8a2a-69c3fc2c9d89\") " Nov 23 15:05:00 crc kubenswrapper[5050]: I1123 15:05:00.653714 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1bda1a38-71dd-4de4-8e16-b5159a08fdfa-operator-scripts\") pod \"1bda1a38-71dd-4de4-8e16-b5159a08fdfa\" (UID: \"1bda1a38-71dd-4de4-8e16-b5159a08fdfa\") " Nov 23 15:05:00 crc kubenswrapper[5050]: I1123 15:05:00.654790 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bda1a38-71dd-4de4-8e16-b5159a08fdfa-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "1bda1a38-71dd-4de4-8e16-b5159a08fdfa" (UID: "1bda1a38-71dd-4de4-8e16-b5159a08fdfa"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 15:05:00 crc kubenswrapper[5050]: I1123 15:05:00.654831 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3d6e4b67-5d33-49cd-8a2a-69c3fc2c9d89-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "3d6e4b67-5d33-49cd-8a2a-69c3fc2c9d89" (UID: "3d6e4b67-5d33-49cd-8a2a-69c3fc2c9d89"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 15:05:00 crc kubenswrapper[5050]: I1123 15:05:00.659508 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3d6e4b67-5d33-49cd-8a2a-69c3fc2c9d89-kube-api-access-747lj" (OuterVolumeSpecName: "kube-api-access-747lj") pod "3d6e4b67-5d33-49cd-8a2a-69c3fc2c9d89" (UID: "3d6e4b67-5d33-49cd-8a2a-69c3fc2c9d89"). InnerVolumeSpecName "kube-api-access-747lj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:05:00 crc kubenswrapper[5050]: I1123 15:05:00.675732 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1bda1a38-71dd-4de4-8e16-b5159a08fdfa-kube-api-access-vlrvp" (OuterVolumeSpecName: "kube-api-access-vlrvp") pod "1bda1a38-71dd-4de4-8e16-b5159a08fdfa" (UID: "1bda1a38-71dd-4de4-8e16-b5159a08fdfa"). InnerVolumeSpecName "kube-api-access-vlrvp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:05:00 crc kubenswrapper[5050]: I1123 15:05:00.756682 5050 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3d6e4b67-5d33-49cd-8a2a-69c3fc2c9d89-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 23 15:05:00 crc kubenswrapper[5050]: I1123 15:05:00.756731 5050 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1bda1a38-71dd-4de4-8e16-b5159a08fdfa-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 23 15:05:00 crc kubenswrapper[5050]: I1123 15:05:00.756747 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vlrvp\" (UniqueName: \"kubernetes.io/projected/1bda1a38-71dd-4de4-8e16-b5159a08fdfa-kube-api-access-vlrvp\") on node \"crc\" DevicePath \"\"" Nov 23 15:05:00 crc kubenswrapper[5050]: I1123 15:05:00.756764 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-747lj\" (UniqueName: \"kubernetes.io/projected/3d6e4b67-5d33-49cd-8a2a-69c3fc2c9d89-kube-api-access-747lj\") on node \"crc\" DevicePath \"\"" Nov 23 15:05:00 crc kubenswrapper[5050]: I1123 15:05:00.768189 5050 generic.go:334] "Generic (PLEG): container finished" podID="3d6e4b67-5d33-49cd-8a2a-69c3fc2c9d89" containerID="f88295c1f1bb5ed5c59a421aaeb3b1e96a228d0eedd67e330915a293e3607e55" exitCode=137 Nov 23 15:05:00 crc kubenswrapper[5050]: I1123 15:05:00.768349 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/novacell03052-account-delete-kz7k7" Nov 23 15:05:00 crc kubenswrapper[5050]: I1123 15:05:00.768437 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/novacell03052-account-delete-kz7k7" event={"ID":"3d6e4b67-5d33-49cd-8a2a-69c3fc2c9d89","Type":"ContainerDied","Data":"f88295c1f1bb5ed5c59a421aaeb3b1e96a228d0eedd67e330915a293e3607e55"} Nov 23 15:05:00 crc kubenswrapper[5050]: I1123 15:05:00.768530 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/novacell03052-account-delete-kz7k7" event={"ID":"3d6e4b67-5d33-49cd-8a2a-69c3fc2c9d89","Type":"ContainerDied","Data":"e5e99474b6c49167c6c2d8f690d0cc421ed1b7ed5f2c94f6c9ff2d4ecfc7bbe1"} Nov 23 15:05:00 crc kubenswrapper[5050]: I1123 15:05:00.768557 5050 scope.go:117] "RemoveContainer" containerID="f88295c1f1bb5ed5c59a421aaeb3b1e96a228d0eedd67e330915a293e3607e55" Nov 23 15:05:00 crc kubenswrapper[5050]: I1123 15:05:00.772597 5050 generic.go:334] "Generic (PLEG): container finished" podID="1bda1a38-71dd-4de4-8e16-b5159a08fdfa" containerID="dae7cb177dedffc9bba415c55b29716daa7fad33f3294e8262f68961a6e59713" exitCode=137 Nov 23 15:05:00 crc kubenswrapper[5050]: I1123 15:05:00.772650 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/novaapif81d-account-delete-xfdwn" event={"ID":"1bda1a38-71dd-4de4-8e16-b5159a08fdfa","Type":"ContainerDied","Data":"dae7cb177dedffc9bba415c55b29716daa7fad33f3294e8262f68961a6e59713"} Nov 23 15:05:00 crc kubenswrapper[5050]: I1123 15:05:00.772671 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/novaapif81d-account-delete-xfdwn" event={"ID":"1bda1a38-71dd-4de4-8e16-b5159a08fdfa","Type":"ContainerDied","Data":"7fd4f46c7d4a6b10b51c4634653ef5eea9723e27b89cb170f3b6260e7ce0f0fa"} Nov 23 15:05:00 crc kubenswrapper[5050]: I1123 15:05:00.772719 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/novaapif81d-account-delete-xfdwn" Nov 23 15:05:00 crc kubenswrapper[5050]: I1123 15:05:00.807714 5050 scope.go:117] "RemoveContainer" containerID="f88295c1f1bb5ed5c59a421aaeb3b1e96a228d0eedd67e330915a293e3607e55" Nov 23 15:05:00 crc kubenswrapper[5050]: E1123 15:05:00.808500 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f88295c1f1bb5ed5c59a421aaeb3b1e96a228d0eedd67e330915a293e3607e55\": container with ID starting with f88295c1f1bb5ed5c59a421aaeb3b1e96a228d0eedd67e330915a293e3607e55 not found: ID does not exist" containerID="f88295c1f1bb5ed5c59a421aaeb3b1e96a228d0eedd67e330915a293e3607e55" Nov 23 15:05:00 crc kubenswrapper[5050]: I1123 15:05:00.808562 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f88295c1f1bb5ed5c59a421aaeb3b1e96a228d0eedd67e330915a293e3607e55"} err="failed to get container status \"f88295c1f1bb5ed5c59a421aaeb3b1e96a228d0eedd67e330915a293e3607e55\": rpc error: code = NotFound desc = could not find container \"f88295c1f1bb5ed5c59a421aaeb3b1e96a228d0eedd67e330915a293e3607e55\": container with ID starting with f88295c1f1bb5ed5c59a421aaeb3b1e96a228d0eedd67e330915a293e3607e55 not found: ID does not exist" Nov 23 15:05:00 crc kubenswrapper[5050]: I1123 15:05:00.808602 5050 scope.go:117] "RemoveContainer" containerID="dae7cb177dedffc9bba415c55b29716daa7fad33f3294e8262f68961a6e59713" Nov 23 15:05:00 crc kubenswrapper[5050]: I1123 15:05:00.810533 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/novacell03052-account-delete-kz7k7"] Nov 23 15:05:00 crc kubenswrapper[5050]: I1123 15:05:00.823339 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/novacell03052-account-delete-kz7k7"] Nov 23 15:05:00 crc kubenswrapper[5050]: I1123 15:05:00.846247 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/novaapif81d-account-delete-xfdwn"] Nov 23 15:05:00 crc kubenswrapper[5050]: I1123 15:05:00.863535 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/novaapif81d-account-delete-xfdwn"] Nov 23 15:05:00 crc kubenswrapper[5050]: I1123 15:05:00.878867 5050 scope.go:117] "RemoveContainer" containerID="dae7cb177dedffc9bba415c55b29716daa7fad33f3294e8262f68961a6e59713" Nov 23 15:05:00 crc kubenswrapper[5050]: E1123 15:05:00.880252 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dae7cb177dedffc9bba415c55b29716daa7fad33f3294e8262f68961a6e59713\": container with ID starting with dae7cb177dedffc9bba415c55b29716daa7fad33f3294e8262f68961a6e59713 not found: ID does not exist" containerID="dae7cb177dedffc9bba415c55b29716daa7fad33f3294e8262f68961a6e59713" Nov 23 15:05:00 crc kubenswrapper[5050]: I1123 15:05:00.880331 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dae7cb177dedffc9bba415c55b29716daa7fad33f3294e8262f68961a6e59713"} err="failed to get container status \"dae7cb177dedffc9bba415c55b29716daa7fad33f3294e8262f68961a6e59713\": rpc error: code = NotFound desc = could not find container \"dae7cb177dedffc9bba415c55b29716daa7fad33f3294e8262f68961a6e59713\": container with ID starting with dae7cb177dedffc9bba415c55b29716daa7fad33f3294e8262f68961a6e59713 not found: ID does not exist" Nov 23 15:05:01 crc kubenswrapper[5050]: I1123 15:05:01.566370 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="13ecb6d2-cd50-4239-8939-6465176afd8e" path="/var/lib/kubelet/pods/13ecb6d2-cd50-4239-8939-6465176afd8e/volumes" Nov 23 15:05:01 crc kubenswrapper[5050]: I1123 15:05:01.567428 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1bda1a38-71dd-4de4-8e16-b5159a08fdfa" path="/var/lib/kubelet/pods/1bda1a38-71dd-4de4-8e16-b5159a08fdfa/volumes" Nov 23 15:05:01 crc kubenswrapper[5050]: I1123 15:05:01.568405 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3d6e4b67-5d33-49cd-8a2a-69c3fc2c9d89" path="/var/lib/kubelet/pods/3d6e4b67-5d33-49cd-8a2a-69c3fc2c9d89/volumes" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.180617 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-wknvj"] Nov 23 15:05:28 crc kubenswrapper[5050]: E1123 15:05:28.181907 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="528697e8-1ddc-4ab0-aa0e-008631905a4c" containerName="ceilometer-central-agent" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.181976 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="528697e8-1ddc-4ab0-aa0e-008631905a4c" containerName="ceilometer-central-agent" Nov 23 15:05:28 crc kubenswrapper[5050]: E1123 15:05:28.182004 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6c3ae457-67ab-4c0b-a17e-a8264215793b" containerName="nova-api-log" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.182020 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="6c3ae457-67ab-4c0b-a17e-a8264215793b" containerName="nova-api-log" Nov 23 15:05:28 crc kubenswrapper[5050]: E1123 15:05:28.182046 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c21366ac-a7da-4cac-8332-753659210595" containerName="ovs-vswitchd" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.182060 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="c21366ac-a7da-4cac-8332-753659210595" containerName="ovs-vswitchd" Nov 23 15:05:28 crc kubenswrapper[5050]: E1123 15:05:28.182073 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8e597d4a-3ff4-4b9e-ab6a-c0364f6afb43" containerName="nova-metadata-log" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.182084 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="8e597d4a-3ff4-4b9e-ab6a-c0364f6afb43" containerName="nova-metadata-log" Nov 23 15:05:28 crc kubenswrapper[5050]: E1123 15:05:28.182099 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4b570632-90a0-4fcd-a067-9913b51ad711" containerName="mariadb-account-delete" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.182110 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="4b570632-90a0-4fcd-a067-9913b51ad711" containerName="mariadb-account-delete" Nov 23 15:05:28 crc kubenswrapper[5050]: E1123 15:05:28.182131 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3d6e4b67-5d33-49cd-8a2a-69c3fc2c9d89" containerName="mariadb-account-delete" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.182141 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="3d6e4b67-5d33-49cd-8a2a-69c3fc2c9d89" containerName="mariadb-account-delete" Nov 23 15:05:28 crc kubenswrapper[5050]: E1123 15:05:28.182161 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4947b94e-805e-457e-bb17-f7faea3b5fca" containerName="account-reaper" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.182174 5050 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="4947b94e-805e-457e-bb17-f7faea3b5fca" containerName="account-reaper" Nov 23 15:05:28 crc kubenswrapper[5050]: E1123 15:05:28.182195 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6c5e9b39-91fa-4f4e-9d95-0599bc22472d" containerName="glance-log" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.182206 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="6c5e9b39-91fa-4f4e-9d95-0599bc22472d" containerName="glance-log" Nov 23 15:05:28 crc kubenswrapper[5050]: E1123 15:05:28.182221 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f92353db-5352-4216-ad2d-50242e58dfb7" containerName="setup-container" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.182232 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="f92353db-5352-4216-ad2d-50242e58dfb7" containerName="setup-container" Nov 23 15:05:28 crc kubenswrapper[5050]: E1123 15:05:28.182251 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="94bbca67-2998-4727-bbe1-95d54277f4aa" containerName="kube-state-metrics" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.182261 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="94bbca67-2998-4727-bbe1-95d54277f4aa" containerName="kube-state-metrics" Nov 23 15:05:28 crc kubenswrapper[5050]: E1123 15:05:28.182273 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c1187c9d-5557-496e-be1d-8df301d6daa6" containerName="proxy-httpd" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.182284 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="c1187c9d-5557-496e-be1d-8df301d6daa6" containerName="proxy-httpd" Nov 23 15:05:28 crc kubenswrapper[5050]: E1123 15:05:28.182301 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ba4a4a57-47b0-423d-8bb1-76953fb3a37b" containerName="cinder-scheduler" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.182312 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="ba4a4a57-47b0-423d-8bb1-76953fb3a37b" containerName="cinder-scheduler" Nov 23 15:05:28 crc kubenswrapper[5050]: E1123 15:05:28.182329 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4947b94e-805e-457e-bb17-f7faea3b5fca" containerName="container-updater" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.182339 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="4947b94e-805e-457e-bb17-f7faea3b5fca" containerName="container-updater" Nov 23 15:05:28 crc kubenswrapper[5050]: E1123 15:05:28.182361 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f85034c8-6b79-42e1-849d-646a2ead1a93" containerName="barbican-api-log" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.182373 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="f85034c8-6b79-42e1-849d-646a2ead1a93" containerName="barbican-api-log" Nov 23 15:05:28 crc kubenswrapper[5050]: E1123 15:05:28.182391 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d1ce97bd-1ff7-400e-a741-7e757fbd7e17" containerName="memcached" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.182401 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="d1ce97bd-1ff7-400e-a741-7e757fbd7e17" containerName="memcached" Nov 23 15:05:28 crc kubenswrapper[5050]: E1123 15:05:28.182412 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4947b94e-805e-457e-bb17-f7faea3b5fca" containerName="container-server" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.182423 5050 state_mem.go:107] "Deleted CPUSet 
assignment" podUID="4947b94e-805e-457e-bb17-f7faea3b5fca" containerName="container-server" Nov 23 15:05:28 crc kubenswrapper[5050]: E1123 15:05:28.182438 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="add2a2e2-5553-4e43-8ddd-b8719949d531" containerName="placement-log" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.182490 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="add2a2e2-5553-4e43-8ddd-b8719949d531" containerName="placement-log" Nov 23 15:05:28 crc kubenswrapper[5050]: E1123 15:05:28.182511 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4947b94e-805e-457e-bb17-f7faea3b5fca" containerName="account-auditor" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.182539 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="4947b94e-805e-457e-bb17-f7faea3b5fca" containerName="account-auditor" Nov 23 15:05:28 crc kubenswrapper[5050]: E1123 15:05:28.182551 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e78eb875-17e5-431f-82b5-14a1190488b0" containerName="mariadb-account-delete" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.182562 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="e78eb875-17e5-431f-82b5-14a1190488b0" containerName="mariadb-account-delete" Nov 23 15:05:28 crc kubenswrapper[5050]: E1123 15:05:28.182583 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1b3a1560-8762-47b9-8d1c-fe94eb46bec2" containerName="neutron-httpd" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.182594 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="1b3a1560-8762-47b9-8d1c-fe94eb46bec2" containerName="neutron-httpd" Nov 23 15:05:28 crc kubenswrapper[5050]: E1123 15:05:28.182611 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f81d4e06-4245-4355-89d6-ddc438c06f29" containerName="openstack-network-exporter" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.182621 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="f81d4e06-4245-4355-89d6-ddc438c06f29" containerName="openstack-network-exporter" Nov 23 15:05:28 crc kubenswrapper[5050]: E1123 15:05:28.182638 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="528697e8-1ddc-4ab0-aa0e-008631905a4c" containerName="ceilometer-notification-agent" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.182652 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="528697e8-1ddc-4ab0-aa0e-008631905a4c" containerName="ceilometer-notification-agent" Nov 23 15:05:28 crc kubenswrapper[5050]: E1123 15:05:28.182664 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9ea4c6e2-e26b-454f-a6e1-b8fd7df8a931" containerName="mariadb-account-delete" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.182675 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="9ea4c6e2-e26b-454f-a6e1-b8fd7df8a931" containerName="mariadb-account-delete" Nov 23 15:05:28 crc kubenswrapper[5050]: E1123 15:05:28.182694 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e3fbc320-b4b0-47f2-9fc6-a6efc63a3ee9" containerName="keystone-api" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.182704 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="e3fbc320-b4b0-47f2-9fc6-a6efc63a3ee9" containerName="keystone-api" Nov 23 15:05:28 crc kubenswrapper[5050]: E1123 15:05:28.182725 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5eff539e-c641-4873-aeae-450aaf0b4ac8" containerName="rabbitmq" Nov 23 15:05:28 crc 
kubenswrapper[5050]: I1123 15:05:28.182735 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="5eff539e-c641-4873-aeae-450aaf0b4ac8" containerName="rabbitmq" Nov 23 15:05:28 crc kubenswrapper[5050]: E1123 15:05:28.182749 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="528697e8-1ddc-4ab0-aa0e-008631905a4c" containerName="proxy-httpd" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.182763 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="528697e8-1ddc-4ab0-aa0e-008631905a4c" containerName="proxy-httpd" Nov 23 15:05:28 crc kubenswrapper[5050]: E1123 15:05:28.182774 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="716a14cd-88b9-4e18-a781-6dbfaad7634c" containerName="ovsdbserver-nb" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.182784 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="716a14cd-88b9-4e18-a781-6dbfaad7634c" containerName="ovsdbserver-nb" Nov 23 15:05:28 crc kubenswrapper[5050]: E1123 15:05:28.182803 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f81d4e06-4245-4355-89d6-ddc438c06f29" containerName="ovsdbserver-sb" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.183194 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="f81d4e06-4245-4355-89d6-ddc438c06f29" containerName="ovsdbserver-sb" Nov 23 15:05:28 crc kubenswrapper[5050]: E1123 15:05:28.183218 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="27e26dea-41e9-4d40-9adc-f902e5f4791f" containerName="openstack-network-exporter" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.183230 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="27e26dea-41e9-4d40-9adc-f902e5f4791f" containerName="openstack-network-exporter" Nov 23 15:05:28 crc kubenswrapper[5050]: E1123 15:05:28.183243 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ba1bb9f6-5ca2-4986-9473-62d50d9bebf0" containerName="cinder-api" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.183254 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="ba1bb9f6-5ca2-4986-9473-62d50d9bebf0" containerName="cinder-api" Nov 23 15:05:28 crc kubenswrapper[5050]: E1123 15:05:28.183299 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4d3c8093-cbaa-4414-9f1d-c7ae7a2b9322" containerName="glance-httpd" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.183310 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="4d3c8093-cbaa-4414-9f1d-c7ae7a2b9322" containerName="glance-httpd" Nov 23 15:05:28 crc kubenswrapper[5050]: E1123 15:05:28.183327 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e7a4774e-f304-40ca-8595-bbe9c381f466" containerName="init" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.183337 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="e7a4774e-f304-40ca-8595-bbe9c381f466" containerName="init" Nov 23 15:05:28 crc kubenswrapper[5050]: E1123 15:05:28.183354 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e7a4774e-f304-40ca-8595-bbe9c381f466" containerName="dnsmasq-dns" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.183365 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="e7a4774e-f304-40ca-8595-bbe9c381f466" containerName="dnsmasq-dns" Nov 23 15:05:28 crc kubenswrapper[5050]: E1123 15:05:28.183381 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="75e360ca-c84a-4806-a86a-86924a639cfc" containerName="nova-cell1-conductor-conductor" Nov 23 15:05:28 crc 
kubenswrapper[5050]: I1123 15:05:28.183391 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="75e360ca-c84a-4806-a86a-86924a639cfc" containerName="nova-cell1-conductor-conductor" Nov 23 15:05:28 crc kubenswrapper[5050]: E1123 15:05:28.183407 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c21366ac-a7da-4cac-8332-753659210595" containerName="ovsdb-server-init" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.183926 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="c21366ac-a7da-4cac-8332-753659210595" containerName="ovsdb-server-init" Nov 23 15:05:28 crc kubenswrapper[5050]: E1123 15:05:28.183958 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4947b94e-805e-457e-bb17-f7faea3b5fca" containerName="container-auditor" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.183970 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="4947b94e-805e-457e-bb17-f7faea3b5fca" containerName="container-auditor" Nov 23 15:05:28 crc kubenswrapper[5050]: E1123 15:05:28.183991 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9d53c4b5-d0a1-4191-9239-8c7b6806f860" containerName="barbican-keystone-listener-log" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.184002 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="9d53c4b5-d0a1-4191-9239-8c7b6806f860" containerName="barbican-keystone-listener-log" Nov 23 15:05:28 crc kubenswrapper[5050]: E1123 15:05:28.184019 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4a7ed50c-3990-423d-9fd6-1165da59c5c9" containerName="openstack-network-exporter" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.184032 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="4a7ed50c-3990-423d-9fd6-1165da59c5c9" containerName="openstack-network-exporter" Nov 23 15:05:28 crc kubenswrapper[5050]: E1123 15:05:28.184045 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9d53c4b5-d0a1-4191-9239-8c7b6806f860" containerName="barbican-keystone-listener" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.184057 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="9d53c4b5-d0a1-4191-9239-8c7b6806f860" containerName="barbican-keystone-listener" Nov 23 15:05:28 crc kubenswrapper[5050]: E1123 15:05:28.184071 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d3d96734-832d-489e-9fed-a4eb705f41d7" containerName="nova-cell1-novncproxy-novncproxy" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.184083 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="d3d96734-832d-489e-9fed-a4eb705f41d7" containerName="nova-cell1-novncproxy-novncproxy" Nov 23 15:05:28 crc kubenswrapper[5050]: E1123 15:05:28.184096 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6c3ae457-67ab-4c0b-a17e-a8264215793b" containerName="nova-api-api" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.184108 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="6c3ae457-67ab-4c0b-a17e-a8264215793b" containerName="nova-api-api" Nov 23 15:05:28 crc kubenswrapper[5050]: E1123 15:05:28.184120 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="33af3c2e-dea7-4448-8b28-b579d77490b9" containerName="ovn-controller" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.184131 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="33af3c2e-dea7-4448-8b28-b579d77490b9" containerName="ovn-controller" Nov 23 15:05:28 crc kubenswrapper[5050]: E1123 15:05:28.184150 5050 
cpu_manager.go:410] "RemoveStaleState: removing container" podUID="634f01f5-daae-4e5d-811f-5d406bfee9c1" containerName="nova-scheduler-scheduler" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.184162 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="634f01f5-daae-4e5d-811f-5d406bfee9c1" containerName="nova-scheduler-scheduler" Nov 23 15:05:28 crc kubenswrapper[5050]: E1123 15:05:28.184179 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="27e26dea-41e9-4d40-9adc-f902e5f4791f" containerName="ovn-northd" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.184190 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="27e26dea-41e9-4d40-9adc-f902e5f4791f" containerName="ovn-northd" Nov 23 15:05:28 crc kubenswrapper[5050]: E1123 15:05:28.184212 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4947b94e-805e-457e-bb17-f7faea3b5fca" containerName="account-server" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.184223 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="4947b94e-805e-457e-bb17-f7faea3b5fca" containerName="account-server" Nov 23 15:05:28 crc kubenswrapper[5050]: E1123 15:05:28.184236 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3d3a2397-697f-4d1b-a5ed-0aaa1b202a12" containerName="mariadb-account-delete" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.184248 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="3d3a2397-697f-4d1b-a5ed-0aaa1b202a12" containerName="mariadb-account-delete" Nov 23 15:05:28 crc kubenswrapper[5050]: E1123 15:05:28.184264 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4947b94e-805e-457e-bb17-f7faea3b5fca" containerName="account-replicator" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.184275 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="4947b94e-805e-457e-bb17-f7faea3b5fca" containerName="account-replicator" Nov 23 15:05:28 crc kubenswrapper[5050]: E1123 15:05:28.184285 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4947b94e-805e-457e-bb17-f7faea3b5fca" containerName="object-replicator" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.184297 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="4947b94e-805e-457e-bb17-f7faea3b5fca" containerName="object-replicator" Nov 23 15:05:28 crc kubenswrapper[5050]: E1123 15:05:28.184312 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0a256986-024f-45e6-9b86-b5a724ab5f6e" containerName="galera" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.184323 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="0a256986-024f-45e6-9b86-b5a724ab5f6e" containerName="galera" Nov 23 15:05:28 crc kubenswrapper[5050]: E1123 15:05:28.184337 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4947b94e-805e-457e-bb17-f7faea3b5fca" containerName="object-expirer" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.184347 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="4947b94e-805e-457e-bb17-f7faea3b5fca" containerName="object-expirer" Nov 23 15:05:28 crc kubenswrapper[5050]: E1123 15:05:28.184362 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6c5e9b39-91fa-4f4e-9d95-0599bc22472d" containerName="glance-httpd" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.184373 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="6c5e9b39-91fa-4f4e-9d95-0599bc22472d" containerName="glance-httpd" Nov 23 15:05:28 crc kubenswrapper[5050]: 
E1123 15:05:28.184388 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="85ca597a-bf71-43bb-b57c-f840b37f196f" containerName="mysql-bootstrap" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.184399 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="85ca597a-bf71-43bb-b57c-f840b37f196f" containerName="mysql-bootstrap" Nov 23 15:05:28 crc kubenswrapper[5050]: E1123 15:05:28.184410 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4947b94e-805e-457e-bb17-f7faea3b5fca" containerName="object-updater" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.184422 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="4947b94e-805e-457e-bb17-f7faea3b5fca" containerName="object-updater" Nov 23 15:05:28 crc kubenswrapper[5050]: E1123 15:05:28.184436 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8e597d4a-3ff4-4b9e-ab6a-c0364f6afb43" containerName="nova-metadata-metadata" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.184496 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="8e597d4a-3ff4-4b9e-ab6a-c0364f6afb43" containerName="nova-metadata-metadata" Nov 23 15:05:28 crc kubenswrapper[5050]: E1123 15:05:28.184518 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4947b94e-805e-457e-bb17-f7faea3b5fca" containerName="object-server" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.184530 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="4947b94e-805e-457e-bb17-f7faea3b5fca" containerName="object-server" Nov 23 15:05:28 crc kubenswrapper[5050]: E1123 15:05:28.184549 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0a256986-024f-45e6-9b86-b5a724ab5f6e" containerName="mysql-bootstrap" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.184562 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="0a256986-024f-45e6-9b86-b5a724ab5f6e" containerName="mysql-bootstrap" Nov 23 15:05:28 crc kubenswrapper[5050]: E1123 15:05:28.184574 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="add2a2e2-5553-4e43-8ddd-b8719949d531" containerName="placement-api" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.184585 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="add2a2e2-5553-4e43-8ddd-b8719949d531" containerName="placement-api" Nov 23 15:05:28 crc kubenswrapper[5050]: E1123 15:05:28.184598 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="042c506a-b65f-4cbd-9ca7-3df0ec55ffa0" containerName="barbican-worker-log" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.184610 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="042c506a-b65f-4cbd-9ca7-3df0ec55ffa0" containerName="barbican-worker-log" Nov 23 15:05:28 crc kubenswrapper[5050]: E1123 15:05:28.184629 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f85034c8-6b79-42e1-849d-646a2ead1a93" containerName="barbican-api" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.184639 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="f85034c8-6b79-42e1-849d-646a2ead1a93" containerName="barbican-api" Nov 23 15:05:28 crc kubenswrapper[5050]: E1123 15:05:28.184653 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1bda1a38-71dd-4de4-8e16-b5159a08fdfa" containerName="mariadb-account-delete" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.184665 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="1bda1a38-71dd-4de4-8e16-b5159a08fdfa" containerName="mariadb-account-delete" 
Nov 23 15:05:28 crc kubenswrapper[5050]: E1123 15:05:28.184677 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4947b94e-805e-457e-bb17-f7faea3b5fca" containerName="object-auditor" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.184688 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="4947b94e-805e-457e-bb17-f7faea3b5fca" containerName="object-auditor" Nov 23 15:05:28 crc kubenswrapper[5050]: E1123 15:05:28.184703 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="13ecb6d2-cd50-4239-8939-6465176afd8e" containerName="mariadb-account-delete" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.184715 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="13ecb6d2-cd50-4239-8939-6465176afd8e" containerName="mariadb-account-delete" Nov 23 15:05:28 crc kubenswrapper[5050]: E1123 15:05:28.184736 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4947b94e-805e-457e-bb17-f7faea3b5fca" containerName="container-replicator" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.184748 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="4947b94e-805e-457e-bb17-f7faea3b5fca" containerName="container-replicator" Nov 23 15:05:28 crc kubenswrapper[5050]: E1123 15:05:28.184767 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ba1bb9f6-5ca2-4986-9473-62d50d9bebf0" containerName="cinder-api-log" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.184779 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="ba1bb9f6-5ca2-4986-9473-62d50d9bebf0" containerName="cinder-api-log" Nov 23 15:05:28 crc kubenswrapper[5050]: E1123 15:05:28.184792 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="716a14cd-88b9-4e18-a781-6dbfaad7634c" containerName="openstack-network-exporter" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.184803 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="716a14cd-88b9-4e18-a781-6dbfaad7634c" containerName="openstack-network-exporter" Nov 23 15:05:28 crc kubenswrapper[5050]: E1123 15:05:28.184821 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ba4a4a57-47b0-423d-8bb1-76953fb3a37b" containerName="probe" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.184832 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="ba4a4a57-47b0-423d-8bb1-76953fb3a37b" containerName="probe" Nov 23 15:05:28 crc kubenswrapper[5050]: E1123 15:05:28.184878 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4d3c8093-cbaa-4414-9f1d-c7ae7a2b9322" containerName="glance-log" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.184890 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="4d3c8093-cbaa-4414-9f1d-c7ae7a2b9322" containerName="glance-log" Nov 23 15:05:28 crc kubenswrapper[5050]: E1123 15:05:28.184906 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1b3a1560-8762-47b9-8d1c-fe94eb46bec2" containerName="neutron-api" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.184917 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="1b3a1560-8762-47b9-8d1c-fe94eb46bec2" containerName="neutron-api" Nov 23 15:05:28 crc kubenswrapper[5050]: E1123 15:05:28.184931 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4947b94e-805e-457e-bb17-f7faea3b5fca" containerName="rsync" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.184946 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="4947b94e-805e-457e-bb17-f7faea3b5fca" 
containerName="rsync" Nov 23 15:05:28 crc kubenswrapper[5050]: E1123 15:05:28.184960 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5eff539e-c641-4873-aeae-450aaf0b4ac8" containerName="setup-container" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.184971 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="5eff539e-c641-4873-aeae-450aaf0b4ac8" containerName="setup-container" Nov 23 15:05:28 crc kubenswrapper[5050]: E1123 15:05:28.184986 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4d1fe4ad-9245-4af5-b378-c908ce72f08c" containerName="nova-cell0-conductor-conductor" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.184997 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="4d1fe4ad-9245-4af5-b378-c908ce72f08c" containerName="nova-cell0-conductor-conductor" Nov 23 15:05:28 crc kubenswrapper[5050]: E1123 15:05:28.185019 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4947b94e-805e-457e-bb17-f7faea3b5fca" containerName="swift-recon-cron" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.185030 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="4947b94e-805e-457e-bb17-f7faea3b5fca" containerName="swift-recon-cron" Nov 23 15:05:28 crc kubenswrapper[5050]: E1123 15:05:28.185044 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c21366ac-a7da-4cac-8332-753659210595" containerName="ovsdb-server" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.185056 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="c21366ac-a7da-4cac-8332-753659210595" containerName="ovsdb-server" Nov 23 15:05:28 crc kubenswrapper[5050]: E1123 15:05:28.185070 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f92353db-5352-4216-ad2d-50242e58dfb7" containerName="rabbitmq" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.185082 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="f92353db-5352-4216-ad2d-50242e58dfb7" containerName="rabbitmq" Nov 23 15:05:28 crc kubenswrapper[5050]: E1123 15:05:28.185104 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="042c506a-b65f-4cbd-9ca7-3df0ec55ffa0" containerName="barbican-worker" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.185116 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="042c506a-b65f-4cbd-9ca7-3df0ec55ffa0" containerName="barbican-worker" Nov 23 15:05:28 crc kubenswrapper[5050]: E1123 15:05:28.185129 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c1187c9d-5557-496e-be1d-8df301d6daa6" containerName="proxy-server" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.185140 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="c1187c9d-5557-496e-be1d-8df301d6daa6" containerName="proxy-server" Nov 23 15:05:28 crc kubenswrapper[5050]: E1123 15:05:28.185160 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="85ca597a-bf71-43bb-b57c-f840b37f196f" containerName="galera" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.185172 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="85ca597a-bf71-43bb-b57c-f840b37f196f" containerName="galera" Nov 23 15:05:28 crc kubenswrapper[5050]: E1123 15:05:28.185193 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="528697e8-1ddc-4ab0-aa0e-008631905a4c" containerName="sg-core" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.185204 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="528697e8-1ddc-4ab0-aa0e-008631905a4c" 
containerName="sg-core" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.185556 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="add2a2e2-5553-4e43-8ddd-b8719949d531" containerName="placement-api" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.185588 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="9d53c4b5-d0a1-4191-9239-8c7b6806f860" containerName="barbican-keystone-listener" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.185612 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="4d3c8093-cbaa-4414-9f1d-c7ae7a2b9322" containerName="glance-httpd" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.185630 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="4947b94e-805e-457e-bb17-f7faea3b5fca" containerName="container-replicator" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.185645 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="4947b94e-805e-457e-bb17-f7faea3b5fca" containerName="object-replicator" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.185654 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="add2a2e2-5553-4e43-8ddd-b8719949d531" containerName="placement-log" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.185667 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="4b570632-90a0-4fcd-a067-9913b51ad711" containerName="mariadb-account-delete" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.185679 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="ba1bb9f6-5ca2-4986-9473-62d50d9bebf0" containerName="cinder-api" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.185692 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="75e360ca-c84a-4806-a86a-86924a639cfc" containerName="nova-cell1-conductor-conductor" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.185708 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="6c5e9b39-91fa-4f4e-9d95-0599bc22472d" containerName="glance-httpd" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.185728 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="27e26dea-41e9-4d40-9adc-f902e5f4791f" containerName="openstack-network-exporter" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.185739 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="042c506a-b65f-4cbd-9ca7-3df0ec55ffa0" containerName="barbican-worker" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.185755 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="0a256986-024f-45e6-9b86-b5a724ab5f6e" containerName="galera" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.185767 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="8e597d4a-3ff4-4b9e-ab6a-c0364f6afb43" containerName="nova-metadata-metadata" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.185782 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="4947b94e-805e-457e-bb17-f7faea3b5fca" containerName="swift-recon-cron" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.185800 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="9d53c4b5-d0a1-4191-9239-8c7b6806f860" containerName="barbican-keystone-listener-log" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.185819 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="c1187c9d-5557-496e-be1d-8df301d6daa6" containerName="proxy-httpd" Nov 23 15:05:28 crc 
kubenswrapper[5050]: I1123 15:05:28.185830 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="528697e8-1ddc-4ab0-aa0e-008631905a4c" containerName="proxy-httpd" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.185846 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="e78eb875-17e5-431f-82b5-14a1190488b0" containerName="mariadb-account-delete" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.185859 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="1b3a1560-8762-47b9-8d1c-fe94eb46bec2" containerName="neutron-api" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.185869 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="4947b94e-805e-457e-bb17-f7faea3b5fca" containerName="container-server" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.185881 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="4947b94e-805e-457e-bb17-f7faea3b5fca" containerName="object-expirer" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.185894 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="716a14cd-88b9-4e18-a781-6dbfaad7634c" containerName="ovsdbserver-nb" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.185905 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="4947b94e-805e-457e-bb17-f7faea3b5fca" containerName="object-auditor" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.185922 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="4947b94e-805e-457e-bb17-f7faea3b5fca" containerName="rsync" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.185938 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="4d1fe4ad-9245-4af5-b378-c908ce72f08c" containerName="nova-cell0-conductor-conductor" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.185948 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="716a14cd-88b9-4e18-a781-6dbfaad7634c" containerName="openstack-network-exporter" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.185961 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="5eff539e-c641-4873-aeae-450aaf0b4ac8" containerName="rabbitmq" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.185974 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="d3d96734-832d-489e-9fed-a4eb705f41d7" containerName="nova-cell1-novncproxy-novncproxy" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.185991 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="528697e8-1ddc-4ab0-aa0e-008631905a4c" containerName="ceilometer-central-agent" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.186005 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="f92353db-5352-4216-ad2d-50242e58dfb7" containerName="rabbitmq" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.186018 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="4947b94e-805e-457e-bb17-f7faea3b5fca" containerName="container-auditor" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.186032 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="c1187c9d-5557-496e-be1d-8df301d6daa6" containerName="proxy-server" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.186051 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="ba4a4a57-47b0-423d-8bb1-76953fb3a37b" containerName="cinder-scheduler" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.186066 5050 memory_manager.go:354] 
"RemoveStaleState removing state" podUID="4d3c8093-cbaa-4414-9f1d-c7ae7a2b9322" containerName="glance-log" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.186077 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="d1ce97bd-1ff7-400e-a741-7e757fbd7e17" containerName="memcached" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.186095 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="1bda1a38-71dd-4de4-8e16-b5159a08fdfa" containerName="mariadb-account-delete" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.186109 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="3d6e4b67-5d33-49cd-8a2a-69c3fc2c9d89" containerName="mariadb-account-delete" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.186124 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="528697e8-1ddc-4ab0-aa0e-008631905a4c" containerName="ceilometer-notification-agent" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.186139 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="528697e8-1ddc-4ab0-aa0e-008631905a4c" containerName="sg-core" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.186153 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="4a7ed50c-3990-423d-9fd6-1165da59c5c9" containerName="openstack-network-exporter" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.186173 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="6c5e9b39-91fa-4f4e-9d95-0599bc22472d" containerName="glance-log" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.186186 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="f85034c8-6b79-42e1-849d-646a2ead1a93" containerName="barbican-api-log" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.186198 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="ba4a4a57-47b0-423d-8bb1-76953fb3a37b" containerName="probe" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.186211 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="ba1bb9f6-5ca2-4986-9473-62d50d9bebf0" containerName="cinder-api-log" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.186224 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="6c3ae457-67ab-4c0b-a17e-a8264215793b" containerName="nova-api-log" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.186236 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="8e597d4a-3ff4-4b9e-ab6a-c0364f6afb43" containerName="nova-metadata-log" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.186254 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="042c506a-b65f-4cbd-9ca7-3df0ec55ffa0" containerName="barbican-worker-log" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.186270 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="3d3a2397-697f-4d1b-a5ed-0aaa1b202a12" containerName="mariadb-account-delete" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.186282 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="1b3a1560-8762-47b9-8d1c-fe94eb46bec2" containerName="neutron-httpd" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.186295 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="85ca597a-bf71-43bb-b57c-f840b37f196f" containerName="galera" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.186313 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="9ea4c6e2-e26b-454f-a6e1-b8fd7df8a931" 
containerName="mariadb-account-delete" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.186333 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="4947b94e-805e-457e-bb17-f7faea3b5fca" containerName="object-server" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.186349 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="4947b94e-805e-457e-bb17-f7faea3b5fca" containerName="account-auditor" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.186367 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="e7a4774e-f304-40ca-8595-bbe9c381f466" containerName="dnsmasq-dns" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.186383 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="33af3c2e-dea7-4448-8b28-b579d77490b9" containerName="ovn-controller" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.186399 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="f81d4e06-4245-4355-89d6-ddc438c06f29" containerName="openstack-network-exporter" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.186416 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="4947b94e-805e-457e-bb17-f7faea3b5fca" containerName="account-replicator" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.186432 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="4947b94e-805e-457e-bb17-f7faea3b5fca" containerName="container-updater" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.186468 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="4947b94e-805e-457e-bb17-f7faea3b5fca" containerName="object-updater" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.186481 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="e3fbc320-b4b0-47f2-9fc6-a6efc63a3ee9" containerName="keystone-api" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.186508 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="6c3ae457-67ab-4c0b-a17e-a8264215793b" containerName="nova-api-api" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.186526 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="13ecb6d2-cd50-4239-8939-6465176afd8e" containerName="mariadb-account-delete" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.186543 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="c21366ac-a7da-4cac-8332-753659210595" containerName="ovsdb-server" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.186554 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="c21366ac-a7da-4cac-8332-753659210595" containerName="ovs-vswitchd" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.186570 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="4947b94e-805e-457e-bb17-f7faea3b5fca" containerName="account-server" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.186585 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="94bbca67-2998-4727-bbe1-95d54277f4aa" containerName="kube-state-metrics" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.186599 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="f85034c8-6b79-42e1-849d-646a2ead1a93" containerName="barbican-api" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.186937 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="27e26dea-41e9-4d40-9adc-f902e5f4791f" containerName="ovn-northd" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.186952 5050 
memory_manager.go:354] "RemoveStaleState removing state" podUID="4947b94e-805e-457e-bb17-f7faea3b5fca" containerName="account-reaper" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.186972 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="f81d4e06-4245-4355-89d6-ddc438c06f29" containerName="ovsdbserver-sb" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.186983 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="634f01f5-daae-4e5d-811f-5d406bfee9c1" containerName="nova-scheduler-scheduler" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.188864 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-wknvj" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.203137 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-wknvj"] Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.304623 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wjkjb\" (UniqueName: \"kubernetes.io/projected/91c3902b-76d1-4a73-b2f7-3668800cc4c9-kube-api-access-wjkjb\") pod \"redhat-marketplace-wknvj\" (UID: \"91c3902b-76d1-4a73-b2f7-3668800cc4c9\") " pod="openshift-marketplace/redhat-marketplace-wknvj" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.304725 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/91c3902b-76d1-4a73-b2f7-3668800cc4c9-catalog-content\") pod \"redhat-marketplace-wknvj\" (UID: \"91c3902b-76d1-4a73-b2f7-3668800cc4c9\") " pod="openshift-marketplace/redhat-marketplace-wknvj" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.304768 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/91c3902b-76d1-4a73-b2f7-3668800cc4c9-utilities\") pod \"redhat-marketplace-wknvj\" (UID: \"91c3902b-76d1-4a73-b2f7-3668800cc4c9\") " pod="openshift-marketplace/redhat-marketplace-wknvj" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.406086 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wjkjb\" (UniqueName: \"kubernetes.io/projected/91c3902b-76d1-4a73-b2f7-3668800cc4c9-kube-api-access-wjkjb\") pod \"redhat-marketplace-wknvj\" (UID: \"91c3902b-76d1-4a73-b2f7-3668800cc4c9\") " pod="openshift-marketplace/redhat-marketplace-wknvj" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.406163 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/91c3902b-76d1-4a73-b2f7-3668800cc4c9-catalog-content\") pod \"redhat-marketplace-wknvj\" (UID: \"91c3902b-76d1-4a73-b2f7-3668800cc4c9\") " pod="openshift-marketplace/redhat-marketplace-wknvj" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.406195 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/91c3902b-76d1-4a73-b2f7-3668800cc4c9-utilities\") pod \"redhat-marketplace-wknvj\" (UID: \"91c3902b-76d1-4a73-b2f7-3668800cc4c9\") " pod="openshift-marketplace/redhat-marketplace-wknvj" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.406848 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/91c3902b-76d1-4a73-b2f7-3668800cc4c9-utilities\") pod \"redhat-marketplace-wknvj\" (UID: \"91c3902b-76d1-4a73-b2f7-3668800cc4c9\") " pod="openshift-marketplace/redhat-marketplace-wknvj" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.406993 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/91c3902b-76d1-4a73-b2f7-3668800cc4c9-catalog-content\") pod \"redhat-marketplace-wknvj\" (UID: \"91c3902b-76d1-4a73-b2f7-3668800cc4c9\") " pod="openshift-marketplace/redhat-marketplace-wknvj" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.433033 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wjkjb\" (UniqueName: \"kubernetes.io/projected/91c3902b-76d1-4a73-b2f7-3668800cc4c9-kube-api-access-wjkjb\") pod \"redhat-marketplace-wknvj\" (UID: \"91c3902b-76d1-4a73-b2f7-3668800cc4c9\") " pod="openshift-marketplace/redhat-marketplace-wknvj" Nov 23 15:05:28 crc kubenswrapper[5050]: I1123 15:05:28.520098 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-wknvj" Nov 23 15:05:29 crc kubenswrapper[5050]: I1123 15:05:29.021275 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-wknvj"] Nov 23 15:05:29 crc kubenswrapper[5050]: I1123 15:05:29.179055 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wknvj" event={"ID":"91c3902b-76d1-4a73-b2f7-3668800cc4c9","Type":"ContainerStarted","Data":"a1916e73469a59045133d712d2e7d35d160e41418320c02a66644285ac5a66b8"} Nov 23 15:05:29 crc kubenswrapper[5050]: I1123 15:05:29.225096 5050 patch_prober.go:28] interesting pod/machine-config-daemon-hlrlq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 23 15:05:29 crc kubenswrapper[5050]: I1123 15:05:29.225227 5050 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 23 15:05:30 crc kubenswrapper[5050]: I1123 15:05:30.196476 5050 generic.go:334] "Generic (PLEG): container finished" podID="91c3902b-76d1-4a73-b2f7-3668800cc4c9" containerID="508685029b476eadd6038b041d696ec9fa2a28a2e924eb95d265c2adef18f0f8" exitCode=0 Nov 23 15:05:30 crc kubenswrapper[5050]: I1123 15:05:30.196563 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wknvj" event={"ID":"91c3902b-76d1-4a73-b2f7-3668800cc4c9","Type":"ContainerDied","Data":"508685029b476eadd6038b041d696ec9fa2a28a2e924eb95d265c2adef18f0f8"} Nov 23 15:05:32 crc kubenswrapper[5050]: I1123 15:05:32.226093 5050 generic.go:334] "Generic (PLEG): container finished" podID="91c3902b-76d1-4a73-b2f7-3668800cc4c9" containerID="7aed932a50064e3cb8cfa3a6ea310c583d0fa120d3053477e6e5bdafb955f86b" exitCode=0 Nov 23 15:05:32 crc kubenswrapper[5050]: I1123 15:05:32.226529 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wknvj" 
event={"ID":"91c3902b-76d1-4a73-b2f7-3668800cc4c9","Type":"ContainerDied","Data":"7aed932a50064e3cb8cfa3a6ea310c583d0fa120d3053477e6e5bdafb955f86b"} Nov 23 15:05:32 crc kubenswrapper[5050]: I1123 15:05:32.751507 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-lw9jb"] Nov 23 15:05:32 crc kubenswrapper[5050]: I1123 15:05:32.753351 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-lw9jb" Nov 23 15:05:32 crc kubenswrapper[5050]: I1123 15:05:32.759536 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-lw9jb"] Nov 23 15:05:32 crc kubenswrapper[5050]: I1123 15:05:32.882786 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x4snx\" (UniqueName: \"kubernetes.io/projected/9e2ca38b-0c15-4ea5-9b78-89b1c2a0382a-kube-api-access-x4snx\") pod \"redhat-operators-lw9jb\" (UID: \"9e2ca38b-0c15-4ea5-9b78-89b1c2a0382a\") " pod="openshift-marketplace/redhat-operators-lw9jb" Nov 23 15:05:32 crc kubenswrapper[5050]: I1123 15:05:32.882965 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9e2ca38b-0c15-4ea5-9b78-89b1c2a0382a-utilities\") pod \"redhat-operators-lw9jb\" (UID: \"9e2ca38b-0c15-4ea5-9b78-89b1c2a0382a\") " pod="openshift-marketplace/redhat-operators-lw9jb" Nov 23 15:05:32 crc kubenswrapper[5050]: I1123 15:05:32.883003 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9e2ca38b-0c15-4ea5-9b78-89b1c2a0382a-catalog-content\") pod \"redhat-operators-lw9jb\" (UID: \"9e2ca38b-0c15-4ea5-9b78-89b1c2a0382a\") " pod="openshift-marketplace/redhat-operators-lw9jb" Nov 23 15:05:32 crc kubenswrapper[5050]: I1123 15:05:32.984317 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9e2ca38b-0c15-4ea5-9b78-89b1c2a0382a-utilities\") pod \"redhat-operators-lw9jb\" (UID: \"9e2ca38b-0c15-4ea5-9b78-89b1c2a0382a\") " pod="openshift-marketplace/redhat-operators-lw9jb" Nov 23 15:05:32 crc kubenswrapper[5050]: I1123 15:05:32.984385 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9e2ca38b-0c15-4ea5-9b78-89b1c2a0382a-catalog-content\") pod \"redhat-operators-lw9jb\" (UID: \"9e2ca38b-0c15-4ea5-9b78-89b1c2a0382a\") " pod="openshift-marketplace/redhat-operators-lw9jb" Nov 23 15:05:32 crc kubenswrapper[5050]: I1123 15:05:32.984415 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x4snx\" (UniqueName: \"kubernetes.io/projected/9e2ca38b-0c15-4ea5-9b78-89b1c2a0382a-kube-api-access-x4snx\") pod \"redhat-operators-lw9jb\" (UID: \"9e2ca38b-0c15-4ea5-9b78-89b1c2a0382a\") " pod="openshift-marketplace/redhat-operators-lw9jb" Nov 23 15:05:32 crc kubenswrapper[5050]: I1123 15:05:32.985077 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9e2ca38b-0c15-4ea5-9b78-89b1c2a0382a-utilities\") pod \"redhat-operators-lw9jb\" (UID: \"9e2ca38b-0c15-4ea5-9b78-89b1c2a0382a\") " pod="openshift-marketplace/redhat-operators-lw9jb" Nov 23 15:05:32 crc kubenswrapper[5050]: I1123 15:05:32.985153 5050 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9e2ca38b-0c15-4ea5-9b78-89b1c2a0382a-catalog-content\") pod \"redhat-operators-lw9jb\" (UID: \"9e2ca38b-0c15-4ea5-9b78-89b1c2a0382a\") " pod="openshift-marketplace/redhat-operators-lw9jb" Nov 23 15:05:33 crc kubenswrapper[5050]: I1123 15:05:33.006916 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x4snx\" (UniqueName: \"kubernetes.io/projected/9e2ca38b-0c15-4ea5-9b78-89b1c2a0382a-kube-api-access-x4snx\") pod \"redhat-operators-lw9jb\" (UID: \"9e2ca38b-0c15-4ea5-9b78-89b1c2a0382a\") " pod="openshift-marketplace/redhat-operators-lw9jb" Nov 23 15:05:33 crc kubenswrapper[5050]: I1123 15:05:33.108953 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-lw9jb" Nov 23 15:05:33 crc kubenswrapper[5050]: I1123 15:05:33.246874 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wknvj" event={"ID":"91c3902b-76d1-4a73-b2f7-3668800cc4c9","Type":"ContainerStarted","Data":"e0f993d4c1300a330980e23f94cce0a83a466bb1fa6185011680613374ef3b35"} Nov 23 15:05:33 crc kubenswrapper[5050]: I1123 15:05:33.278901 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-wknvj" podStartSLOduration=2.810866001 podStartE2EDuration="5.278881263s" podCreationTimestamp="2025-11-23 15:05:28 +0000 UTC" firstStartedPulling="2025-11-23 15:05:30.199122471 +0000 UTC m=+1425.366118966" lastFinishedPulling="2025-11-23 15:05:32.667137743 +0000 UTC m=+1427.834134228" observedRunningTime="2025-11-23 15:05:33.277935137 +0000 UTC m=+1428.444931642" watchObservedRunningTime="2025-11-23 15:05:33.278881263 +0000 UTC m=+1428.445877748" Nov 23 15:05:33 crc kubenswrapper[5050]: W1123 15:05:33.657689 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9e2ca38b_0c15_4ea5_9b78_89b1c2a0382a.slice/crio-c343289209ef7146d7dcef8e5a6a42560131abaa140c5da39bbea3a219e6562c WatchSource:0}: Error finding container c343289209ef7146d7dcef8e5a6a42560131abaa140c5da39bbea3a219e6562c: Status 404 returned error can't find the container with id c343289209ef7146d7dcef8e5a6a42560131abaa140c5da39bbea3a219e6562c Nov 23 15:05:33 crc kubenswrapper[5050]: I1123 15:05:33.670988 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-lw9jb"] Nov 23 15:05:34 crc kubenswrapper[5050]: I1123 15:05:34.257748 5050 generic.go:334] "Generic (PLEG): container finished" podID="9e2ca38b-0c15-4ea5-9b78-89b1c2a0382a" containerID="7050e134f92ed422b0be0d856bbf4ea10a14f87a26e8fc6322e8e21624065739" exitCode=0 Nov 23 15:05:34 crc kubenswrapper[5050]: I1123 15:05:34.257876 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lw9jb" event={"ID":"9e2ca38b-0c15-4ea5-9b78-89b1c2a0382a","Type":"ContainerDied","Data":"7050e134f92ed422b0be0d856bbf4ea10a14f87a26e8fc6322e8e21624065739"} Nov 23 15:05:34 crc kubenswrapper[5050]: I1123 15:05:34.258404 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lw9jb" event={"ID":"9e2ca38b-0c15-4ea5-9b78-89b1c2a0382a","Type":"ContainerStarted","Data":"c343289209ef7146d7dcef8e5a6a42560131abaa140c5da39bbea3a219e6562c"} Nov 23 15:05:35 crc kubenswrapper[5050]: I1123 15:05:35.274113 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/redhat-operators-lw9jb" event={"ID":"9e2ca38b-0c15-4ea5-9b78-89b1c2a0382a","Type":"ContainerStarted","Data":"6ee667e1574b0ab7b0777bcb4cd3520bc2c61478cb24350a52e726b2e3779206"} Nov 23 15:05:36 crc kubenswrapper[5050]: I1123 15:05:36.290136 5050 generic.go:334] "Generic (PLEG): container finished" podID="9e2ca38b-0c15-4ea5-9b78-89b1c2a0382a" containerID="6ee667e1574b0ab7b0777bcb4cd3520bc2c61478cb24350a52e726b2e3779206" exitCode=0 Nov 23 15:05:36 crc kubenswrapper[5050]: I1123 15:05:36.290201 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lw9jb" event={"ID":"9e2ca38b-0c15-4ea5-9b78-89b1c2a0382a","Type":"ContainerDied","Data":"6ee667e1574b0ab7b0777bcb4cd3520bc2c61478cb24350a52e726b2e3779206"} Nov 23 15:05:37 crc kubenswrapper[5050]: I1123 15:05:37.311179 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lw9jb" event={"ID":"9e2ca38b-0c15-4ea5-9b78-89b1c2a0382a","Type":"ContainerStarted","Data":"2e66206e04ca18b5347485f9817b342d52dd17d7ae2388f1f7d5f3815d40fc3f"} Nov 23 15:05:37 crc kubenswrapper[5050]: I1123 15:05:37.337341 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-lw9jb" podStartSLOduration=2.842286305 podStartE2EDuration="5.337321695s" podCreationTimestamp="2025-11-23 15:05:32 +0000 UTC" firstStartedPulling="2025-11-23 15:05:34.25957961 +0000 UTC m=+1429.426576085" lastFinishedPulling="2025-11-23 15:05:36.75461495 +0000 UTC m=+1431.921611475" observedRunningTime="2025-11-23 15:05:37.333678613 +0000 UTC m=+1432.500675108" watchObservedRunningTime="2025-11-23 15:05:37.337321695 +0000 UTC m=+1432.504318190" Nov 23 15:05:38 crc kubenswrapper[5050]: I1123 15:05:38.520521 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-wknvj" Nov 23 15:05:38 crc kubenswrapper[5050]: I1123 15:05:38.520615 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-wknvj" Nov 23 15:05:38 crc kubenswrapper[5050]: I1123 15:05:38.587282 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-wknvj" Nov 23 15:05:39 crc kubenswrapper[5050]: I1123 15:05:39.411529 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-wknvj" Nov 23 15:05:39 crc kubenswrapper[5050]: I1123 15:05:39.725993 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-wknvj"] Nov 23 15:05:41 crc kubenswrapper[5050]: I1123 15:05:41.355470 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-wknvj" podUID="91c3902b-76d1-4a73-b2f7-3668800cc4c9" containerName="registry-server" containerID="cri-o://e0f993d4c1300a330980e23f94cce0a83a466bb1fa6185011680613374ef3b35" gracePeriod=2 Nov 23 15:05:42 crc kubenswrapper[5050]: I1123 15:05:42.332286 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-wknvj" Nov 23 15:05:42 crc kubenswrapper[5050]: I1123 15:05:42.381018 5050 generic.go:334] "Generic (PLEG): container finished" podID="91c3902b-76d1-4a73-b2f7-3668800cc4c9" containerID="e0f993d4c1300a330980e23f94cce0a83a466bb1fa6185011680613374ef3b35" exitCode=0 Nov 23 15:05:42 crc kubenswrapper[5050]: I1123 15:05:42.381156 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-wknvj" Nov 23 15:05:42 crc kubenswrapper[5050]: I1123 15:05:42.381094 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wknvj" event={"ID":"91c3902b-76d1-4a73-b2f7-3668800cc4c9","Type":"ContainerDied","Data":"e0f993d4c1300a330980e23f94cce0a83a466bb1fa6185011680613374ef3b35"} Nov 23 15:05:42 crc kubenswrapper[5050]: I1123 15:05:42.381332 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wknvj" event={"ID":"91c3902b-76d1-4a73-b2f7-3668800cc4c9","Type":"ContainerDied","Data":"a1916e73469a59045133d712d2e7d35d160e41418320c02a66644285ac5a66b8"} Nov 23 15:05:42 crc kubenswrapper[5050]: I1123 15:05:42.381368 5050 scope.go:117] "RemoveContainer" containerID="e0f993d4c1300a330980e23f94cce0a83a466bb1fa6185011680613374ef3b35" Nov 23 15:05:42 crc kubenswrapper[5050]: I1123 15:05:42.413155 5050 scope.go:117] "RemoveContainer" containerID="7aed932a50064e3cb8cfa3a6ea310c583d0fa120d3053477e6e5bdafb955f86b" Nov 23 15:05:42 crc kubenswrapper[5050]: I1123 15:05:42.440175 5050 scope.go:117] "RemoveContainer" containerID="508685029b476eadd6038b041d696ec9fa2a28a2e924eb95d265c2adef18f0f8" Nov 23 15:05:42 crc kubenswrapper[5050]: I1123 15:05:42.465032 5050 scope.go:117] "RemoveContainer" containerID="e0f993d4c1300a330980e23f94cce0a83a466bb1fa6185011680613374ef3b35" Nov 23 15:05:42 crc kubenswrapper[5050]: E1123 15:05:42.465959 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e0f993d4c1300a330980e23f94cce0a83a466bb1fa6185011680613374ef3b35\": container with ID starting with e0f993d4c1300a330980e23f94cce0a83a466bb1fa6185011680613374ef3b35 not found: ID does not exist" containerID="e0f993d4c1300a330980e23f94cce0a83a466bb1fa6185011680613374ef3b35" Nov 23 15:05:42 crc kubenswrapper[5050]: I1123 15:05:42.466052 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e0f993d4c1300a330980e23f94cce0a83a466bb1fa6185011680613374ef3b35"} err="failed to get container status \"e0f993d4c1300a330980e23f94cce0a83a466bb1fa6185011680613374ef3b35\": rpc error: code = NotFound desc = could not find container \"e0f993d4c1300a330980e23f94cce0a83a466bb1fa6185011680613374ef3b35\": container with ID starting with e0f993d4c1300a330980e23f94cce0a83a466bb1fa6185011680613374ef3b35 not found: ID does not exist" Nov 23 15:05:42 crc kubenswrapper[5050]: I1123 15:05:42.466121 5050 scope.go:117] "RemoveContainer" containerID="7aed932a50064e3cb8cfa3a6ea310c583d0fa120d3053477e6e5bdafb955f86b" Nov 23 15:05:42 crc kubenswrapper[5050]: E1123 15:05:42.466540 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7aed932a50064e3cb8cfa3a6ea310c583d0fa120d3053477e6e5bdafb955f86b\": container with ID starting with 7aed932a50064e3cb8cfa3a6ea310c583d0fa120d3053477e6e5bdafb955f86b not found: ID does not exist" 
containerID="7aed932a50064e3cb8cfa3a6ea310c583d0fa120d3053477e6e5bdafb955f86b" Nov 23 15:05:42 crc kubenswrapper[5050]: I1123 15:05:42.466597 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7aed932a50064e3cb8cfa3a6ea310c583d0fa120d3053477e6e5bdafb955f86b"} err="failed to get container status \"7aed932a50064e3cb8cfa3a6ea310c583d0fa120d3053477e6e5bdafb955f86b\": rpc error: code = NotFound desc = could not find container \"7aed932a50064e3cb8cfa3a6ea310c583d0fa120d3053477e6e5bdafb955f86b\": container with ID starting with 7aed932a50064e3cb8cfa3a6ea310c583d0fa120d3053477e6e5bdafb955f86b not found: ID does not exist" Nov 23 15:05:42 crc kubenswrapper[5050]: I1123 15:05:42.466637 5050 scope.go:117] "RemoveContainer" containerID="508685029b476eadd6038b041d696ec9fa2a28a2e924eb95d265c2adef18f0f8" Nov 23 15:05:42 crc kubenswrapper[5050]: E1123 15:05:42.466991 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"508685029b476eadd6038b041d696ec9fa2a28a2e924eb95d265c2adef18f0f8\": container with ID starting with 508685029b476eadd6038b041d696ec9fa2a28a2e924eb95d265c2adef18f0f8 not found: ID does not exist" containerID="508685029b476eadd6038b041d696ec9fa2a28a2e924eb95d265c2adef18f0f8" Nov 23 15:05:42 crc kubenswrapper[5050]: I1123 15:05:42.467059 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"508685029b476eadd6038b041d696ec9fa2a28a2e924eb95d265c2adef18f0f8"} err="failed to get container status \"508685029b476eadd6038b041d696ec9fa2a28a2e924eb95d265c2adef18f0f8\": rpc error: code = NotFound desc = could not find container \"508685029b476eadd6038b041d696ec9fa2a28a2e924eb95d265c2adef18f0f8\": container with ID starting with 508685029b476eadd6038b041d696ec9fa2a28a2e924eb95d265c2adef18f0f8 not found: ID does not exist" Nov 23 15:05:42 crc kubenswrapper[5050]: I1123 15:05:42.479237 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/91c3902b-76d1-4a73-b2f7-3668800cc4c9-catalog-content\") pod \"91c3902b-76d1-4a73-b2f7-3668800cc4c9\" (UID: \"91c3902b-76d1-4a73-b2f7-3668800cc4c9\") " Nov 23 15:05:42 crc kubenswrapper[5050]: I1123 15:05:42.479496 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wjkjb\" (UniqueName: \"kubernetes.io/projected/91c3902b-76d1-4a73-b2f7-3668800cc4c9-kube-api-access-wjkjb\") pod \"91c3902b-76d1-4a73-b2f7-3668800cc4c9\" (UID: \"91c3902b-76d1-4a73-b2f7-3668800cc4c9\") " Nov 23 15:05:42 crc kubenswrapper[5050]: I1123 15:05:42.479539 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/91c3902b-76d1-4a73-b2f7-3668800cc4c9-utilities\") pod \"91c3902b-76d1-4a73-b2f7-3668800cc4c9\" (UID: \"91c3902b-76d1-4a73-b2f7-3668800cc4c9\") " Nov 23 15:05:42 crc kubenswrapper[5050]: I1123 15:05:42.481467 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/91c3902b-76d1-4a73-b2f7-3668800cc4c9-utilities" (OuterVolumeSpecName: "utilities") pod "91c3902b-76d1-4a73-b2f7-3668800cc4c9" (UID: "91c3902b-76d1-4a73-b2f7-3668800cc4c9"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 15:05:42 crc kubenswrapper[5050]: I1123 15:05:42.489401 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/91c3902b-76d1-4a73-b2f7-3668800cc4c9-kube-api-access-wjkjb" (OuterVolumeSpecName: "kube-api-access-wjkjb") pod "91c3902b-76d1-4a73-b2f7-3668800cc4c9" (UID: "91c3902b-76d1-4a73-b2f7-3668800cc4c9"). InnerVolumeSpecName "kube-api-access-wjkjb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:05:42 crc kubenswrapper[5050]: I1123 15:05:42.500601 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/91c3902b-76d1-4a73-b2f7-3668800cc4c9-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "91c3902b-76d1-4a73-b2f7-3668800cc4c9" (UID: "91c3902b-76d1-4a73-b2f7-3668800cc4c9"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 15:05:42 crc kubenswrapper[5050]: I1123 15:05:42.580972 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wjkjb\" (UniqueName: \"kubernetes.io/projected/91c3902b-76d1-4a73-b2f7-3668800cc4c9-kube-api-access-wjkjb\") on node \"crc\" DevicePath \"\"" Nov 23 15:05:42 crc kubenswrapper[5050]: I1123 15:05:42.581017 5050 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/91c3902b-76d1-4a73-b2f7-3668800cc4c9-utilities\") on node \"crc\" DevicePath \"\"" Nov 23 15:05:42 crc kubenswrapper[5050]: I1123 15:05:42.581027 5050 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/91c3902b-76d1-4a73-b2f7-3668800cc4c9-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 23 15:05:42 crc kubenswrapper[5050]: I1123 15:05:42.737385 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-wknvj"] Nov 23 15:05:42 crc kubenswrapper[5050]: I1123 15:05:42.744719 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-wknvj"] Nov 23 15:05:43 crc kubenswrapper[5050]: I1123 15:05:43.109129 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-lw9jb" Nov 23 15:05:43 crc kubenswrapper[5050]: I1123 15:05:43.109202 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-lw9jb" Nov 23 15:05:43 crc kubenswrapper[5050]: I1123 15:05:43.567419 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="91c3902b-76d1-4a73-b2f7-3668800cc4c9" path="/var/lib/kubelet/pods/91c3902b-76d1-4a73-b2f7-3668800cc4c9/volumes" Nov 23 15:05:44 crc kubenswrapper[5050]: I1123 15:05:44.158917 5050 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-lw9jb" podUID="9e2ca38b-0c15-4ea5-9b78-89b1c2a0382a" containerName="registry-server" probeResult="failure" output=< Nov 23 15:05:44 crc kubenswrapper[5050]: timeout: failed to connect service ":50051" within 1s Nov 23 15:05:44 crc kubenswrapper[5050]: > Nov 23 15:05:45 crc kubenswrapper[5050]: I1123 15:05:45.340661 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-c62vp"] Nov 23 15:05:45 crc kubenswrapper[5050]: E1123 15:05:45.341213 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="91c3902b-76d1-4a73-b2f7-3668800cc4c9" containerName="extract-utilities" Nov 23 15:05:45 crc 
kubenswrapper[5050]: I1123 15:05:45.341238 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="91c3902b-76d1-4a73-b2f7-3668800cc4c9" containerName="extract-utilities" Nov 23 15:05:45 crc kubenswrapper[5050]: E1123 15:05:45.341294 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="91c3902b-76d1-4a73-b2f7-3668800cc4c9" containerName="registry-server" Nov 23 15:05:45 crc kubenswrapper[5050]: I1123 15:05:45.341307 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="91c3902b-76d1-4a73-b2f7-3668800cc4c9" containerName="registry-server" Nov 23 15:05:45 crc kubenswrapper[5050]: E1123 15:05:45.341337 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="91c3902b-76d1-4a73-b2f7-3668800cc4c9" containerName="extract-content" Nov 23 15:05:45 crc kubenswrapper[5050]: I1123 15:05:45.341349 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="91c3902b-76d1-4a73-b2f7-3668800cc4c9" containerName="extract-content" Nov 23 15:05:45 crc kubenswrapper[5050]: I1123 15:05:45.341654 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="91c3902b-76d1-4a73-b2f7-3668800cc4c9" containerName="registry-server" Nov 23 15:05:45 crc kubenswrapper[5050]: I1123 15:05:45.343682 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-c62vp" Nov 23 15:05:45 crc kubenswrapper[5050]: I1123 15:05:45.361975 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-c62vp"] Nov 23 15:05:45 crc kubenswrapper[5050]: I1123 15:05:45.531788 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nvrc2\" (UniqueName: \"kubernetes.io/projected/f9d66f78-2d89-4f96-a7be-dbbfe7181da3-kube-api-access-nvrc2\") pod \"certified-operators-c62vp\" (UID: \"f9d66f78-2d89-4f96-a7be-dbbfe7181da3\") " pod="openshift-marketplace/certified-operators-c62vp" Nov 23 15:05:45 crc kubenswrapper[5050]: I1123 15:05:45.532300 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f9d66f78-2d89-4f96-a7be-dbbfe7181da3-utilities\") pod \"certified-operators-c62vp\" (UID: \"f9d66f78-2d89-4f96-a7be-dbbfe7181da3\") " pod="openshift-marketplace/certified-operators-c62vp" Nov 23 15:05:45 crc kubenswrapper[5050]: I1123 15:05:45.532340 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f9d66f78-2d89-4f96-a7be-dbbfe7181da3-catalog-content\") pod \"certified-operators-c62vp\" (UID: \"f9d66f78-2d89-4f96-a7be-dbbfe7181da3\") " pod="openshift-marketplace/certified-operators-c62vp" Nov 23 15:05:45 crc kubenswrapper[5050]: I1123 15:05:45.633879 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nvrc2\" (UniqueName: \"kubernetes.io/projected/f9d66f78-2d89-4f96-a7be-dbbfe7181da3-kube-api-access-nvrc2\") pod \"certified-operators-c62vp\" (UID: \"f9d66f78-2d89-4f96-a7be-dbbfe7181da3\") " pod="openshift-marketplace/certified-operators-c62vp" Nov 23 15:05:45 crc kubenswrapper[5050]: I1123 15:05:45.633983 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f9d66f78-2d89-4f96-a7be-dbbfe7181da3-utilities\") pod \"certified-operators-c62vp\" (UID: \"f9d66f78-2d89-4f96-a7be-dbbfe7181da3\") " 
pod="openshift-marketplace/certified-operators-c62vp" Nov 23 15:05:45 crc kubenswrapper[5050]: I1123 15:05:45.634013 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f9d66f78-2d89-4f96-a7be-dbbfe7181da3-catalog-content\") pod \"certified-operators-c62vp\" (UID: \"f9d66f78-2d89-4f96-a7be-dbbfe7181da3\") " pod="openshift-marketplace/certified-operators-c62vp" Nov 23 15:05:45 crc kubenswrapper[5050]: I1123 15:05:45.634874 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f9d66f78-2d89-4f96-a7be-dbbfe7181da3-catalog-content\") pod \"certified-operators-c62vp\" (UID: \"f9d66f78-2d89-4f96-a7be-dbbfe7181da3\") " pod="openshift-marketplace/certified-operators-c62vp" Nov 23 15:05:45 crc kubenswrapper[5050]: I1123 15:05:45.634897 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f9d66f78-2d89-4f96-a7be-dbbfe7181da3-utilities\") pod \"certified-operators-c62vp\" (UID: \"f9d66f78-2d89-4f96-a7be-dbbfe7181da3\") " pod="openshift-marketplace/certified-operators-c62vp" Nov 23 15:05:45 crc kubenswrapper[5050]: I1123 15:05:45.656907 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nvrc2\" (UniqueName: \"kubernetes.io/projected/f9d66f78-2d89-4f96-a7be-dbbfe7181da3-kube-api-access-nvrc2\") pod \"certified-operators-c62vp\" (UID: \"f9d66f78-2d89-4f96-a7be-dbbfe7181da3\") " pod="openshift-marketplace/certified-operators-c62vp" Nov 23 15:05:45 crc kubenswrapper[5050]: I1123 15:05:45.673540 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-c62vp" Nov 23 15:05:46 crc kubenswrapper[5050]: I1123 15:05:46.208873 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-c62vp"] Nov 23 15:05:46 crc kubenswrapper[5050]: W1123 15:05:46.215200 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf9d66f78_2d89_4f96_a7be_dbbfe7181da3.slice/crio-154b5e8f0c69a29de48b6b27b253ddb1d0e6fe90d93246f358f818824b1a4c5a WatchSource:0}: Error finding container 154b5e8f0c69a29de48b6b27b253ddb1d0e6fe90d93246f358f818824b1a4c5a: Status 404 returned error can't find the container with id 154b5e8f0c69a29de48b6b27b253ddb1d0e6fe90d93246f358f818824b1a4c5a Nov 23 15:05:46 crc kubenswrapper[5050]: I1123 15:05:46.426746 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-c62vp" event={"ID":"f9d66f78-2d89-4f96-a7be-dbbfe7181da3","Type":"ContainerStarted","Data":"154b5e8f0c69a29de48b6b27b253ddb1d0e6fe90d93246f358f818824b1a4c5a"} Nov 23 15:05:47 crc kubenswrapper[5050]: I1123 15:05:47.460926 5050 generic.go:334] "Generic (PLEG): container finished" podID="f9d66f78-2d89-4f96-a7be-dbbfe7181da3" containerID="fe8066dd53478e8222a43ef62dd0dfcb443529861b831e6c3d0fb64712679ab8" exitCode=0 Nov 23 15:05:47 crc kubenswrapper[5050]: I1123 15:05:47.461026 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-c62vp" event={"ID":"f9d66f78-2d89-4f96-a7be-dbbfe7181da3","Type":"ContainerDied","Data":"fe8066dd53478e8222a43ef62dd0dfcb443529861b831e6c3d0fb64712679ab8"} Nov 23 15:05:48 crc kubenswrapper[5050]: I1123 15:05:48.474780 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/certified-operators-c62vp" event={"ID":"f9d66f78-2d89-4f96-a7be-dbbfe7181da3","Type":"ContainerStarted","Data":"97558f3b982635b5cb150ab2bbc2144e7c70909a4d3bbf2fb3ba18dd42616f95"} Nov 23 15:05:49 crc kubenswrapper[5050]: I1123 15:05:49.493216 5050 generic.go:334] "Generic (PLEG): container finished" podID="f9d66f78-2d89-4f96-a7be-dbbfe7181da3" containerID="97558f3b982635b5cb150ab2bbc2144e7c70909a4d3bbf2fb3ba18dd42616f95" exitCode=0 Nov 23 15:05:49 crc kubenswrapper[5050]: I1123 15:05:49.493315 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-c62vp" event={"ID":"f9d66f78-2d89-4f96-a7be-dbbfe7181da3","Type":"ContainerDied","Data":"97558f3b982635b5cb150ab2bbc2144e7c70909a4d3bbf2fb3ba18dd42616f95"} Nov 23 15:05:50 crc kubenswrapper[5050]: I1123 15:05:50.526056 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-c62vp" event={"ID":"f9d66f78-2d89-4f96-a7be-dbbfe7181da3","Type":"ContainerStarted","Data":"dd8d5fb49f1d3cacc240238d2cbbeff05eee6c51b94f795d81e3ee01435e629a"} Nov 23 15:05:50 crc kubenswrapper[5050]: I1123 15:05:50.564035 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-c62vp" podStartSLOduration=3.126565096 podStartE2EDuration="5.56400796s" podCreationTimestamp="2025-11-23 15:05:45 +0000 UTC" firstStartedPulling="2025-11-23 15:05:47.463330221 +0000 UTC m=+1442.630326736" lastFinishedPulling="2025-11-23 15:05:49.900773115 +0000 UTC m=+1445.067769600" observedRunningTime="2025-11-23 15:05:50.560887003 +0000 UTC m=+1445.727883538" watchObservedRunningTime="2025-11-23 15:05:50.56400796 +0000 UTC m=+1445.731004455" Nov 23 15:05:53 crc kubenswrapper[5050]: I1123 15:05:53.203097 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-lw9jb" Nov 23 15:05:53 crc kubenswrapper[5050]: I1123 15:05:53.281209 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-lw9jb" Nov 23 15:05:53 crc kubenswrapper[5050]: I1123 15:05:53.533533 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-lw9jb"] Nov 23 15:05:54 crc kubenswrapper[5050]: I1123 15:05:54.575223 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-lw9jb" podUID="9e2ca38b-0c15-4ea5-9b78-89b1c2a0382a" containerName="registry-server" containerID="cri-o://2e66206e04ca18b5347485f9817b342d52dd17d7ae2388f1f7d5f3815d40fc3f" gracePeriod=2 Nov 23 15:05:55 crc kubenswrapper[5050]: I1123 15:05:55.118479 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-lw9jb" Nov 23 15:05:55 crc kubenswrapper[5050]: I1123 15:05:55.221365 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x4snx\" (UniqueName: \"kubernetes.io/projected/9e2ca38b-0c15-4ea5-9b78-89b1c2a0382a-kube-api-access-x4snx\") pod \"9e2ca38b-0c15-4ea5-9b78-89b1c2a0382a\" (UID: \"9e2ca38b-0c15-4ea5-9b78-89b1c2a0382a\") " Nov 23 15:05:55 crc kubenswrapper[5050]: I1123 15:05:55.221619 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9e2ca38b-0c15-4ea5-9b78-89b1c2a0382a-utilities\") pod \"9e2ca38b-0c15-4ea5-9b78-89b1c2a0382a\" (UID: \"9e2ca38b-0c15-4ea5-9b78-89b1c2a0382a\") " Nov 23 15:05:55 crc kubenswrapper[5050]: I1123 15:05:55.221689 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9e2ca38b-0c15-4ea5-9b78-89b1c2a0382a-catalog-content\") pod \"9e2ca38b-0c15-4ea5-9b78-89b1c2a0382a\" (UID: \"9e2ca38b-0c15-4ea5-9b78-89b1c2a0382a\") " Nov 23 15:05:55 crc kubenswrapper[5050]: I1123 15:05:55.222714 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9e2ca38b-0c15-4ea5-9b78-89b1c2a0382a-utilities" (OuterVolumeSpecName: "utilities") pod "9e2ca38b-0c15-4ea5-9b78-89b1c2a0382a" (UID: "9e2ca38b-0c15-4ea5-9b78-89b1c2a0382a"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 15:05:55 crc kubenswrapper[5050]: I1123 15:05:55.227799 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9e2ca38b-0c15-4ea5-9b78-89b1c2a0382a-kube-api-access-x4snx" (OuterVolumeSpecName: "kube-api-access-x4snx") pod "9e2ca38b-0c15-4ea5-9b78-89b1c2a0382a" (UID: "9e2ca38b-0c15-4ea5-9b78-89b1c2a0382a"). InnerVolumeSpecName "kube-api-access-x4snx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:05:55 crc kubenswrapper[5050]: I1123 15:05:55.323519 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x4snx\" (UniqueName: \"kubernetes.io/projected/9e2ca38b-0c15-4ea5-9b78-89b1c2a0382a-kube-api-access-x4snx\") on node \"crc\" DevicePath \"\"" Nov 23 15:05:55 crc kubenswrapper[5050]: I1123 15:05:55.323551 5050 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9e2ca38b-0c15-4ea5-9b78-89b1c2a0382a-utilities\") on node \"crc\" DevicePath \"\"" Nov 23 15:05:55 crc kubenswrapper[5050]: I1123 15:05:55.330303 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9e2ca38b-0c15-4ea5-9b78-89b1c2a0382a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9e2ca38b-0c15-4ea5-9b78-89b1c2a0382a" (UID: "9e2ca38b-0c15-4ea5-9b78-89b1c2a0382a"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 15:05:55 crc kubenswrapper[5050]: I1123 15:05:55.425971 5050 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9e2ca38b-0c15-4ea5-9b78-89b1c2a0382a-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 23 15:05:55 crc kubenswrapper[5050]: I1123 15:05:55.593921 5050 generic.go:334] "Generic (PLEG): container finished" podID="9e2ca38b-0c15-4ea5-9b78-89b1c2a0382a" containerID="2e66206e04ca18b5347485f9817b342d52dd17d7ae2388f1f7d5f3815d40fc3f" exitCode=0 Nov 23 15:05:55 crc kubenswrapper[5050]: I1123 15:05:55.594138 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-lw9jb" Nov 23 15:05:55 crc kubenswrapper[5050]: I1123 15:05:55.594365 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lw9jb" event={"ID":"9e2ca38b-0c15-4ea5-9b78-89b1c2a0382a","Type":"ContainerDied","Data":"2e66206e04ca18b5347485f9817b342d52dd17d7ae2388f1f7d5f3815d40fc3f"} Nov 23 15:05:55 crc kubenswrapper[5050]: I1123 15:05:55.596162 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lw9jb" event={"ID":"9e2ca38b-0c15-4ea5-9b78-89b1c2a0382a","Type":"ContainerDied","Data":"c343289209ef7146d7dcef8e5a6a42560131abaa140c5da39bbea3a219e6562c"} Nov 23 15:05:55 crc kubenswrapper[5050]: I1123 15:05:55.596225 5050 scope.go:117] "RemoveContainer" containerID="2e66206e04ca18b5347485f9817b342d52dd17d7ae2388f1f7d5f3815d40fc3f" Nov 23 15:05:55 crc kubenswrapper[5050]: I1123 15:05:55.646683 5050 scope.go:117] "RemoveContainer" containerID="6ee667e1574b0ab7b0777bcb4cd3520bc2c61478cb24350a52e726b2e3779206" Nov 23 15:05:55 crc kubenswrapper[5050]: I1123 15:05:55.650640 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-lw9jb"] Nov 23 15:05:55 crc kubenswrapper[5050]: I1123 15:05:55.658100 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-lw9jb"] Nov 23 15:05:55 crc kubenswrapper[5050]: I1123 15:05:55.672463 5050 scope.go:117] "RemoveContainer" containerID="7050e134f92ed422b0be0d856bbf4ea10a14f87a26e8fc6322e8e21624065739" Nov 23 15:05:55 crc kubenswrapper[5050]: I1123 15:05:55.674322 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-c62vp" Nov 23 15:05:55 crc kubenswrapper[5050]: I1123 15:05:55.674380 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-c62vp" Nov 23 15:05:55 crc kubenswrapper[5050]: I1123 15:05:55.713403 5050 scope.go:117] "RemoveContainer" containerID="2e66206e04ca18b5347485f9817b342d52dd17d7ae2388f1f7d5f3815d40fc3f" Nov 23 15:05:55 crc kubenswrapper[5050]: E1123 15:05:55.714161 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2e66206e04ca18b5347485f9817b342d52dd17d7ae2388f1f7d5f3815d40fc3f\": container with ID starting with 2e66206e04ca18b5347485f9817b342d52dd17d7ae2388f1f7d5f3815d40fc3f not found: ID does not exist" containerID="2e66206e04ca18b5347485f9817b342d52dd17d7ae2388f1f7d5f3815d40fc3f" Nov 23 15:05:55 crc kubenswrapper[5050]: I1123 15:05:55.714224 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2e66206e04ca18b5347485f9817b342d52dd17d7ae2388f1f7d5f3815d40fc3f"} 
err="failed to get container status \"2e66206e04ca18b5347485f9817b342d52dd17d7ae2388f1f7d5f3815d40fc3f\": rpc error: code = NotFound desc = could not find container \"2e66206e04ca18b5347485f9817b342d52dd17d7ae2388f1f7d5f3815d40fc3f\": container with ID starting with 2e66206e04ca18b5347485f9817b342d52dd17d7ae2388f1f7d5f3815d40fc3f not found: ID does not exist" Nov 23 15:05:55 crc kubenswrapper[5050]: I1123 15:05:55.714261 5050 scope.go:117] "RemoveContainer" containerID="6ee667e1574b0ab7b0777bcb4cd3520bc2c61478cb24350a52e726b2e3779206" Nov 23 15:05:55 crc kubenswrapper[5050]: E1123 15:05:55.714625 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6ee667e1574b0ab7b0777bcb4cd3520bc2c61478cb24350a52e726b2e3779206\": container with ID starting with 6ee667e1574b0ab7b0777bcb4cd3520bc2c61478cb24350a52e726b2e3779206 not found: ID does not exist" containerID="6ee667e1574b0ab7b0777bcb4cd3520bc2c61478cb24350a52e726b2e3779206" Nov 23 15:05:55 crc kubenswrapper[5050]: I1123 15:05:55.714669 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6ee667e1574b0ab7b0777bcb4cd3520bc2c61478cb24350a52e726b2e3779206"} err="failed to get container status \"6ee667e1574b0ab7b0777bcb4cd3520bc2c61478cb24350a52e726b2e3779206\": rpc error: code = NotFound desc = could not find container \"6ee667e1574b0ab7b0777bcb4cd3520bc2c61478cb24350a52e726b2e3779206\": container with ID starting with 6ee667e1574b0ab7b0777bcb4cd3520bc2c61478cb24350a52e726b2e3779206 not found: ID does not exist" Nov 23 15:05:55 crc kubenswrapper[5050]: I1123 15:05:55.714701 5050 scope.go:117] "RemoveContainer" containerID="7050e134f92ed422b0be0d856bbf4ea10a14f87a26e8fc6322e8e21624065739" Nov 23 15:05:55 crc kubenswrapper[5050]: E1123 15:05:55.715043 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7050e134f92ed422b0be0d856bbf4ea10a14f87a26e8fc6322e8e21624065739\": container with ID starting with 7050e134f92ed422b0be0d856bbf4ea10a14f87a26e8fc6322e8e21624065739 not found: ID does not exist" containerID="7050e134f92ed422b0be0d856bbf4ea10a14f87a26e8fc6322e8e21624065739" Nov 23 15:05:55 crc kubenswrapper[5050]: I1123 15:05:55.715067 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7050e134f92ed422b0be0d856bbf4ea10a14f87a26e8fc6322e8e21624065739"} err="failed to get container status \"7050e134f92ed422b0be0d856bbf4ea10a14f87a26e8fc6322e8e21624065739\": rpc error: code = NotFound desc = could not find container \"7050e134f92ed422b0be0d856bbf4ea10a14f87a26e8fc6322e8e21624065739\": container with ID starting with 7050e134f92ed422b0be0d856bbf4ea10a14f87a26e8fc6322e8e21624065739 not found: ID does not exist" Nov 23 15:05:55 crc kubenswrapper[5050]: I1123 15:05:55.725103 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-c62vp" Nov 23 15:05:56 crc kubenswrapper[5050]: I1123 15:05:56.701130 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-c62vp" Nov 23 15:05:57 crc kubenswrapper[5050]: I1123 15:05:57.567807 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9e2ca38b-0c15-4ea5-9b78-89b1c2a0382a" path="/var/lib/kubelet/pods/9e2ca38b-0c15-4ea5-9b78-89b1c2a0382a/volumes" Nov 23 15:05:57 crc kubenswrapper[5050]: I1123 15:05:57.933576 5050 kubelet.go:2437] 
"SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-c62vp"] Nov 23 15:05:58 crc kubenswrapper[5050]: I1123 15:05:58.632777 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-c62vp" podUID="f9d66f78-2d89-4f96-a7be-dbbfe7181da3" containerName="registry-server" containerID="cri-o://dd8d5fb49f1d3cacc240238d2cbbeff05eee6c51b94f795d81e3ee01435e629a" gracePeriod=2 Nov 23 15:05:59 crc kubenswrapper[5050]: I1123 15:05:59.225149 5050 patch_prober.go:28] interesting pod/machine-config-daemon-hlrlq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 23 15:05:59 crc kubenswrapper[5050]: I1123 15:05:59.225250 5050 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 23 15:05:59 crc kubenswrapper[5050]: I1123 15:05:59.665762 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-c62vp" Nov 23 15:05:59 crc kubenswrapper[5050]: I1123 15:05:59.665986 5050 generic.go:334] "Generic (PLEG): container finished" podID="f9d66f78-2d89-4f96-a7be-dbbfe7181da3" containerID="dd8d5fb49f1d3cacc240238d2cbbeff05eee6c51b94f795d81e3ee01435e629a" exitCode=0 Nov 23 15:05:59 crc kubenswrapper[5050]: I1123 15:05:59.666025 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-c62vp" event={"ID":"f9d66f78-2d89-4f96-a7be-dbbfe7181da3","Type":"ContainerDied","Data":"dd8d5fb49f1d3cacc240238d2cbbeff05eee6c51b94f795d81e3ee01435e629a"} Nov 23 15:05:59 crc kubenswrapper[5050]: I1123 15:05:59.667128 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-c62vp" event={"ID":"f9d66f78-2d89-4f96-a7be-dbbfe7181da3","Type":"ContainerDied","Data":"154b5e8f0c69a29de48b6b27b253ddb1d0e6fe90d93246f358f818824b1a4c5a"} Nov 23 15:05:59 crc kubenswrapper[5050]: I1123 15:05:59.667176 5050 scope.go:117] "RemoveContainer" containerID="dd8d5fb49f1d3cacc240238d2cbbeff05eee6c51b94f795d81e3ee01435e629a" Nov 23 15:05:59 crc kubenswrapper[5050]: I1123 15:05:59.717172 5050 scope.go:117] "RemoveContainer" containerID="97558f3b982635b5cb150ab2bbc2144e7c70909a4d3bbf2fb3ba18dd42616f95" Nov 23 15:05:59 crc kubenswrapper[5050]: I1123 15:05:59.772224 5050 scope.go:117] "RemoveContainer" containerID="fe8066dd53478e8222a43ef62dd0dfcb443529861b831e6c3d0fb64712679ab8" Nov 23 15:05:59 crc kubenswrapper[5050]: I1123 15:05:59.797018 5050 scope.go:117] "RemoveContainer" containerID="dd8d5fb49f1d3cacc240238d2cbbeff05eee6c51b94f795d81e3ee01435e629a" Nov 23 15:05:59 crc kubenswrapper[5050]: E1123 15:05:59.797689 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dd8d5fb49f1d3cacc240238d2cbbeff05eee6c51b94f795d81e3ee01435e629a\": container with ID starting with dd8d5fb49f1d3cacc240238d2cbbeff05eee6c51b94f795d81e3ee01435e629a not found: ID does not exist" containerID="dd8d5fb49f1d3cacc240238d2cbbeff05eee6c51b94f795d81e3ee01435e629a" Nov 23 15:05:59 crc kubenswrapper[5050]: I1123 15:05:59.797743 5050 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dd8d5fb49f1d3cacc240238d2cbbeff05eee6c51b94f795d81e3ee01435e629a"} err="failed to get container status \"dd8d5fb49f1d3cacc240238d2cbbeff05eee6c51b94f795d81e3ee01435e629a\": rpc error: code = NotFound desc = could not find container \"dd8d5fb49f1d3cacc240238d2cbbeff05eee6c51b94f795d81e3ee01435e629a\": container with ID starting with dd8d5fb49f1d3cacc240238d2cbbeff05eee6c51b94f795d81e3ee01435e629a not found: ID does not exist" Nov 23 15:05:59 crc kubenswrapper[5050]: I1123 15:05:59.797780 5050 scope.go:117] "RemoveContainer" containerID="97558f3b982635b5cb150ab2bbc2144e7c70909a4d3bbf2fb3ba18dd42616f95" Nov 23 15:05:59 crc kubenswrapper[5050]: E1123 15:05:59.798255 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"97558f3b982635b5cb150ab2bbc2144e7c70909a4d3bbf2fb3ba18dd42616f95\": container with ID starting with 97558f3b982635b5cb150ab2bbc2144e7c70909a4d3bbf2fb3ba18dd42616f95 not found: ID does not exist" containerID="97558f3b982635b5cb150ab2bbc2144e7c70909a4d3bbf2fb3ba18dd42616f95" Nov 23 15:05:59 crc kubenswrapper[5050]: I1123 15:05:59.798281 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"97558f3b982635b5cb150ab2bbc2144e7c70909a4d3bbf2fb3ba18dd42616f95"} err="failed to get container status \"97558f3b982635b5cb150ab2bbc2144e7c70909a4d3bbf2fb3ba18dd42616f95\": rpc error: code = NotFound desc = could not find container \"97558f3b982635b5cb150ab2bbc2144e7c70909a4d3bbf2fb3ba18dd42616f95\": container with ID starting with 97558f3b982635b5cb150ab2bbc2144e7c70909a4d3bbf2fb3ba18dd42616f95 not found: ID does not exist" Nov 23 15:05:59 crc kubenswrapper[5050]: I1123 15:05:59.798295 5050 scope.go:117] "RemoveContainer" containerID="fe8066dd53478e8222a43ef62dd0dfcb443529861b831e6c3d0fb64712679ab8" Nov 23 15:05:59 crc kubenswrapper[5050]: E1123 15:05:59.799024 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fe8066dd53478e8222a43ef62dd0dfcb443529861b831e6c3d0fb64712679ab8\": container with ID starting with fe8066dd53478e8222a43ef62dd0dfcb443529861b831e6c3d0fb64712679ab8 not found: ID does not exist" containerID="fe8066dd53478e8222a43ef62dd0dfcb443529861b831e6c3d0fb64712679ab8" Nov 23 15:05:59 crc kubenswrapper[5050]: I1123 15:05:59.799043 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fe8066dd53478e8222a43ef62dd0dfcb443529861b831e6c3d0fb64712679ab8"} err="failed to get container status \"fe8066dd53478e8222a43ef62dd0dfcb443529861b831e6c3d0fb64712679ab8\": rpc error: code = NotFound desc = could not find container \"fe8066dd53478e8222a43ef62dd0dfcb443529861b831e6c3d0fb64712679ab8\": container with ID starting with fe8066dd53478e8222a43ef62dd0dfcb443529861b831e6c3d0fb64712679ab8 not found: ID does not exist" Nov 23 15:05:59 crc kubenswrapper[5050]: I1123 15:05:59.808665 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nvrc2\" (UniqueName: \"kubernetes.io/projected/f9d66f78-2d89-4f96-a7be-dbbfe7181da3-kube-api-access-nvrc2\") pod \"f9d66f78-2d89-4f96-a7be-dbbfe7181da3\" (UID: \"f9d66f78-2d89-4f96-a7be-dbbfe7181da3\") " Nov 23 15:05:59 crc kubenswrapper[5050]: I1123 15:05:59.808864 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/f9d66f78-2d89-4f96-a7be-dbbfe7181da3-utilities\") pod \"f9d66f78-2d89-4f96-a7be-dbbfe7181da3\" (UID: \"f9d66f78-2d89-4f96-a7be-dbbfe7181da3\") " Nov 23 15:05:59 crc kubenswrapper[5050]: I1123 15:05:59.809184 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f9d66f78-2d89-4f96-a7be-dbbfe7181da3-catalog-content\") pod \"f9d66f78-2d89-4f96-a7be-dbbfe7181da3\" (UID: \"f9d66f78-2d89-4f96-a7be-dbbfe7181da3\") " Nov 23 15:05:59 crc kubenswrapper[5050]: I1123 15:05:59.810598 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f9d66f78-2d89-4f96-a7be-dbbfe7181da3-utilities" (OuterVolumeSpecName: "utilities") pod "f9d66f78-2d89-4f96-a7be-dbbfe7181da3" (UID: "f9d66f78-2d89-4f96-a7be-dbbfe7181da3"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 15:05:59 crc kubenswrapper[5050]: I1123 15:05:59.810734 5050 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f9d66f78-2d89-4f96-a7be-dbbfe7181da3-utilities\") on node \"crc\" DevicePath \"\"" Nov 23 15:05:59 crc kubenswrapper[5050]: I1123 15:05:59.818944 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f9d66f78-2d89-4f96-a7be-dbbfe7181da3-kube-api-access-nvrc2" (OuterVolumeSpecName: "kube-api-access-nvrc2") pod "f9d66f78-2d89-4f96-a7be-dbbfe7181da3" (UID: "f9d66f78-2d89-4f96-a7be-dbbfe7181da3"). InnerVolumeSpecName "kube-api-access-nvrc2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:05:59 crc kubenswrapper[5050]: I1123 15:05:59.868296 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f9d66f78-2d89-4f96-a7be-dbbfe7181da3-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f9d66f78-2d89-4f96-a7be-dbbfe7181da3" (UID: "f9d66f78-2d89-4f96-a7be-dbbfe7181da3"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 15:05:59 crc kubenswrapper[5050]: I1123 15:05:59.911828 5050 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f9d66f78-2d89-4f96-a7be-dbbfe7181da3-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 23 15:05:59 crc kubenswrapper[5050]: I1123 15:05:59.911867 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nvrc2\" (UniqueName: \"kubernetes.io/projected/f9d66f78-2d89-4f96-a7be-dbbfe7181da3-kube-api-access-nvrc2\") on node \"crc\" DevicePath \"\"" Nov 23 15:06:00 crc kubenswrapper[5050]: I1123 15:06:00.683054 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-c62vp" Nov 23 15:06:00 crc kubenswrapper[5050]: I1123 15:06:00.747568 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-c62vp"] Nov 23 15:06:00 crc kubenswrapper[5050]: I1123 15:06:00.759595 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-c62vp"] Nov 23 15:06:01 crc kubenswrapper[5050]: I1123 15:06:01.223848 5050 scope.go:117] "RemoveContainer" containerID="655f490e0c7aa13171353d2848ca0d68815a663102d03d5563818ce0757f8067" Nov 23 15:06:01 crc kubenswrapper[5050]: I1123 15:06:01.273158 5050 scope.go:117] "RemoveContainer" containerID="4d1b6c6c61b575c6c0aa6947db68d1bcd9ab01330419cb2406597cf12bc7e6b6" Nov 23 15:06:01 crc kubenswrapper[5050]: I1123 15:06:01.310823 5050 scope.go:117] "RemoveContainer" containerID="e05743a215c5ada706341f4409a1a724e3949e0173a53be6eeb47dacec86519e" Nov 23 15:06:01 crc kubenswrapper[5050]: I1123 15:06:01.349843 5050 scope.go:117] "RemoveContainer" containerID="841882bd34efc6f91dc9d992f938beffc9123274287f5c4036ba44df4cda2b87" Nov 23 15:06:01 crc kubenswrapper[5050]: I1123 15:06:01.378226 5050 scope.go:117] "RemoveContainer" containerID="e3acb2767a7451b13639ee8d1428b93e74fd54c5a63166ef91e7d202125e96e6" Nov 23 15:06:01 crc kubenswrapper[5050]: I1123 15:06:01.422221 5050 scope.go:117] "RemoveContainer" containerID="7a08082700302fc03c17eee134d553b46f2157cc92e9951f31627a535a42e5af" Nov 23 15:06:01 crc kubenswrapper[5050]: I1123 15:06:01.451616 5050 scope.go:117] "RemoveContainer" containerID="4406dcd09b9388169d2c1dc5682c54223678311807c5dd014e1a115a16dcbddc" Nov 23 15:06:01 crc kubenswrapper[5050]: I1123 15:06:01.497598 5050 scope.go:117] "RemoveContainer" containerID="ab68ce1a21506eded71804ea43eab7e0d792aeeb1a795d53cbc5840aa073d4e2" Nov 23 15:06:01 crc kubenswrapper[5050]: I1123 15:06:01.526817 5050 scope.go:117] "RemoveContainer" containerID="e729bbcb8d7b322a25efb13eeec7403f0b1d628f261b1bbd8be04eea5dc29292" Nov 23 15:06:01 crc kubenswrapper[5050]: I1123 15:06:01.561923 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f9d66f78-2d89-4f96-a7be-dbbfe7181da3" path="/var/lib/kubelet/pods/f9d66f78-2d89-4f96-a7be-dbbfe7181da3/volumes" Nov 23 15:06:01 crc kubenswrapper[5050]: I1123 15:06:01.571008 5050 scope.go:117] "RemoveContainer" containerID="17238151ee0c60557cc7e297ae575f8dcc2f0ca8d6df997fe8811099e7fe93ce" Nov 23 15:06:01 crc kubenswrapper[5050]: I1123 15:06:01.597099 5050 scope.go:117] "RemoveContainer" containerID="ebcffda6c310da77ec9c4ccf0ade9f7141b3251552ce8adddbcfbe4c14dc54c1" Nov 23 15:06:01 crc kubenswrapper[5050]: I1123 15:06:01.631423 5050 scope.go:117] "RemoveContainer" containerID="baa900082661a9828476231a8149439c9dd2f733f95eb32f3e6ed2627e03453b" Nov 23 15:06:01 crc kubenswrapper[5050]: I1123 15:06:01.681206 5050 scope.go:117] "RemoveContainer" containerID="b844cbd1a4937573b0a13e755f35bcb710017da1977f2f16470e0f9921d64853" Nov 23 15:06:01 crc kubenswrapper[5050]: I1123 15:06:01.712630 5050 scope.go:117] "RemoveContainer" containerID="3249cf8628f145ad70288b99dd53e1eb58e6ee9953f4d2ed143ddefe3d8d6d1c" Nov 23 15:06:01 crc kubenswrapper[5050]: I1123 15:06:01.734917 5050 scope.go:117] "RemoveContainer" containerID="e0aa36f5771c43f6dd0d81d07cdda1b7e03ac841535ddf7564847c9249d6a707" Nov 23 15:06:01 crc kubenswrapper[5050]: I1123 15:06:01.755118 5050 scope.go:117] "RemoveContainer" containerID="6492298288de9f874d9f80a4b99954a7040ed7d8739bb3c5f5c29a497762e27e" Nov 23 15:06:29 
crc kubenswrapper[5050]: I1123 15:06:29.224773 5050 patch_prober.go:28] interesting pod/machine-config-daemon-hlrlq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 23 15:06:29 crc kubenswrapper[5050]: I1123 15:06:29.225784 5050 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 23 15:06:29 crc kubenswrapper[5050]: I1123 15:06:29.226111 5050 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" Nov 23 15:06:29 crc kubenswrapper[5050]: I1123 15:06:29.227081 5050 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"beff7179d387676ffa12d4809954c41d9fc6861adf4f28da488173863b61b9bd"} pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 23 15:06:29 crc kubenswrapper[5050]: I1123 15:06:29.227148 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" containerID="cri-o://beff7179d387676ffa12d4809954c41d9fc6861adf4f28da488173863b61b9bd" gracePeriod=600 Nov 23 15:06:29 crc kubenswrapper[5050]: E1123 15:06:29.357665 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 15:06:30 crc kubenswrapper[5050]: I1123 15:06:30.099410 5050 generic.go:334] "Generic (PLEG): container finished" podID="1d998909-9470-47ef-87e8-b34f0473682f" containerID="beff7179d387676ffa12d4809954c41d9fc6861adf4f28da488173863b61b9bd" exitCode=0 Nov 23 15:06:30 crc kubenswrapper[5050]: I1123 15:06:30.099512 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" event={"ID":"1d998909-9470-47ef-87e8-b34f0473682f","Type":"ContainerDied","Data":"beff7179d387676ffa12d4809954c41d9fc6861adf4f28da488173863b61b9bd"} Nov 23 15:06:30 crc kubenswrapper[5050]: I1123 15:06:30.099648 5050 scope.go:117] "RemoveContainer" containerID="f369955faef6b1a99a27dfa755c0fc81c7c636113d3009cdb4311b4b6c0018d4" Nov 23 15:06:30 crc kubenswrapper[5050]: I1123 15:06:30.100362 5050 scope.go:117] "RemoveContainer" containerID="beff7179d387676ffa12d4809954c41d9fc6861adf4f28da488173863b61b9bd" Nov 23 15:06:30 crc kubenswrapper[5050]: E1123 15:06:30.100861 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 15:06:42 crc kubenswrapper[5050]: I1123 15:06:42.548858 5050 scope.go:117] "RemoveContainer" containerID="beff7179d387676ffa12d4809954c41d9fc6861adf4f28da488173863b61b9bd" Nov 23 15:06:42 crc kubenswrapper[5050]: E1123 15:06:42.549934 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 15:06:56 crc kubenswrapper[5050]: I1123 15:06:56.549128 5050 scope.go:117] "RemoveContainer" containerID="beff7179d387676ffa12d4809954c41d9fc6861adf4f28da488173863b61b9bd" Nov 23 15:06:56 crc kubenswrapper[5050]: E1123 15:06:56.549941 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 15:07:02 crc kubenswrapper[5050]: I1123 15:07:02.158990 5050 scope.go:117] "RemoveContainer" containerID="ff68a1c25ddcf47d18f14477af4201b7b01eca17df536e55e05e2a33a3ed79c8" Nov 23 15:07:02 crc kubenswrapper[5050]: I1123 15:07:02.210219 5050 scope.go:117] "RemoveContainer" containerID="361f0f712cdb56c1a734658f4960db8d2ba0252abc0913de0ab85035acfe8a7b" Nov 23 15:07:02 crc kubenswrapper[5050]: I1123 15:07:02.258132 5050 scope.go:117] "RemoveContainer" containerID="299161f6b132a8506fc1999a7498e9f4796a83355fc8b6e0420a451e80357c9e" Nov 23 15:07:02 crc kubenswrapper[5050]: I1123 15:07:02.282789 5050 scope.go:117] "RemoveContainer" containerID="ed687669db467b72cf51858bbabe382365b14403b1f4ed39dea5604437cad30a" Nov 23 15:07:02 crc kubenswrapper[5050]: I1123 15:07:02.340650 5050 scope.go:117] "RemoveContainer" containerID="1894b6fbea8bbd84fe163e12069d94ea74e1467c86f754b3d1bb202e766111df" Nov 23 15:07:02 crc kubenswrapper[5050]: I1123 15:07:02.379190 5050 scope.go:117] "RemoveContainer" containerID="727dc0a552ebf37c7874c54993d577aa166a14da4f6f9ca0a707173e2c9665aa" Nov 23 15:07:02 crc kubenswrapper[5050]: I1123 15:07:02.419838 5050 scope.go:117] "RemoveContainer" containerID="1109890e497c0f796b8443638532b27dfc04e6ba90d2aa51286205529a6a9e0b" Nov 23 15:07:02 crc kubenswrapper[5050]: I1123 15:07:02.473083 5050 scope.go:117] "RemoveContainer" containerID="413a63448c5f1b6aa0ed69c820b78045e358548763dd1334c1ea783960639530" Nov 23 15:07:02 crc kubenswrapper[5050]: I1123 15:07:02.513591 5050 scope.go:117] "RemoveContainer" containerID="b47139886aa07e280e1a2070c1d4af199afdeb3729f1a9c4995f9f5cc7a034cd" Nov 23 15:07:02 crc kubenswrapper[5050]: I1123 15:07:02.544405 5050 scope.go:117] "RemoveContainer" containerID="4eaad0b4b2774c58b0ed715e526bba7629a4da67f0b876ef9fe855cfffd60bbf" Nov 23 15:07:02 crc kubenswrapper[5050]: I1123 15:07:02.578622 5050 scope.go:117] "RemoveContainer" containerID="bf86ac4b6838a47cd93889d0d9491ce491f000ed4be649f3721e2a86aeb4e9ae" Nov 23 15:07:02 crc 
kubenswrapper[5050]: I1123 15:07:02.613397 5050 scope.go:117] "RemoveContainer" containerID="ca03918f91f0d24416b97c1154f1427e4787e0717a61e93ae86f59f5dae6d896" Nov 23 15:07:02 crc kubenswrapper[5050]: I1123 15:07:02.671384 5050 scope.go:117] "RemoveContainer" containerID="096bc79c344ebb77c5036294dbf1470f2dcc544c7a30af4d159b48bf4fc5d85b" Nov 23 15:07:02 crc kubenswrapper[5050]: I1123 15:07:02.718800 5050 scope.go:117] "RemoveContainer" containerID="d8471c24b16d6d4f6103efd54a5561d59050162334738e8d02ec33d90eef6ff9" Nov 23 15:07:02 crc kubenswrapper[5050]: I1123 15:07:02.766115 5050 scope.go:117] "RemoveContainer" containerID="93834389fba7affdd694cfab3a6d64c84af6adddd324ea40539aa89d0238f84f" Nov 23 15:07:10 crc kubenswrapper[5050]: I1123 15:07:10.549279 5050 scope.go:117] "RemoveContainer" containerID="beff7179d387676ffa12d4809954c41d9fc6861adf4f28da488173863b61b9bd" Nov 23 15:07:10 crc kubenswrapper[5050]: E1123 15:07:10.550406 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 15:07:24 crc kubenswrapper[5050]: I1123 15:07:24.549601 5050 scope.go:117] "RemoveContainer" containerID="beff7179d387676ffa12d4809954c41d9fc6861adf4f28da488173863b61b9bd" Nov 23 15:07:24 crc kubenswrapper[5050]: E1123 15:07:24.552487 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 15:07:37 crc kubenswrapper[5050]: I1123 15:07:37.549765 5050 scope.go:117] "RemoveContainer" containerID="beff7179d387676ffa12d4809954c41d9fc6861adf4f28da488173863b61b9bd" Nov 23 15:07:37 crc kubenswrapper[5050]: E1123 15:07:37.551197 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 15:07:48 crc kubenswrapper[5050]: I1123 15:07:48.548606 5050 scope.go:117] "RemoveContainer" containerID="beff7179d387676ffa12d4809954c41d9fc6861adf4f28da488173863b61b9bd" Nov 23 15:07:48 crc kubenswrapper[5050]: E1123 15:07:48.550082 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 15:08:03 crc kubenswrapper[5050]: I1123 15:08:03.107983 5050 scope.go:117] "RemoveContainer" 
containerID="67627a24454619a22e3572ee09d69aded5b815e44193c91d154fa269a5576af2" Nov 23 15:08:03 crc kubenswrapper[5050]: I1123 15:08:03.142688 5050 scope.go:117] "RemoveContainer" containerID="0df6e87311c357f4060157ad3d323dd9559acb0de7668b9590e059d1943caa42" Nov 23 15:08:03 crc kubenswrapper[5050]: I1123 15:08:03.207722 5050 scope.go:117] "RemoveContainer" containerID="74740a260cb5560b82138d1b2de9a496e2eaceebcd795071134e76087256d9cf" Nov 23 15:08:03 crc kubenswrapper[5050]: I1123 15:08:03.243014 5050 scope.go:117] "RemoveContainer" containerID="4cbde859414d5230c51f3a896d2c7f2eb514382d4930ae12ebe388430ccf7b95" Nov 23 15:08:03 crc kubenswrapper[5050]: I1123 15:08:03.275987 5050 scope.go:117] "RemoveContainer" containerID="085d654f5685c959e6314e3ffe048bec6afd8a89e45c228a241cbd713de9226c" Nov 23 15:08:03 crc kubenswrapper[5050]: I1123 15:08:03.301471 5050 scope.go:117] "RemoveContainer" containerID="6de079cc2bb464468ef1408abbf5b65fc5771b1f360bfbfbcac40963b5df4e6d" Nov 23 15:08:03 crc kubenswrapper[5050]: I1123 15:08:03.328289 5050 scope.go:117] "RemoveContainer" containerID="15153a6ddd26bb52ffe675fc4511dfe431c750717734e0d53c6df64c01657ed6" Nov 23 15:08:03 crc kubenswrapper[5050]: I1123 15:08:03.362151 5050 scope.go:117] "RemoveContainer" containerID="49627df0d423eed22e18e73cad1c6b2832d4b69f55e220d377e77c16051b72e0" Nov 23 15:08:03 crc kubenswrapper[5050]: I1123 15:08:03.391768 5050 scope.go:117] "RemoveContainer" containerID="ddcc4b1b60ec1caba00533bfdcca9aabeb296bd155561a69a9313a277fd7f548" Nov 23 15:08:03 crc kubenswrapper[5050]: I1123 15:08:03.424088 5050 scope.go:117] "RemoveContainer" containerID="8413b1717750dc10c45317bdb032089ccf0935be49b622bea66cda6bbeae307e" Nov 23 15:08:03 crc kubenswrapper[5050]: I1123 15:08:03.550345 5050 scope.go:117] "RemoveContainer" containerID="beff7179d387676ffa12d4809954c41d9fc6861adf4f28da488173863b61b9bd" Nov 23 15:08:03 crc kubenswrapper[5050]: E1123 15:08:03.550813 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 15:08:16 crc kubenswrapper[5050]: I1123 15:08:16.549961 5050 scope.go:117] "RemoveContainer" containerID="beff7179d387676ffa12d4809954c41d9fc6861adf4f28da488173863b61b9bd" Nov 23 15:08:16 crc kubenswrapper[5050]: E1123 15:08:16.551338 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 15:08:28 crc kubenswrapper[5050]: I1123 15:08:28.549056 5050 scope.go:117] "RemoveContainer" containerID="beff7179d387676ffa12d4809954c41d9fc6861adf4f28da488173863b61b9bd" Nov 23 15:08:28 crc kubenswrapper[5050]: E1123 15:08:28.550255 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 15:08:39 crc kubenswrapper[5050]: I1123 15:08:39.549812 5050 scope.go:117] "RemoveContainer" containerID="beff7179d387676ffa12d4809954c41d9fc6861adf4f28da488173863b61b9bd" Nov 23 15:08:39 crc kubenswrapper[5050]: E1123 15:08:39.551345 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 15:08:51 crc kubenswrapper[5050]: I1123 15:08:51.549942 5050 scope.go:117] "RemoveContainer" containerID="beff7179d387676ffa12d4809954c41d9fc6861adf4f28da488173863b61b9bd" Nov 23 15:08:51 crc kubenswrapper[5050]: E1123 15:08:51.551048 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 15:09:03 crc kubenswrapper[5050]: I1123 15:09:03.549618 5050 scope.go:117] "RemoveContainer" containerID="beff7179d387676ffa12d4809954c41d9fc6861adf4f28da488173863b61b9bd" Nov 23 15:09:03 crc kubenswrapper[5050]: E1123 15:09:03.553136 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 15:09:03 crc kubenswrapper[5050]: I1123 15:09:03.608432 5050 scope.go:117] "RemoveContainer" containerID="7ddde7be0ab80cfcd7eacb38e126a697a0d2f4d855ad70062b839e6d933396d5" Nov 23 15:09:03 crc kubenswrapper[5050]: I1123 15:09:03.659948 5050 scope.go:117] "RemoveContainer" containerID="dcf12c08d4bf1811af924b4b17ab832619b6e87539c56aca5f56d1ebdc5f3fa1" Nov 23 15:09:03 crc kubenswrapper[5050]: I1123 15:09:03.744428 5050 scope.go:117] "RemoveContainer" containerID="4addc92803e0a7fa069a404a032c72c2edc38d23a3ca83cea0920a981320b2c9" Nov 23 15:09:03 crc kubenswrapper[5050]: I1123 15:09:03.793262 5050 scope.go:117] "RemoveContainer" containerID="5f6b86f81e584e0181c2a2f21d3c83b22e43fc8bc0a8211a4b5dcfeb058a4ea0" Nov 23 15:09:03 crc kubenswrapper[5050]: I1123 15:09:03.854066 5050 scope.go:117] "RemoveContainer" containerID="42a4a3972d0dfdbfa5a41878495e90a1790378398c3c410fbb952adc25a032b1" Nov 23 15:09:18 crc kubenswrapper[5050]: I1123 15:09:18.550476 5050 scope.go:117] "RemoveContainer" containerID="beff7179d387676ffa12d4809954c41d9fc6861adf4f28da488173863b61b9bd" Nov 23 15:09:18 crc kubenswrapper[5050]: E1123 15:09:18.553979 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 15:09:31 crc kubenswrapper[5050]: I1123 15:09:31.581813 5050 scope.go:117] "RemoveContainer" containerID="beff7179d387676ffa12d4809954c41d9fc6861adf4f28da488173863b61b9bd" Nov 23 15:09:31 crc kubenswrapper[5050]: E1123 15:09:31.585625 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 15:09:44 crc kubenswrapper[5050]: I1123 15:09:44.550137 5050 scope.go:117] "RemoveContainer" containerID="beff7179d387676ffa12d4809954c41d9fc6861adf4f28da488173863b61b9bd" Nov 23 15:09:44 crc kubenswrapper[5050]: E1123 15:09:44.551382 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 15:09:58 crc kubenswrapper[5050]: I1123 15:09:58.549287 5050 scope.go:117] "RemoveContainer" containerID="beff7179d387676ffa12d4809954c41d9fc6861adf4f28da488173863b61b9bd" Nov 23 15:09:58 crc kubenswrapper[5050]: E1123 15:09:58.550327 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 15:10:04 crc kubenswrapper[5050]: I1123 15:10:04.027993 5050 scope.go:117] "RemoveContainer" containerID="e5a03ed08028089ecf5d63d4900a3f9f939e1772c72456526c7bcdd023ba7ed7" Nov 23 15:10:13 crc kubenswrapper[5050]: I1123 15:10:13.549932 5050 scope.go:117] "RemoveContainer" containerID="beff7179d387676ffa12d4809954c41d9fc6861adf4f28da488173863b61b9bd" Nov 23 15:10:13 crc kubenswrapper[5050]: E1123 15:10:13.551208 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 15:10:28 crc kubenswrapper[5050]: I1123 15:10:28.550053 5050 scope.go:117] "RemoveContainer" containerID="beff7179d387676ffa12d4809954c41d9fc6861adf4f28da488173863b61b9bd" Nov 23 15:10:28 crc kubenswrapper[5050]: E1123 15:10:28.551854 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 15:10:39 crc kubenswrapper[5050]: I1123 15:10:39.549643 5050 scope.go:117] "RemoveContainer" containerID="beff7179d387676ffa12d4809954c41d9fc6861adf4f28da488173863b61b9bd" Nov 23 15:10:39 crc kubenswrapper[5050]: E1123 15:10:39.550563 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 15:10:50 crc kubenswrapper[5050]: I1123 15:10:50.549850 5050 scope.go:117] "RemoveContainer" containerID="beff7179d387676ffa12d4809954c41d9fc6861adf4f28da488173863b61b9bd" Nov 23 15:10:50 crc kubenswrapper[5050]: E1123 15:10:50.551122 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 15:11:03 crc kubenswrapper[5050]: I1123 15:11:03.549210 5050 scope.go:117] "RemoveContainer" containerID="beff7179d387676ffa12d4809954c41d9fc6861adf4f28da488173863b61b9bd" Nov 23 15:11:03 crc kubenswrapper[5050]: E1123 15:11:03.550610 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 15:11:04 crc kubenswrapper[5050]: I1123 15:11:04.117756 5050 scope.go:117] "RemoveContainer" containerID="a5fe601429909f5855737ff265e47e05c7c710743d5e35f8c314bde3530608cb" Nov 23 15:11:04 crc kubenswrapper[5050]: I1123 15:11:04.158041 5050 scope.go:117] "RemoveContainer" containerID="5981e02fbf178bbd36c2b4aa0bd3d390cd001f59c62a709e8011df05a496109d" Nov 23 15:11:04 crc kubenswrapper[5050]: I1123 15:11:04.205311 5050 scope.go:117] "RemoveContainer" containerID="b0249ea7f2b3e47427ff24b071e6deac824d08d94f85a1a6b454cedaf35fdcfe" Nov 23 15:11:04 crc kubenswrapper[5050]: I1123 15:11:04.236638 5050 scope.go:117] "RemoveContainer" containerID="5ab05a12fa19db392ca4e60409469274cc565dcc9c3b508b3ef8ad77d4e5f63b" Nov 23 15:11:16 crc kubenswrapper[5050]: I1123 15:11:16.548841 5050 scope.go:117] "RemoveContainer" containerID="beff7179d387676ffa12d4809954c41d9fc6861adf4f28da488173863b61b9bd" Nov 23 15:11:16 crc kubenswrapper[5050]: E1123 15:11:16.550236 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 15:11:29 crc kubenswrapper[5050]: I1123 15:11:29.549739 5050 scope.go:117] "RemoveContainer" containerID="beff7179d387676ffa12d4809954c41d9fc6861adf4f28da488173863b61b9bd" Nov 23 15:11:30 crc kubenswrapper[5050]: I1123 15:11:30.380523 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" event={"ID":"1d998909-9470-47ef-87e8-b34f0473682f","Type":"ContainerStarted","Data":"cb2ad25ebcc9082f315d032ed9abac6dfeae4ca432f56331b82c83eec7510b50"} Nov 23 15:13:29 crc kubenswrapper[5050]: I1123 15:13:29.224651 5050 patch_prober.go:28] interesting pod/machine-config-daemon-hlrlq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 23 15:13:29 crc kubenswrapper[5050]: I1123 15:13:29.225474 5050 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 23 15:13:59 crc kubenswrapper[5050]: I1123 15:13:59.225413 5050 patch_prober.go:28] interesting pod/machine-config-daemon-hlrlq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 23 15:13:59 crc kubenswrapper[5050]: I1123 15:13:59.226258 5050 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 23 15:14:29 crc kubenswrapper[5050]: I1123 15:14:29.224432 5050 patch_prober.go:28] interesting pod/machine-config-daemon-hlrlq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 23 15:14:29 crc kubenswrapper[5050]: I1123 15:14:29.225613 5050 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 23 15:14:29 crc kubenswrapper[5050]: I1123 15:14:29.225729 5050 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" Nov 23 15:14:29 crc kubenswrapper[5050]: I1123 15:14:29.227025 5050 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"cb2ad25ebcc9082f315d032ed9abac6dfeae4ca432f56331b82c83eec7510b50"} pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 23 15:14:29 crc kubenswrapper[5050]: 
I1123 15:14:29.227134 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" containerID="cri-o://cb2ad25ebcc9082f315d032ed9abac6dfeae4ca432f56331b82c83eec7510b50" gracePeriod=600 Nov 23 15:14:30 crc kubenswrapper[5050]: I1123 15:14:30.259435 5050 generic.go:334] "Generic (PLEG): container finished" podID="1d998909-9470-47ef-87e8-b34f0473682f" containerID="cb2ad25ebcc9082f315d032ed9abac6dfeae4ca432f56331b82c83eec7510b50" exitCode=0 Nov 23 15:14:30 crc kubenswrapper[5050]: I1123 15:14:30.259521 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" event={"ID":"1d998909-9470-47ef-87e8-b34f0473682f","Type":"ContainerDied","Data":"cb2ad25ebcc9082f315d032ed9abac6dfeae4ca432f56331b82c83eec7510b50"} Nov 23 15:14:30 crc kubenswrapper[5050]: I1123 15:14:30.259990 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" event={"ID":"1d998909-9470-47ef-87e8-b34f0473682f","Type":"ContainerStarted","Data":"bdd40c88519a2cc1e873e7266b0787007822918051c94cd3f1fdfcfacf25d920"} Nov 23 15:14:30 crc kubenswrapper[5050]: I1123 15:14:30.260035 5050 scope.go:117] "RemoveContainer" containerID="beff7179d387676ffa12d4809954c41d9fc6861adf4f28da488173863b61b9bd" Nov 23 15:14:31 crc kubenswrapper[5050]: I1123 15:14:31.629167 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-lcn7w"] Nov 23 15:14:31 crc kubenswrapper[5050]: E1123 15:14:31.629956 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f9d66f78-2d89-4f96-a7be-dbbfe7181da3" containerName="registry-server" Nov 23 15:14:31 crc kubenswrapper[5050]: I1123 15:14:31.629975 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="f9d66f78-2d89-4f96-a7be-dbbfe7181da3" containerName="registry-server" Nov 23 15:14:31 crc kubenswrapper[5050]: E1123 15:14:31.629994 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f9d66f78-2d89-4f96-a7be-dbbfe7181da3" containerName="extract-utilities" Nov 23 15:14:31 crc kubenswrapper[5050]: I1123 15:14:31.630004 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="f9d66f78-2d89-4f96-a7be-dbbfe7181da3" containerName="extract-utilities" Nov 23 15:14:31 crc kubenswrapper[5050]: E1123 15:14:31.630030 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9e2ca38b-0c15-4ea5-9b78-89b1c2a0382a" containerName="extract-content" Nov 23 15:14:31 crc kubenswrapper[5050]: I1123 15:14:31.630040 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="9e2ca38b-0c15-4ea5-9b78-89b1c2a0382a" containerName="extract-content" Nov 23 15:14:31 crc kubenswrapper[5050]: E1123 15:14:31.630058 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9e2ca38b-0c15-4ea5-9b78-89b1c2a0382a" containerName="extract-utilities" Nov 23 15:14:31 crc kubenswrapper[5050]: I1123 15:14:31.630066 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="9e2ca38b-0c15-4ea5-9b78-89b1c2a0382a" containerName="extract-utilities" Nov 23 15:14:31 crc kubenswrapper[5050]: E1123 15:14:31.630078 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9e2ca38b-0c15-4ea5-9b78-89b1c2a0382a" containerName="registry-server" Nov 23 15:14:31 crc kubenswrapper[5050]: I1123 15:14:31.630088 5050 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="9e2ca38b-0c15-4ea5-9b78-89b1c2a0382a" containerName="registry-server" Nov 23 15:14:31 crc kubenswrapper[5050]: E1123 15:14:31.630103 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f9d66f78-2d89-4f96-a7be-dbbfe7181da3" containerName="extract-content" Nov 23 15:14:31 crc kubenswrapper[5050]: I1123 15:14:31.630111 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="f9d66f78-2d89-4f96-a7be-dbbfe7181da3" containerName="extract-content" Nov 23 15:14:31 crc kubenswrapper[5050]: I1123 15:14:31.630314 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="f9d66f78-2d89-4f96-a7be-dbbfe7181da3" containerName="registry-server" Nov 23 15:14:31 crc kubenswrapper[5050]: I1123 15:14:31.630352 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="9e2ca38b-0c15-4ea5-9b78-89b1c2a0382a" containerName="registry-server" Nov 23 15:14:31 crc kubenswrapper[5050]: I1123 15:14:31.631812 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-lcn7w" Nov 23 15:14:31 crc kubenswrapper[5050]: I1123 15:14:31.664294 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-lcn7w"] Nov 23 15:14:31 crc kubenswrapper[5050]: I1123 15:14:31.737631 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5bvm2\" (UniqueName: \"kubernetes.io/projected/deae3233-be87-44c9-b430-dd3e16d593c3-kube-api-access-5bvm2\") pod \"community-operators-lcn7w\" (UID: \"deae3233-be87-44c9-b430-dd3e16d593c3\") " pod="openshift-marketplace/community-operators-lcn7w" Nov 23 15:14:31 crc kubenswrapper[5050]: I1123 15:14:31.737750 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/deae3233-be87-44c9-b430-dd3e16d593c3-utilities\") pod \"community-operators-lcn7w\" (UID: \"deae3233-be87-44c9-b430-dd3e16d593c3\") " pod="openshift-marketplace/community-operators-lcn7w" Nov 23 15:14:31 crc kubenswrapper[5050]: I1123 15:14:31.737789 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/deae3233-be87-44c9-b430-dd3e16d593c3-catalog-content\") pod \"community-operators-lcn7w\" (UID: \"deae3233-be87-44c9-b430-dd3e16d593c3\") " pod="openshift-marketplace/community-operators-lcn7w" Nov 23 15:14:31 crc kubenswrapper[5050]: I1123 15:14:31.838786 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5bvm2\" (UniqueName: \"kubernetes.io/projected/deae3233-be87-44c9-b430-dd3e16d593c3-kube-api-access-5bvm2\") pod \"community-operators-lcn7w\" (UID: \"deae3233-be87-44c9-b430-dd3e16d593c3\") " pod="openshift-marketplace/community-operators-lcn7w" Nov 23 15:14:31 crc kubenswrapper[5050]: I1123 15:14:31.838864 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/deae3233-be87-44c9-b430-dd3e16d593c3-utilities\") pod \"community-operators-lcn7w\" (UID: \"deae3233-be87-44c9-b430-dd3e16d593c3\") " pod="openshift-marketplace/community-operators-lcn7w" Nov 23 15:14:31 crc kubenswrapper[5050]: I1123 15:14:31.838890 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/deae3233-be87-44c9-b430-dd3e16d593c3-catalog-content\") pod 
\"community-operators-lcn7w\" (UID: \"deae3233-be87-44c9-b430-dd3e16d593c3\") " pod="openshift-marketplace/community-operators-lcn7w" Nov 23 15:14:31 crc kubenswrapper[5050]: I1123 15:14:31.839405 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/deae3233-be87-44c9-b430-dd3e16d593c3-utilities\") pod \"community-operators-lcn7w\" (UID: \"deae3233-be87-44c9-b430-dd3e16d593c3\") " pod="openshift-marketplace/community-operators-lcn7w" Nov 23 15:14:31 crc kubenswrapper[5050]: I1123 15:14:31.839436 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/deae3233-be87-44c9-b430-dd3e16d593c3-catalog-content\") pod \"community-operators-lcn7w\" (UID: \"deae3233-be87-44c9-b430-dd3e16d593c3\") " pod="openshift-marketplace/community-operators-lcn7w" Nov 23 15:14:31 crc kubenswrapper[5050]: I1123 15:14:31.863660 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5bvm2\" (UniqueName: \"kubernetes.io/projected/deae3233-be87-44c9-b430-dd3e16d593c3-kube-api-access-5bvm2\") pod \"community-operators-lcn7w\" (UID: \"deae3233-be87-44c9-b430-dd3e16d593c3\") " pod="openshift-marketplace/community-operators-lcn7w" Nov 23 15:14:31 crc kubenswrapper[5050]: I1123 15:14:31.968384 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-lcn7w" Nov 23 15:14:32 crc kubenswrapper[5050]: I1123 15:14:32.503168 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-lcn7w"] Nov 23 15:14:32 crc kubenswrapper[5050]: W1123 15:14:32.517714 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poddeae3233_be87_44c9_b430_dd3e16d593c3.slice/crio-f7ef7aa2ad19141abbd11327665280f9ae6176025a56479c83310f3deca88a5d WatchSource:0}: Error finding container f7ef7aa2ad19141abbd11327665280f9ae6176025a56479c83310f3deca88a5d: Status 404 returned error can't find the container with id f7ef7aa2ad19141abbd11327665280f9ae6176025a56479c83310f3deca88a5d Nov 23 15:14:33 crc kubenswrapper[5050]: I1123 15:14:33.290542 5050 generic.go:334] "Generic (PLEG): container finished" podID="deae3233-be87-44c9-b430-dd3e16d593c3" containerID="928660b83fb25633369998378272e3dbbbba57edafd63511fc4eb6af01999d4c" exitCode=0 Nov 23 15:14:33 crc kubenswrapper[5050]: I1123 15:14:33.290644 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-lcn7w" event={"ID":"deae3233-be87-44c9-b430-dd3e16d593c3","Type":"ContainerDied","Data":"928660b83fb25633369998378272e3dbbbba57edafd63511fc4eb6af01999d4c"} Nov 23 15:14:33 crc kubenswrapper[5050]: I1123 15:14:33.290756 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-lcn7w" event={"ID":"deae3233-be87-44c9-b430-dd3e16d593c3","Type":"ContainerStarted","Data":"f7ef7aa2ad19141abbd11327665280f9ae6176025a56479c83310f3deca88a5d"} Nov 23 15:14:33 crc kubenswrapper[5050]: I1123 15:14:33.295129 5050 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 23 15:14:34 crc kubenswrapper[5050]: I1123 15:14:34.306122 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-lcn7w" 
event={"ID":"deae3233-be87-44c9-b430-dd3e16d593c3","Type":"ContainerStarted","Data":"985814ceb0aee79da8706978d70683215cb8724d8ca5a607d97ab0de07b59000"} Nov 23 15:14:35 crc kubenswrapper[5050]: I1123 15:14:35.319704 5050 generic.go:334] "Generic (PLEG): container finished" podID="deae3233-be87-44c9-b430-dd3e16d593c3" containerID="985814ceb0aee79da8706978d70683215cb8724d8ca5a607d97ab0de07b59000" exitCode=0 Nov 23 15:14:35 crc kubenswrapper[5050]: I1123 15:14:35.319790 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-lcn7w" event={"ID":"deae3233-be87-44c9-b430-dd3e16d593c3","Type":"ContainerDied","Data":"985814ceb0aee79da8706978d70683215cb8724d8ca5a607d97ab0de07b59000"} Nov 23 15:14:36 crc kubenswrapper[5050]: I1123 15:14:36.332903 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-lcn7w" event={"ID":"deae3233-be87-44c9-b430-dd3e16d593c3","Type":"ContainerStarted","Data":"29a39e11b4d46d3d6f98a7db3499db045b79d3a4841fda11bb7ae12e70360dc5"} Nov 23 15:14:36 crc kubenswrapper[5050]: I1123 15:14:36.359606 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-lcn7w" podStartSLOduration=2.7961668790000003 podStartE2EDuration="5.359585441s" podCreationTimestamp="2025-11-23 15:14:31 +0000 UTC" firstStartedPulling="2025-11-23 15:14:33.293386422 +0000 UTC m=+1968.460382907" lastFinishedPulling="2025-11-23 15:14:35.856804944 +0000 UTC m=+1971.023801469" observedRunningTime="2025-11-23 15:14:36.357280535 +0000 UTC m=+1971.524277050" watchObservedRunningTime="2025-11-23 15:14:36.359585441 +0000 UTC m=+1971.526581926" Nov 23 15:14:41 crc kubenswrapper[5050]: I1123 15:14:41.968597 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-lcn7w" Nov 23 15:14:41 crc kubenswrapper[5050]: I1123 15:14:41.970003 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-lcn7w" Nov 23 15:14:42 crc kubenswrapper[5050]: I1123 15:14:42.025072 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-lcn7w" Nov 23 15:14:42 crc kubenswrapper[5050]: I1123 15:14:42.467926 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-lcn7w" Nov 23 15:14:42 crc kubenswrapper[5050]: I1123 15:14:42.543138 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-lcn7w"] Nov 23 15:14:44 crc kubenswrapper[5050]: I1123 15:14:44.416785 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-lcn7w" podUID="deae3233-be87-44c9-b430-dd3e16d593c3" containerName="registry-server" containerID="cri-o://29a39e11b4d46d3d6f98a7db3499db045b79d3a4841fda11bb7ae12e70360dc5" gracePeriod=2 Nov 23 15:14:44 crc kubenswrapper[5050]: I1123 15:14:44.925723 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-lcn7w" Nov 23 15:14:45 crc kubenswrapper[5050]: I1123 15:14:45.023751 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/deae3233-be87-44c9-b430-dd3e16d593c3-utilities\") pod \"deae3233-be87-44c9-b430-dd3e16d593c3\" (UID: \"deae3233-be87-44c9-b430-dd3e16d593c3\") " Nov 23 15:14:45 crc kubenswrapper[5050]: I1123 15:14:45.023893 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5bvm2\" (UniqueName: \"kubernetes.io/projected/deae3233-be87-44c9-b430-dd3e16d593c3-kube-api-access-5bvm2\") pod \"deae3233-be87-44c9-b430-dd3e16d593c3\" (UID: \"deae3233-be87-44c9-b430-dd3e16d593c3\") " Nov 23 15:14:45 crc kubenswrapper[5050]: I1123 15:14:45.023933 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/deae3233-be87-44c9-b430-dd3e16d593c3-catalog-content\") pod \"deae3233-be87-44c9-b430-dd3e16d593c3\" (UID: \"deae3233-be87-44c9-b430-dd3e16d593c3\") " Nov 23 15:14:45 crc kubenswrapper[5050]: I1123 15:14:45.024646 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/deae3233-be87-44c9-b430-dd3e16d593c3-utilities" (OuterVolumeSpecName: "utilities") pod "deae3233-be87-44c9-b430-dd3e16d593c3" (UID: "deae3233-be87-44c9-b430-dd3e16d593c3"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 15:14:45 crc kubenswrapper[5050]: I1123 15:14:45.031394 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/deae3233-be87-44c9-b430-dd3e16d593c3-kube-api-access-5bvm2" (OuterVolumeSpecName: "kube-api-access-5bvm2") pod "deae3233-be87-44c9-b430-dd3e16d593c3" (UID: "deae3233-be87-44c9-b430-dd3e16d593c3"). InnerVolumeSpecName "kube-api-access-5bvm2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:14:45 crc kubenswrapper[5050]: I1123 15:14:45.084203 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/deae3233-be87-44c9-b430-dd3e16d593c3-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "deae3233-be87-44c9-b430-dd3e16d593c3" (UID: "deae3233-be87-44c9-b430-dd3e16d593c3"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 15:14:45 crc kubenswrapper[5050]: I1123 15:14:45.126436 5050 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/deae3233-be87-44c9-b430-dd3e16d593c3-utilities\") on node \"crc\" DevicePath \"\"" Nov 23 15:14:45 crc kubenswrapper[5050]: I1123 15:14:45.127040 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5bvm2\" (UniqueName: \"kubernetes.io/projected/deae3233-be87-44c9-b430-dd3e16d593c3-kube-api-access-5bvm2\") on node \"crc\" DevicePath \"\"" Nov 23 15:14:45 crc kubenswrapper[5050]: I1123 15:14:45.127065 5050 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/deae3233-be87-44c9-b430-dd3e16d593c3-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 23 15:14:45 crc kubenswrapper[5050]: I1123 15:14:45.439717 5050 generic.go:334] "Generic (PLEG): container finished" podID="deae3233-be87-44c9-b430-dd3e16d593c3" containerID="29a39e11b4d46d3d6f98a7db3499db045b79d3a4841fda11bb7ae12e70360dc5" exitCode=0 Nov 23 15:14:45 crc kubenswrapper[5050]: I1123 15:14:45.439834 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-lcn7w" event={"ID":"deae3233-be87-44c9-b430-dd3e16d593c3","Type":"ContainerDied","Data":"29a39e11b4d46d3d6f98a7db3499db045b79d3a4841fda11bb7ae12e70360dc5"} Nov 23 15:14:45 crc kubenswrapper[5050]: I1123 15:14:45.439893 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-lcn7w" event={"ID":"deae3233-be87-44c9-b430-dd3e16d593c3","Type":"ContainerDied","Data":"f7ef7aa2ad19141abbd11327665280f9ae6176025a56479c83310f3deca88a5d"} Nov 23 15:14:45 crc kubenswrapper[5050]: I1123 15:14:45.439933 5050 scope.go:117] "RemoveContainer" containerID="29a39e11b4d46d3d6f98a7db3499db045b79d3a4841fda11bb7ae12e70360dc5" Nov 23 15:14:45 crc kubenswrapper[5050]: I1123 15:14:45.440041 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-lcn7w" Nov 23 15:14:45 crc kubenswrapper[5050]: I1123 15:14:45.495589 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-lcn7w"] Nov 23 15:14:45 crc kubenswrapper[5050]: I1123 15:14:45.501331 5050 scope.go:117] "RemoveContainer" containerID="985814ceb0aee79da8706978d70683215cb8724d8ca5a607d97ab0de07b59000" Nov 23 15:14:45 crc kubenswrapper[5050]: I1123 15:14:45.504275 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-lcn7w"] Nov 23 15:14:45 crc kubenswrapper[5050]: I1123 15:14:45.527174 5050 scope.go:117] "RemoveContainer" containerID="928660b83fb25633369998378272e3dbbbba57edafd63511fc4eb6af01999d4c" Nov 23 15:14:45 crc kubenswrapper[5050]: I1123 15:14:45.571307 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="deae3233-be87-44c9-b430-dd3e16d593c3" path="/var/lib/kubelet/pods/deae3233-be87-44c9-b430-dd3e16d593c3/volumes" Nov 23 15:14:45 crc kubenswrapper[5050]: I1123 15:14:45.576731 5050 scope.go:117] "RemoveContainer" containerID="29a39e11b4d46d3d6f98a7db3499db045b79d3a4841fda11bb7ae12e70360dc5" Nov 23 15:14:45 crc kubenswrapper[5050]: E1123 15:14:45.577402 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"29a39e11b4d46d3d6f98a7db3499db045b79d3a4841fda11bb7ae12e70360dc5\": container with ID starting with 29a39e11b4d46d3d6f98a7db3499db045b79d3a4841fda11bb7ae12e70360dc5 not found: ID does not exist" containerID="29a39e11b4d46d3d6f98a7db3499db045b79d3a4841fda11bb7ae12e70360dc5" Nov 23 15:14:45 crc kubenswrapper[5050]: I1123 15:14:45.577505 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"29a39e11b4d46d3d6f98a7db3499db045b79d3a4841fda11bb7ae12e70360dc5"} err="failed to get container status \"29a39e11b4d46d3d6f98a7db3499db045b79d3a4841fda11bb7ae12e70360dc5\": rpc error: code = NotFound desc = could not find container \"29a39e11b4d46d3d6f98a7db3499db045b79d3a4841fda11bb7ae12e70360dc5\": container with ID starting with 29a39e11b4d46d3d6f98a7db3499db045b79d3a4841fda11bb7ae12e70360dc5 not found: ID does not exist" Nov 23 15:14:45 crc kubenswrapper[5050]: I1123 15:14:45.577546 5050 scope.go:117] "RemoveContainer" containerID="985814ceb0aee79da8706978d70683215cb8724d8ca5a607d97ab0de07b59000" Nov 23 15:14:45 crc kubenswrapper[5050]: E1123 15:14:45.578111 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"985814ceb0aee79da8706978d70683215cb8724d8ca5a607d97ab0de07b59000\": container with ID starting with 985814ceb0aee79da8706978d70683215cb8724d8ca5a607d97ab0de07b59000 not found: ID does not exist" containerID="985814ceb0aee79da8706978d70683215cb8724d8ca5a607d97ab0de07b59000" Nov 23 15:14:45 crc kubenswrapper[5050]: I1123 15:14:45.578171 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"985814ceb0aee79da8706978d70683215cb8724d8ca5a607d97ab0de07b59000"} err="failed to get container status \"985814ceb0aee79da8706978d70683215cb8724d8ca5a607d97ab0de07b59000\": rpc error: code = NotFound desc = could not find container \"985814ceb0aee79da8706978d70683215cb8724d8ca5a607d97ab0de07b59000\": container with ID starting with 985814ceb0aee79da8706978d70683215cb8724d8ca5a607d97ab0de07b59000 not found: ID does not exist" Nov 23 15:14:45 crc kubenswrapper[5050]: I1123 
Nov 23 15:14:45 crc kubenswrapper[5050]: I1123 15:14:45.578209 5050 scope.go:117] "RemoveContainer" containerID="928660b83fb25633369998378272e3dbbbba57edafd63511fc4eb6af01999d4c"
Nov 23 15:14:45 crc kubenswrapper[5050]: E1123 15:14:45.578638 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"928660b83fb25633369998378272e3dbbbba57edafd63511fc4eb6af01999d4c\": container with ID starting with 928660b83fb25633369998378272e3dbbbba57edafd63511fc4eb6af01999d4c not found: ID does not exist" containerID="928660b83fb25633369998378272e3dbbbba57edafd63511fc4eb6af01999d4c"
Nov 23 15:14:45 crc kubenswrapper[5050]: I1123 15:14:45.578679 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"928660b83fb25633369998378272e3dbbbba57edafd63511fc4eb6af01999d4c"} err="failed to get container status \"928660b83fb25633369998378272e3dbbbba57edafd63511fc4eb6af01999d4c\": rpc error: code = NotFound desc = could not find container \"928660b83fb25633369998378272e3dbbbba57edafd63511fc4eb6af01999d4c\": container with ID starting with 928660b83fb25633369998378272e3dbbbba57edafd63511fc4eb6af01999d4c not found: ID does not exist"
Nov 23 15:15:00 crc kubenswrapper[5050]: I1123 15:15:00.180151 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29398515-8ncgq"]
Nov 23 15:15:00 crc kubenswrapper[5050]: E1123 15:15:00.181564 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="deae3233-be87-44c9-b430-dd3e16d593c3" containerName="registry-server"
Nov 23 15:15:00 crc kubenswrapper[5050]: I1123 15:15:00.181581 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="deae3233-be87-44c9-b430-dd3e16d593c3" containerName="registry-server"
Nov 23 15:15:00 crc kubenswrapper[5050]: E1123 15:15:00.181605 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="deae3233-be87-44c9-b430-dd3e16d593c3" containerName="extract-utilities"
Nov 23 15:15:00 crc kubenswrapper[5050]: I1123 15:15:00.181631 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="deae3233-be87-44c9-b430-dd3e16d593c3" containerName="extract-utilities"
Nov 23 15:15:00 crc kubenswrapper[5050]: E1123 15:15:00.181677 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="deae3233-be87-44c9-b430-dd3e16d593c3" containerName="extract-content"
Nov 23 15:15:00 crc kubenswrapper[5050]: I1123 15:15:00.181709 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="deae3233-be87-44c9-b430-dd3e16d593c3" containerName="extract-content"
Nov 23 15:15:00 crc kubenswrapper[5050]: I1123 15:15:00.182554 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="deae3233-be87-44c9-b430-dd3e16d593c3" containerName="registry-server"
Nov 23 15:15:00 crc kubenswrapper[5050]: I1123 15:15:00.183361 5050 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29398515-8ncgq" Nov 23 15:15:00 crc kubenswrapper[5050]: I1123 15:15:00.188909 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 23 15:15:00 crc kubenswrapper[5050]: I1123 15:15:00.194711 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 23 15:15:00 crc kubenswrapper[5050]: I1123 15:15:00.197885 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29398515-8ncgq"] Nov 23 15:15:00 crc kubenswrapper[5050]: I1123 15:15:00.307355 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-52h5c\" (UniqueName: \"kubernetes.io/projected/72f0e4c1-31ac-4791-b22c-c315b6ce4b11-kube-api-access-52h5c\") pod \"collect-profiles-29398515-8ncgq\" (UID: \"72f0e4c1-31ac-4791-b22c-c315b6ce4b11\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29398515-8ncgq" Nov 23 15:15:00 crc kubenswrapper[5050]: I1123 15:15:00.307673 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/72f0e4c1-31ac-4791-b22c-c315b6ce4b11-secret-volume\") pod \"collect-profiles-29398515-8ncgq\" (UID: \"72f0e4c1-31ac-4791-b22c-c315b6ce4b11\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29398515-8ncgq" Nov 23 15:15:00 crc kubenswrapper[5050]: I1123 15:15:00.307916 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/72f0e4c1-31ac-4791-b22c-c315b6ce4b11-config-volume\") pod \"collect-profiles-29398515-8ncgq\" (UID: \"72f0e4c1-31ac-4791-b22c-c315b6ce4b11\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29398515-8ncgq" Nov 23 15:15:00 crc kubenswrapper[5050]: I1123 15:15:00.410029 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-52h5c\" (UniqueName: \"kubernetes.io/projected/72f0e4c1-31ac-4791-b22c-c315b6ce4b11-kube-api-access-52h5c\") pod \"collect-profiles-29398515-8ncgq\" (UID: \"72f0e4c1-31ac-4791-b22c-c315b6ce4b11\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29398515-8ncgq" Nov 23 15:15:00 crc kubenswrapper[5050]: I1123 15:15:00.410154 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/72f0e4c1-31ac-4791-b22c-c315b6ce4b11-secret-volume\") pod \"collect-profiles-29398515-8ncgq\" (UID: \"72f0e4c1-31ac-4791-b22c-c315b6ce4b11\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29398515-8ncgq" Nov 23 15:15:00 crc kubenswrapper[5050]: I1123 15:15:00.410334 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/72f0e4c1-31ac-4791-b22c-c315b6ce4b11-config-volume\") pod \"collect-profiles-29398515-8ncgq\" (UID: \"72f0e4c1-31ac-4791-b22c-c315b6ce4b11\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29398515-8ncgq" Nov 23 15:15:00 crc kubenswrapper[5050]: I1123 15:15:00.411659 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/72f0e4c1-31ac-4791-b22c-c315b6ce4b11-config-volume\") pod 
\"collect-profiles-29398515-8ncgq\" (UID: \"72f0e4c1-31ac-4791-b22c-c315b6ce4b11\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29398515-8ncgq" Nov 23 15:15:00 crc kubenswrapper[5050]: I1123 15:15:00.421197 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/72f0e4c1-31ac-4791-b22c-c315b6ce4b11-secret-volume\") pod \"collect-profiles-29398515-8ncgq\" (UID: \"72f0e4c1-31ac-4791-b22c-c315b6ce4b11\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29398515-8ncgq" Nov 23 15:15:00 crc kubenswrapper[5050]: I1123 15:15:00.429298 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-52h5c\" (UniqueName: \"kubernetes.io/projected/72f0e4c1-31ac-4791-b22c-c315b6ce4b11-kube-api-access-52h5c\") pod \"collect-profiles-29398515-8ncgq\" (UID: \"72f0e4c1-31ac-4791-b22c-c315b6ce4b11\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29398515-8ncgq" Nov 23 15:15:00 crc kubenswrapper[5050]: I1123 15:15:00.524812 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29398515-8ncgq" Nov 23 15:15:00 crc kubenswrapper[5050]: I1123 15:15:00.771312 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29398515-8ncgq"] Nov 23 15:15:01 crc kubenswrapper[5050]: I1123 15:15:01.638467 5050 generic.go:334] "Generic (PLEG): container finished" podID="72f0e4c1-31ac-4791-b22c-c315b6ce4b11" containerID="fc2645d00d7fecac9e96af21a596fe4440367cf7a2d77f07770dc70bcf16cc72" exitCode=0 Nov 23 15:15:01 crc kubenswrapper[5050]: I1123 15:15:01.638519 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29398515-8ncgq" event={"ID":"72f0e4c1-31ac-4791-b22c-c315b6ce4b11","Type":"ContainerDied","Data":"fc2645d00d7fecac9e96af21a596fe4440367cf7a2d77f07770dc70bcf16cc72"} Nov 23 15:15:01 crc kubenswrapper[5050]: I1123 15:15:01.638550 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29398515-8ncgq" event={"ID":"72f0e4c1-31ac-4791-b22c-c315b6ce4b11","Type":"ContainerStarted","Data":"d575dea8f17245e6cab23b0ac4e9d635e126e371aeb3e6582f938c241bb5fe5c"} Nov 23 15:15:02 crc kubenswrapper[5050]: I1123 15:15:02.974475 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29398515-8ncgq" Nov 23 15:15:03 crc kubenswrapper[5050]: I1123 15:15:03.155942 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/72f0e4c1-31ac-4791-b22c-c315b6ce4b11-config-volume\") pod \"72f0e4c1-31ac-4791-b22c-c315b6ce4b11\" (UID: \"72f0e4c1-31ac-4791-b22c-c315b6ce4b11\") " Nov 23 15:15:03 crc kubenswrapper[5050]: I1123 15:15:03.156088 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/72f0e4c1-31ac-4791-b22c-c315b6ce4b11-secret-volume\") pod \"72f0e4c1-31ac-4791-b22c-c315b6ce4b11\" (UID: \"72f0e4c1-31ac-4791-b22c-c315b6ce4b11\") " Nov 23 15:15:03 crc kubenswrapper[5050]: I1123 15:15:03.156132 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-52h5c\" (UniqueName: \"kubernetes.io/projected/72f0e4c1-31ac-4791-b22c-c315b6ce4b11-kube-api-access-52h5c\") pod \"72f0e4c1-31ac-4791-b22c-c315b6ce4b11\" (UID: \"72f0e4c1-31ac-4791-b22c-c315b6ce4b11\") " Nov 23 15:15:03 crc kubenswrapper[5050]: I1123 15:15:03.157739 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/72f0e4c1-31ac-4791-b22c-c315b6ce4b11-config-volume" (OuterVolumeSpecName: "config-volume") pod "72f0e4c1-31ac-4791-b22c-c315b6ce4b11" (UID: "72f0e4c1-31ac-4791-b22c-c315b6ce4b11"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 15:15:03 crc kubenswrapper[5050]: I1123 15:15:03.164202 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/72f0e4c1-31ac-4791-b22c-c315b6ce4b11-kube-api-access-52h5c" (OuterVolumeSpecName: "kube-api-access-52h5c") pod "72f0e4c1-31ac-4791-b22c-c315b6ce4b11" (UID: "72f0e4c1-31ac-4791-b22c-c315b6ce4b11"). InnerVolumeSpecName "kube-api-access-52h5c". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:15:03 crc kubenswrapper[5050]: I1123 15:15:03.166194 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/72f0e4c1-31ac-4791-b22c-c315b6ce4b11-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "72f0e4c1-31ac-4791-b22c-c315b6ce4b11" (UID: "72f0e4c1-31ac-4791-b22c-c315b6ce4b11"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:15:03 crc kubenswrapper[5050]: I1123 15:15:03.258429 5050 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/72f0e4c1-31ac-4791-b22c-c315b6ce4b11-config-volume\") on node \"crc\" DevicePath \"\"" Nov 23 15:15:03 crc kubenswrapper[5050]: I1123 15:15:03.258535 5050 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/72f0e4c1-31ac-4791-b22c-c315b6ce4b11-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 23 15:15:03 crc kubenswrapper[5050]: I1123 15:15:03.258556 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-52h5c\" (UniqueName: \"kubernetes.io/projected/72f0e4c1-31ac-4791-b22c-c315b6ce4b11-kube-api-access-52h5c\") on node \"crc\" DevicePath \"\"" Nov 23 15:15:03 crc kubenswrapper[5050]: I1123 15:15:03.657178 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29398515-8ncgq" event={"ID":"72f0e4c1-31ac-4791-b22c-c315b6ce4b11","Type":"ContainerDied","Data":"d575dea8f17245e6cab23b0ac4e9d635e126e371aeb3e6582f938c241bb5fe5c"} Nov 23 15:15:03 crc kubenswrapper[5050]: I1123 15:15:03.657234 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d575dea8f17245e6cab23b0ac4e9d635e126e371aeb3e6582f938c241bb5fe5c" Nov 23 15:15:03 crc kubenswrapper[5050]: I1123 15:15:03.657318 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29398515-8ncgq" Nov 23 15:15:04 crc kubenswrapper[5050]: I1123 15:15:04.138504 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29398470-mm7rl"] Nov 23 15:15:04 crc kubenswrapper[5050]: I1123 15:15:04.155130 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29398470-mm7rl"] Nov 23 15:15:05 crc kubenswrapper[5050]: I1123 15:15:05.561191 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ab27cc8e-1d04-43dd-bf34-0cfdb48daba7" path="/var/lib/kubelet/pods/ab27cc8e-1d04-43dd-bf34-0cfdb48daba7/volumes" Nov 23 15:16:01 crc kubenswrapper[5050]: I1123 15:16:01.480900 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-7rbwj"] Nov 23 15:16:01 crc kubenswrapper[5050]: E1123 15:16:01.481996 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="72f0e4c1-31ac-4791-b22c-c315b6ce4b11" containerName="collect-profiles" Nov 23 15:16:01 crc kubenswrapper[5050]: I1123 15:16:01.482045 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="72f0e4c1-31ac-4791-b22c-c315b6ce4b11" containerName="collect-profiles" Nov 23 15:16:01 crc kubenswrapper[5050]: I1123 15:16:01.482249 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="72f0e4c1-31ac-4791-b22c-c315b6ce4b11" containerName="collect-profiles" Nov 23 15:16:01 crc kubenswrapper[5050]: I1123 15:16:01.483851 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-7rbwj" Nov 23 15:16:01 crc kubenswrapper[5050]: I1123 15:16:01.505008 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-7rbwj"] Nov 23 15:16:01 crc kubenswrapper[5050]: I1123 15:16:01.625149 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lwf5r\" (UniqueName: \"kubernetes.io/projected/5ed2c22f-466c-49bf-82a1-dc56374adc1c-kube-api-access-lwf5r\") pod \"redhat-marketplace-7rbwj\" (UID: \"5ed2c22f-466c-49bf-82a1-dc56374adc1c\") " pod="openshift-marketplace/redhat-marketplace-7rbwj" Nov 23 15:16:01 crc kubenswrapper[5050]: I1123 15:16:01.625265 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5ed2c22f-466c-49bf-82a1-dc56374adc1c-catalog-content\") pod \"redhat-marketplace-7rbwj\" (UID: \"5ed2c22f-466c-49bf-82a1-dc56374adc1c\") " pod="openshift-marketplace/redhat-marketplace-7rbwj" Nov 23 15:16:01 crc kubenswrapper[5050]: I1123 15:16:01.625952 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5ed2c22f-466c-49bf-82a1-dc56374adc1c-utilities\") pod \"redhat-marketplace-7rbwj\" (UID: \"5ed2c22f-466c-49bf-82a1-dc56374adc1c\") " pod="openshift-marketplace/redhat-marketplace-7rbwj" Nov 23 15:16:01 crc kubenswrapper[5050]: I1123 15:16:01.727667 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5ed2c22f-466c-49bf-82a1-dc56374adc1c-catalog-content\") pod \"redhat-marketplace-7rbwj\" (UID: \"5ed2c22f-466c-49bf-82a1-dc56374adc1c\") " pod="openshift-marketplace/redhat-marketplace-7rbwj" Nov 23 15:16:01 crc kubenswrapper[5050]: I1123 15:16:01.727838 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5ed2c22f-466c-49bf-82a1-dc56374adc1c-utilities\") pod \"redhat-marketplace-7rbwj\" (UID: \"5ed2c22f-466c-49bf-82a1-dc56374adc1c\") " pod="openshift-marketplace/redhat-marketplace-7rbwj" Nov 23 15:16:01 crc kubenswrapper[5050]: I1123 15:16:01.727898 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lwf5r\" (UniqueName: \"kubernetes.io/projected/5ed2c22f-466c-49bf-82a1-dc56374adc1c-kube-api-access-lwf5r\") pod \"redhat-marketplace-7rbwj\" (UID: \"5ed2c22f-466c-49bf-82a1-dc56374adc1c\") " pod="openshift-marketplace/redhat-marketplace-7rbwj" Nov 23 15:16:01 crc kubenswrapper[5050]: I1123 15:16:01.728433 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5ed2c22f-466c-49bf-82a1-dc56374adc1c-catalog-content\") pod \"redhat-marketplace-7rbwj\" (UID: \"5ed2c22f-466c-49bf-82a1-dc56374adc1c\") " pod="openshift-marketplace/redhat-marketplace-7rbwj" Nov 23 15:16:01 crc kubenswrapper[5050]: I1123 15:16:01.728614 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5ed2c22f-466c-49bf-82a1-dc56374adc1c-utilities\") pod \"redhat-marketplace-7rbwj\" (UID: \"5ed2c22f-466c-49bf-82a1-dc56374adc1c\") " pod="openshift-marketplace/redhat-marketplace-7rbwj" Nov 23 15:16:01 crc kubenswrapper[5050]: I1123 15:16:01.758436 5050 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-lwf5r\" (UniqueName: \"kubernetes.io/projected/5ed2c22f-466c-49bf-82a1-dc56374adc1c-kube-api-access-lwf5r\") pod \"redhat-marketplace-7rbwj\" (UID: \"5ed2c22f-466c-49bf-82a1-dc56374adc1c\") " pod="openshift-marketplace/redhat-marketplace-7rbwj" Nov 23 15:16:01 crc kubenswrapper[5050]: I1123 15:16:01.824097 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-7rbwj" Nov 23 15:16:02 crc kubenswrapper[5050]: I1123 15:16:02.095305 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-7rbwj"] Nov 23 15:16:02 crc kubenswrapper[5050]: I1123 15:16:02.256276 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7rbwj" event={"ID":"5ed2c22f-466c-49bf-82a1-dc56374adc1c","Type":"ContainerStarted","Data":"6a7ddaabb675bdf7439640cd6a146cfe402a86a979a3786d4f26f07e85d48415"} Nov 23 15:16:03 crc kubenswrapper[5050]: I1123 15:16:03.267803 5050 generic.go:334] "Generic (PLEG): container finished" podID="5ed2c22f-466c-49bf-82a1-dc56374adc1c" containerID="d27925b2cc0d7b2f68001e5789b4168a902e1c663ddf5decab39670e25da6073" exitCode=0 Nov 23 15:16:03 crc kubenswrapper[5050]: I1123 15:16:03.267872 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7rbwj" event={"ID":"5ed2c22f-466c-49bf-82a1-dc56374adc1c","Type":"ContainerDied","Data":"d27925b2cc0d7b2f68001e5789b4168a902e1c663ddf5decab39670e25da6073"} Nov 23 15:16:04 crc kubenswrapper[5050]: I1123 15:16:04.277415 5050 generic.go:334] "Generic (PLEG): container finished" podID="5ed2c22f-466c-49bf-82a1-dc56374adc1c" containerID="244fb8ab476c57a5aa2b13483d8bf999e19db08a995bebe2b3e2ce762125bbf5" exitCode=0 Nov 23 15:16:04 crc kubenswrapper[5050]: I1123 15:16:04.277584 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7rbwj" event={"ID":"5ed2c22f-466c-49bf-82a1-dc56374adc1c","Type":"ContainerDied","Data":"244fb8ab476c57a5aa2b13483d8bf999e19db08a995bebe2b3e2ce762125bbf5"} Nov 23 15:16:04 crc kubenswrapper[5050]: I1123 15:16:04.478960 5050 scope.go:117] "RemoveContainer" containerID="7fbe45687412126d64567ea868effc5a3dab98a0df53a4f3b2c087fa38c6edb6" Nov 23 15:16:05 crc kubenswrapper[5050]: I1123 15:16:05.291870 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7rbwj" event={"ID":"5ed2c22f-466c-49bf-82a1-dc56374adc1c","Type":"ContainerStarted","Data":"1e3449fc3f15aff43ed46a182efa33704a22f28d42f27f4ce8473ddf354a8070"} Nov 23 15:16:05 crc kubenswrapper[5050]: I1123 15:16:05.325320 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-7rbwj" podStartSLOduration=2.902191286 podStartE2EDuration="4.325283449s" podCreationTimestamp="2025-11-23 15:16:01 +0000 UTC" firstStartedPulling="2025-11-23 15:16:03.270804629 +0000 UTC m=+2058.437801114" lastFinishedPulling="2025-11-23 15:16:04.693896792 +0000 UTC m=+2059.860893277" observedRunningTime="2025-11-23 15:16:05.316582962 +0000 UTC m=+2060.483579457" watchObservedRunningTime="2025-11-23 15:16:05.325283449 +0000 UTC m=+2060.492279974" Nov 23 15:16:08 crc kubenswrapper[5050]: I1123 15:16:08.983659 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-wvpg8"] Nov 23 15:16:08 crc kubenswrapper[5050]: I1123 15:16:08.987502 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-wvpg8" Nov 23 15:16:09 crc kubenswrapper[5050]: I1123 15:16:08.999961 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-wvpg8"] Nov 23 15:16:09 crc kubenswrapper[5050]: I1123 15:16:09.165929 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b3180a72-1a07-4b2c-a699-24cf6223aeaf-utilities\") pod \"certified-operators-wvpg8\" (UID: \"b3180a72-1a07-4b2c-a699-24cf6223aeaf\") " pod="openshift-marketplace/certified-operators-wvpg8" Nov 23 15:16:09 crc kubenswrapper[5050]: I1123 15:16:09.166124 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5p4gn\" (UniqueName: \"kubernetes.io/projected/b3180a72-1a07-4b2c-a699-24cf6223aeaf-kube-api-access-5p4gn\") pod \"certified-operators-wvpg8\" (UID: \"b3180a72-1a07-4b2c-a699-24cf6223aeaf\") " pod="openshift-marketplace/certified-operators-wvpg8" Nov 23 15:16:09 crc kubenswrapper[5050]: I1123 15:16:09.166330 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b3180a72-1a07-4b2c-a699-24cf6223aeaf-catalog-content\") pod \"certified-operators-wvpg8\" (UID: \"b3180a72-1a07-4b2c-a699-24cf6223aeaf\") " pod="openshift-marketplace/certified-operators-wvpg8" Nov 23 15:16:09 crc kubenswrapper[5050]: I1123 15:16:09.268741 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b3180a72-1a07-4b2c-a699-24cf6223aeaf-catalog-content\") pod \"certified-operators-wvpg8\" (UID: \"b3180a72-1a07-4b2c-a699-24cf6223aeaf\") " pod="openshift-marketplace/certified-operators-wvpg8" Nov 23 15:16:09 crc kubenswrapper[5050]: I1123 15:16:09.268877 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b3180a72-1a07-4b2c-a699-24cf6223aeaf-utilities\") pod \"certified-operators-wvpg8\" (UID: \"b3180a72-1a07-4b2c-a699-24cf6223aeaf\") " pod="openshift-marketplace/certified-operators-wvpg8" Nov 23 15:16:09 crc kubenswrapper[5050]: I1123 15:16:09.268943 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5p4gn\" (UniqueName: \"kubernetes.io/projected/b3180a72-1a07-4b2c-a699-24cf6223aeaf-kube-api-access-5p4gn\") pod \"certified-operators-wvpg8\" (UID: \"b3180a72-1a07-4b2c-a699-24cf6223aeaf\") " pod="openshift-marketplace/certified-operators-wvpg8" Nov 23 15:16:09 crc kubenswrapper[5050]: I1123 15:16:09.269578 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b3180a72-1a07-4b2c-a699-24cf6223aeaf-catalog-content\") pod \"certified-operators-wvpg8\" (UID: \"b3180a72-1a07-4b2c-a699-24cf6223aeaf\") " pod="openshift-marketplace/certified-operators-wvpg8" Nov 23 15:16:09 crc kubenswrapper[5050]: I1123 15:16:09.269969 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b3180a72-1a07-4b2c-a699-24cf6223aeaf-utilities\") pod \"certified-operators-wvpg8\" (UID: \"b3180a72-1a07-4b2c-a699-24cf6223aeaf\") " pod="openshift-marketplace/certified-operators-wvpg8" Nov 23 15:16:09 crc kubenswrapper[5050]: I1123 15:16:09.306373 5050 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-5p4gn\" (UniqueName: \"kubernetes.io/projected/b3180a72-1a07-4b2c-a699-24cf6223aeaf-kube-api-access-5p4gn\") pod \"certified-operators-wvpg8\" (UID: \"b3180a72-1a07-4b2c-a699-24cf6223aeaf\") " pod="openshift-marketplace/certified-operators-wvpg8" Nov 23 15:16:09 crc kubenswrapper[5050]: I1123 15:16:09.317187 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-wvpg8" Nov 23 15:16:09 crc kubenswrapper[5050]: I1123 15:16:09.604795 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-wvpg8"] Nov 23 15:16:10 crc kubenswrapper[5050]: I1123 15:16:10.336921 5050 generic.go:334] "Generic (PLEG): container finished" podID="b3180a72-1a07-4b2c-a699-24cf6223aeaf" containerID="d777125b11e169755ada04af0c32eea1bd8e170e33658bd4e7e7821f05a2dbf7" exitCode=0 Nov 23 15:16:10 crc kubenswrapper[5050]: I1123 15:16:10.336978 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wvpg8" event={"ID":"b3180a72-1a07-4b2c-a699-24cf6223aeaf","Type":"ContainerDied","Data":"d777125b11e169755ada04af0c32eea1bd8e170e33658bd4e7e7821f05a2dbf7"} Nov 23 15:16:10 crc kubenswrapper[5050]: I1123 15:16:10.337041 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wvpg8" event={"ID":"b3180a72-1a07-4b2c-a699-24cf6223aeaf","Type":"ContainerStarted","Data":"22409fcd0523b432eb65d2b6064208aef2b7c661238ec12808ca33c4973f8398"} Nov 23 15:16:11 crc kubenswrapper[5050]: I1123 15:16:11.350766 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wvpg8" event={"ID":"b3180a72-1a07-4b2c-a699-24cf6223aeaf","Type":"ContainerStarted","Data":"c992d278d71bc15c414d87931d5aca9099ee437c189076b82771f36a236ba1cc"} Nov 23 15:16:11 crc kubenswrapper[5050]: I1123 15:16:11.824558 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-7rbwj" Nov 23 15:16:11 crc kubenswrapper[5050]: I1123 15:16:11.824666 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-7rbwj" Nov 23 15:16:12 crc kubenswrapper[5050]: I1123 15:16:12.344816 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-7rbwj" Nov 23 15:16:12 crc kubenswrapper[5050]: I1123 15:16:12.376436 5050 generic.go:334] "Generic (PLEG): container finished" podID="b3180a72-1a07-4b2c-a699-24cf6223aeaf" containerID="c992d278d71bc15c414d87931d5aca9099ee437c189076b82771f36a236ba1cc" exitCode=0 Nov 23 15:16:12 crc kubenswrapper[5050]: I1123 15:16:12.376526 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wvpg8" event={"ID":"b3180a72-1a07-4b2c-a699-24cf6223aeaf","Type":"ContainerDied","Data":"c992d278d71bc15c414d87931d5aca9099ee437c189076b82771f36a236ba1cc"} Nov 23 15:16:12 crc kubenswrapper[5050]: I1123 15:16:12.464640 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-7rbwj" Nov 23 15:16:13 crc kubenswrapper[5050]: I1123 15:16:13.391179 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wvpg8" event={"ID":"b3180a72-1a07-4b2c-a699-24cf6223aeaf","Type":"ContainerStarted","Data":"1f9f6b225ac87586a4af8ec96a9422beaa073f81a2c40a627815a893eeb4f236"} 
Nov 23 15:16:13 crc kubenswrapper[5050]: I1123 15:16:13.424693 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-wvpg8" podStartSLOduration=2.97045932 podStartE2EDuration="5.424669463s" podCreationTimestamp="2025-11-23 15:16:08 +0000 UTC" firstStartedPulling="2025-11-23 15:16:10.341326818 +0000 UTC m=+2065.508323313" lastFinishedPulling="2025-11-23 15:16:12.795536931 +0000 UTC m=+2067.962533456" observedRunningTime="2025-11-23 15:16:13.421279617 +0000 UTC m=+2068.588276092" watchObservedRunningTime="2025-11-23 15:16:13.424669463 +0000 UTC m=+2068.591665948" Nov 23 15:16:14 crc kubenswrapper[5050]: I1123 15:16:14.741884 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-7rbwj"] Nov 23 15:16:14 crc kubenswrapper[5050]: I1123 15:16:14.742485 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-7rbwj" podUID="5ed2c22f-466c-49bf-82a1-dc56374adc1c" containerName="registry-server" containerID="cri-o://1e3449fc3f15aff43ed46a182efa33704a22f28d42f27f4ce8473ddf354a8070" gracePeriod=2 Nov 23 15:16:15 crc kubenswrapper[5050]: I1123 15:16:15.237006 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-7rbwj" Nov 23 15:16:15 crc kubenswrapper[5050]: I1123 15:16:15.382091 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5ed2c22f-466c-49bf-82a1-dc56374adc1c-catalog-content\") pod \"5ed2c22f-466c-49bf-82a1-dc56374adc1c\" (UID: \"5ed2c22f-466c-49bf-82a1-dc56374adc1c\") " Nov 23 15:16:15 crc kubenswrapper[5050]: I1123 15:16:15.382351 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lwf5r\" (UniqueName: \"kubernetes.io/projected/5ed2c22f-466c-49bf-82a1-dc56374adc1c-kube-api-access-lwf5r\") pod \"5ed2c22f-466c-49bf-82a1-dc56374adc1c\" (UID: \"5ed2c22f-466c-49bf-82a1-dc56374adc1c\") " Nov 23 15:16:15 crc kubenswrapper[5050]: I1123 15:16:15.382400 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5ed2c22f-466c-49bf-82a1-dc56374adc1c-utilities\") pod \"5ed2c22f-466c-49bf-82a1-dc56374adc1c\" (UID: \"5ed2c22f-466c-49bf-82a1-dc56374adc1c\") " Nov 23 15:16:15 crc kubenswrapper[5050]: I1123 15:16:15.383704 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5ed2c22f-466c-49bf-82a1-dc56374adc1c-utilities" (OuterVolumeSpecName: "utilities") pod "5ed2c22f-466c-49bf-82a1-dc56374adc1c" (UID: "5ed2c22f-466c-49bf-82a1-dc56374adc1c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 15:16:15 crc kubenswrapper[5050]: I1123 15:16:15.390746 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5ed2c22f-466c-49bf-82a1-dc56374adc1c-kube-api-access-lwf5r" (OuterVolumeSpecName: "kube-api-access-lwf5r") pod "5ed2c22f-466c-49bf-82a1-dc56374adc1c" (UID: "5ed2c22f-466c-49bf-82a1-dc56374adc1c"). InnerVolumeSpecName "kube-api-access-lwf5r". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:16:15 crc kubenswrapper[5050]: I1123 15:16:15.411969 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5ed2c22f-466c-49bf-82a1-dc56374adc1c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5ed2c22f-466c-49bf-82a1-dc56374adc1c" (UID: "5ed2c22f-466c-49bf-82a1-dc56374adc1c"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 15:16:15 crc kubenswrapper[5050]: I1123 15:16:15.428235 5050 generic.go:334] "Generic (PLEG): container finished" podID="5ed2c22f-466c-49bf-82a1-dc56374adc1c" containerID="1e3449fc3f15aff43ed46a182efa33704a22f28d42f27f4ce8473ddf354a8070" exitCode=0 Nov 23 15:16:15 crc kubenswrapper[5050]: I1123 15:16:15.428351 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7rbwj" event={"ID":"5ed2c22f-466c-49bf-82a1-dc56374adc1c","Type":"ContainerDied","Data":"1e3449fc3f15aff43ed46a182efa33704a22f28d42f27f4ce8473ddf354a8070"} Nov 23 15:16:15 crc kubenswrapper[5050]: I1123 15:16:15.428365 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-7rbwj" Nov 23 15:16:15 crc kubenswrapper[5050]: I1123 15:16:15.428919 5050 scope.go:117] "RemoveContainer" containerID="1e3449fc3f15aff43ed46a182efa33704a22f28d42f27f4ce8473ddf354a8070" Nov 23 15:16:15 crc kubenswrapper[5050]: I1123 15:16:15.428829 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7rbwj" event={"ID":"5ed2c22f-466c-49bf-82a1-dc56374adc1c","Type":"ContainerDied","Data":"6a7ddaabb675bdf7439640cd6a146cfe402a86a979a3786d4f26f07e85d48415"} Nov 23 15:16:15 crc kubenswrapper[5050]: I1123 15:16:15.460166 5050 scope.go:117] "RemoveContainer" containerID="244fb8ab476c57a5aa2b13483d8bf999e19db08a995bebe2b3e2ce762125bbf5" Nov 23 15:16:15 crc kubenswrapper[5050]: I1123 15:16:15.483066 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-7rbwj"] Nov 23 15:16:15 crc kubenswrapper[5050]: I1123 15:16:15.484120 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lwf5r\" (UniqueName: \"kubernetes.io/projected/5ed2c22f-466c-49bf-82a1-dc56374adc1c-kube-api-access-lwf5r\") on node \"crc\" DevicePath \"\"" Nov 23 15:16:15 crc kubenswrapper[5050]: I1123 15:16:15.484152 5050 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5ed2c22f-466c-49bf-82a1-dc56374adc1c-utilities\") on node \"crc\" DevicePath \"\"" Nov 23 15:16:15 crc kubenswrapper[5050]: I1123 15:16:15.484170 5050 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5ed2c22f-466c-49bf-82a1-dc56374adc1c-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 23 15:16:15 crc kubenswrapper[5050]: I1123 15:16:15.493476 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-7rbwj"] Nov 23 15:16:15 crc kubenswrapper[5050]: I1123 15:16:15.503233 5050 scope.go:117] "RemoveContainer" containerID="d27925b2cc0d7b2f68001e5789b4168a902e1c663ddf5decab39670e25da6073" Nov 23 15:16:15 crc kubenswrapper[5050]: I1123 15:16:15.545284 5050 scope.go:117] "RemoveContainer" containerID="1e3449fc3f15aff43ed46a182efa33704a22f28d42f27f4ce8473ddf354a8070" Nov 23 15:16:15 crc kubenswrapper[5050]: E1123 15:16:15.545945 5050 log.go:32] 
"ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1e3449fc3f15aff43ed46a182efa33704a22f28d42f27f4ce8473ddf354a8070\": container with ID starting with 1e3449fc3f15aff43ed46a182efa33704a22f28d42f27f4ce8473ddf354a8070 not found: ID does not exist" containerID="1e3449fc3f15aff43ed46a182efa33704a22f28d42f27f4ce8473ddf354a8070" Nov 23 15:16:15 crc kubenswrapper[5050]: I1123 15:16:15.546028 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1e3449fc3f15aff43ed46a182efa33704a22f28d42f27f4ce8473ddf354a8070"} err="failed to get container status \"1e3449fc3f15aff43ed46a182efa33704a22f28d42f27f4ce8473ddf354a8070\": rpc error: code = NotFound desc = could not find container \"1e3449fc3f15aff43ed46a182efa33704a22f28d42f27f4ce8473ddf354a8070\": container with ID starting with 1e3449fc3f15aff43ed46a182efa33704a22f28d42f27f4ce8473ddf354a8070 not found: ID does not exist" Nov 23 15:16:15 crc kubenswrapper[5050]: I1123 15:16:15.546074 5050 scope.go:117] "RemoveContainer" containerID="244fb8ab476c57a5aa2b13483d8bf999e19db08a995bebe2b3e2ce762125bbf5" Nov 23 15:16:15 crc kubenswrapper[5050]: E1123 15:16:15.547066 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"244fb8ab476c57a5aa2b13483d8bf999e19db08a995bebe2b3e2ce762125bbf5\": container with ID starting with 244fb8ab476c57a5aa2b13483d8bf999e19db08a995bebe2b3e2ce762125bbf5 not found: ID does not exist" containerID="244fb8ab476c57a5aa2b13483d8bf999e19db08a995bebe2b3e2ce762125bbf5" Nov 23 15:16:15 crc kubenswrapper[5050]: I1123 15:16:15.547094 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"244fb8ab476c57a5aa2b13483d8bf999e19db08a995bebe2b3e2ce762125bbf5"} err="failed to get container status \"244fb8ab476c57a5aa2b13483d8bf999e19db08a995bebe2b3e2ce762125bbf5\": rpc error: code = NotFound desc = could not find container \"244fb8ab476c57a5aa2b13483d8bf999e19db08a995bebe2b3e2ce762125bbf5\": container with ID starting with 244fb8ab476c57a5aa2b13483d8bf999e19db08a995bebe2b3e2ce762125bbf5 not found: ID does not exist" Nov 23 15:16:15 crc kubenswrapper[5050]: I1123 15:16:15.547109 5050 scope.go:117] "RemoveContainer" containerID="d27925b2cc0d7b2f68001e5789b4168a902e1c663ddf5decab39670e25da6073" Nov 23 15:16:15 crc kubenswrapper[5050]: E1123 15:16:15.547605 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d27925b2cc0d7b2f68001e5789b4168a902e1c663ddf5decab39670e25da6073\": container with ID starting with d27925b2cc0d7b2f68001e5789b4168a902e1c663ddf5decab39670e25da6073 not found: ID does not exist" containerID="d27925b2cc0d7b2f68001e5789b4168a902e1c663ddf5decab39670e25da6073" Nov 23 15:16:15 crc kubenswrapper[5050]: I1123 15:16:15.547663 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d27925b2cc0d7b2f68001e5789b4168a902e1c663ddf5decab39670e25da6073"} err="failed to get container status \"d27925b2cc0d7b2f68001e5789b4168a902e1c663ddf5decab39670e25da6073\": rpc error: code = NotFound desc = could not find container \"d27925b2cc0d7b2f68001e5789b4168a902e1c663ddf5decab39670e25da6073\": container with ID starting with d27925b2cc0d7b2f68001e5789b4168a902e1c663ddf5decab39670e25da6073 not found: ID does not exist" Nov 23 15:16:15 crc kubenswrapper[5050]: I1123 15:16:15.564122 5050 kubelet_volumes.go:163] "Cleaned 
up orphaned pod volumes dir" podUID="5ed2c22f-466c-49bf-82a1-dc56374adc1c" path="/var/lib/kubelet/pods/5ed2c22f-466c-49bf-82a1-dc56374adc1c/volumes" Nov 23 15:16:19 crc kubenswrapper[5050]: I1123 15:16:19.318194 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-wvpg8" Nov 23 15:16:19 crc kubenswrapper[5050]: I1123 15:16:19.319241 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-wvpg8" Nov 23 15:16:19 crc kubenswrapper[5050]: I1123 15:16:19.401306 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-wvpg8" Nov 23 15:16:19 crc kubenswrapper[5050]: I1123 15:16:19.569428 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-wvpg8" Nov 23 15:16:19 crc kubenswrapper[5050]: I1123 15:16:19.652701 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-wvpg8"] Nov 23 15:16:21 crc kubenswrapper[5050]: I1123 15:16:21.488079 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-wvpg8" podUID="b3180a72-1a07-4b2c-a699-24cf6223aeaf" containerName="registry-server" containerID="cri-o://1f9f6b225ac87586a4af8ec96a9422beaa073f81a2c40a627815a893eeb4f236" gracePeriod=2 Nov 23 15:16:21 crc kubenswrapper[5050]: I1123 15:16:21.983334 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-wvpg8" Nov 23 15:16:22 crc kubenswrapper[5050]: I1123 15:16:22.039643 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b3180a72-1a07-4b2c-a699-24cf6223aeaf-utilities\") pod \"b3180a72-1a07-4b2c-a699-24cf6223aeaf\" (UID: \"b3180a72-1a07-4b2c-a699-24cf6223aeaf\") " Nov 23 15:16:22 crc kubenswrapper[5050]: I1123 15:16:22.039724 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b3180a72-1a07-4b2c-a699-24cf6223aeaf-catalog-content\") pod \"b3180a72-1a07-4b2c-a699-24cf6223aeaf\" (UID: \"b3180a72-1a07-4b2c-a699-24cf6223aeaf\") " Nov 23 15:16:22 crc kubenswrapper[5050]: I1123 15:16:22.039914 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5p4gn\" (UniqueName: \"kubernetes.io/projected/b3180a72-1a07-4b2c-a699-24cf6223aeaf-kube-api-access-5p4gn\") pod \"b3180a72-1a07-4b2c-a699-24cf6223aeaf\" (UID: \"b3180a72-1a07-4b2c-a699-24cf6223aeaf\") " Nov 23 15:16:22 crc kubenswrapper[5050]: I1123 15:16:22.041406 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b3180a72-1a07-4b2c-a699-24cf6223aeaf-utilities" (OuterVolumeSpecName: "utilities") pod "b3180a72-1a07-4b2c-a699-24cf6223aeaf" (UID: "b3180a72-1a07-4b2c-a699-24cf6223aeaf"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 15:16:22 crc kubenswrapper[5050]: I1123 15:16:22.054965 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b3180a72-1a07-4b2c-a699-24cf6223aeaf-kube-api-access-5p4gn" (OuterVolumeSpecName: "kube-api-access-5p4gn") pod "b3180a72-1a07-4b2c-a699-24cf6223aeaf" (UID: "b3180a72-1a07-4b2c-a699-24cf6223aeaf"). InnerVolumeSpecName "kube-api-access-5p4gn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:16:22 crc kubenswrapper[5050]: I1123 15:16:22.106184 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b3180a72-1a07-4b2c-a699-24cf6223aeaf-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b3180a72-1a07-4b2c-a699-24cf6223aeaf" (UID: "b3180a72-1a07-4b2c-a699-24cf6223aeaf"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 15:16:22 crc kubenswrapper[5050]: I1123 15:16:22.142037 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5p4gn\" (UniqueName: \"kubernetes.io/projected/b3180a72-1a07-4b2c-a699-24cf6223aeaf-kube-api-access-5p4gn\") on node \"crc\" DevicePath \"\"" Nov 23 15:16:22 crc kubenswrapper[5050]: I1123 15:16:22.142087 5050 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b3180a72-1a07-4b2c-a699-24cf6223aeaf-utilities\") on node \"crc\" DevicePath \"\"" Nov 23 15:16:22 crc kubenswrapper[5050]: I1123 15:16:22.142101 5050 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b3180a72-1a07-4b2c-a699-24cf6223aeaf-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 23 15:16:22 crc kubenswrapper[5050]: I1123 15:16:22.503758 5050 generic.go:334] "Generic (PLEG): container finished" podID="b3180a72-1a07-4b2c-a699-24cf6223aeaf" containerID="1f9f6b225ac87586a4af8ec96a9422beaa073f81a2c40a627815a893eeb4f236" exitCode=0 Nov 23 15:16:22 crc kubenswrapper[5050]: I1123 15:16:22.503897 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wvpg8" event={"ID":"b3180a72-1a07-4b2c-a699-24cf6223aeaf","Type":"ContainerDied","Data":"1f9f6b225ac87586a4af8ec96a9422beaa073f81a2c40a627815a893eeb4f236"} Nov 23 15:16:22 crc kubenswrapper[5050]: I1123 15:16:22.503920 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-wvpg8" Nov 23 15:16:22 crc kubenswrapper[5050]: I1123 15:16:22.504583 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wvpg8" event={"ID":"b3180a72-1a07-4b2c-a699-24cf6223aeaf","Type":"ContainerDied","Data":"22409fcd0523b432eb65d2b6064208aef2b7c661238ec12808ca33c4973f8398"} Nov 23 15:16:22 crc kubenswrapper[5050]: I1123 15:16:22.504624 5050 scope.go:117] "RemoveContainer" containerID="1f9f6b225ac87586a4af8ec96a9422beaa073f81a2c40a627815a893eeb4f236" Nov 23 15:16:22 crc kubenswrapper[5050]: I1123 15:16:22.536045 5050 scope.go:117] "RemoveContainer" containerID="c992d278d71bc15c414d87931d5aca9099ee437c189076b82771f36a236ba1cc" Nov 23 15:16:22 crc kubenswrapper[5050]: I1123 15:16:22.558607 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-wvpg8"] Nov 23 15:16:22 crc kubenswrapper[5050]: I1123 15:16:22.565543 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-wvpg8"] Nov 23 15:16:22 crc kubenswrapper[5050]: I1123 15:16:22.580503 5050 scope.go:117] "RemoveContainer" containerID="d777125b11e169755ada04af0c32eea1bd8e170e33658bd4e7e7821f05a2dbf7" Nov 23 15:16:22 crc kubenswrapper[5050]: I1123 15:16:22.617842 5050 scope.go:117] "RemoveContainer" containerID="1f9f6b225ac87586a4af8ec96a9422beaa073f81a2c40a627815a893eeb4f236" Nov 23 15:16:22 crc kubenswrapper[5050]: E1123 15:16:22.619774 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1f9f6b225ac87586a4af8ec96a9422beaa073f81a2c40a627815a893eeb4f236\": container with ID starting with 1f9f6b225ac87586a4af8ec96a9422beaa073f81a2c40a627815a893eeb4f236 not found: ID does not exist" containerID="1f9f6b225ac87586a4af8ec96a9422beaa073f81a2c40a627815a893eeb4f236" Nov 23 15:16:22 crc kubenswrapper[5050]: I1123 15:16:22.619814 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1f9f6b225ac87586a4af8ec96a9422beaa073f81a2c40a627815a893eeb4f236"} err="failed to get container status \"1f9f6b225ac87586a4af8ec96a9422beaa073f81a2c40a627815a893eeb4f236\": rpc error: code = NotFound desc = could not find container \"1f9f6b225ac87586a4af8ec96a9422beaa073f81a2c40a627815a893eeb4f236\": container with ID starting with 1f9f6b225ac87586a4af8ec96a9422beaa073f81a2c40a627815a893eeb4f236 not found: ID does not exist" Nov 23 15:16:22 crc kubenswrapper[5050]: I1123 15:16:22.619854 5050 scope.go:117] "RemoveContainer" containerID="c992d278d71bc15c414d87931d5aca9099ee437c189076b82771f36a236ba1cc" Nov 23 15:16:22 crc kubenswrapper[5050]: E1123 15:16:22.620259 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c992d278d71bc15c414d87931d5aca9099ee437c189076b82771f36a236ba1cc\": container with ID starting with c992d278d71bc15c414d87931d5aca9099ee437c189076b82771f36a236ba1cc not found: ID does not exist" containerID="c992d278d71bc15c414d87931d5aca9099ee437c189076b82771f36a236ba1cc" Nov 23 15:16:22 crc kubenswrapper[5050]: I1123 15:16:22.620290 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c992d278d71bc15c414d87931d5aca9099ee437c189076b82771f36a236ba1cc"} err="failed to get container status \"c992d278d71bc15c414d87931d5aca9099ee437c189076b82771f36a236ba1cc\": rpc error: code = NotFound desc = could not find 
container \"c992d278d71bc15c414d87931d5aca9099ee437c189076b82771f36a236ba1cc\": container with ID starting with c992d278d71bc15c414d87931d5aca9099ee437c189076b82771f36a236ba1cc not found: ID does not exist" Nov 23 15:16:22 crc kubenswrapper[5050]: I1123 15:16:22.620309 5050 scope.go:117] "RemoveContainer" containerID="d777125b11e169755ada04af0c32eea1bd8e170e33658bd4e7e7821f05a2dbf7" Nov 23 15:16:22 crc kubenswrapper[5050]: E1123 15:16:22.620674 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d777125b11e169755ada04af0c32eea1bd8e170e33658bd4e7e7821f05a2dbf7\": container with ID starting with d777125b11e169755ada04af0c32eea1bd8e170e33658bd4e7e7821f05a2dbf7 not found: ID does not exist" containerID="d777125b11e169755ada04af0c32eea1bd8e170e33658bd4e7e7821f05a2dbf7" Nov 23 15:16:22 crc kubenswrapper[5050]: I1123 15:16:22.620725 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d777125b11e169755ada04af0c32eea1bd8e170e33658bd4e7e7821f05a2dbf7"} err="failed to get container status \"d777125b11e169755ada04af0c32eea1bd8e170e33658bd4e7e7821f05a2dbf7\": rpc error: code = NotFound desc = could not find container \"d777125b11e169755ada04af0c32eea1bd8e170e33658bd4e7e7821f05a2dbf7\": container with ID starting with d777125b11e169755ada04af0c32eea1bd8e170e33658bd4e7e7821f05a2dbf7 not found: ID does not exist" Nov 23 15:16:23 crc kubenswrapper[5050]: I1123 15:16:23.567545 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b3180a72-1a07-4b2c-a699-24cf6223aeaf" path="/var/lib/kubelet/pods/b3180a72-1a07-4b2c-a699-24cf6223aeaf/volumes" Nov 23 15:16:29 crc kubenswrapper[5050]: I1123 15:16:29.224576 5050 patch_prober.go:28] interesting pod/machine-config-daemon-hlrlq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 23 15:16:29 crc kubenswrapper[5050]: I1123 15:16:29.225471 5050 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 23 15:16:33 crc kubenswrapper[5050]: I1123 15:16:33.080595 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-kvssd"] Nov 23 15:16:33 crc kubenswrapper[5050]: E1123 15:16:33.081813 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b3180a72-1a07-4b2c-a699-24cf6223aeaf" containerName="registry-server" Nov 23 15:16:33 crc kubenswrapper[5050]: I1123 15:16:33.081848 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="b3180a72-1a07-4b2c-a699-24cf6223aeaf" containerName="registry-server" Nov 23 15:16:33 crc kubenswrapper[5050]: E1123 15:16:33.081895 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5ed2c22f-466c-49bf-82a1-dc56374adc1c" containerName="extract-content" Nov 23 15:16:33 crc kubenswrapper[5050]: I1123 15:16:33.081912 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="5ed2c22f-466c-49bf-82a1-dc56374adc1c" containerName="extract-content" Nov 23 15:16:33 crc kubenswrapper[5050]: E1123 15:16:33.081934 5050 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="5ed2c22f-466c-49bf-82a1-dc56374adc1c" containerName="extract-utilities" Nov 23 15:16:33 crc kubenswrapper[5050]: I1123 15:16:33.081949 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="5ed2c22f-466c-49bf-82a1-dc56374adc1c" containerName="extract-utilities" Nov 23 15:16:33 crc kubenswrapper[5050]: E1123 15:16:33.081966 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b3180a72-1a07-4b2c-a699-24cf6223aeaf" containerName="extract-utilities" Nov 23 15:16:33 crc kubenswrapper[5050]: I1123 15:16:33.081978 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="b3180a72-1a07-4b2c-a699-24cf6223aeaf" containerName="extract-utilities" Nov 23 15:16:33 crc kubenswrapper[5050]: E1123 15:16:33.081996 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b3180a72-1a07-4b2c-a699-24cf6223aeaf" containerName="extract-content" Nov 23 15:16:33 crc kubenswrapper[5050]: I1123 15:16:33.082008 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="b3180a72-1a07-4b2c-a699-24cf6223aeaf" containerName="extract-content" Nov 23 15:16:33 crc kubenswrapper[5050]: E1123 15:16:33.082148 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5ed2c22f-466c-49bf-82a1-dc56374adc1c" containerName="registry-server" Nov 23 15:16:33 crc kubenswrapper[5050]: I1123 15:16:33.082166 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="5ed2c22f-466c-49bf-82a1-dc56374adc1c" containerName="registry-server" Nov 23 15:16:33 crc kubenswrapper[5050]: I1123 15:16:33.082616 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="5ed2c22f-466c-49bf-82a1-dc56374adc1c" containerName="registry-server" Nov 23 15:16:33 crc kubenswrapper[5050]: I1123 15:16:33.082681 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="b3180a72-1a07-4b2c-a699-24cf6223aeaf" containerName="registry-server" Nov 23 15:16:33 crc kubenswrapper[5050]: I1123 15:16:33.085130 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-kvssd" Nov 23 15:16:33 crc kubenswrapper[5050]: I1123 15:16:33.111306 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-kvssd"] Nov 23 15:16:33 crc kubenswrapper[5050]: I1123 15:16:33.165977 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/58a76ce1-d583-44fe-b5f8-d503a7a98339-catalog-content\") pod \"redhat-operators-kvssd\" (UID: \"58a76ce1-d583-44fe-b5f8-d503a7a98339\") " pod="openshift-marketplace/redhat-operators-kvssd" Nov 23 15:16:33 crc kubenswrapper[5050]: I1123 15:16:33.166072 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8dbbc\" (UniqueName: \"kubernetes.io/projected/58a76ce1-d583-44fe-b5f8-d503a7a98339-kube-api-access-8dbbc\") pod \"redhat-operators-kvssd\" (UID: \"58a76ce1-d583-44fe-b5f8-d503a7a98339\") " pod="openshift-marketplace/redhat-operators-kvssd" Nov 23 15:16:33 crc kubenswrapper[5050]: I1123 15:16:33.166221 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/58a76ce1-d583-44fe-b5f8-d503a7a98339-utilities\") pod \"redhat-operators-kvssd\" (UID: \"58a76ce1-d583-44fe-b5f8-d503a7a98339\") " pod="openshift-marketplace/redhat-operators-kvssd" Nov 23 15:16:33 crc kubenswrapper[5050]: I1123 15:16:33.268641 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/58a76ce1-d583-44fe-b5f8-d503a7a98339-utilities\") pod \"redhat-operators-kvssd\" (UID: \"58a76ce1-d583-44fe-b5f8-d503a7a98339\") " pod="openshift-marketplace/redhat-operators-kvssd" Nov 23 15:16:33 crc kubenswrapper[5050]: I1123 15:16:33.268816 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/58a76ce1-d583-44fe-b5f8-d503a7a98339-catalog-content\") pod \"redhat-operators-kvssd\" (UID: \"58a76ce1-d583-44fe-b5f8-d503a7a98339\") " pod="openshift-marketplace/redhat-operators-kvssd" Nov 23 15:16:33 crc kubenswrapper[5050]: I1123 15:16:33.268867 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8dbbc\" (UniqueName: \"kubernetes.io/projected/58a76ce1-d583-44fe-b5f8-d503a7a98339-kube-api-access-8dbbc\") pod \"redhat-operators-kvssd\" (UID: \"58a76ce1-d583-44fe-b5f8-d503a7a98339\") " pod="openshift-marketplace/redhat-operators-kvssd" Nov 23 15:16:33 crc kubenswrapper[5050]: I1123 15:16:33.269641 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/58a76ce1-d583-44fe-b5f8-d503a7a98339-utilities\") pod \"redhat-operators-kvssd\" (UID: \"58a76ce1-d583-44fe-b5f8-d503a7a98339\") " pod="openshift-marketplace/redhat-operators-kvssd" Nov 23 15:16:33 crc kubenswrapper[5050]: I1123 15:16:33.269675 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/58a76ce1-d583-44fe-b5f8-d503a7a98339-catalog-content\") pod \"redhat-operators-kvssd\" (UID: \"58a76ce1-d583-44fe-b5f8-d503a7a98339\") " pod="openshift-marketplace/redhat-operators-kvssd" Nov 23 15:16:33 crc kubenswrapper[5050]: I1123 15:16:33.295396 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-8dbbc\" (UniqueName: \"kubernetes.io/projected/58a76ce1-d583-44fe-b5f8-d503a7a98339-kube-api-access-8dbbc\") pod \"redhat-operators-kvssd\" (UID: \"58a76ce1-d583-44fe-b5f8-d503a7a98339\") " pod="openshift-marketplace/redhat-operators-kvssd" Nov 23 15:16:33 crc kubenswrapper[5050]: I1123 15:16:33.444885 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-kvssd" Nov 23 15:16:33 crc kubenswrapper[5050]: I1123 15:16:33.752041 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-kvssd"] Nov 23 15:16:34 crc kubenswrapper[5050]: I1123 15:16:34.689825 5050 generic.go:334] "Generic (PLEG): container finished" podID="58a76ce1-d583-44fe-b5f8-d503a7a98339" containerID="1b0b5d8fb6aaa8c21a5a2f54096ecafda737aa877a75075c135ffeb4933a2283" exitCode=0 Nov 23 15:16:34 crc kubenswrapper[5050]: I1123 15:16:34.689903 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-kvssd" event={"ID":"58a76ce1-d583-44fe-b5f8-d503a7a98339","Type":"ContainerDied","Data":"1b0b5d8fb6aaa8c21a5a2f54096ecafda737aa877a75075c135ffeb4933a2283"} Nov 23 15:16:34 crc kubenswrapper[5050]: I1123 15:16:34.690309 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-kvssd" event={"ID":"58a76ce1-d583-44fe-b5f8-d503a7a98339","Type":"ContainerStarted","Data":"f5072f22a7b9f0d47e3002132c98e9fc703da5161d4f82c404656392a1072391"} Nov 23 15:16:35 crc kubenswrapper[5050]: I1123 15:16:35.700155 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-kvssd" event={"ID":"58a76ce1-d583-44fe-b5f8-d503a7a98339","Type":"ContainerStarted","Data":"b9dda557ef547fb8245af8c916796e5b57a9acf69a1d453912801e00dde92646"} Nov 23 15:16:36 crc kubenswrapper[5050]: I1123 15:16:36.721943 5050 generic.go:334] "Generic (PLEG): container finished" podID="58a76ce1-d583-44fe-b5f8-d503a7a98339" containerID="b9dda557ef547fb8245af8c916796e5b57a9acf69a1d453912801e00dde92646" exitCode=0 Nov 23 15:16:36 crc kubenswrapper[5050]: I1123 15:16:36.722018 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-kvssd" event={"ID":"58a76ce1-d583-44fe-b5f8-d503a7a98339","Type":"ContainerDied","Data":"b9dda557ef547fb8245af8c916796e5b57a9acf69a1d453912801e00dde92646"} Nov 23 15:16:37 crc kubenswrapper[5050]: I1123 15:16:37.741565 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-kvssd" event={"ID":"58a76ce1-d583-44fe-b5f8-d503a7a98339","Type":"ContainerStarted","Data":"711726acb174cc3673b16e1333172d6f71199935a093b3ae14236ac40205f591"} Nov 23 15:16:37 crc kubenswrapper[5050]: I1123 15:16:37.775618 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-kvssd" podStartSLOduration=2.043093764 podStartE2EDuration="4.775590205s" podCreationTimestamp="2025-11-23 15:16:33 +0000 UTC" firstStartedPulling="2025-11-23 15:16:34.694604336 +0000 UTC m=+2089.861600861" lastFinishedPulling="2025-11-23 15:16:37.427100787 +0000 UTC m=+2092.594097302" observedRunningTime="2025-11-23 15:16:37.775314837 +0000 UTC m=+2092.942311322" watchObservedRunningTime="2025-11-23 15:16:37.775590205 +0000 UTC m=+2092.942586710" Nov 23 15:16:43 crc kubenswrapper[5050]: I1123 15:16:43.445231 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-kvssd" Nov 23 
15:16:43 crc kubenswrapper[5050]: I1123 15:16:43.445658 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-kvssd" Nov 23 15:16:44 crc kubenswrapper[5050]: I1123 15:16:44.510196 5050 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-kvssd" podUID="58a76ce1-d583-44fe-b5f8-d503a7a98339" containerName="registry-server" probeResult="failure" output=< Nov 23 15:16:44 crc kubenswrapper[5050]: timeout: failed to connect service ":50051" within 1s Nov 23 15:16:44 crc kubenswrapper[5050]: > Nov 23 15:16:53 crc kubenswrapper[5050]: I1123 15:16:53.545596 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-kvssd" Nov 23 15:16:53 crc kubenswrapper[5050]: I1123 15:16:53.618675 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-kvssd" Nov 23 15:16:53 crc kubenswrapper[5050]: I1123 15:16:53.800245 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-kvssd"] Nov 23 15:16:54 crc kubenswrapper[5050]: I1123 15:16:54.913697 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-kvssd" podUID="58a76ce1-d583-44fe-b5f8-d503a7a98339" containerName="registry-server" containerID="cri-o://711726acb174cc3673b16e1333172d6f71199935a093b3ae14236ac40205f591" gracePeriod=2 Nov 23 15:16:55 crc kubenswrapper[5050]: I1123 15:16:55.333222 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-kvssd" Nov 23 15:16:55 crc kubenswrapper[5050]: I1123 15:16:55.493396 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8dbbc\" (UniqueName: \"kubernetes.io/projected/58a76ce1-d583-44fe-b5f8-d503a7a98339-kube-api-access-8dbbc\") pod \"58a76ce1-d583-44fe-b5f8-d503a7a98339\" (UID: \"58a76ce1-d583-44fe-b5f8-d503a7a98339\") " Nov 23 15:16:55 crc kubenswrapper[5050]: I1123 15:16:55.494590 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/58a76ce1-d583-44fe-b5f8-d503a7a98339-utilities\") pod \"58a76ce1-d583-44fe-b5f8-d503a7a98339\" (UID: \"58a76ce1-d583-44fe-b5f8-d503a7a98339\") " Nov 23 15:16:55 crc kubenswrapper[5050]: I1123 15:16:55.494717 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/58a76ce1-d583-44fe-b5f8-d503a7a98339-catalog-content\") pod \"58a76ce1-d583-44fe-b5f8-d503a7a98339\" (UID: \"58a76ce1-d583-44fe-b5f8-d503a7a98339\") " Nov 23 15:16:55 crc kubenswrapper[5050]: I1123 15:16:55.495938 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/58a76ce1-d583-44fe-b5f8-d503a7a98339-utilities" (OuterVolumeSpecName: "utilities") pod "58a76ce1-d583-44fe-b5f8-d503a7a98339" (UID: "58a76ce1-d583-44fe-b5f8-d503a7a98339"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 15:16:55 crc kubenswrapper[5050]: I1123 15:16:55.501471 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/58a76ce1-d583-44fe-b5f8-d503a7a98339-kube-api-access-8dbbc" (OuterVolumeSpecName: "kube-api-access-8dbbc") pod "58a76ce1-d583-44fe-b5f8-d503a7a98339" (UID: "58a76ce1-d583-44fe-b5f8-d503a7a98339"). InnerVolumeSpecName "kube-api-access-8dbbc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:16:55 crc kubenswrapper[5050]: I1123 15:16:55.596935 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/58a76ce1-d583-44fe-b5f8-d503a7a98339-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "58a76ce1-d583-44fe-b5f8-d503a7a98339" (UID: "58a76ce1-d583-44fe-b5f8-d503a7a98339"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 15:16:55 crc kubenswrapper[5050]: I1123 15:16:55.597423 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/58a76ce1-d583-44fe-b5f8-d503a7a98339-catalog-content\") pod \"58a76ce1-d583-44fe-b5f8-d503a7a98339\" (UID: \"58a76ce1-d583-44fe-b5f8-d503a7a98339\") " Nov 23 15:16:55 crc kubenswrapper[5050]: W1123 15:16:55.597772 5050 empty_dir.go:500] Warning: Unmount skipped because path does not exist: /var/lib/kubelet/pods/58a76ce1-d583-44fe-b5f8-d503a7a98339/volumes/kubernetes.io~empty-dir/catalog-content Nov 23 15:16:55 crc kubenswrapper[5050]: I1123 15:16:55.597810 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/58a76ce1-d583-44fe-b5f8-d503a7a98339-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "58a76ce1-d583-44fe-b5f8-d503a7a98339" (UID: "58a76ce1-d583-44fe-b5f8-d503a7a98339"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 15:16:55 crc kubenswrapper[5050]: I1123 15:16:55.598024 5050 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/58a76ce1-d583-44fe-b5f8-d503a7a98339-utilities\") on node \"crc\" DevicePath \"\"" Nov 23 15:16:55 crc kubenswrapper[5050]: I1123 15:16:55.598053 5050 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/58a76ce1-d583-44fe-b5f8-d503a7a98339-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 23 15:16:55 crc kubenswrapper[5050]: I1123 15:16:55.598070 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8dbbc\" (UniqueName: \"kubernetes.io/projected/58a76ce1-d583-44fe-b5f8-d503a7a98339-kube-api-access-8dbbc\") on node \"crc\" DevicePath \"\"" Nov 23 15:16:55 crc kubenswrapper[5050]: I1123 15:16:55.930273 5050 generic.go:334] "Generic (PLEG): container finished" podID="58a76ce1-d583-44fe-b5f8-d503a7a98339" containerID="711726acb174cc3673b16e1333172d6f71199935a093b3ae14236ac40205f591" exitCode=0 Nov 23 15:16:55 crc kubenswrapper[5050]: I1123 15:16:55.930344 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-kvssd" event={"ID":"58a76ce1-d583-44fe-b5f8-d503a7a98339","Type":"ContainerDied","Data":"711726acb174cc3673b16e1333172d6f71199935a093b3ae14236ac40205f591"} Nov 23 15:16:55 crc kubenswrapper[5050]: I1123 15:16:55.930398 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-kvssd" event={"ID":"58a76ce1-d583-44fe-b5f8-d503a7a98339","Type":"ContainerDied","Data":"f5072f22a7b9f0d47e3002132c98e9fc703da5161d4f82c404656392a1072391"} Nov 23 15:16:55 crc kubenswrapper[5050]: I1123 15:16:55.930425 5050 scope.go:117] "RemoveContainer" containerID="711726acb174cc3673b16e1333172d6f71199935a093b3ae14236ac40205f591" Nov 23 15:16:55 crc kubenswrapper[5050]: I1123 15:16:55.930527 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-kvssd" Nov 23 15:16:55 crc kubenswrapper[5050]: I1123 15:16:55.972483 5050 scope.go:117] "RemoveContainer" containerID="b9dda557ef547fb8245af8c916796e5b57a9acf69a1d453912801e00dde92646" Nov 23 15:16:55 crc kubenswrapper[5050]: I1123 15:16:55.973633 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-kvssd"] Nov 23 15:16:55 crc kubenswrapper[5050]: I1123 15:16:55.978889 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-kvssd"] Nov 23 15:16:56 crc kubenswrapper[5050]: I1123 15:16:56.010726 5050 scope.go:117] "RemoveContainer" containerID="1b0b5d8fb6aaa8c21a5a2f54096ecafda737aa877a75075c135ffeb4933a2283" Nov 23 15:16:56 crc kubenswrapper[5050]: I1123 15:16:56.037981 5050 scope.go:117] "RemoveContainer" containerID="711726acb174cc3673b16e1333172d6f71199935a093b3ae14236ac40205f591" Nov 23 15:16:56 crc kubenswrapper[5050]: E1123 15:16:56.038488 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"711726acb174cc3673b16e1333172d6f71199935a093b3ae14236ac40205f591\": container with ID starting with 711726acb174cc3673b16e1333172d6f71199935a093b3ae14236ac40205f591 not found: ID does not exist" containerID="711726acb174cc3673b16e1333172d6f71199935a093b3ae14236ac40205f591" Nov 23 15:16:56 crc kubenswrapper[5050]: I1123 15:16:56.038533 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"711726acb174cc3673b16e1333172d6f71199935a093b3ae14236ac40205f591"} err="failed to get container status \"711726acb174cc3673b16e1333172d6f71199935a093b3ae14236ac40205f591\": rpc error: code = NotFound desc = could not find container \"711726acb174cc3673b16e1333172d6f71199935a093b3ae14236ac40205f591\": container with ID starting with 711726acb174cc3673b16e1333172d6f71199935a093b3ae14236ac40205f591 not found: ID does not exist" Nov 23 15:16:56 crc kubenswrapper[5050]: I1123 15:16:56.038592 5050 scope.go:117] "RemoveContainer" containerID="b9dda557ef547fb8245af8c916796e5b57a9acf69a1d453912801e00dde92646" Nov 23 15:16:56 crc kubenswrapper[5050]: E1123 15:16:56.039151 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b9dda557ef547fb8245af8c916796e5b57a9acf69a1d453912801e00dde92646\": container with ID starting with b9dda557ef547fb8245af8c916796e5b57a9acf69a1d453912801e00dde92646 not found: ID does not exist" containerID="b9dda557ef547fb8245af8c916796e5b57a9acf69a1d453912801e00dde92646" Nov 23 15:16:56 crc kubenswrapper[5050]: I1123 15:16:56.039185 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b9dda557ef547fb8245af8c916796e5b57a9acf69a1d453912801e00dde92646"} err="failed to get container status \"b9dda557ef547fb8245af8c916796e5b57a9acf69a1d453912801e00dde92646\": rpc error: code = NotFound desc = could not find container \"b9dda557ef547fb8245af8c916796e5b57a9acf69a1d453912801e00dde92646\": container with ID starting with b9dda557ef547fb8245af8c916796e5b57a9acf69a1d453912801e00dde92646 not found: ID does not exist" Nov 23 15:16:56 crc kubenswrapper[5050]: I1123 15:16:56.039207 5050 scope.go:117] "RemoveContainer" containerID="1b0b5d8fb6aaa8c21a5a2f54096ecafda737aa877a75075c135ffeb4933a2283" Nov 23 15:16:56 crc kubenswrapper[5050]: E1123 15:16:56.039780 5050 log.go:32] "ContainerStatus from runtime service failed" 
err="rpc error: code = NotFound desc = could not find container \"1b0b5d8fb6aaa8c21a5a2f54096ecafda737aa877a75075c135ffeb4933a2283\": container with ID starting with 1b0b5d8fb6aaa8c21a5a2f54096ecafda737aa877a75075c135ffeb4933a2283 not found: ID does not exist" containerID="1b0b5d8fb6aaa8c21a5a2f54096ecafda737aa877a75075c135ffeb4933a2283" Nov 23 15:16:56 crc kubenswrapper[5050]: I1123 15:16:56.039881 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1b0b5d8fb6aaa8c21a5a2f54096ecafda737aa877a75075c135ffeb4933a2283"} err="failed to get container status \"1b0b5d8fb6aaa8c21a5a2f54096ecafda737aa877a75075c135ffeb4933a2283\": rpc error: code = NotFound desc = could not find container \"1b0b5d8fb6aaa8c21a5a2f54096ecafda737aa877a75075c135ffeb4933a2283\": container with ID starting with 1b0b5d8fb6aaa8c21a5a2f54096ecafda737aa877a75075c135ffeb4933a2283 not found: ID does not exist" Nov 23 15:16:57 crc kubenswrapper[5050]: I1123 15:16:57.562054 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="58a76ce1-d583-44fe-b5f8-d503a7a98339" path="/var/lib/kubelet/pods/58a76ce1-d583-44fe-b5f8-d503a7a98339/volumes" Nov 23 15:16:59 crc kubenswrapper[5050]: I1123 15:16:59.224945 5050 patch_prober.go:28] interesting pod/machine-config-daemon-hlrlq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 23 15:16:59 crc kubenswrapper[5050]: I1123 15:16:59.225016 5050 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 23 15:17:29 crc kubenswrapper[5050]: I1123 15:17:29.224419 5050 patch_prober.go:28] interesting pod/machine-config-daemon-hlrlq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 23 15:17:29 crc kubenswrapper[5050]: I1123 15:17:29.225185 5050 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 23 15:17:29 crc kubenswrapper[5050]: I1123 15:17:29.225244 5050 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" Nov 23 15:17:29 crc kubenswrapper[5050]: I1123 15:17:29.226180 5050 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"bdd40c88519a2cc1e873e7266b0787007822918051c94cd3f1fdfcfacf25d920"} pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 23 15:17:29 crc kubenswrapper[5050]: I1123 15:17:29.226255 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" 
podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" containerID="cri-o://bdd40c88519a2cc1e873e7266b0787007822918051c94cd3f1fdfcfacf25d920" gracePeriod=600 Nov 23 15:17:29 crc kubenswrapper[5050]: E1123 15:17:29.354281 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 15:17:30 crc kubenswrapper[5050]: I1123 15:17:30.249695 5050 generic.go:334] "Generic (PLEG): container finished" podID="1d998909-9470-47ef-87e8-b34f0473682f" containerID="bdd40c88519a2cc1e873e7266b0787007822918051c94cd3f1fdfcfacf25d920" exitCode=0 Nov 23 15:17:30 crc kubenswrapper[5050]: I1123 15:17:30.249747 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" event={"ID":"1d998909-9470-47ef-87e8-b34f0473682f","Type":"ContainerDied","Data":"bdd40c88519a2cc1e873e7266b0787007822918051c94cd3f1fdfcfacf25d920"} Nov 23 15:17:30 crc kubenswrapper[5050]: I1123 15:17:30.249801 5050 scope.go:117] "RemoveContainer" containerID="cb2ad25ebcc9082f315d032ed9abac6dfeae4ca432f56331b82c83eec7510b50" Nov 23 15:17:30 crc kubenswrapper[5050]: I1123 15:17:30.250838 5050 scope.go:117] "RemoveContainer" containerID="bdd40c88519a2cc1e873e7266b0787007822918051c94cd3f1fdfcfacf25d920" Nov 23 15:17:30 crc kubenswrapper[5050]: E1123 15:17:30.251376 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 15:17:44 crc kubenswrapper[5050]: I1123 15:17:44.549097 5050 scope.go:117] "RemoveContainer" containerID="bdd40c88519a2cc1e873e7266b0787007822918051c94cd3f1fdfcfacf25d920" Nov 23 15:17:44 crc kubenswrapper[5050]: E1123 15:17:44.550117 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 15:17:56 crc kubenswrapper[5050]: I1123 15:17:56.548824 5050 scope.go:117] "RemoveContainer" containerID="bdd40c88519a2cc1e873e7266b0787007822918051c94cd3f1fdfcfacf25d920" Nov 23 15:17:56 crc kubenswrapper[5050]: E1123 15:17:56.549967 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 15:18:07 crc kubenswrapper[5050]: I1123 15:18:07.548427 5050 scope.go:117] 
"RemoveContainer" containerID="bdd40c88519a2cc1e873e7266b0787007822918051c94cd3f1fdfcfacf25d920" Nov 23 15:18:07 crc kubenswrapper[5050]: E1123 15:18:07.549329 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 15:18:21 crc kubenswrapper[5050]: I1123 15:18:21.549085 5050 scope.go:117] "RemoveContainer" containerID="bdd40c88519a2cc1e873e7266b0787007822918051c94cd3f1fdfcfacf25d920" Nov 23 15:18:21 crc kubenswrapper[5050]: E1123 15:18:21.550152 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 15:18:33 crc kubenswrapper[5050]: I1123 15:18:33.549430 5050 scope.go:117] "RemoveContainer" containerID="bdd40c88519a2cc1e873e7266b0787007822918051c94cd3f1fdfcfacf25d920" Nov 23 15:18:33 crc kubenswrapper[5050]: E1123 15:18:33.550549 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 15:18:47 crc kubenswrapper[5050]: I1123 15:18:47.549960 5050 scope.go:117] "RemoveContainer" containerID="bdd40c88519a2cc1e873e7266b0787007822918051c94cd3f1fdfcfacf25d920" Nov 23 15:18:47 crc kubenswrapper[5050]: E1123 15:18:47.551555 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 15:19:00 crc kubenswrapper[5050]: I1123 15:19:00.549191 5050 scope.go:117] "RemoveContainer" containerID="bdd40c88519a2cc1e873e7266b0787007822918051c94cd3f1fdfcfacf25d920" Nov 23 15:19:00 crc kubenswrapper[5050]: E1123 15:19:00.550535 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 15:19:12 crc kubenswrapper[5050]: I1123 15:19:12.549140 5050 scope.go:117] "RemoveContainer" containerID="bdd40c88519a2cc1e873e7266b0787007822918051c94cd3f1fdfcfacf25d920" Nov 23 15:19:12 crc kubenswrapper[5050]: E1123 15:19:12.553107 5050 pod_workers.go:1301] "Error 
syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 15:19:25 crc kubenswrapper[5050]: I1123 15:19:25.553042 5050 scope.go:117] "RemoveContainer" containerID="bdd40c88519a2cc1e873e7266b0787007822918051c94cd3f1fdfcfacf25d920" Nov 23 15:19:25 crc kubenswrapper[5050]: E1123 15:19:25.554077 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 15:19:38 crc kubenswrapper[5050]: I1123 15:19:38.548920 5050 scope.go:117] "RemoveContainer" containerID="bdd40c88519a2cc1e873e7266b0787007822918051c94cd3f1fdfcfacf25d920" Nov 23 15:19:38 crc kubenswrapper[5050]: E1123 15:19:38.550011 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 15:19:50 crc kubenswrapper[5050]: I1123 15:19:50.548608 5050 scope.go:117] "RemoveContainer" containerID="bdd40c88519a2cc1e873e7266b0787007822918051c94cd3f1fdfcfacf25d920" Nov 23 15:19:50 crc kubenswrapper[5050]: E1123 15:19:50.549606 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 15:20:05 crc kubenswrapper[5050]: I1123 15:20:05.559636 5050 scope.go:117] "RemoveContainer" containerID="bdd40c88519a2cc1e873e7266b0787007822918051c94cd3f1fdfcfacf25d920" Nov 23 15:20:05 crc kubenswrapper[5050]: E1123 15:20:05.561511 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 15:20:17 crc kubenswrapper[5050]: I1123 15:20:17.549411 5050 scope.go:117] "RemoveContainer" containerID="bdd40c88519a2cc1e873e7266b0787007822918051c94cd3f1fdfcfacf25d920" Nov 23 15:20:17 crc kubenswrapper[5050]: E1123 15:20:17.551059 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 15:20:28 crc kubenswrapper[5050]: I1123 15:20:28.548685 5050 scope.go:117] "RemoveContainer" containerID="bdd40c88519a2cc1e873e7266b0787007822918051c94cd3f1fdfcfacf25d920" Nov 23 15:20:28 crc kubenswrapper[5050]: E1123 15:20:28.549947 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 15:20:39 crc kubenswrapper[5050]: I1123 15:20:39.548759 5050 scope.go:117] "RemoveContainer" containerID="bdd40c88519a2cc1e873e7266b0787007822918051c94cd3f1fdfcfacf25d920" Nov 23 15:20:39 crc kubenswrapper[5050]: E1123 15:20:39.549782 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 15:20:51 crc kubenswrapper[5050]: I1123 15:20:51.549877 5050 scope.go:117] "RemoveContainer" containerID="bdd40c88519a2cc1e873e7266b0787007822918051c94cd3f1fdfcfacf25d920" Nov 23 15:20:51 crc kubenswrapper[5050]: E1123 15:20:51.553954 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 15:21:05 crc kubenswrapper[5050]: I1123 15:21:05.554536 5050 scope.go:117] "RemoveContainer" containerID="bdd40c88519a2cc1e873e7266b0787007822918051c94cd3f1fdfcfacf25d920" Nov 23 15:21:05 crc kubenswrapper[5050]: E1123 15:21:05.555663 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 15:21:18 crc kubenswrapper[5050]: I1123 15:21:18.549003 5050 scope.go:117] "RemoveContainer" containerID="bdd40c88519a2cc1e873e7266b0787007822918051c94cd3f1fdfcfacf25d920" Nov 23 15:21:18 crc kubenswrapper[5050]: E1123 15:21:18.550611 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" 
podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 15:21:30 crc kubenswrapper[5050]: I1123 15:21:30.548724 5050 scope.go:117] "RemoveContainer" containerID="bdd40c88519a2cc1e873e7266b0787007822918051c94cd3f1fdfcfacf25d920" Nov 23 15:21:30 crc kubenswrapper[5050]: E1123 15:21:30.549960 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 15:21:41 crc kubenswrapper[5050]: I1123 15:21:41.550849 5050 scope.go:117] "RemoveContainer" containerID="bdd40c88519a2cc1e873e7266b0787007822918051c94cd3f1fdfcfacf25d920" Nov 23 15:21:41 crc kubenswrapper[5050]: E1123 15:21:41.555695 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 15:21:56 crc kubenswrapper[5050]: I1123 15:21:56.548851 5050 scope.go:117] "RemoveContainer" containerID="bdd40c88519a2cc1e873e7266b0787007822918051c94cd3f1fdfcfacf25d920" Nov 23 15:21:56 crc kubenswrapper[5050]: E1123 15:21:56.550596 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 15:22:07 crc kubenswrapper[5050]: I1123 15:22:07.548676 5050 scope.go:117] "RemoveContainer" containerID="bdd40c88519a2cc1e873e7266b0787007822918051c94cd3f1fdfcfacf25d920" Nov 23 15:22:07 crc kubenswrapper[5050]: E1123 15:22:07.549884 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 15:22:20 crc kubenswrapper[5050]: I1123 15:22:20.548047 5050 scope.go:117] "RemoveContainer" containerID="bdd40c88519a2cc1e873e7266b0787007822918051c94cd3f1fdfcfacf25d920" Nov 23 15:22:20 crc kubenswrapper[5050]: E1123 15:22:20.549033 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 15:22:33 crc kubenswrapper[5050]: I1123 15:22:33.549348 5050 scope.go:117] "RemoveContainer" 
containerID="bdd40c88519a2cc1e873e7266b0787007822918051c94cd3f1fdfcfacf25d920" Nov 23 15:22:34 crc kubenswrapper[5050]: I1123 15:22:34.717358 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" event={"ID":"1d998909-9470-47ef-87e8-b34f0473682f","Type":"ContainerStarted","Data":"8f669cdd1f43caedab92fcdf6aa597a61496916e12c9b8f8acf25da546e84e8b"} Nov 23 15:24:59 crc kubenswrapper[5050]: I1123 15:24:59.224010 5050 patch_prober.go:28] interesting pod/machine-config-daemon-hlrlq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 23 15:24:59 crc kubenswrapper[5050]: I1123 15:24:59.225889 5050 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 23 15:25:29 crc kubenswrapper[5050]: I1123 15:25:29.224907 5050 patch_prober.go:28] interesting pod/machine-config-daemon-hlrlq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 23 15:25:29 crc kubenswrapper[5050]: I1123 15:25:29.225822 5050 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 23 15:25:29 crc kubenswrapper[5050]: I1123 15:25:29.918681 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-fnj9j"] Nov 23 15:25:29 crc kubenswrapper[5050]: E1123 15:25:29.919199 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="58a76ce1-d583-44fe-b5f8-d503a7a98339" containerName="extract-content" Nov 23 15:25:29 crc kubenswrapper[5050]: I1123 15:25:29.919217 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="58a76ce1-d583-44fe-b5f8-d503a7a98339" containerName="extract-content" Nov 23 15:25:29 crc kubenswrapper[5050]: E1123 15:25:29.919245 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="58a76ce1-d583-44fe-b5f8-d503a7a98339" containerName="extract-utilities" Nov 23 15:25:29 crc kubenswrapper[5050]: I1123 15:25:29.919256 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="58a76ce1-d583-44fe-b5f8-d503a7a98339" containerName="extract-utilities" Nov 23 15:25:29 crc kubenswrapper[5050]: E1123 15:25:29.919274 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="58a76ce1-d583-44fe-b5f8-d503a7a98339" containerName="registry-server" Nov 23 15:25:29 crc kubenswrapper[5050]: I1123 15:25:29.919283 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="58a76ce1-d583-44fe-b5f8-d503a7a98339" containerName="registry-server" Nov 23 15:25:29 crc kubenswrapper[5050]: I1123 15:25:29.919479 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="58a76ce1-d583-44fe-b5f8-d503a7a98339" containerName="registry-server" Nov 23 15:25:29 crc kubenswrapper[5050]: I1123 15:25:29.920808 5050 
util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-fnj9j" Nov 23 15:25:29 crc kubenswrapper[5050]: I1123 15:25:29.931075 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-fnj9j"] Nov 23 15:25:30 crc kubenswrapper[5050]: I1123 15:25:30.011147 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6c06c2e7-bc3f-4586-ab22-9aecbf25c09c-catalog-content\") pod \"community-operators-fnj9j\" (UID: \"6c06c2e7-bc3f-4586-ab22-9aecbf25c09c\") " pod="openshift-marketplace/community-operators-fnj9j" Nov 23 15:25:30 crc kubenswrapper[5050]: I1123 15:25:30.011535 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rbq96\" (UniqueName: \"kubernetes.io/projected/6c06c2e7-bc3f-4586-ab22-9aecbf25c09c-kube-api-access-rbq96\") pod \"community-operators-fnj9j\" (UID: \"6c06c2e7-bc3f-4586-ab22-9aecbf25c09c\") " pod="openshift-marketplace/community-operators-fnj9j" Nov 23 15:25:30 crc kubenswrapper[5050]: I1123 15:25:30.016968 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6c06c2e7-bc3f-4586-ab22-9aecbf25c09c-utilities\") pod \"community-operators-fnj9j\" (UID: \"6c06c2e7-bc3f-4586-ab22-9aecbf25c09c\") " pod="openshift-marketplace/community-operators-fnj9j" Nov 23 15:25:30 crc kubenswrapper[5050]: I1123 15:25:30.119507 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6c06c2e7-bc3f-4586-ab22-9aecbf25c09c-utilities\") pod \"community-operators-fnj9j\" (UID: \"6c06c2e7-bc3f-4586-ab22-9aecbf25c09c\") " pod="openshift-marketplace/community-operators-fnj9j" Nov 23 15:25:30 crc kubenswrapper[5050]: I1123 15:25:30.119593 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6c06c2e7-bc3f-4586-ab22-9aecbf25c09c-catalog-content\") pod \"community-operators-fnj9j\" (UID: \"6c06c2e7-bc3f-4586-ab22-9aecbf25c09c\") " pod="openshift-marketplace/community-operators-fnj9j" Nov 23 15:25:30 crc kubenswrapper[5050]: I1123 15:25:30.119642 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rbq96\" (UniqueName: \"kubernetes.io/projected/6c06c2e7-bc3f-4586-ab22-9aecbf25c09c-kube-api-access-rbq96\") pod \"community-operators-fnj9j\" (UID: \"6c06c2e7-bc3f-4586-ab22-9aecbf25c09c\") " pod="openshift-marketplace/community-operators-fnj9j" Nov 23 15:25:30 crc kubenswrapper[5050]: I1123 15:25:30.120272 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6c06c2e7-bc3f-4586-ab22-9aecbf25c09c-utilities\") pod \"community-operators-fnj9j\" (UID: \"6c06c2e7-bc3f-4586-ab22-9aecbf25c09c\") " pod="openshift-marketplace/community-operators-fnj9j" Nov 23 15:25:30 crc kubenswrapper[5050]: I1123 15:25:30.120274 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6c06c2e7-bc3f-4586-ab22-9aecbf25c09c-catalog-content\") pod \"community-operators-fnj9j\" (UID: \"6c06c2e7-bc3f-4586-ab22-9aecbf25c09c\") " pod="openshift-marketplace/community-operators-fnj9j" Nov 23 15:25:30 crc kubenswrapper[5050]: I1123 
15:25:30.158325 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rbq96\" (UniqueName: \"kubernetes.io/projected/6c06c2e7-bc3f-4586-ab22-9aecbf25c09c-kube-api-access-rbq96\") pod \"community-operators-fnj9j\" (UID: \"6c06c2e7-bc3f-4586-ab22-9aecbf25c09c\") " pod="openshift-marketplace/community-operators-fnj9j" Nov 23 15:25:30 crc kubenswrapper[5050]: I1123 15:25:30.319417 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-fnj9j" Nov 23 15:25:30 crc kubenswrapper[5050]: I1123 15:25:30.743827 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-fnj9j"] Nov 23 15:25:31 crc kubenswrapper[5050]: I1123 15:25:31.693641 5050 generic.go:334] "Generic (PLEG): container finished" podID="6c06c2e7-bc3f-4586-ab22-9aecbf25c09c" containerID="9770755555d42f15d44767b0960a9afc06ec9f517aa2f871ac1d32d04badb38e" exitCode=0 Nov 23 15:25:31 crc kubenswrapper[5050]: I1123 15:25:31.693776 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fnj9j" event={"ID":"6c06c2e7-bc3f-4586-ab22-9aecbf25c09c","Type":"ContainerDied","Data":"9770755555d42f15d44767b0960a9afc06ec9f517aa2f871ac1d32d04badb38e"} Nov 23 15:25:31 crc kubenswrapper[5050]: I1123 15:25:31.694300 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fnj9j" event={"ID":"6c06c2e7-bc3f-4586-ab22-9aecbf25c09c","Type":"ContainerStarted","Data":"b257b15e5e3427c3d653572761ef1b4caa08d835c766af1a8c71d70f22b232cb"} Nov 23 15:25:31 crc kubenswrapper[5050]: I1123 15:25:31.700016 5050 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 23 15:25:32 crc kubenswrapper[5050]: I1123 15:25:32.708730 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fnj9j" event={"ID":"6c06c2e7-bc3f-4586-ab22-9aecbf25c09c","Type":"ContainerStarted","Data":"19edb0cc1646686349ee0757af9c5cff658764f7dd37bf715a287687b67ac690"} Nov 23 15:25:33 crc kubenswrapper[5050]: I1123 15:25:33.718120 5050 generic.go:334] "Generic (PLEG): container finished" podID="6c06c2e7-bc3f-4586-ab22-9aecbf25c09c" containerID="19edb0cc1646686349ee0757af9c5cff658764f7dd37bf715a287687b67ac690" exitCode=0 Nov 23 15:25:33 crc kubenswrapper[5050]: I1123 15:25:33.718183 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fnj9j" event={"ID":"6c06c2e7-bc3f-4586-ab22-9aecbf25c09c","Type":"ContainerDied","Data":"19edb0cc1646686349ee0757af9c5cff658764f7dd37bf715a287687b67ac690"} Nov 23 15:25:34 crc kubenswrapper[5050]: I1123 15:25:34.732795 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fnj9j" event={"ID":"6c06c2e7-bc3f-4586-ab22-9aecbf25c09c","Type":"ContainerStarted","Data":"8c763df513cfec06790d7757245f7f374f82b023e97ee3315f45a1a55b432dfa"} Nov 23 15:25:34 crc kubenswrapper[5050]: I1123 15:25:34.761018 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-fnj9j" podStartSLOduration=3.304473534 podStartE2EDuration="5.760997738s" podCreationTimestamp="2025-11-23 15:25:29 +0000 UTC" firstStartedPulling="2025-11-23 15:25:31.699530254 +0000 UTC m=+2626.866526769" lastFinishedPulling="2025-11-23 15:25:34.156054458 +0000 UTC m=+2629.323050973" observedRunningTime="2025-11-23 15:25:34.758742354 +0000 UTC 
m=+2629.925738869" watchObservedRunningTime="2025-11-23 15:25:34.760997738 +0000 UTC m=+2629.927994223" Nov 23 15:25:40 crc kubenswrapper[5050]: I1123 15:25:40.319860 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-fnj9j" Nov 23 15:25:40 crc kubenswrapper[5050]: I1123 15:25:40.320675 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-fnj9j" Nov 23 15:25:40 crc kubenswrapper[5050]: I1123 15:25:40.378036 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-fnj9j" Nov 23 15:25:40 crc kubenswrapper[5050]: I1123 15:25:40.878774 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-fnj9j" Nov 23 15:25:40 crc kubenswrapper[5050]: I1123 15:25:40.995057 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-fnj9j"] Nov 23 15:25:42 crc kubenswrapper[5050]: I1123 15:25:42.813156 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-fnj9j" podUID="6c06c2e7-bc3f-4586-ab22-9aecbf25c09c" containerName="registry-server" containerID="cri-o://8c763df513cfec06790d7757245f7f374f82b023e97ee3315f45a1a55b432dfa" gracePeriod=2 Nov 23 15:25:43 crc kubenswrapper[5050]: I1123 15:25:43.825511 5050 generic.go:334] "Generic (PLEG): container finished" podID="6c06c2e7-bc3f-4586-ab22-9aecbf25c09c" containerID="8c763df513cfec06790d7757245f7f374f82b023e97ee3315f45a1a55b432dfa" exitCode=0 Nov 23 15:25:43 crc kubenswrapper[5050]: I1123 15:25:43.825601 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fnj9j" event={"ID":"6c06c2e7-bc3f-4586-ab22-9aecbf25c09c","Type":"ContainerDied","Data":"8c763df513cfec06790d7757245f7f374f82b023e97ee3315f45a1a55b432dfa"} Nov 23 15:25:43 crc kubenswrapper[5050]: I1123 15:25:43.825924 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fnj9j" event={"ID":"6c06c2e7-bc3f-4586-ab22-9aecbf25c09c","Type":"ContainerDied","Data":"b257b15e5e3427c3d653572761ef1b4caa08d835c766af1a8c71d70f22b232cb"} Nov 23 15:25:43 crc kubenswrapper[5050]: I1123 15:25:43.825944 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b257b15e5e3427c3d653572761ef1b4caa08d835c766af1a8c71d70f22b232cb" Nov 23 15:25:43 crc kubenswrapper[5050]: I1123 15:25:43.849863 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-fnj9j" Nov 23 15:25:43 crc kubenswrapper[5050]: I1123 15:25:43.993277 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6c06c2e7-bc3f-4586-ab22-9aecbf25c09c-utilities\") pod \"6c06c2e7-bc3f-4586-ab22-9aecbf25c09c\" (UID: \"6c06c2e7-bc3f-4586-ab22-9aecbf25c09c\") " Nov 23 15:25:43 crc kubenswrapper[5050]: I1123 15:25:43.993370 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6c06c2e7-bc3f-4586-ab22-9aecbf25c09c-catalog-content\") pod \"6c06c2e7-bc3f-4586-ab22-9aecbf25c09c\" (UID: \"6c06c2e7-bc3f-4586-ab22-9aecbf25c09c\") " Nov 23 15:25:43 crc kubenswrapper[5050]: I1123 15:25:43.993411 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rbq96\" (UniqueName: \"kubernetes.io/projected/6c06c2e7-bc3f-4586-ab22-9aecbf25c09c-kube-api-access-rbq96\") pod \"6c06c2e7-bc3f-4586-ab22-9aecbf25c09c\" (UID: \"6c06c2e7-bc3f-4586-ab22-9aecbf25c09c\") " Nov 23 15:25:43 crc kubenswrapper[5050]: I1123 15:25:43.995098 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6c06c2e7-bc3f-4586-ab22-9aecbf25c09c-utilities" (OuterVolumeSpecName: "utilities") pod "6c06c2e7-bc3f-4586-ab22-9aecbf25c09c" (UID: "6c06c2e7-bc3f-4586-ab22-9aecbf25c09c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 15:25:44 crc kubenswrapper[5050]: I1123 15:25:44.002436 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6c06c2e7-bc3f-4586-ab22-9aecbf25c09c-kube-api-access-rbq96" (OuterVolumeSpecName: "kube-api-access-rbq96") pod "6c06c2e7-bc3f-4586-ab22-9aecbf25c09c" (UID: "6c06c2e7-bc3f-4586-ab22-9aecbf25c09c"). InnerVolumeSpecName "kube-api-access-rbq96". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:25:44 crc kubenswrapper[5050]: I1123 15:25:44.065713 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6c06c2e7-bc3f-4586-ab22-9aecbf25c09c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "6c06c2e7-bc3f-4586-ab22-9aecbf25c09c" (UID: "6c06c2e7-bc3f-4586-ab22-9aecbf25c09c"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 15:25:44 crc kubenswrapper[5050]: I1123 15:25:44.095977 5050 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6c06c2e7-bc3f-4586-ab22-9aecbf25c09c-utilities\") on node \"crc\" DevicePath \"\"" Nov 23 15:25:44 crc kubenswrapper[5050]: I1123 15:25:44.096303 5050 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6c06c2e7-bc3f-4586-ab22-9aecbf25c09c-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 23 15:25:44 crc kubenswrapper[5050]: I1123 15:25:44.096421 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rbq96\" (UniqueName: \"kubernetes.io/projected/6c06c2e7-bc3f-4586-ab22-9aecbf25c09c-kube-api-access-rbq96\") on node \"crc\" DevicePath \"\"" Nov 23 15:25:44 crc kubenswrapper[5050]: I1123 15:25:44.836496 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-fnj9j" Nov 23 15:25:44 crc kubenswrapper[5050]: I1123 15:25:44.882138 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-fnj9j"] Nov 23 15:25:44 crc kubenswrapper[5050]: I1123 15:25:44.889894 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-fnj9j"] Nov 23 15:25:45 crc kubenswrapper[5050]: I1123 15:25:45.568320 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6c06c2e7-bc3f-4586-ab22-9aecbf25c09c" path="/var/lib/kubelet/pods/6c06c2e7-bc3f-4586-ab22-9aecbf25c09c/volumes" Nov 23 15:25:59 crc kubenswrapper[5050]: I1123 15:25:59.224707 5050 patch_prober.go:28] interesting pod/machine-config-daemon-hlrlq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 23 15:25:59 crc kubenswrapper[5050]: I1123 15:25:59.225764 5050 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 23 15:25:59 crc kubenswrapper[5050]: I1123 15:25:59.225860 5050 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" Nov 23 15:25:59 crc kubenswrapper[5050]: I1123 15:25:59.227319 5050 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"8f669cdd1f43caedab92fcdf6aa597a61496916e12c9b8f8acf25da546e84e8b"} pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 23 15:25:59 crc kubenswrapper[5050]: I1123 15:25:59.227490 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" containerID="cri-o://8f669cdd1f43caedab92fcdf6aa597a61496916e12c9b8f8acf25da546e84e8b" gracePeriod=600 Nov 23 15:26:00 crc kubenswrapper[5050]: I1123 15:26:00.003513 5050 generic.go:334] "Generic (PLEG): container finished" podID="1d998909-9470-47ef-87e8-b34f0473682f" containerID="8f669cdd1f43caedab92fcdf6aa597a61496916e12c9b8f8acf25da546e84e8b" exitCode=0 Nov 23 15:26:00 crc kubenswrapper[5050]: I1123 15:26:00.003595 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" event={"ID":"1d998909-9470-47ef-87e8-b34f0473682f","Type":"ContainerDied","Data":"8f669cdd1f43caedab92fcdf6aa597a61496916e12c9b8f8acf25da546e84e8b"} Nov 23 15:26:00 crc kubenswrapper[5050]: I1123 15:26:00.004086 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" event={"ID":"1d998909-9470-47ef-87e8-b34f0473682f","Type":"ContainerStarted","Data":"ab40103857df8394b1d45d09f6678e11b0ec57d408929b19c12c0457e718ddf1"} Nov 23 15:26:00 crc kubenswrapper[5050]: I1123 15:26:00.004122 5050 scope.go:117] "RemoveContainer" 
containerID="bdd40c88519a2cc1e873e7266b0787007822918051c94cd3f1fdfcfacf25d920" Nov 23 15:26:21 crc kubenswrapper[5050]: I1123 15:26:21.723164 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-mvnrn"] Nov 23 15:26:21 crc kubenswrapper[5050]: E1123 15:26:21.724502 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6c06c2e7-bc3f-4586-ab22-9aecbf25c09c" containerName="registry-server" Nov 23 15:26:21 crc kubenswrapper[5050]: I1123 15:26:21.724528 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="6c06c2e7-bc3f-4586-ab22-9aecbf25c09c" containerName="registry-server" Nov 23 15:26:21 crc kubenswrapper[5050]: E1123 15:26:21.724569 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6c06c2e7-bc3f-4586-ab22-9aecbf25c09c" containerName="extract-content" Nov 23 15:26:21 crc kubenswrapper[5050]: I1123 15:26:21.724581 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="6c06c2e7-bc3f-4586-ab22-9aecbf25c09c" containerName="extract-content" Nov 23 15:26:21 crc kubenswrapper[5050]: E1123 15:26:21.724624 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6c06c2e7-bc3f-4586-ab22-9aecbf25c09c" containerName="extract-utilities" Nov 23 15:26:21 crc kubenswrapper[5050]: I1123 15:26:21.724636 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="6c06c2e7-bc3f-4586-ab22-9aecbf25c09c" containerName="extract-utilities" Nov 23 15:26:21 crc kubenswrapper[5050]: I1123 15:26:21.724880 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="6c06c2e7-bc3f-4586-ab22-9aecbf25c09c" containerName="registry-server" Nov 23 15:26:21 crc kubenswrapper[5050]: I1123 15:26:21.726679 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-mvnrn" Nov 23 15:26:21 crc kubenswrapper[5050]: I1123 15:26:21.734115 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-mvnrn"] Nov 23 15:26:21 crc kubenswrapper[5050]: I1123 15:26:21.745160 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/51b2e9d2-da97-4213-8069-e608fb001b3c-utilities\") pod \"redhat-marketplace-mvnrn\" (UID: \"51b2e9d2-da97-4213-8069-e608fb001b3c\") " pod="openshift-marketplace/redhat-marketplace-mvnrn" Nov 23 15:26:21 crc kubenswrapper[5050]: I1123 15:26:21.745266 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/51b2e9d2-da97-4213-8069-e608fb001b3c-catalog-content\") pod \"redhat-marketplace-mvnrn\" (UID: \"51b2e9d2-da97-4213-8069-e608fb001b3c\") " pod="openshift-marketplace/redhat-marketplace-mvnrn" Nov 23 15:26:21 crc kubenswrapper[5050]: I1123 15:26:21.745328 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qdb64\" (UniqueName: \"kubernetes.io/projected/51b2e9d2-da97-4213-8069-e608fb001b3c-kube-api-access-qdb64\") pod \"redhat-marketplace-mvnrn\" (UID: \"51b2e9d2-da97-4213-8069-e608fb001b3c\") " pod="openshift-marketplace/redhat-marketplace-mvnrn" Nov 23 15:26:21 crc kubenswrapper[5050]: I1123 15:26:21.847678 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/51b2e9d2-da97-4213-8069-e608fb001b3c-utilities\") pod \"redhat-marketplace-mvnrn\" (UID: 
\"51b2e9d2-da97-4213-8069-e608fb001b3c\") " pod="openshift-marketplace/redhat-marketplace-mvnrn" Nov 23 15:26:21 crc kubenswrapper[5050]: I1123 15:26:21.847777 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/51b2e9d2-da97-4213-8069-e608fb001b3c-catalog-content\") pod \"redhat-marketplace-mvnrn\" (UID: \"51b2e9d2-da97-4213-8069-e608fb001b3c\") " pod="openshift-marketplace/redhat-marketplace-mvnrn" Nov 23 15:26:21 crc kubenswrapper[5050]: I1123 15:26:21.847843 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qdb64\" (UniqueName: \"kubernetes.io/projected/51b2e9d2-da97-4213-8069-e608fb001b3c-kube-api-access-qdb64\") pod \"redhat-marketplace-mvnrn\" (UID: \"51b2e9d2-da97-4213-8069-e608fb001b3c\") " pod="openshift-marketplace/redhat-marketplace-mvnrn" Nov 23 15:26:21 crc kubenswrapper[5050]: I1123 15:26:21.848478 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/51b2e9d2-da97-4213-8069-e608fb001b3c-catalog-content\") pod \"redhat-marketplace-mvnrn\" (UID: \"51b2e9d2-da97-4213-8069-e608fb001b3c\") " pod="openshift-marketplace/redhat-marketplace-mvnrn" Nov 23 15:26:21 crc kubenswrapper[5050]: I1123 15:26:21.848569 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/51b2e9d2-da97-4213-8069-e608fb001b3c-utilities\") pod \"redhat-marketplace-mvnrn\" (UID: \"51b2e9d2-da97-4213-8069-e608fb001b3c\") " pod="openshift-marketplace/redhat-marketplace-mvnrn" Nov 23 15:26:21 crc kubenswrapper[5050]: I1123 15:26:21.876210 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qdb64\" (UniqueName: \"kubernetes.io/projected/51b2e9d2-da97-4213-8069-e608fb001b3c-kube-api-access-qdb64\") pod \"redhat-marketplace-mvnrn\" (UID: \"51b2e9d2-da97-4213-8069-e608fb001b3c\") " pod="openshift-marketplace/redhat-marketplace-mvnrn" Nov 23 15:26:22 crc kubenswrapper[5050]: I1123 15:26:22.064237 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-mvnrn" Nov 23 15:26:22 crc kubenswrapper[5050]: I1123 15:26:22.308774 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-mvnrn"] Nov 23 15:26:22 crc kubenswrapper[5050]: I1123 15:26:22.728686 5050 generic.go:334] "Generic (PLEG): container finished" podID="51b2e9d2-da97-4213-8069-e608fb001b3c" containerID="ce2839644508b3f048ef7bf64ab8375bdb113a9d68096b3cd72cbaf2e093f04a" exitCode=0 Nov 23 15:26:22 crc kubenswrapper[5050]: I1123 15:26:22.728793 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mvnrn" event={"ID":"51b2e9d2-da97-4213-8069-e608fb001b3c","Type":"ContainerDied","Data":"ce2839644508b3f048ef7bf64ab8375bdb113a9d68096b3cd72cbaf2e093f04a"} Nov 23 15:26:22 crc kubenswrapper[5050]: I1123 15:26:22.728841 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mvnrn" event={"ID":"51b2e9d2-da97-4213-8069-e608fb001b3c","Type":"ContainerStarted","Data":"a91014d56a46846b5062be202d693280870dd2f2061ea43fdb33265509601a20"} Nov 23 15:26:23 crc kubenswrapper[5050]: I1123 15:26:23.738913 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mvnrn" event={"ID":"51b2e9d2-da97-4213-8069-e608fb001b3c","Type":"ContainerStarted","Data":"318b0bf52efb3e54d3080ec958ef42afd3e53484e06b2f5364ebd32540e6c951"} Nov 23 15:26:24 crc kubenswrapper[5050]: I1123 15:26:24.752171 5050 generic.go:334] "Generic (PLEG): container finished" podID="51b2e9d2-da97-4213-8069-e608fb001b3c" containerID="318b0bf52efb3e54d3080ec958ef42afd3e53484e06b2f5364ebd32540e6c951" exitCode=0 Nov 23 15:26:24 crc kubenswrapper[5050]: I1123 15:26:24.752235 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mvnrn" event={"ID":"51b2e9d2-da97-4213-8069-e608fb001b3c","Type":"ContainerDied","Data":"318b0bf52efb3e54d3080ec958ef42afd3e53484e06b2f5364ebd32540e6c951"} Nov 23 15:26:25 crc kubenswrapper[5050]: I1123 15:26:25.764814 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mvnrn" event={"ID":"51b2e9d2-da97-4213-8069-e608fb001b3c","Type":"ContainerStarted","Data":"e9c32e1dee63e1d9306460cf90ab3941bb0db863d2a16accdc1e48ba79ce8857"} Nov 23 15:26:32 crc kubenswrapper[5050]: I1123 15:26:32.064807 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-mvnrn" Nov 23 15:26:32 crc kubenswrapper[5050]: I1123 15:26:32.065859 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-mvnrn" Nov 23 15:26:32 crc kubenswrapper[5050]: I1123 15:26:32.151535 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-mvnrn" Nov 23 15:26:32 crc kubenswrapper[5050]: I1123 15:26:32.183605 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-mvnrn" podStartSLOduration=8.744502101 podStartE2EDuration="11.183575336s" podCreationTimestamp="2025-11-23 15:26:21 +0000 UTC" firstStartedPulling="2025-11-23 15:26:22.730919442 +0000 UTC m=+2677.897915927" lastFinishedPulling="2025-11-23 15:26:25.169992687 +0000 UTC m=+2680.336989162" observedRunningTime="2025-11-23 15:26:25.815035628 +0000 UTC m=+2680.982032133" watchObservedRunningTime="2025-11-23 15:26:32.183575336 +0000 UTC 
m=+2687.350571851" Nov 23 15:26:32 crc kubenswrapper[5050]: I1123 15:26:32.958679 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-mvnrn" Nov 23 15:26:33 crc kubenswrapper[5050]: I1123 15:26:33.031195 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-mvnrn"] Nov 23 15:26:34 crc kubenswrapper[5050]: I1123 15:26:34.862556 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-mvnrn" podUID="51b2e9d2-da97-4213-8069-e608fb001b3c" containerName="registry-server" containerID="cri-o://e9c32e1dee63e1d9306460cf90ab3941bb0db863d2a16accdc1e48ba79ce8857" gracePeriod=2 Nov 23 15:26:35 crc kubenswrapper[5050]: I1123 15:26:35.313691 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-mvnrn" Nov 23 15:26:35 crc kubenswrapper[5050]: I1123 15:26:35.500002 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qdb64\" (UniqueName: \"kubernetes.io/projected/51b2e9d2-da97-4213-8069-e608fb001b3c-kube-api-access-qdb64\") pod \"51b2e9d2-da97-4213-8069-e608fb001b3c\" (UID: \"51b2e9d2-da97-4213-8069-e608fb001b3c\") " Nov 23 15:26:35 crc kubenswrapper[5050]: I1123 15:26:35.500119 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/51b2e9d2-da97-4213-8069-e608fb001b3c-catalog-content\") pod \"51b2e9d2-da97-4213-8069-e608fb001b3c\" (UID: \"51b2e9d2-da97-4213-8069-e608fb001b3c\") " Nov 23 15:26:35 crc kubenswrapper[5050]: I1123 15:26:35.500308 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/51b2e9d2-da97-4213-8069-e608fb001b3c-utilities\") pod \"51b2e9d2-da97-4213-8069-e608fb001b3c\" (UID: \"51b2e9d2-da97-4213-8069-e608fb001b3c\") " Nov 23 15:26:35 crc kubenswrapper[5050]: I1123 15:26:35.502084 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/51b2e9d2-da97-4213-8069-e608fb001b3c-utilities" (OuterVolumeSpecName: "utilities") pod "51b2e9d2-da97-4213-8069-e608fb001b3c" (UID: "51b2e9d2-da97-4213-8069-e608fb001b3c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 15:26:35 crc kubenswrapper[5050]: I1123 15:26:35.510213 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/51b2e9d2-da97-4213-8069-e608fb001b3c-kube-api-access-qdb64" (OuterVolumeSpecName: "kube-api-access-qdb64") pod "51b2e9d2-da97-4213-8069-e608fb001b3c" (UID: "51b2e9d2-da97-4213-8069-e608fb001b3c"). InnerVolumeSpecName "kube-api-access-qdb64". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:26:35 crc kubenswrapper[5050]: I1123 15:26:35.539740 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/51b2e9d2-da97-4213-8069-e608fb001b3c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "51b2e9d2-da97-4213-8069-e608fb001b3c" (UID: "51b2e9d2-da97-4213-8069-e608fb001b3c"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 15:26:35 crc kubenswrapper[5050]: I1123 15:26:35.604602 5050 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/51b2e9d2-da97-4213-8069-e608fb001b3c-utilities\") on node \"crc\" DevicePath \"\"" Nov 23 15:26:35 crc kubenswrapper[5050]: I1123 15:26:35.604726 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qdb64\" (UniqueName: \"kubernetes.io/projected/51b2e9d2-da97-4213-8069-e608fb001b3c-kube-api-access-qdb64\") on node \"crc\" DevicePath \"\"" Nov 23 15:26:35 crc kubenswrapper[5050]: I1123 15:26:35.604755 5050 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/51b2e9d2-da97-4213-8069-e608fb001b3c-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 23 15:26:35 crc kubenswrapper[5050]: I1123 15:26:35.881115 5050 generic.go:334] "Generic (PLEG): container finished" podID="51b2e9d2-da97-4213-8069-e608fb001b3c" containerID="e9c32e1dee63e1d9306460cf90ab3941bb0db863d2a16accdc1e48ba79ce8857" exitCode=0 Nov 23 15:26:35 crc kubenswrapper[5050]: I1123 15:26:35.881205 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mvnrn" event={"ID":"51b2e9d2-da97-4213-8069-e608fb001b3c","Type":"ContainerDied","Data":"e9c32e1dee63e1d9306460cf90ab3941bb0db863d2a16accdc1e48ba79ce8857"} Nov 23 15:26:35 crc kubenswrapper[5050]: I1123 15:26:35.881269 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mvnrn" event={"ID":"51b2e9d2-da97-4213-8069-e608fb001b3c","Type":"ContainerDied","Data":"a91014d56a46846b5062be202d693280870dd2f2061ea43fdb33265509601a20"} Nov 23 15:26:35 crc kubenswrapper[5050]: I1123 15:26:35.881302 5050 scope.go:117] "RemoveContainer" containerID="e9c32e1dee63e1d9306460cf90ab3941bb0db863d2a16accdc1e48ba79ce8857" Nov 23 15:26:35 crc kubenswrapper[5050]: I1123 15:26:35.881329 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-mvnrn" Nov 23 15:26:35 crc kubenswrapper[5050]: I1123 15:26:35.922980 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-mvnrn"] Nov 23 15:26:35 crc kubenswrapper[5050]: I1123 15:26:35.925275 5050 scope.go:117] "RemoveContainer" containerID="318b0bf52efb3e54d3080ec958ef42afd3e53484e06b2f5364ebd32540e6c951" Nov 23 15:26:35 crc kubenswrapper[5050]: I1123 15:26:35.928592 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-mvnrn"] Nov 23 15:26:35 crc kubenswrapper[5050]: I1123 15:26:35.977799 5050 scope.go:117] "RemoveContainer" containerID="ce2839644508b3f048ef7bf64ab8375bdb113a9d68096b3cd72cbaf2e093f04a" Nov 23 15:26:36 crc kubenswrapper[5050]: I1123 15:26:36.001821 5050 scope.go:117] "RemoveContainer" containerID="e9c32e1dee63e1d9306460cf90ab3941bb0db863d2a16accdc1e48ba79ce8857" Nov 23 15:26:36 crc kubenswrapper[5050]: E1123 15:26:36.002535 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e9c32e1dee63e1d9306460cf90ab3941bb0db863d2a16accdc1e48ba79ce8857\": container with ID starting with e9c32e1dee63e1d9306460cf90ab3941bb0db863d2a16accdc1e48ba79ce8857 not found: ID does not exist" containerID="e9c32e1dee63e1d9306460cf90ab3941bb0db863d2a16accdc1e48ba79ce8857" Nov 23 15:26:36 crc kubenswrapper[5050]: I1123 15:26:36.002613 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e9c32e1dee63e1d9306460cf90ab3941bb0db863d2a16accdc1e48ba79ce8857"} err="failed to get container status \"e9c32e1dee63e1d9306460cf90ab3941bb0db863d2a16accdc1e48ba79ce8857\": rpc error: code = NotFound desc = could not find container \"e9c32e1dee63e1d9306460cf90ab3941bb0db863d2a16accdc1e48ba79ce8857\": container with ID starting with e9c32e1dee63e1d9306460cf90ab3941bb0db863d2a16accdc1e48ba79ce8857 not found: ID does not exist" Nov 23 15:26:36 crc kubenswrapper[5050]: I1123 15:26:36.002658 5050 scope.go:117] "RemoveContainer" containerID="318b0bf52efb3e54d3080ec958ef42afd3e53484e06b2f5364ebd32540e6c951" Nov 23 15:26:36 crc kubenswrapper[5050]: E1123 15:26:36.003096 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"318b0bf52efb3e54d3080ec958ef42afd3e53484e06b2f5364ebd32540e6c951\": container with ID starting with 318b0bf52efb3e54d3080ec958ef42afd3e53484e06b2f5364ebd32540e6c951 not found: ID does not exist" containerID="318b0bf52efb3e54d3080ec958ef42afd3e53484e06b2f5364ebd32540e6c951" Nov 23 15:26:36 crc kubenswrapper[5050]: I1123 15:26:36.003157 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"318b0bf52efb3e54d3080ec958ef42afd3e53484e06b2f5364ebd32540e6c951"} err="failed to get container status \"318b0bf52efb3e54d3080ec958ef42afd3e53484e06b2f5364ebd32540e6c951\": rpc error: code = NotFound desc = could not find container \"318b0bf52efb3e54d3080ec958ef42afd3e53484e06b2f5364ebd32540e6c951\": container with ID starting with 318b0bf52efb3e54d3080ec958ef42afd3e53484e06b2f5364ebd32540e6c951 not found: ID does not exist" Nov 23 15:26:36 crc kubenswrapper[5050]: I1123 15:26:36.003210 5050 scope.go:117] "RemoveContainer" containerID="ce2839644508b3f048ef7bf64ab8375bdb113a9d68096b3cd72cbaf2e093f04a" Nov 23 15:26:36 crc kubenswrapper[5050]: E1123 15:26:36.003987 5050 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"ce2839644508b3f048ef7bf64ab8375bdb113a9d68096b3cd72cbaf2e093f04a\": container with ID starting with ce2839644508b3f048ef7bf64ab8375bdb113a9d68096b3cd72cbaf2e093f04a not found: ID does not exist" containerID="ce2839644508b3f048ef7bf64ab8375bdb113a9d68096b3cd72cbaf2e093f04a" Nov 23 15:26:36 crc kubenswrapper[5050]: I1123 15:26:36.004051 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ce2839644508b3f048ef7bf64ab8375bdb113a9d68096b3cd72cbaf2e093f04a"} err="failed to get container status \"ce2839644508b3f048ef7bf64ab8375bdb113a9d68096b3cd72cbaf2e093f04a\": rpc error: code = NotFound desc = could not find container \"ce2839644508b3f048ef7bf64ab8375bdb113a9d68096b3cd72cbaf2e093f04a\": container with ID starting with ce2839644508b3f048ef7bf64ab8375bdb113a9d68096b3cd72cbaf2e093f04a not found: ID does not exist" Nov 23 15:26:37 crc kubenswrapper[5050]: I1123 15:26:37.565010 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="51b2e9d2-da97-4213-8069-e608fb001b3c" path="/var/lib/kubelet/pods/51b2e9d2-da97-4213-8069-e608fb001b3c/volumes" Nov 23 15:27:32 crc kubenswrapper[5050]: I1123 15:27:32.195639 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-xmgkt"] Nov 23 15:27:32 crc kubenswrapper[5050]: E1123 15:27:32.200375 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="51b2e9d2-da97-4213-8069-e608fb001b3c" containerName="extract-content" Nov 23 15:27:32 crc kubenswrapper[5050]: I1123 15:27:32.200583 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="51b2e9d2-da97-4213-8069-e608fb001b3c" containerName="extract-content" Nov 23 15:27:32 crc kubenswrapper[5050]: E1123 15:27:32.200749 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="51b2e9d2-da97-4213-8069-e608fb001b3c" containerName="extract-utilities" Nov 23 15:27:32 crc kubenswrapper[5050]: I1123 15:27:32.200839 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="51b2e9d2-da97-4213-8069-e608fb001b3c" containerName="extract-utilities" Nov 23 15:27:32 crc kubenswrapper[5050]: E1123 15:27:32.200925 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="51b2e9d2-da97-4213-8069-e608fb001b3c" containerName="registry-server" Nov 23 15:27:32 crc kubenswrapper[5050]: I1123 15:27:32.201005 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="51b2e9d2-da97-4213-8069-e608fb001b3c" containerName="registry-server" Nov 23 15:27:32 crc kubenswrapper[5050]: I1123 15:27:32.201287 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="51b2e9d2-da97-4213-8069-e608fb001b3c" containerName="registry-server" Nov 23 15:27:32 crc kubenswrapper[5050]: I1123 15:27:32.203702 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-xmgkt" Nov 23 15:27:32 crc kubenswrapper[5050]: I1123 15:27:32.246485 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-xmgkt"] Nov 23 15:27:32 crc kubenswrapper[5050]: I1123 15:27:32.395776 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ee3bbc24-9528-4284-b09a-884d2c41bdba-catalog-content\") pod \"certified-operators-xmgkt\" (UID: \"ee3bbc24-9528-4284-b09a-884d2c41bdba\") " pod="openshift-marketplace/certified-operators-xmgkt" Nov 23 15:27:32 crc kubenswrapper[5050]: I1123 15:27:32.395840 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ee3bbc24-9528-4284-b09a-884d2c41bdba-utilities\") pod \"certified-operators-xmgkt\" (UID: \"ee3bbc24-9528-4284-b09a-884d2c41bdba\") " pod="openshift-marketplace/certified-operators-xmgkt" Nov 23 15:27:32 crc kubenswrapper[5050]: I1123 15:27:32.395868 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5gx4x\" (UniqueName: \"kubernetes.io/projected/ee3bbc24-9528-4284-b09a-884d2c41bdba-kube-api-access-5gx4x\") pod \"certified-operators-xmgkt\" (UID: \"ee3bbc24-9528-4284-b09a-884d2c41bdba\") " pod="openshift-marketplace/certified-operators-xmgkt" Nov 23 15:27:32 crc kubenswrapper[5050]: I1123 15:27:32.497828 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ee3bbc24-9528-4284-b09a-884d2c41bdba-catalog-content\") pod \"certified-operators-xmgkt\" (UID: \"ee3bbc24-9528-4284-b09a-884d2c41bdba\") " pod="openshift-marketplace/certified-operators-xmgkt" Nov 23 15:27:32 crc kubenswrapper[5050]: I1123 15:27:32.497884 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ee3bbc24-9528-4284-b09a-884d2c41bdba-utilities\") pod \"certified-operators-xmgkt\" (UID: \"ee3bbc24-9528-4284-b09a-884d2c41bdba\") " pod="openshift-marketplace/certified-operators-xmgkt" Nov 23 15:27:32 crc kubenswrapper[5050]: I1123 15:27:32.497913 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5gx4x\" (UniqueName: \"kubernetes.io/projected/ee3bbc24-9528-4284-b09a-884d2c41bdba-kube-api-access-5gx4x\") pod \"certified-operators-xmgkt\" (UID: \"ee3bbc24-9528-4284-b09a-884d2c41bdba\") " pod="openshift-marketplace/certified-operators-xmgkt" Nov 23 15:27:32 crc kubenswrapper[5050]: I1123 15:27:32.498626 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ee3bbc24-9528-4284-b09a-884d2c41bdba-catalog-content\") pod \"certified-operators-xmgkt\" (UID: \"ee3bbc24-9528-4284-b09a-884d2c41bdba\") " pod="openshift-marketplace/certified-operators-xmgkt" Nov 23 15:27:32 crc kubenswrapper[5050]: I1123 15:27:32.498907 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ee3bbc24-9528-4284-b09a-884d2c41bdba-utilities\") pod \"certified-operators-xmgkt\" (UID: \"ee3bbc24-9528-4284-b09a-884d2c41bdba\") " pod="openshift-marketplace/certified-operators-xmgkt" Nov 23 15:27:32 crc kubenswrapper[5050]: I1123 15:27:32.535215 5050 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-5gx4x\" (UniqueName: \"kubernetes.io/projected/ee3bbc24-9528-4284-b09a-884d2c41bdba-kube-api-access-5gx4x\") pod \"certified-operators-xmgkt\" (UID: \"ee3bbc24-9528-4284-b09a-884d2c41bdba\") " pod="openshift-marketplace/certified-operators-xmgkt" Nov 23 15:27:32 crc kubenswrapper[5050]: I1123 15:27:32.573567 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-xmgkt" Nov 23 15:27:33 crc kubenswrapper[5050]: I1123 15:27:33.054401 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-xmgkt"] Nov 23 15:27:33 crc kubenswrapper[5050]: I1123 15:27:33.758106 5050 generic.go:334] "Generic (PLEG): container finished" podID="ee3bbc24-9528-4284-b09a-884d2c41bdba" containerID="489f25ddf8027d8a48cf6396312203b684d07e3b85a65fef1f9d0e1f3d1148e5" exitCode=0 Nov 23 15:27:33 crc kubenswrapper[5050]: I1123 15:27:33.758175 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xmgkt" event={"ID":"ee3bbc24-9528-4284-b09a-884d2c41bdba","Type":"ContainerDied","Data":"489f25ddf8027d8a48cf6396312203b684d07e3b85a65fef1f9d0e1f3d1148e5"} Nov 23 15:27:33 crc kubenswrapper[5050]: I1123 15:27:33.758219 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xmgkt" event={"ID":"ee3bbc24-9528-4284-b09a-884d2c41bdba","Type":"ContainerStarted","Data":"8b8a8de727d70354861eb7755ec9b5c3de2b8352517411ff23844ba8e967730c"} Nov 23 15:27:34 crc kubenswrapper[5050]: I1123 15:27:34.793118 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xmgkt" event={"ID":"ee3bbc24-9528-4284-b09a-884d2c41bdba","Type":"ContainerStarted","Data":"91e38f40e678739a921aee277cb49f60b55ed9ade9f6cd9c80e84dbec014ec0b"} Nov 23 15:27:35 crc kubenswrapper[5050]: I1123 15:27:35.808360 5050 generic.go:334] "Generic (PLEG): container finished" podID="ee3bbc24-9528-4284-b09a-884d2c41bdba" containerID="91e38f40e678739a921aee277cb49f60b55ed9ade9f6cd9c80e84dbec014ec0b" exitCode=0 Nov 23 15:27:35 crc kubenswrapper[5050]: I1123 15:27:35.808413 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xmgkt" event={"ID":"ee3bbc24-9528-4284-b09a-884d2c41bdba","Type":"ContainerDied","Data":"91e38f40e678739a921aee277cb49f60b55ed9ade9f6cd9c80e84dbec014ec0b"} Nov 23 15:27:36 crc kubenswrapper[5050]: I1123 15:27:36.819855 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xmgkt" event={"ID":"ee3bbc24-9528-4284-b09a-884d2c41bdba","Type":"ContainerStarted","Data":"fce5828463300ec94a01a4b59f1cd4cc7a942be7457dac6584e5f6236ce851f2"} Nov 23 15:27:36 crc kubenswrapper[5050]: I1123 15:27:36.854761 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-xmgkt" podStartSLOduration=2.371563584 podStartE2EDuration="4.854734407s" podCreationTimestamp="2025-11-23 15:27:32 +0000 UTC" firstStartedPulling="2025-11-23 15:27:33.761847181 +0000 UTC m=+2748.928843666" lastFinishedPulling="2025-11-23 15:27:36.245017974 +0000 UTC m=+2751.412014489" observedRunningTime="2025-11-23 15:27:36.847348128 +0000 UTC m=+2752.014344643" watchObservedRunningTime="2025-11-23 15:27:36.854734407 +0000 UTC m=+2752.021730902" Nov 23 15:27:42 crc kubenswrapper[5050]: I1123 15:27:42.573885 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" 
status="unhealthy" pod="openshift-marketplace/certified-operators-xmgkt" Nov 23 15:27:42 crc kubenswrapper[5050]: I1123 15:27:42.575159 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-xmgkt" Nov 23 15:27:42 crc kubenswrapper[5050]: I1123 15:27:42.660737 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-xmgkt" Nov 23 15:27:42 crc kubenswrapper[5050]: I1123 15:27:42.955016 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-xmgkt" Nov 23 15:27:43 crc kubenswrapper[5050]: I1123 15:27:43.019098 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-xmgkt"] Nov 23 15:27:44 crc kubenswrapper[5050]: I1123 15:27:44.904687 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-xmgkt" podUID="ee3bbc24-9528-4284-b09a-884d2c41bdba" containerName="registry-server" containerID="cri-o://fce5828463300ec94a01a4b59f1cd4cc7a942be7457dac6584e5f6236ce851f2" gracePeriod=2 Nov 23 15:27:45 crc kubenswrapper[5050]: I1123 15:27:45.340126 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-54769"] Nov 23 15:27:45 crc kubenswrapper[5050]: I1123 15:27:45.342884 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-54769" Nov 23 15:27:45 crc kubenswrapper[5050]: I1123 15:27:45.358824 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-54769"] Nov 23 15:27:45 crc kubenswrapper[5050]: I1123 15:27:45.401898 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-xmgkt" Nov 23 15:27:45 crc kubenswrapper[5050]: I1123 15:27:45.455189 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f5l52\" (UniqueName: \"kubernetes.io/projected/7246a0b6-ab02-4b8c-af94-719a77523b26-kube-api-access-f5l52\") pod \"redhat-operators-54769\" (UID: \"7246a0b6-ab02-4b8c-af94-719a77523b26\") " pod="openshift-marketplace/redhat-operators-54769" Nov 23 15:27:45 crc kubenswrapper[5050]: I1123 15:27:45.455272 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7246a0b6-ab02-4b8c-af94-719a77523b26-catalog-content\") pod \"redhat-operators-54769\" (UID: \"7246a0b6-ab02-4b8c-af94-719a77523b26\") " pod="openshift-marketplace/redhat-operators-54769" Nov 23 15:27:45 crc kubenswrapper[5050]: I1123 15:27:45.455374 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7246a0b6-ab02-4b8c-af94-719a77523b26-utilities\") pod \"redhat-operators-54769\" (UID: \"7246a0b6-ab02-4b8c-af94-719a77523b26\") " pod="openshift-marketplace/redhat-operators-54769" Nov 23 15:27:45 crc kubenswrapper[5050]: I1123 15:27:45.555918 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ee3bbc24-9528-4284-b09a-884d2c41bdba-utilities\") pod \"ee3bbc24-9528-4284-b09a-884d2c41bdba\" (UID: \"ee3bbc24-9528-4284-b09a-884d2c41bdba\") " Nov 23 15:27:45 crc kubenswrapper[5050]: I1123 15:27:45.557328 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ee3bbc24-9528-4284-b09a-884d2c41bdba-utilities" (OuterVolumeSpecName: "utilities") pod "ee3bbc24-9528-4284-b09a-884d2c41bdba" (UID: "ee3bbc24-9528-4284-b09a-884d2c41bdba"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 15:27:45 crc kubenswrapper[5050]: I1123 15:27:45.558400 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ee3bbc24-9528-4284-b09a-884d2c41bdba-catalog-content\") pod \"ee3bbc24-9528-4284-b09a-884d2c41bdba\" (UID: \"ee3bbc24-9528-4284-b09a-884d2c41bdba\") " Nov 23 15:27:45 crc kubenswrapper[5050]: I1123 15:27:45.558663 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5gx4x\" (UniqueName: \"kubernetes.io/projected/ee3bbc24-9528-4284-b09a-884d2c41bdba-kube-api-access-5gx4x\") pod \"ee3bbc24-9528-4284-b09a-884d2c41bdba\" (UID: \"ee3bbc24-9528-4284-b09a-884d2c41bdba\") " Nov 23 15:27:45 crc kubenswrapper[5050]: I1123 15:27:45.559167 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7246a0b6-ab02-4b8c-af94-719a77523b26-utilities\") pod \"redhat-operators-54769\" (UID: \"7246a0b6-ab02-4b8c-af94-719a77523b26\") " pod="openshift-marketplace/redhat-operators-54769" Nov 23 15:27:45 crc kubenswrapper[5050]: I1123 15:27:45.559277 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f5l52\" (UniqueName: \"kubernetes.io/projected/7246a0b6-ab02-4b8c-af94-719a77523b26-kube-api-access-f5l52\") pod \"redhat-operators-54769\" (UID: \"7246a0b6-ab02-4b8c-af94-719a77523b26\") " pod="openshift-marketplace/redhat-operators-54769" Nov 23 15:27:45 crc kubenswrapper[5050]: I1123 15:27:45.559337 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7246a0b6-ab02-4b8c-af94-719a77523b26-catalog-content\") pod \"redhat-operators-54769\" (UID: \"7246a0b6-ab02-4b8c-af94-719a77523b26\") " pod="openshift-marketplace/redhat-operators-54769" Nov 23 15:27:45 crc kubenswrapper[5050]: I1123 15:27:45.559499 5050 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ee3bbc24-9528-4284-b09a-884d2c41bdba-utilities\") on node \"crc\" DevicePath \"\"" Nov 23 15:27:45 crc kubenswrapper[5050]: I1123 15:27:45.560360 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7246a0b6-ab02-4b8c-af94-719a77523b26-utilities\") pod \"redhat-operators-54769\" (UID: \"7246a0b6-ab02-4b8c-af94-719a77523b26\") " pod="openshift-marketplace/redhat-operators-54769" Nov 23 15:27:45 crc kubenswrapper[5050]: I1123 15:27:45.560487 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7246a0b6-ab02-4b8c-af94-719a77523b26-catalog-content\") pod \"redhat-operators-54769\" (UID: \"7246a0b6-ab02-4b8c-af94-719a77523b26\") " pod="openshift-marketplace/redhat-operators-54769" Nov 23 15:27:45 crc kubenswrapper[5050]: I1123 15:27:45.581617 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ee3bbc24-9528-4284-b09a-884d2c41bdba-kube-api-access-5gx4x" (OuterVolumeSpecName: "kube-api-access-5gx4x") pod "ee3bbc24-9528-4284-b09a-884d2c41bdba" (UID: "ee3bbc24-9528-4284-b09a-884d2c41bdba"). InnerVolumeSpecName "kube-api-access-5gx4x". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:27:45 crc kubenswrapper[5050]: I1123 15:27:45.585934 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f5l52\" (UniqueName: \"kubernetes.io/projected/7246a0b6-ab02-4b8c-af94-719a77523b26-kube-api-access-f5l52\") pod \"redhat-operators-54769\" (UID: \"7246a0b6-ab02-4b8c-af94-719a77523b26\") " pod="openshift-marketplace/redhat-operators-54769" Nov 23 15:27:45 crc kubenswrapper[5050]: I1123 15:27:45.617048 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ee3bbc24-9528-4284-b09a-884d2c41bdba-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ee3bbc24-9528-4284-b09a-884d2c41bdba" (UID: "ee3bbc24-9528-4284-b09a-884d2c41bdba"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 15:27:45 crc kubenswrapper[5050]: I1123 15:27:45.661194 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5gx4x\" (UniqueName: \"kubernetes.io/projected/ee3bbc24-9528-4284-b09a-884d2c41bdba-kube-api-access-5gx4x\") on node \"crc\" DevicePath \"\"" Nov 23 15:27:45 crc kubenswrapper[5050]: I1123 15:27:45.661243 5050 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ee3bbc24-9528-4284-b09a-884d2c41bdba-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 23 15:27:45 crc kubenswrapper[5050]: I1123 15:27:45.713705 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-54769" Nov 23 15:27:45 crc kubenswrapper[5050]: I1123 15:27:45.925674 5050 generic.go:334] "Generic (PLEG): container finished" podID="ee3bbc24-9528-4284-b09a-884d2c41bdba" containerID="fce5828463300ec94a01a4b59f1cd4cc7a942be7457dac6584e5f6236ce851f2" exitCode=0 Nov 23 15:27:45 crc kubenswrapper[5050]: I1123 15:27:45.925774 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xmgkt" event={"ID":"ee3bbc24-9528-4284-b09a-884d2c41bdba","Type":"ContainerDied","Data":"fce5828463300ec94a01a4b59f1cd4cc7a942be7457dac6584e5f6236ce851f2"} Nov 23 15:27:45 crc kubenswrapper[5050]: I1123 15:27:45.925820 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xmgkt" event={"ID":"ee3bbc24-9528-4284-b09a-884d2c41bdba","Type":"ContainerDied","Data":"8b8a8de727d70354861eb7755ec9b5c3de2b8352517411ff23844ba8e967730c"} Nov 23 15:27:45 crc kubenswrapper[5050]: I1123 15:27:45.925853 5050 scope.go:117] "RemoveContainer" containerID="fce5828463300ec94a01a4b59f1cd4cc7a942be7457dac6584e5f6236ce851f2" Nov 23 15:27:45 crc kubenswrapper[5050]: I1123 15:27:45.926169 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-xmgkt" Nov 23 15:27:45 crc kubenswrapper[5050]: I1123 15:27:45.973984 5050 scope.go:117] "RemoveContainer" containerID="91e38f40e678739a921aee277cb49f60b55ed9ade9f6cd9c80e84dbec014ec0b" Nov 23 15:27:45 crc kubenswrapper[5050]: I1123 15:27:45.983949 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-xmgkt"] Nov 23 15:27:46 crc kubenswrapper[5050]: I1123 15:27:46.003315 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-xmgkt"] Nov 23 15:27:46 crc kubenswrapper[5050]: I1123 15:27:46.009906 5050 scope.go:117] "RemoveContainer" containerID="489f25ddf8027d8a48cf6396312203b684d07e3b85a65fef1f9d0e1f3d1148e5" Nov 23 15:27:46 crc kubenswrapper[5050]: I1123 15:27:46.060923 5050 scope.go:117] "RemoveContainer" containerID="fce5828463300ec94a01a4b59f1cd4cc7a942be7457dac6584e5f6236ce851f2" Nov 23 15:27:46 crc kubenswrapper[5050]: E1123 15:27:46.062157 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fce5828463300ec94a01a4b59f1cd4cc7a942be7457dac6584e5f6236ce851f2\": container with ID starting with fce5828463300ec94a01a4b59f1cd4cc7a942be7457dac6584e5f6236ce851f2 not found: ID does not exist" containerID="fce5828463300ec94a01a4b59f1cd4cc7a942be7457dac6584e5f6236ce851f2" Nov 23 15:27:46 crc kubenswrapper[5050]: I1123 15:27:46.062206 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fce5828463300ec94a01a4b59f1cd4cc7a942be7457dac6584e5f6236ce851f2"} err="failed to get container status \"fce5828463300ec94a01a4b59f1cd4cc7a942be7457dac6584e5f6236ce851f2\": rpc error: code = NotFound desc = could not find container \"fce5828463300ec94a01a4b59f1cd4cc7a942be7457dac6584e5f6236ce851f2\": container with ID starting with fce5828463300ec94a01a4b59f1cd4cc7a942be7457dac6584e5f6236ce851f2 not found: ID does not exist" Nov 23 15:27:46 crc kubenswrapper[5050]: I1123 15:27:46.062239 5050 scope.go:117] "RemoveContainer" containerID="91e38f40e678739a921aee277cb49f60b55ed9ade9f6cd9c80e84dbec014ec0b" Nov 23 15:27:46 crc kubenswrapper[5050]: E1123 15:27:46.062644 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"91e38f40e678739a921aee277cb49f60b55ed9ade9f6cd9c80e84dbec014ec0b\": container with ID starting with 91e38f40e678739a921aee277cb49f60b55ed9ade9f6cd9c80e84dbec014ec0b not found: ID does not exist" containerID="91e38f40e678739a921aee277cb49f60b55ed9ade9f6cd9c80e84dbec014ec0b" Nov 23 15:27:46 crc kubenswrapper[5050]: I1123 15:27:46.062700 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"91e38f40e678739a921aee277cb49f60b55ed9ade9f6cd9c80e84dbec014ec0b"} err="failed to get container status \"91e38f40e678739a921aee277cb49f60b55ed9ade9f6cd9c80e84dbec014ec0b\": rpc error: code = NotFound desc = could not find container \"91e38f40e678739a921aee277cb49f60b55ed9ade9f6cd9c80e84dbec014ec0b\": container with ID starting with 91e38f40e678739a921aee277cb49f60b55ed9ade9f6cd9c80e84dbec014ec0b not found: ID does not exist" Nov 23 15:27:46 crc kubenswrapper[5050]: I1123 15:27:46.062735 5050 scope.go:117] "RemoveContainer" containerID="489f25ddf8027d8a48cf6396312203b684d07e3b85a65fef1f9d0e1f3d1148e5" Nov 23 15:27:46 crc kubenswrapper[5050]: E1123 15:27:46.063777 5050 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"489f25ddf8027d8a48cf6396312203b684d07e3b85a65fef1f9d0e1f3d1148e5\": container with ID starting with 489f25ddf8027d8a48cf6396312203b684d07e3b85a65fef1f9d0e1f3d1148e5 not found: ID does not exist" containerID="489f25ddf8027d8a48cf6396312203b684d07e3b85a65fef1f9d0e1f3d1148e5" Nov 23 15:27:46 crc kubenswrapper[5050]: I1123 15:27:46.063809 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"489f25ddf8027d8a48cf6396312203b684d07e3b85a65fef1f9d0e1f3d1148e5"} err="failed to get container status \"489f25ddf8027d8a48cf6396312203b684d07e3b85a65fef1f9d0e1f3d1148e5\": rpc error: code = NotFound desc = could not find container \"489f25ddf8027d8a48cf6396312203b684d07e3b85a65fef1f9d0e1f3d1148e5\": container with ID starting with 489f25ddf8027d8a48cf6396312203b684d07e3b85a65fef1f9d0e1f3d1148e5 not found: ID does not exist" Nov 23 15:27:46 crc kubenswrapper[5050]: I1123 15:27:46.233138 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-54769"] Nov 23 15:27:46 crc kubenswrapper[5050]: I1123 15:27:46.936427 5050 generic.go:334] "Generic (PLEG): container finished" podID="7246a0b6-ab02-4b8c-af94-719a77523b26" containerID="e5d1d9d6e00d0b04d9bed030927928b1da37ef6e503ab3508e7ddc881433f981" exitCode=0 Nov 23 15:27:46 crc kubenswrapper[5050]: I1123 15:27:46.936539 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-54769" event={"ID":"7246a0b6-ab02-4b8c-af94-719a77523b26","Type":"ContainerDied","Data":"e5d1d9d6e00d0b04d9bed030927928b1da37ef6e503ab3508e7ddc881433f981"} Nov 23 15:27:46 crc kubenswrapper[5050]: I1123 15:27:46.936619 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-54769" event={"ID":"7246a0b6-ab02-4b8c-af94-719a77523b26","Type":"ContainerStarted","Data":"f9fa8bea1b42f097c476db1416a0c40c431d397310c7eec9ac129ed97a698dd2"} Nov 23 15:27:47 crc kubenswrapper[5050]: I1123 15:27:47.573555 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ee3bbc24-9528-4284-b09a-884d2c41bdba" path="/var/lib/kubelet/pods/ee3bbc24-9528-4284-b09a-884d2c41bdba/volumes" Nov 23 15:27:47 crc kubenswrapper[5050]: I1123 15:27:47.950163 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-54769" event={"ID":"7246a0b6-ab02-4b8c-af94-719a77523b26","Type":"ContainerStarted","Data":"a2915e2ecbe903bff113444a0786584985f85acaba9f1dd99b1bc3719e16a916"} Nov 23 15:27:48 crc kubenswrapper[5050]: I1123 15:27:48.960434 5050 generic.go:334] "Generic (PLEG): container finished" podID="7246a0b6-ab02-4b8c-af94-719a77523b26" containerID="a2915e2ecbe903bff113444a0786584985f85acaba9f1dd99b1bc3719e16a916" exitCode=0 Nov 23 15:27:48 crc kubenswrapper[5050]: I1123 15:27:48.960569 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-54769" event={"ID":"7246a0b6-ab02-4b8c-af94-719a77523b26","Type":"ContainerDied","Data":"a2915e2ecbe903bff113444a0786584985f85acaba9f1dd99b1bc3719e16a916"} Nov 23 15:27:49 crc kubenswrapper[5050]: I1123 15:27:49.980155 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-54769" event={"ID":"7246a0b6-ab02-4b8c-af94-719a77523b26","Type":"ContainerStarted","Data":"40fa84ff56bc203a5b3e8f9db2672c726b1de46cff62e0f4c788717a56786269"} Nov 23 15:27:50 crc kubenswrapper[5050]: I1123 15:27:50.022217 5050 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-54769" podStartSLOduration=2.483180301 podStartE2EDuration="5.022184325s" podCreationTimestamp="2025-11-23 15:27:45 +0000 UTC" firstStartedPulling="2025-11-23 15:27:46.940607708 +0000 UTC m=+2762.107604183" lastFinishedPulling="2025-11-23 15:27:49.479611712 +0000 UTC m=+2764.646608207" observedRunningTime="2025-11-23 15:27:50.008331313 +0000 UTC m=+2765.175327838" watchObservedRunningTime="2025-11-23 15:27:50.022184325 +0000 UTC m=+2765.189180850" Nov 23 15:27:55 crc kubenswrapper[5050]: I1123 15:27:55.714078 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-54769" Nov 23 15:27:55 crc kubenswrapper[5050]: I1123 15:27:55.714999 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-54769" Nov 23 15:27:56 crc kubenswrapper[5050]: I1123 15:27:56.783640 5050 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-54769" podUID="7246a0b6-ab02-4b8c-af94-719a77523b26" containerName="registry-server" probeResult="failure" output=< Nov 23 15:27:56 crc kubenswrapper[5050]: timeout: failed to connect service ":50051" within 1s Nov 23 15:27:56 crc kubenswrapper[5050]: > Nov 23 15:27:59 crc kubenswrapper[5050]: I1123 15:27:59.225033 5050 patch_prober.go:28] interesting pod/machine-config-daemon-hlrlq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 23 15:27:59 crc kubenswrapper[5050]: I1123 15:27:59.225761 5050 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 23 15:28:05 crc kubenswrapper[5050]: I1123 15:28:05.794556 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-54769" Nov 23 15:28:05 crc kubenswrapper[5050]: I1123 15:28:05.876644 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-54769" Nov 23 15:28:06 crc kubenswrapper[5050]: I1123 15:28:06.034676 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-54769"] Nov 23 15:28:07 crc kubenswrapper[5050]: I1123 15:28:07.206563 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-54769" podUID="7246a0b6-ab02-4b8c-af94-719a77523b26" containerName="registry-server" containerID="cri-o://40fa84ff56bc203a5b3e8f9db2672c726b1de46cff62e0f4c788717a56786269" gracePeriod=2 Nov 23 15:28:07 crc kubenswrapper[5050]: I1123 15:28:07.656030 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-54769" Nov 23 15:28:07 crc kubenswrapper[5050]: I1123 15:28:07.731389 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7246a0b6-ab02-4b8c-af94-719a77523b26-catalog-content\") pod \"7246a0b6-ab02-4b8c-af94-719a77523b26\" (UID: \"7246a0b6-ab02-4b8c-af94-719a77523b26\") " Nov 23 15:28:07 crc kubenswrapper[5050]: I1123 15:28:07.731528 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f5l52\" (UniqueName: \"kubernetes.io/projected/7246a0b6-ab02-4b8c-af94-719a77523b26-kube-api-access-f5l52\") pod \"7246a0b6-ab02-4b8c-af94-719a77523b26\" (UID: \"7246a0b6-ab02-4b8c-af94-719a77523b26\") " Nov 23 15:28:07 crc kubenswrapper[5050]: I1123 15:28:07.731598 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7246a0b6-ab02-4b8c-af94-719a77523b26-utilities\") pod \"7246a0b6-ab02-4b8c-af94-719a77523b26\" (UID: \"7246a0b6-ab02-4b8c-af94-719a77523b26\") " Nov 23 15:28:07 crc kubenswrapper[5050]: I1123 15:28:07.732540 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7246a0b6-ab02-4b8c-af94-719a77523b26-utilities" (OuterVolumeSpecName: "utilities") pod "7246a0b6-ab02-4b8c-af94-719a77523b26" (UID: "7246a0b6-ab02-4b8c-af94-719a77523b26"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 15:28:07 crc kubenswrapper[5050]: I1123 15:28:07.742895 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7246a0b6-ab02-4b8c-af94-719a77523b26-kube-api-access-f5l52" (OuterVolumeSpecName: "kube-api-access-f5l52") pod "7246a0b6-ab02-4b8c-af94-719a77523b26" (UID: "7246a0b6-ab02-4b8c-af94-719a77523b26"). InnerVolumeSpecName "kube-api-access-f5l52". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:28:07 crc kubenswrapper[5050]: I1123 15:28:07.833226 5050 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7246a0b6-ab02-4b8c-af94-719a77523b26-utilities\") on node \"crc\" DevicePath \"\"" Nov 23 15:28:07 crc kubenswrapper[5050]: I1123 15:28:07.833274 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f5l52\" (UniqueName: \"kubernetes.io/projected/7246a0b6-ab02-4b8c-af94-719a77523b26-kube-api-access-f5l52\") on node \"crc\" DevicePath \"\"" Nov 23 15:28:07 crc kubenswrapper[5050]: I1123 15:28:07.837581 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7246a0b6-ab02-4b8c-af94-719a77523b26-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "7246a0b6-ab02-4b8c-af94-719a77523b26" (UID: "7246a0b6-ab02-4b8c-af94-719a77523b26"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 15:28:07 crc kubenswrapper[5050]: I1123 15:28:07.936397 5050 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7246a0b6-ab02-4b8c-af94-719a77523b26-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 23 15:28:08 crc kubenswrapper[5050]: I1123 15:28:08.217329 5050 generic.go:334] "Generic (PLEG): container finished" podID="7246a0b6-ab02-4b8c-af94-719a77523b26" containerID="40fa84ff56bc203a5b3e8f9db2672c726b1de46cff62e0f4c788717a56786269" exitCode=0 Nov 23 15:28:08 crc kubenswrapper[5050]: I1123 15:28:08.217409 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-54769" Nov 23 15:28:08 crc kubenswrapper[5050]: I1123 15:28:08.217428 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-54769" event={"ID":"7246a0b6-ab02-4b8c-af94-719a77523b26","Type":"ContainerDied","Data":"40fa84ff56bc203a5b3e8f9db2672c726b1de46cff62e0f4c788717a56786269"} Nov 23 15:28:08 crc kubenswrapper[5050]: I1123 15:28:08.218104 5050 scope.go:117] "RemoveContainer" containerID="40fa84ff56bc203a5b3e8f9db2672c726b1de46cff62e0f4c788717a56786269" Nov 23 15:28:08 crc kubenswrapper[5050]: I1123 15:28:08.219931 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-54769" event={"ID":"7246a0b6-ab02-4b8c-af94-719a77523b26","Type":"ContainerDied","Data":"f9fa8bea1b42f097c476db1416a0c40c431d397310c7eec9ac129ed97a698dd2"} Nov 23 15:28:08 crc kubenswrapper[5050]: I1123 15:28:08.249688 5050 scope.go:117] "RemoveContainer" containerID="a2915e2ecbe903bff113444a0786584985f85acaba9f1dd99b1bc3719e16a916" Nov 23 15:28:08 crc kubenswrapper[5050]: I1123 15:28:08.272601 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-54769"] Nov 23 15:28:08 crc kubenswrapper[5050]: I1123 15:28:08.280738 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-54769"] Nov 23 15:28:08 crc kubenswrapper[5050]: I1123 15:28:08.292221 5050 scope.go:117] "RemoveContainer" containerID="e5d1d9d6e00d0b04d9bed030927928b1da37ef6e503ab3508e7ddc881433f981" Nov 23 15:28:08 crc kubenswrapper[5050]: I1123 15:28:08.327145 5050 scope.go:117] "RemoveContainer" containerID="40fa84ff56bc203a5b3e8f9db2672c726b1de46cff62e0f4c788717a56786269" Nov 23 15:28:08 crc kubenswrapper[5050]: E1123 15:28:08.327861 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"40fa84ff56bc203a5b3e8f9db2672c726b1de46cff62e0f4c788717a56786269\": container with ID starting with 40fa84ff56bc203a5b3e8f9db2672c726b1de46cff62e0f4c788717a56786269 not found: ID does not exist" containerID="40fa84ff56bc203a5b3e8f9db2672c726b1de46cff62e0f4c788717a56786269" Nov 23 15:28:08 crc kubenswrapper[5050]: I1123 15:28:08.327923 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"40fa84ff56bc203a5b3e8f9db2672c726b1de46cff62e0f4c788717a56786269"} err="failed to get container status \"40fa84ff56bc203a5b3e8f9db2672c726b1de46cff62e0f4c788717a56786269\": rpc error: code = NotFound desc = could not find container \"40fa84ff56bc203a5b3e8f9db2672c726b1de46cff62e0f4c788717a56786269\": container with ID starting with 40fa84ff56bc203a5b3e8f9db2672c726b1de46cff62e0f4c788717a56786269 not found: ID does not exist" Nov 23 15:28:08 crc 
kubenswrapper[5050]: I1123 15:28:08.327968 5050 scope.go:117] "RemoveContainer" containerID="a2915e2ecbe903bff113444a0786584985f85acaba9f1dd99b1bc3719e16a916" Nov 23 15:28:08 crc kubenswrapper[5050]: E1123 15:28:08.328852 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a2915e2ecbe903bff113444a0786584985f85acaba9f1dd99b1bc3719e16a916\": container with ID starting with a2915e2ecbe903bff113444a0786584985f85acaba9f1dd99b1bc3719e16a916 not found: ID does not exist" containerID="a2915e2ecbe903bff113444a0786584985f85acaba9f1dd99b1bc3719e16a916" Nov 23 15:28:08 crc kubenswrapper[5050]: I1123 15:28:08.328894 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a2915e2ecbe903bff113444a0786584985f85acaba9f1dd99b1bc3719e16a916"} err="failed to get container status \"a2915e2ecbe903bff113444a0786584985f85acaba9f1dd99b1bc3719e16a916\": rpc error: code = NotFound desc = could not find container \"a2915e2ecbe903bff113444a0786584985f85acaba9f1dd99b1bc3719e16a916\": container with ID starting with a2915e2ecbe903bff113444a0786584985f85acaba9f1dd99b1bc3719e16a916 not found: ID does not exist" Nov 23 15:28:08 crc kubenswrapper[5050]: I1123 15:28:08.328919 5050 scope.go:117] "RemoveContainer" containerID="e5d1d9d6e00d0b04d9bed030927928b1da37ef6e503ab3508e7ddc881433f981" Nov 23 15:28:08 crc kubenswrapper[5050]: E1123 15:28:08.329428 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e5d1d9d6e00d0b04d9bed030927928b1da37ef6e503ab3508e7ddc881433f981\": container with ID starting with e5d1d9d6e00d0b04d9bed030927928b1da37ef6e503ab3508e7ddc881433f981 not found: ID does not exist" containerID="e5d1d9d6e00d0b04d9bed030927928b1da37ef6e503ab3508e7ddc881433f981" Nov 23 15:28:08 crc kubenswrapper[5050]: I1123 15:28:08.329551 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e5d1d9d6e00d0b04d9bed030927928b1da37ef6e503ab3508e7ddc881433f981"} err="failed to get container status \"e5d1d9d6e00d0b04d9bed030927928b1da37ef6e503ab3508e7ddc881433f981\": rpc error: code = NotFound desc = could not find container \"e5d1d9d6e00d0b04d9bed030927928b1da37ef6e503ab3508e7ddc881433f981\": container with ID starting with e5d1d9d6e00d0b04d9bed030927928b1da37ef6e503ab3508e7ddc881433f981 not found: ID does not exist" Nov 23 15:28:09 crc kubenswrapper[5050]: I1123 15:28:09.557777 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7246a0b6-ab02-4b8c-af94-719a77523b26" path="/var/lib/kubelet/pods/7246a0b6-ab02-4b8c-af94-719a77523b26/volumes" Nov 23 15:28:29 crc kubenswrapper[5050]: I1123 15:28:29.224444 5050 patch_prober.go:28] interesting pod/machine-config-daemon-hlrlq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 23 15:28:29 crc kubenswrapper[5050]: I1123 15:28:29.225529 5050 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 23 15:28:59 crc kubenswrapper[5050]: I1123 15:28:59.224418 5050 patch_prober.go:28] interesting 
pod/machine-config-daemon-hlrlq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 23 15:28:59 crc kubenswrapper[5050]: I1123 15:28:59.225259 5050 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 23 15:28:59 crc kubenswrapper[5050]: I1123 15:28:59.225311 5050 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" Nov 23 15:28:59 crc kubenswrapper[5050]: I1123 15:28:59.226165 5050 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"ab40103857df8394b1d45d09f6678e11b0ec57d408929b19c12c0457e718ddf1"} pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 23 15:28:59 crc kubenswrapper[5050]: I1123 15:28:59.226222 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" containerID="cri-o://ab40103857df8394b1d45d09f6678e11b0ec57d408929b19c12c0457e718ddf1" gracePeriod=600 Nov 23 15:28:59 crc kubenswrapper[5050]: E1123 15:28:59.360623 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 15:28:59 crc kubenswrapper[5050]: I1123 15:28:59.763774 5050 generic.go:334] "Generic (PLEG): container finished" podID="1d998909-9470-47ef-87e8-b34f0473682f" containerID="ab40103857df8394b1d45d09f6678e11b0ec57d408929b19c12c0457e718ddf1" exitCode=0 Nov 23 15:28:59 crc kubenswrapper[5050]: I1123 15:28:59.763860 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" event={"ID":"1d998909-9470-47ef-87e8-b34f0473682f","Type":"ContainerDied","Data":"ab40103857df8394b1d45d09f6678e11b0ec57d408929b19c12c0457e718ddf1"} Nov 23 15:28:59 crc kubenswrapper[5050]: I1123 15:28:59.764242 5050 scope.go:117] "RemoveContainer" containerID="8f669cdd1f43caedab92fcdf6aa597a61496916e12c9b8f8acf25da546e84e8b" Nov 23 15:28:59 crc kubenswrapper[5050]: I1123 15:28:59.765113 5050 scope.go:117] "RemoveContainer" containerID="ab40103857df8394b1d45d09f6678e11b0ec57d408929b19c12c0457e718ddf1" Nov 23 15:28:59 crc kubenswrapper[5050]: E1123 15:28:59.765615 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 15:29:11 crc kubenswrapper[5050]: I1123 15:29:11.548331 5050 scope.go:117] "RemoveContainer" containerID="ab40103857df8394b1d45d09f6678e11b0ec57d408929b19c12c0457e718ddf1" Nov 23 15:29:11 crc kubenswrapper[5050]: E1123 15:29:11.549514 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 15:29:24 crc kubenswrapper[5050]: I1123 15:29:24.548985 5050 scope.go:117] "RemoveContainer" containerID="ab40103857df8394b1d45d09f6678e11b0ec57d408929b19c12c0457e718ddf1" Nov 23 15:29:24 crc kubenswrapper[5050]: E1123 15:29:24.550248 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 15:29:37 crc kubenswrapper[5050]: I1123 15:29:37.548888 5050 scope.go:117] "RemoveContainer" containerID="ab40103857df8394b1d45d09f6678e11b0ec57d408929b19c12c0457e718ddf1" Nov 23 15:29:37 crc kubenswrapper[5050]: E1123 15:29:37.550199 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 15:29:49 crc kubenswrapper[5050]: I1123 15:29:49.549340 5050 scope.go:117] "RemoveContainer" containerID="ab40103857df8394b1d45d09f6678e11b0ec57d408929b19c12c0457e718ddf1" Nov 23 15:29:49 crc kubenswrapper[5050]: E1123 15:29:49.550507 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 15:30:00 crc kubenswrapper[5050]: I1123 15:30:00.167312 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29398530-vhkcp"] Nov 23 15:30:00 crc kubenswrapper[5050]: E1123 15:30:00.168725 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ee3bbc24-9528-4284-b09a-884d2c41bdba" containerName="registry-server" Nov 23 15:30:00 crc kubenswrapper[5050]: I1123 15:30:00.168745 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="ee3bbc24-9528-4284-b09a-884d2c41bdba" containerName="registry-server" Nov 23 15:30:00 crc kubenswrapper[5050]: E1123 15:30:00.168765 5050 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="7246a0b6-ab02-4b8c-af94-719a77523b26" containerName="registry-server" Nov 23 15:30:00 crc kubenswrapper[5050]: I1123 15:30:00.168772 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="7246a0b6-ab02-4b8c-af94-719a77523b26" containerName="registry-server" Nov 23 15:30:00 crc kubenswrapper[5050]: E1123 15:30:00.168808 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ee3bbc24-9528-4284-b09a-884d2c41bdba" containerName="extract-utilities" Nov 23 15:30:00 crc kubenswrapper[5050]: I1123 15:30:00.168817 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="ee3bbc24-9528-4284-b09a-884d2c41bdba" containerName="extract-utilities" Nov 23 15:30:00 crc kubenswrapper[5050]: E1123 15:30:00.168828 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ee3bbc24-9528-4284-b09a-884d2c41bdba" containerName="extract-content" Nov 23 15:30:00 crc kubenswrapper[5050]: I1123 15:30:00.168838 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="ee3bbc24-9528-4284-b09a-884d2c41bdba" containerName="extract-content" Nov 23 15:30:00 crc kubenswrapper[5050]: E1123 15:30:00.168849 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7246a0b6-ab02-4b8c-af94-719a77523b26" containerName="extract-content" Nov 23 15:30:00 crc kubenswrapper[5050]: I1123 15:30:00.168856 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="7246a0b6-ab02-4b8c-af94-719a77523b26" containerName="extract-content" Nov 23 15:30:00 crc kubenswrapper[5050]: E1123 15:30:00.168863 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7246a0b6-ab02-4b8c-af94-719a77523b26" containerName="extract-utilities" Nov 23 15:30:00 crc kubenswrapper[5050]: I1123 15:30:00.168870 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="7246a0b6-ab02-4b8c-af94-719a77523b26" containerName="extract-utilities" Nov 23 15:30:00 crc kubenswrapper[5050]: I1123 15:30:00.169041 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="7246a0b6-ab02-4b8c-af94-719a77523b26" containerName="registry-server" Nov 23 15:30:00 crc kubenswrapper[5050]: I1123 15:30:00.169066 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="ee3bbc24-9528-4284-b09a-884d2c41bdba" containerName="registry-server" Nov 23 15:30:00 crc kubenswrapper[5050]: I1123 15:30:00.169811 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29398530-vhkcp" Nov 23 15:30:00 crc kubenswrapper[5050]: I1123 15:30:00.177855 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 23 15:30:00 crc kubenswrapper[5050]: I1123 15:30:00.178420 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 23 15:30:00 crc kubenswrapper[5050]: I1123 15:30:00.204232 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29398530-vhkcp"] Nov 23 15:30:00 crc kubenswrapper[5050]: I1123 15:30:00.331920 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/76cd80a6-c49c-4663-8483-3c144a527de9-secret-volume\") pod \"collect-profiles-29398530-vhkcp\" (UID: \"76cd80a6-c49c-4663-8483-3c144a527de9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29398530-vhkcp" Nov 23 15:30:00 crc kubenswrapper[5050]: I1123 15:30:00.332519 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/76cd80a6-c49c-4663-8483-3c144a527de9-config-volume\") pod \"collect-profiles-29398530-vhkcp\" (UID: \"76cd80a6-c49c-4663-8483-3c144a527de9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29398530-vhkcp" Nov 23 15:30:00 crc kubenswrapper[5050]: I1123 15:30:00.332681 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x8sch\" (UniqueName: \"kubernetes.io/projected/76cd80a6-c49c-4663-8483-3c144a527de9-kube-api-access-x8sch\") pod \"collect-profiles-29398530-vhkcp\" (UID: \"76cd80a6-c49c-4663-8483-3c144a527de9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29398530-vhkcp" Nov 23 15:30:00 crc kubenswrapper[5050]: I1123 15:30:00.434183 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/76cd80a6-c49c-4663-8483-3c144a527de9-secret-volume\") pod \"collect-profiles-29398530-vhkcp\" (UID: \"76cd80a6-c49c-4663-8483-3c144a527de9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29398530-vhkcp" Nov 23 15:30:00 crc kubenswrapper[5050]: I1123 15:30:00.434273 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/76cd80a6-c49c-4663-8483-3c144a527de9-config-volume\") pod \"collect-profiles-29398530-vhkcp\" (UID: \"76cd80a6-c49c-4663-8483-3c144a527de9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29398530-vhkcp" Nov 23 15:30:00 crc kubenswrapper[5050]: I1123 15:30:00.434313 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x8sch\" (UniqueName: \"kubernetes.io/projected/76cd80a6-c49c-4663-8483-3c144a527de9-kube-api-access-x8sch\") pod \"collect-profiles-29398530-vhkcp\" (UID: \"76cd80a6-c49c-4663-8483-3c144a527de9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29398530-vhkcp" Nov 23 15:30:00 crc kubenswrapper[5050]: I1123 15:30:00.435636 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/76cd80a6-c49c-4663-8483-3c144a527de9-config-volume\") pod 
\"collect-profiles-29398530-vhkcp\" (UID: \"76cd80a6-c49c-4663-8483-3c144a527de9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29398530-vhkcp" Nov 23 15:30:00 crc kubenswrapper[5050]: I1123 15:30:00.455294 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/76cd80a6-c49c-4663-8483-3c144a527de9-secret-volume\") pod \"collect-profiles-29398530-vhkcp\" (UID: \"76cd80a6-c49c-4663-8483-3c144a527de9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29398530-vhkcp" Nov 23 15:30:00 crc kubenswrapper[5050]: I1123 15:30:00.467134 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x8sch\" (UniqueName: \"kubernetes.io/projected/76cd80a6-c49c-4663-8483-3c144a527de9-kube-api-access-x8sch\") pod \"collect-profiles-29398530-vhkcp\" (UID: \"76cd80a6-c49c-4663-8483-3c144a527de9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29398530-vhkcp" Nov 23 15:30:00 crc kubenswrapper[5050]: I1123 15:30:00.522960 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29398530-vhkcp" Nov 23 15:30:00 crc kubenswrapper[5050]: I1123 15:30:00.985112 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29398530-vhkcp"] Nov 23 15:30:01 crc kubenswrapper[5050]: I1123 15:30:01.439711 5050 generic.go:334] "Generic (PLEG): container finished" podID="76cd80a6-c49c-4663-8483-3c144a527de9" containerID="7169d7655d76cd53ed06613f891f4123bd166ef3ddb806df5a4673f855d62369" exitCode=0 Nov 23 15:30:01 crc kubenswrapper[5050]: I1123 15:30:01.439812 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29398530-vhkcp" event={"ID":"76cd80a6-c49c-4663-8483-3c144a527de9","Type":"ContainerDied","Data":"7169d7655d76cd53ed06613f891f4123bd166ef3ddb806df5a4673f855d62369"} Nov 23 15:30:01 crc kubenswrapper[5050]: I1123 15:30:01.441069 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29398530-vhkcp" event={"ID":"76cd80a6-c49c-4663-8483-3c144a527de9","Type":"ContainerStarted","Data":"78bff932ac4592f5bc7481cff762d215d047c0855f68363863e98df5c45d9af5"} Nov 23 15:30:02 crc kubenswrapper[5050]: I1123 15:30:02.840481 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29398530-vhkcp" Nov 23 15:30:02 crc kubenswrapper[5050]: I1123 15:30:02.889006 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x8sch\" (UniqueName: \"kubernetes.io/projected/76cd80a6-c49c-4663-8483-3c144a527de9-kube-api-access-x8sch\") pod \"76cd80a6-c49c-4663-8483-3c144a527de9\" (UID: \"76cd80a6-c49c-4663-8483-3c144a527de9\") " Nov 23 15:30:02 crc kubenswrapper[5050]: I1123 15:30:02.889217 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/76cd80a6-c49c-4663-8483-3c144a527de9-config-volume\") pod \"76cd80a6-c49c-4663-8483-3c144a527de9\" (UID: \"76cd80a6-c49c-4663-8483-3c144a527de9\") " Nov 23 15:30:02 crc kubenswrapper[5050]: I1123 15:30:02.889249 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/76cd80a6-c49c-4663-8483-3c144a527de9-secret-volume\") pod \"76cd80a6-c49c-4663-8483-3c144a527de9\" (UID: \"76cd80a6-c49c-4663-8483-3c144a527de9\") " Nov 23 15:30:02 crc kubenswrapper[5050]: I1123 15:30:02.890261 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/76cd80a6-c49c-4663-8483-3c144a527de9-config-volume" (OuterVolumeSpecName: "config-volume") pod "76cd80a6-c49c-4663-8483-3c144a527de9" (UID: "76cd80a6-c49c-4663-8483-3c144a527de9"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 15:30:02 crc kubenswrapper[5050]: I1123 15:30:02.896173 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/76cd80a6-c49c-4663-8483-3c144a527de9-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "76cd80a6-c49c-4663-8483-3c144a527de9" (UID: "76cd80a6-c49c-4663-8483-3c144a527de9"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 15:30:02 crc kubenswrapper[5050]: I1123 15:30:02.896555 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/76cd80a6-c49c-4663-8483-3c144a527de9-kube-api-access-x8sch" (OuterVolumeSpecName: "kube-api-access-x8sch") pod "76cd80a6-c49c-4663-8483-3c144a527de9" (UID: "76cd80a6-c49c-4663-8483-3c144a527de9"). InnerVolumeSpecName "kube-api-access-x8sch". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:30:02 crc kubenswrapper[5050]: I1123 15:30:02.992064 5050 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/76cd80a6-c49c-4663-8483-3c144a527de9-config-volume\") on node \"crc\" DevicePath \"\"" Nov 23 15:30:02 crc kubenswrapper[5050]: I1123 15:30:02.992462 5050 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/76cd80a6-c49c-4663-8483-3c144a527de9-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 23 15:30:02 crc kubenswrapper[5050]: I1123 15:30:02.992482 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x8sch\" (UniqueName: \"kubernetes.io/projected/76cd80a6-c49c-4663-8483-3c144a527de9-kube-api-access-x8sch\") on node \"crc\" DevicePath \"\"" Nov 23 15:30:03 crc kubenswrapper[5050]: I1123 15:30:03.466156 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29398530-vhkcp" event={"ID":"76cd80a6-c49c-4663-8483-3c144a527de9","Type":"ContainerDied","Data":"78bff932ac4592f5bc7481cff762d215d047c0855f68363863e98df5c45d9af5"} Nov 23 15:30:03 crc kubenswrapper[5050]: I1123 15:30:03.466216 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="78bff932ac4592f5bc7481cff762d215d047c0855f68363863e98df5c45d9af5" Nov 23 15:30:03 crc kubenswrapper[5050]: I1123 15:30:03.466258 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29398530-vhkcp" Nov 23 15:30:03 crc kubenswrapper[5050]: I1123 15:30:03.993047 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29398485-p4nwd"] Nov 23 15:30:04 crc kubenswrapper[5050]: I1123 15:30:04.001189 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29398485-p4nwd"] Nov 23 15:30:04 crc kubenswrapper[5050]: I1123 15:30:04.548940 5050 scope.go:117] "RemoveContainer" containerID="ab40103857df8394b1d45d09f6678e11b0ec57d408929b19c12c0457e718ddf1" Nov 23 15:30:04 crc kubenswrapper[5050]: E1123 15:30:04.549732 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 15:30:05 crc kubenswrapper[5050]: I1123 15:30:05.564223 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c9ec5edf-785a-4b69-bf12-857345ceb036" path="/var/lib/kubelet/pods/c9ec5edf-785a-4b69-bf12-857345ceb036/volumes" Nov 23 15:30:18 crc kubenswrapper[5050]: I1123 15:30:18.549101 5050 scope.go:117] "RemoveContainer" containerID="ab40103857df8394b1d45d09f6678e11b0ec57d408929b19c12c0457e718ddf1" Nov 23 15:30:18 crc kubenswrapper[5050]: E1123 15:30:18.552013 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 15:30:30 crc kubenswrapper[5050]: I1123 15:30:30.549394 5050 scope.go:117] "RemoveContainer" containerID="ab40103857df8394b1d45d09f6678e11b0ec57d408929b19c12c0457e718ddf1" Nov 23 15:30:30 crc kubenswrapper[5050]: E1123 15:30:30.551000 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 15:30:44 crc kubenswrapper[5050]: I1123 15:30:44.548302 5050 scope.go:117] "RemoveContainer" containerID="ab40103857df8394b1d45d09f6678e11b0ec57d408929b19c12c0457e718ddf1" Nov 23 15:30:44 crc kubenswrapper[5050]: E1123 15:30:44.549097 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 15:30:57 crc kubenswrapper[5050]: I1123 15:30:57.549318 5050 scope.go:117] "RemoveContainer" containerID="ab40103857df8394b1d45d09f6678e11b0ec57d408929b19c12c0457e718ddf1" Nov 23 15:30:57 crc kubenswrapper[5050]: E1123 15:30:57.550109 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 15:31:04 crc kubenswrapper[5050]: I1123 15:31:04.910808 5050 scope.go:117] "RemoveContainer" containerID="1669a05290c08d7db83bd2181ec08bc5120f0d9f5f5c1362d28a3b73177398c0" Nov 23 15:31:12 crc kubenswrapper[5050]: I1123 15:31:12.549601 5050 scope.go:117] "RemoveContainer" containerID="ab40103857df8394b1d45d09f6678e11b0ec57d408929b19c12c0457e718ddf1" Nov 23 15:31:12 crc kubenswrapper[5050]: E1123 15:31:12.550751 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 15:31:27 crc kubenswrapper[5050]: I1123 15:31:27.549294 5050 scope.go:117] "RemoveContainer" containerID="ab40103857df8394b1d45d09f6678e11b0ec57d408929b19c12c0457e718ddf1" Nov 23 15:31:27 crc kubenswrapper[5050]: E1123 15:31:27.550433 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 15:31:40 crc kubenswrapper[5050]: I1123 15:31:40.549549 5050 scope.go:117] "RemoveContainer" containerID="ab40103857df8394b1d45d09f6678e11b0ec57d408929b19c12c0457e718ddf1" Nov 23 15:31:40 crc kubenswrapper[5050]: E1123 15:31:40.550899 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 15:31:55 crc kubenswrapper[5050]: I1123 15:31:55.553998 5050 scope.go:117] "RemoveContainer" containerID="ab40103857df8394b1d45d09f6678e11b0ec57d408929b19c12c0457e718ddf1" Nov 23 15:31:55 crc kubenswrapper[5050]: E1123 15:31:55.554765 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 15:32:04 crc kubenswrapper[5050]: I1123 15:32:04.966727 5050 scope.go:117] "RemoveContainer" containerID="8c763df513cfec06790d7757245f7f374f82b023e97ee3315f45a1a55b432dfa" Nov 23 15:32:05 crc kubenswrapper[5050]: I1123 15:32:05.008260 5050 scope.go:117] "RemoveContainer" containerID="9770755555d42f15d44767b0960a9afc06ec9f517aa2f871ac1d32d04badb38e" Nov 23 15:32:05 crc kubenswrapper[5050]: I1123 15:32:05.058872 5050 scope.go:117] "RemoveContainer" containerID="19edb0cc1646686349ee0757af9c5cff658764f7dd37bf715a287687b67ac690" Nov 23 15:32:06 crc kubenswrapper[5050]: I1123 15:32:06.550241 5050 scope.go:117] "RemoveContainer" containerID="ab40103857df8394b1d45d09f6678e11b0ec57d408929b19c12c0457e718ddf1" Nov 23 15:32:06 crc kubenswrapper[5050]: E1123 15:32:06.551322 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 15:32:19 crc kubenswrapper[5050]: I1123 15:32:19.549241 5050 scope.go:117] "RemoveContainer" containerID="ab40103857df8394b1d45d09f6678e11b0ec57d408929b19c12c0457e718ddf1" Nov 23 15:32:19 crc kubenswrapper[5050]: E1123 15:32:19.551640 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 15:32:30 crc kubenswrapper[5050]: I1123 15:32:30.549810 5050 scope.go:117] "RemoveContainer" containerID="ab40103857df8394b1d45d09f6678e11b0ec57d408929b19c12c0457e718ddf1" Nov 23 15:32:30 crc 
kubenswrapper[5050]: E1123 15:32:30.551980 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 15:32:41 crc kubenswrapper[5050]: I1123 15:32:41.549671 5050 scope.go:117] "RemoveContainer" containerID="ab40103857df8394b1d45d09f6678e11b0ec57d408929b19c12c0457e718ddf1" Nov 23 15:32:41 crc kubenswrapper[5050]: E1123 15:32:41.550910 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 15:32:53 crc kubenswrapper[5050]: I1123 15:32:53.548824 5050 scope.go:117] "RemoveContainer" containerID="ab40103857df8394b1d45d09f6678e11b0ec57d408929b19c12c0457e718ddf1" Nov 23 15:32:53 crc kubenswrapper[5050]: E1123 15:32:53.550084 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 15:33:08 crc kubenswrapper[5050]: I1123 15:33:08.549335 5050 scope.go:117] "RemoveContainer" containerID="ab40103857df8394b1d45d09f6678e11b0ec57d408929b19c12c0457e718ddf1" Nov 23 15:33:08 crc kubenswrapper[5050]: E1123 15:33:08.550847 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 15:33:20 crc kubenswrapper[5050]: I1123 15:33:20.549390 5050 scope.go:117] "RemoveContainer" containerID="ab40103857df8394b1d45d09f6678e11b0ec57d408929b19c12c0457e718ddf1" Nov 23 15:33:20 crc kubenswrapper[5050]: E1123 15:33:20.550704 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 15:33:35 crc kubenswrapper[5050]: I1123 15:33:35.562092 5050 scope.go:117] "RemoveContainer" containerID="ab40103857df8394b1d45d09f6678e11b0ec57d408929b19c12c0457e718ddf1" Nov 23 15:33:35 crc kubenswrapper[5050]: E1123 15:33:35.564207 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: 
\"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 15:33:50 crc kubenswrapper[5050]: I1123 15:33:50.549164 5050 scope.go:117] "RemoveContainer" containerID="ab40103857df8394b1d45d09f6678e11b0ec57d408929b19c12c0457e718ddf1" Nov 23 15:33:50 crc kubenswrapper[5050]: E1123 15:33:50.550599 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 15:34:01 crc kubenswrapper[5050]: I1123 15:34:01.549614 5050 scope.go:117] "RemoveContainer" containerID="ab40103857df8394b1d45d09f6678e11b0ec57d408929b19c12c0457e718ddf1" Nov 23 15:34:02 crc kubenswrapper[5050]: I1123 15:34:02.174734 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" event={"ID":"1d998909-9470-47ef-87e8-b34f0473682f","Type":"ContainerStarted","Data":"71c5dc3e8bad64d51a2b5da9ac65dbad97d43525614f201dc994bac805fae17d"} Nov 23 15:35:33 crc kubenswrapper[5050]: I1123 15:35:33.444891 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-692kx"] Nov 23 15:35:33 crc kubenswrapper[5050]: E1123 15:35:33.446055 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="76cd80a6-c49c-4663-8483-3c144a527de9" containerName="collect-profiles" Nov 23 15:35:33 crc kubenswrapper[5050]: I1123 15:35:33.446071 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="76cd80a6-c49c-4663-8483-3c144a527de9" containerName="collect-profiles" Nov 23 15:35:33 crc kubenswrapper[5050]: I1123 15:35:33.446226 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="76cd80a6-c49c-4663-8483-3c144a527de9" containerName="collect-profiles" Nov 23 15:35:33 crc kubenswrapper[5050]: I1123 15:35:33.447358 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-692kx" Nov 23 15:35:33 crc kubenswrapper[5050]: I1123 15:35:33.455548 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9wxwk\" (UniqueName: \"kubernetes.io/projected/0fda8b92-1f64-4ff5-bca8-3a37de99f9a5-kube-api-access-9wxwk\") pod \"community-operators-692kx\" (UID: \"0fda8b92-1f64-4ff5-bca8-3a37de99f9a5\") " pod="openshift-marketplace/community-operators-692kx" Nov 23 15:35:33 crc kubenswrapper[5050]: I1123 15:35:33.455673 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0fda8b92-1f64-4ff5-bca8-3a37de99f9a5-utilities\") pod \"community-operators-692kx\" (UID: \"0fda8b92-1f64-4ff5-bca8-3a37de99f9a5\") " pod="openshift-marketplace/community-operators-692kx" Nov 23 15:35:33 crc kubenswrapper[5050]: I1123 15:35:33.455780 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0fda8b92-1f64-4ff5-bca8-3a37de99f9a5-catalog-content\") pod \"community-operators-692kx\" (UID: \"0fda8b92-1f64-4ff5-bca8-3a37de99f9a5\") " pod="openshift-marketplace/community-operators-692kx" Nov 23 15:35:33 crc kubenswrapper[5050]: I1123 15:35:33.469050 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-692kx"] Nov 23 15:35:33 crc kubenswrapper[5050]: I1123 15:35:33.556962 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0fda8b92-1f64-4ff5-bca8-3a37de99f9a5-catalog-content\") pod \"community-operators-692kx\" (UID: \"0fda8b92-1f64-4ff5-bca8-3a37de99f9a5\") " pod="openshift-marketplace/community-operators-692kx" Nov 23 15:35:33 crc kubenswrapper[5050]: I1123 15:35:33.557038 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9wxwk\" (UniqueName: \"kubernetes.io/projected/0fda8b92-1f64-4ff5-bca8-3a37de99f9a5-kube-api-access-9wxwk\") pod \"community-operators-692kx\" (UID: \"0fda8b92-1f64-4ff5-bca8-3a37de99f9a5\") " pod="openshift-marketplace/community-operators-692kx" Nov 23 15:35:33 crc kubenswrapper[5050]: I1123 15:35:33.557113 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0fda8b92-1f64-4ff5-bca8-3a37de99f9a5-utilities\") pod \"community-operators-692kx\" (UID: \"0fda8b92-1f64-4ff5-bca8-3a37de99f9a5\") " pod="openshift-marketplace/community-operators-692kx" Nov 23 15:35:33 crc kubenswrapper[5050]: I1123 15:35:33.557695 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0fda8b92-1f64-4ff5-bca8-3a37de99f9a5-utilities\") pod \"community-operators-692kx\" (UID: \"0fda8b92-1f64-4ff5-bca8-3a37de99f9a5\") " pod="openshift-marketplace/community-operators-692kx" Nov 23 15:35:33 crc kubenswrapper[5050]: I1123 15:35:33.558370 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0fda8b92-1f64-4ff5-bca8-3a37de99f9a5-catalog-content\") pod \"community-operators-692kx\" (UID: \"0fda8b92-1f64-4ff5-bca8-3a37de99f9a5\") " pod="openshift-marketplace/community-operators-692kx" Nov 23 15:35:33 crc kubenswrapper[5050]: I1123 15:35:33.581436 5050 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-9wxwk\" (UniqueName: \"kubernetes.io/projected/0fda8b92-1f64-4ff5-bca8-3a37de99f9a5-kube-api-access-9wxwk\") pod \"community-operators-692kx\" (UID: \"0fda8b92-1f64-4ff5-bca8-3a37de99f9a5\") " pod="openshift-marketplace/community-operators-692kx" Nov 23 15:35:33 crc kubenswrapper[5050]: I1123 15:35:33.781023 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-692kx" Nov 23 15:35:34 crc kubenswrapper[5050]: I1123 15:35:34.277457 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-692kx"] Nov 23 15:35:35 crc kubenswrapper[5050]: I1123 15:35:35.068254 5050 generic.go:334] "Generic (PLEG): container finished" podID="0fda8b92-1f64-4ff5-bca8-3a37de99f9a5" containerID="4d7ce17fb0eec4050674c3a05e24ff0b736b92b82a93d0bc6d0f4815ea20a870" exitCode=0 Nov 23 15:35:35 crc kubenswrapper[5050]: I1123 15:35:35.068331 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-692kx" event={"ID":"0fda8b92-1f64-4ff5-bca8-3a37de99f9a5","Type":"ContainerDied","Data":"4d7ce17fb0eec4050674c3a05e24ff0b736b92b82a93d0bc6d0f4815ea20a870"} Nov 23 15:35:35 crc kubenswrapper[5050]: I1123 15:35:35.068367 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-692kx" event={"ID":"0fda8b92-1f64-4ff5-bca8-3a37de99f9a5","Type":"ContainerStarted","Data":"9028722dd64a357ed1ded3d4658109a67407fe83cd1b42223939d228ac8a60e8"} Nov 23 15:35:35 crc kubenswrapper[5050]: I1123 15:35:35.075072 5050 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 23 15:35:36 crc kubenswrapper[5050]: I1123 15:35:36.080096 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-692kx" event={"ID":"0fda8b92-1f64-4ff5-bca8-3a37de99f9a5","Type":"ContainerStarted","Data":"583887fe179f3c0eee1898fdbc28ded885feef92f9e54636ac1343cd9f30f7fa"} Nov 23 15:35:37 crc kubenswrapper[5050]: I1123 15:35:37.109187 5050 generic.go:334] "Generic (PLEG): container finished" podID="0fda8b92-1f64-4ff5-bca8-3a37de99f9a5" containerID="583887fe179f3c0eee1898fdbc28ded885feef92f9e54636ac1343cd9f30f7fa" exitCode=0 Nov 23 15:35:37 crc kubenswrapper[5050]: I1123 15:35:37.109300 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-692kx" event={"ID":"0fda8b92-1f64-4ff5-bca8-3a37de99f9a5","Type":"ContainerDied","Data":"583887fe179f3c0eee1898fdbc28ded885feef92f9e54636ac1343cd9f30f7fa"} Nov 23 15:35:38 crc kubenswrapper[5050]: I1123 15:35:38.126636 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-692kx" event={"ID":"0fda8b92-1f64-4ff5-bca8-3a37de99f9a5","Type":"ContainerStarted","Data":"e5a0f90a8708d1f38bf2d36dd846744ed5025a993d219c313c09872ec4737535"} Nov 23 15:35:38 crc kubenswrapper[5050]: I1123 15:35:38.152385 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-692kx" podStartSLOduration=2.5666412530000002 podStartE2EDuration="5.152361573s" podCreationTimestamp="2025-11-23 15:35:33 +0000 UTC" firstStartedPulling="2025-11-23 15:35:35.074819073 +0000 UTC m=+3230.241815548" lastFinishedPulling="2025-11-23 15:35:37.660539373 +0000 UTC m=+3232.827535868" observedRunningTime="2025-11-23 15:35:38.151415277 +0000 UTC m=+3233.318411822" watchObservedRunningTime="2025-11-23 
15:35:38.152361573 +0000 UTC m=+3233.319358058" Nov 23 15:35:43 crc kubenswrapper[5050]: I1123 15:35:43.781657 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-692kx" Nov 23 15:35:43 crc kubenswrapper[5050]: I1123 15:35:43.782798 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-692kx" Nov 23 15:35:43 crc kubenswrapper[5050]: I1123 15:35:43.867932 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-692kx" Nov 23 15:35:44 crc kubenswrapper[5050]: I1123 15:35:44.252318 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-692kx" Nov 23 15:35:44 crc kubenswrapper[5050]: I1123 15:35:44.346622 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-692kx"] Nov 23 15:35:46 crc kubenswrapper[5050]: I1123 15:35:46.205300 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-692kx" podUID="0fda8b92-1f64-4ff5-bca8-3a37de99f9a5" containerName="registry-server" containerID="cri-o://e5a0f90a8708d1f38bf2d36dd846744ed5025a993d219c313c09872ec4737535" gracePeriod=2 Nov 23 15:35:46 crc kubenswrapper[5050]: I1123 15:35:46.685851 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-692kx" Nov 23 15:35:46 crc kubenswrapper[5050]: I1123 15:35:46.794698 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9wxwk\" (UniqueName: \"kubernetes.io/projected/0fda8b92-1f64-4ff5-bca8-3a37de99f9a5-kube-api-access-9wxwk\") pod \"0fda8b92-1f64-4ff5-bca8-3a37de99f9a5\" (UID: \"0fda8b92-1f64-4ff5-bca8-3a37de99f9a5\") " Nov 23 15:35:46 crc kubenswrapper[5050]: I1123 15:35:46.795221 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0fda8b92-1f64-4ff5-bca8-3a37de99f9a5-catalog-content\") pod \"0fda8b92-1f64-4ff5-bca8-3a37de99f9a5\" (UID: \"0fda8b92-1f64-4ff5-bca8-3a37de99f9a5\") " Nov 23 15:35:46 crc kubenswrapper[5050]: I1123 15:35:46.795299 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0fda8b92-1f64-4ff5-bca8-3a37de99f9a5-utilities\") pod \"0fda8b92-1f64-4ff5-bca8-3a37de99f9a5\" (UID: \"0fda8b92-1f64-4ff5-bca8-3a37de99f9a5\") " Nov 23 15:35:46 crc kubenswrapper[5050]: I1123 15:35:46.796236 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0fda8b92-1f64-4ff5-bca8-3a37de99f9a5-utilities" (OuterVolumeSpecName: "utilities") pod "0fda8b92-1f64-4ff5-bca8-3a37de99f9a5" (UID: "0fda8b92-1f64-4ff5-bca8-3a37de99f9a5"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 15:35:46 crc kubenswrapper[5050]: I1123 15:35:46.805662 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0fda8b92-1f64-4ff5-bca8-3a37de99f9a5-kube-api-access-9wxwk" (OuterVolumeSpecName: "kube-api-access-9wxwk") pod "0fda8b92-1f64-4ff5-bca8-3a37de99f9a5" (UID: "0fda8b92-1f64-4ff5-bca8-3a37de99f9a5"). InnerVolumeSpecName "kube-api-access-9wxwk". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:35:46 crc kubenswrapper[5050]: I1123 15:35:46.888357 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0fda8b92-1f64-4ff5-bca8-3a37de99f9a5-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "0fda8b92-1f64-4ff5-bca8-3a37de99f9a5" (UID: "0fda8b92-1f64-4ff5-bca8-3a37de99f9a5"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 15:35:46 crc kubenswrapper[5050]: I1123 15:35:46.897649 5050 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0fda8b92-1f64-4ff5-bca8-3a37de99f9a5-utilities\") on node \"crc\" DevicePath \"\"" Nov 23 15:35:46 crc kubenswrapper[5050]: I1123 15:35:46.897685 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9wxwk\" (UniqueName: \"kubernetes.io/projected/0fda8b92-1f64-4ff5-bca8-3a37de99f9a5-kube-api-access-9wxwk\") on node \"crc\" DevicePath \"\"" Nov 23 15:35:46 crc kubenswrapper[5050]: I1123 15:35:46.897702 5050 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0fda8b92-1f64-4ff5-bca8-3a37de99f9a5-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 23 15:35:47 crc kubenswrapper[5050]: I1123 15:35:47.219035 5050 generic.go:334] "Generic (PLEG): container finished" podID="0fda8b92-1f64-4ff5-bca8-3a37de99f9a5" containerID="e5a0f90a8708d1f38bf2d36dd846744ed5025a993d219c313c09872ec4737535" exitCode=0 Nov 23 15:35:47 crc kubenswrapper[5050]: I1123 15:35:47.219091 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-692kx" event={"ID":"0fda8b92-1f64-4ff5-bca8-3a37de99f9a5","Type":"ContainerDied","Data":"e5a0f90a8708d1f38bf2d36dd846744ed5025a993d219c313c09872ec4737535"} Nov 23 15:35:47 crc kubenswrapper[5050]: I1123 15:35:47.219126 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-692kx" Nov 23 15:35:47 crc kubenswrapper[5050]: I1123 15:35:47.219150 5050 scope.go:117] "RemoveContainer" containerID="e5a0f90a8708d1f38bf2d36dd846744ed5025a993d219c313c09872ec4737535" Nov 23 15:35:47 crc kubenswrapper[5050]: I1123 15:35:47.219135 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-692kx" event={"ID":"0fda8b92-1f64-4ff5-bca8-3a37de99f9a5","Type":"ContainerDied","Data":"9028722dd64a357ed1ded3d4658109a67407fe83cd1b42223939d228ac8a60e8"} Nov 23 15:35:47 crc kubenswrapper[5050]: I1123 15:35:47.253884 5050 scope.go:117] "RemoveContainer" containerID="583887fe179f3c0eee1898fdbc28ded885feef92f9e54636ac1343cd9f30f7fa" Nov 23 15:35:47 crc kubenswrapper[5050]: I1123 15:35:47.262535 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-692kx"] Nov 23 15:35:47 crc kubenswrapper[5050]: I1123 15:35:47.270646 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-692kx"] Nov 23 15:35:47 crc kubenswrapper[5050]: I1123 15:35:47.281078 5050 scope.go:117] "RemoveContainer" containerID="4d7ce17fb0eec4050674c3a05e24ff0b736b92b82a93d0bc6d0f4815ea20a870" Nov 23 15:35:47 crc kubenswrapper[5050]: I1123 15:35:47.305058 5050 scope.go:117] "RemoveContainer" containerID="e5a0f90a8708d1f38bf2d36dd846744ed5025a993d219c313c09872ec4737535" Nov 23 15:35:47 crc kubenswrapper[5050]: E1123 15:35:47.305904 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e5a0f90a8708d1f38bf2d36dd846744ed5025a993d219c313c09872ec4737535\": container with ID starting with e5a0f90a8708d1f38bf2d36dd846744ed5025a993d219c313c09872ec4737535 not found: ID does not exist" containerID="e5a0f90a8708d1f38bf2d36dd846744ed5025a993d219c313c09872ec4737535" Nov 23 15:35:47 crc kubenswrapper[5050]: I1123 15:35:47.305943 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e5a0f90a8708d1f38bf2d36dd846744ed5025a993d219c313c09872ec4737535"} err="failed to get container status \"e5a0f90a8708d1f38bf2d36dd846744ed5025a993d219c313c09872ec4737535\": rpc error: code = NotFound desc = could not find container \"e5a0f90a8708d1f38bf2d36dd846744ed5025a993d219c313c09872ec4737535\": container with ID starting with e5a0f90a8708d1f38bf2d36dd846744ed5025a993d219c313c09872ec4737535 not found: ID does not exist" Nov 23 15:35:47 crc kubenswrapper[5050]: I1123 15:35:47.305973 5050 scope.go:117] "RemoveContainer" containerID="583887fe179f3c0eee1898fdbc28ded885feef92f9e54636ac1343cd9f30f7fa" Nov 23 15:35:47 crc kubenswrapper[5050]: E1123 15:35:47.306404 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"583887fe179f3c0eee1898fdbc28ded885feef92f9e54636ac1343cd9f30f7fa\": container with ID starting with 583887fe179f3c0eee1898fdbc28ded885feef92f9e54636ac1343cd9f30f7fa not found: ID does not exist" containerID="583887fe179f3c0eee1898fdbc28ded885feef92f9e54636ac1343cd9f30f7fa" Nov 23 15:35:47 crc kubenswrapper[5050]: I1123 15:35:47.306498 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"583887fe179f3c0eee1898fdbc28ded885feef92f9e54636ac1343cd9f30f7fa"} err="failed to get container status \"583887fe179f3c0eee1898fdbc28ded885feef92f9e54636ac1343cd9f30f7fa\": rpc error: code = NotFound desc = could not find 
container \"583887fe179f3c0eee1898fdbc28ded885feef92f9e54636ac1343cd9f30f7fa\": container with ID starting with 583887fe179f3c0eee1898fdbc28ded885feef92f9e54636ac1343cd9f30f7fa not found: ID does not exist" Nov 23 15:35:47 crc kubenswrapper[5050]: I1123 15:35:47.306544 5050 scope.go:117] "RemoveContainer" containerID="4d7ce17fb0eec4050674c3a05e24ff0b736b92b82a93d0bc6d0f4815ea20a870" Nov 23 15:35:47 crc kubenswrapper[5050]: E1123 15:35:47.307000 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4d7ce17fb0eec4050674c3a05e24ff0b736b92b82a93d0bc6d0f4815ea20a870\": container with ID starting with 4d7ce17fb0eec4050674c3a05e24ff0b736b92b82a93d0bc6d0f4815ea20a870 not found: ID does not exist" containerID="4d7ce17fb0eec4050674c3a05e24ff0b736b92b82a93d0bc6d0f4815ea20a870" Nov 23 15:35:47 crc kubenswrapper[5050]: I1123 15:35:47.307032 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4d7ce17fb0eec4050674c3a05e24ff0b736b92b82a93d0bc6d0f4815ea20a870"} err="failed to get container status \"4d7ce17fb0eec4050674c3a05e24ff0b736b92b82a93d0bc6d0f4815ea20a870\": rpc error: code = NotFound desc = could not find container \"4d7ce17fb0eec4050674c3a05e24ff0b736b92b82a93d0bc6d0f4815ea20a870\": container with ID starting with 4d7ce17fb0eec4050674c3a05e24ff0b736b92b82a93d0bc6d0f4815ea20a870 not found: ID does not exist" Nov 23 15:35:47 crc kubenswrapper[5050]: I1123 15:35:47.566897 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0fda8b92-1f64-4ff5-bca8-3a37de99f9a5" path="/var/lib/kubelet/pods/0fda8b92-1f64-4ff5-bca8-3a37de99f9a5/volumes" Nov 23 15:36:29 crc kubenswrapper[5050]: I1123 15:36:29.224291 5050 patch_prober.go:28] interesting pod/machine-config-daemon-hlrlq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 23 15:36:29 crc kubenswrapper[5050]: I1123 15:36:29.225349 5050 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 23 15:36:59 crc kubenswrapper[5050]: I1123 15:36:59.224886 5050 patch_prober.go:28] interesting pod/machine-config-daemon-hlrlq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 23 15:36:59 crc kubenswrapper[5050]: I1123 15:36:59.226032 5050 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 23 15:37:29 crc kubenswrapper[5050]: I1123 15:37:29.224364 5050 patch_prober.go:28] interesting pod/machine-config-daemon-hlrlq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 23 
15:37:29 crc kubenswrapper[5050]: I1123 15:37:29.225532 5050 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 23 15:37:29 crc kubenswrapper[5050]: I1123 15:37:29.225613 5050 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" Nov 23 15:37:29 crc kubenswrapper[5050]: I1123 15:37:29.226831 5050 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"71c5dc3e8bad64d51a2b5da9ac65dbad97d43525614f201dc994bac805fae17d"} pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 23 15:37:29 crc kubenswrapper[5050]: I1123 15:37:29.226949 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" containerID="cri-o://71c5dc3e8bad64d51a2b5da9ac65dbad97d43525614f201dc994bac805fae17d" gracePeriod=600 Nov 23 15:37:30 crc kubenswrapper[5050]: I1123 15:37:30.311904 5050 generic.go:334] "Generic (PLEG): container finished" podID="1d998909-9470-47ef-87e8-b34f0473682f" containerID="71c5dc3e8bad64d51a2b5da9ac65dbad97d43525614f201dc994bac805fae17d" exitCode=0 Nov 23 15:37:30 crc kubenswrapper[5050]: I1123 15:37:30.311986 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" event={"ID":"1d998909-9470-47ef-87e8-b34f0473682f","Type":"ContainerDied","Data":"71c5dc3e8bad64d51a2b5da9ac65dbad97d43525614f201dc994bac805fae17d"} Nov 23 15:37:30 crc kubenswrapper[5050]: I1123 15:37:30.312988 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" event={"ID":"1d998909-9470-47ef-87e8-b34f0473682f","Type":"ContainerStarted","Data":"e80820057d7c13424a9a485b7e6bd85579aafd40a2db7241b0ec47e8ef9054e7"} Nov 23 15:37:30 crc kubenswrapper[5050]: I1123 15:37:30.313027 5050 scope.go:117] "RemoveContainer" containerID="ab40103857df8394b1d45d09f6678e11b0ec57d408929b19c12c0457e718ddf1" Nov 23 15:37:42 crc kubenswrapper[5050]: I1123 15:37:42.753147 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-pthjc"] Nov 23 15:37:42 crc kubenswrapper[5050]: E1123 15:37:42.756567 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0fda8b92-1f64-4ff5-bca8-3a37de99f9a5" containerName="registry-server" Nov 23 15:37:42 crc kubenswrapper[5050]: I1123 15:37:42.756597 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="0fda8b92-1f64-4ff5-bca8-3a37de99f9a5" containerName="registry-server" Nov 23 15:37:42 crc kubenswrapper[5050]: E1123 15:37:42.756626 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0fda8b92-1f64-4ff5-bca8-3a37de99f9a5" containerName="extract-utilities" Nov 23 15:37:42 crc kubenswrapper[5050]: I1123 15:37:42.756639 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="0fda8b92-1f64-4ff5-bca8-3a37de99f9a5" containerName="extract-utilities" Nov 23 15:37:42 crc kubenswrapper[5050]: E1123 
15:37:42.756664 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0fda8b92-1f64-4ff5-bca8-3a37de99f9a5" containerName="extract-content" Nov 23 15:37:42 crc kubenswrapper[5050]: I1123 15:37:42.756677 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="0fda8b92-1f64-4ff5-bca8-3a37de99f9a5" containerName="extract-content" Nov 23 15:37:42 crc kubenswrapper[5050]: I1123 15:37:42.756961 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="0fda8b92-1f64-4ff5-bca8-3a37de99f9a5" containerName="registry-server" Nov 23 15:37:42 crc kubenswrapper[5050]: I1123 15:37:42.758933 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-pthjc" Nov 23 15:37:42 crc kubenswrapper[5050]: I1123 15:37:42.768598 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-pthjc"] Nov 23 15:37:42 crc kubenswrapper[5050]: I1123 15:37:42.913725 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4ab2676e-c9f9-4e1e-aa90-59535d490e5d-catalog-content\") pod \"redhat-marketplace-pthjc\" (UID: \"4ab2676e-c9f9-4e1e-aa90-59535d490e5d\") " pod="openshift-marketplace/redhat-marketplace-pthjc" Nov 23 15:37:42 crc kubenswrapper[5050]: I1123 15:37:42.913812 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ch99k\" (UniqueName: \"kubernetes.io/projected/4ab2676e-c9f9-4e1e-aa90-59535d490e5d-kube-api-access-ch99k\") pod \"redhat-marketplace-pthjc\" (UID: \"4ab2676e-c9f9-4e1e-aa90-59535d490e5d\") " pod="openshift-marketplace/redhat-marketplace-pthjc" Nov 23 15:37:42 crc kubenswrapper[5050]: I1123 15:37:42.914082 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4ab2676e-c9f9-4e1e-aa90-59535d490e5d-utilities\") pod \"redhat-marketplace-pthjc\" (UID: \"4ab2676e-c9f9-4e1e-aa90-59535d490e5d\") " pod="openshift-marketplace/redhat-marketplace-pthjc" Nov 23 15:37:43 crc kubenswrapper[5050]: I1123 15:37:43.016372 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ch99k\" (UniqueName: \"kubernetes.io/projected/4ab2676e-c9f9-4e1e-aa90-59535d490e5d-kube-api-access-ch99k\") pod \"redhat-marketplace-pthjc\" (UID: \"4ab2676e-c9f9-4e1e-aa90-59535d490e5d\") " pod="openshift-marketplace/redhat-marketplace-pthjc" Nov 23 15:37:43 crc kubenswrapper[5050]: I1123 15:37:43.016481 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4ab2676e-c9f9-4e1e-aa90-59535d490e5d-utilities\") pod \"redhat-marketplace-pthjc\" (UID: \"4ab2676e-c9f9-4e1e-aa90-59535d490e5d\") " pod="openshift-marketplace/redhat-marketplace-pthjc" Nov 23 15:37:43 crc kubenswrapper[5050]: I1123 15:37:43.016606 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4ab2676e-c9f9-4e1e-aa90-59535d490e5d-catalog-content\") pod \"redhat-marketplace-pthjc\" (UID: \"4ab2676e-c9f9-4e1e-aa90-59535d490e5d\") " pod="openshift-marketplace/redhat-marketplace-pthjc" Nov 23 15:37:43 crc kubenswrapper[5050]: I1123 15:37:43.017243 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/4ab2676e-c9f9-4e1e-aa90-59535d490e5d-catalog-content\") pod \"redhat-marketplace-pthjc\" (UID: \"4ab2676e-c9f9-4e1e-aa90-59535d490e5d\") " pod="openshift-marketplace/redhat-marketplace-pthjc" Nov 23 15:37:43 crc kubenswrapper[5050]: I1123 15:37:43.017505 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4ab2676e-c9f9-4e1e-aa90-59535d490e5d-utilities\") pod \"redhat-marketplace-pthjc\" (UID: \"4ab2676e-c9f9-4e1e-aa90-59535d490e5d\") " pod="openshift-marketplace/redhat-marketplace-pthjc" Nov 23 15:37:43 crc kubenswrapper[5050]: I1123 15:37:43.048043 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ch99k\" (UniqueName: \"kubernetes.io/projected/4ab2676e-c9f9-4e1e-aa90-59535d490e5d-kube-api-access-ch99k\") pod \"redhat-marketplace-pthjc\" (UID: \"4ab2676e-c9f9-4e1e-aa90-59535d490e5d\") " pod="openshift-marketplace/redhat-marketplace-pthjc" Nov 23 15:37:43 crc kubenswrapper[5050]: I1123 15:37:43.094013 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-pthjc" Nov 23 15:37:43 crc kubenswrapper[5050]: I1123 15:37:43.366027 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-pthjc"] Nov 23 15:37:43 crc kubenswrapper[5050]: I1123 15:37:43.475400 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pthjc" event={"ID":"4ab2676e-c9f9-4e1e-aa90-59535d490e5d","Type":"ContainerStarted","Data":"ddca526d013efd72a1caa92d6a35822efae124a4c55878f812f7a80a4e6d14f1"} Nov 23 15:37:44 crc kubenswrapper[5050]: I1123 15:37:44.485562 5050 generic.go:334] "Generic (PLEG): container finished" podID="4ab2676e-c9f9-4e1e-aa90-59535d490e5d" containerID="83ed5e83e2b2495ed86d5d7cd824c7ec39117900f8c0ce6fe227091de6087823" exitCode=0 Nov 23 15:37:44 crc kubenswrapper[5050]: I1123 15:37:44.485680 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pthjc" event={"ID":"4ab2676e-c9f9-4e1e-aa90-59535d490e5d","Type":"ContainerDied","Data":"83ed5e83e2b2495ed86d5d7cd824c7ec39117900f8c0ce6fe227091de6087823"} Nov 23 15:37:45 crc kubenswrapper[5050]: I1123 15:37:45.497672 5050 generic.go:334] "Generic (PLEG): container finished" podID="4ab2676e-c9f9-4e1e-aa90-59535d490e5d" containerID="d4c3dc31fc3c1a3c475f9c60c7526adf0783cbdd2a0bd03c683868497c60e4d5" exitCode=0 Nov 23 15:37:45 crc kubenswrapper[5050]: I1123 15:37:45.497779 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pthjc" event={"ID":"4ab2676e-c9f9-4e1e-aa90-59535d490e5d","Type":"ContainerDied","Data":"d4c3dc31fc3c1a3c475f9c60c7526adf0783cbdd2a0bd03c683868497c60e4d5"} Nov 23 15:37:46 crc kubenswrapper[5050]: I1123 15:37:46.511855 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pthjc" event={"ID":"4ab2676e-c9f9-4e1e-aa90-59535d490e5d","Type":"ContainerStarted","Data":"af7b5bcaa970c6c27a16d95bf5d79eedfe593c1402f0904744a31b1e63b9ad04"} Nov 23 15:37:46 crc kubenswrapper[5050]: I1123 15:37:46.531093 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-pthjc" podStartSLOduration=3.117630333 podStartE2EDuration="4.531068653s" podCreationTimestamp="2025-11-23 15:37:42 +0000 UTC" firstStartedPulling="2025-11-23 15:37:44.490578483 +0000 UTC m=+3359.657574998" 
lastFinishedPulling="2025-11-23 15:37:45.904016833 +0000 UTC m=+3361.071013318" observedRunningTime="2025-11-23 15:37:46.529932551 +0000 UTC m=+3361.696929046" watchObservedRunningTime="2025-11-23 15:37:46.531068653 +0000 UTC m=+3361.698065138" Nov 23 15:37:53 crc kubenswrapper[5050]: I1123 15:37:53.094235 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-pthjc" Nov 23 15:37:53 crc kubenswrapper[5050]: I1123 15:37:53.095263 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-pthjc" Nov 23 15:37:53 crc kubenswrapper[5050]: I1123 15:37:53.180907 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-pthjc" Nov 23 15:37:53 crc kubenswrapper[5050]: I1123 15:37:53.688019 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-pthjc" Nov 23 15:37:53 crc kubenswrapper[5050]: I1123 15:37:53.757944 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-pthjc"] Nov 23 15:37:55 crc kubenswrapper[5050]: I1123 15:37:55.612813 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-pthjc" podUID="4ab2676e-c9f9-4e1e-aa90-59535d490e5d" containerName="registry-server" containerID="cri-o://af7b5bcaa970c6c27a16d95bf5d79eedfe593c1402f0904744a31b1e63b9ad04" gracePeriod=2 Nov 23 15:37:56 crc kubenswrapper[5050]: I1123 15:37:56.059677 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-pthjc" Nov 23 15:37:56 crc kubenswrapper[5050]: I1123 15:37:56.174141 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4ab2676e-c9f9-4e1e-aa90-59535d490e5d-utilities\") pod \"4ab2676e-c9f9-4e1e-aa90-59535d490e5d\" (UID: \"4ab2676e-c9f9-4e1e-aa90-59535d490e5d\") " Nov 23 15:37:56 crc kubenswrapper[5050]: I1123 15:37:56.174241 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4ab2676e-c9f9-4e1e-aa90-59535d490e5d-catalog-content\") pod \"4ab2676e-c9f9-4e1e-aa90-59535d490e5d\" (UID: \"4ab2676e-c9f9-4e1e-aa90-59535d490e5d\") " Nov 23 15:37:56 crc kubenswrapper[5050]: I1123 15:37:56.174282 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ch99k\" (UniqueName: \"kubernetes.io/projected/4ab2676e-c9f9-4e1e-aa90-59535d490e5d-kube-api-access-ch99k\") pod \"4ab2676e-c9f9-4e1e-aa90-59535d490e5d\" (UID: \"4ab2676e-c9f9-4e1e-aa90-59535d490e5d\") " Nov 23 15:37:56 crc kubenswrapper[5050]: I1123 15:37:56.176506 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4ab2676e-c9f9-4e1e-aa90-59535d490e5d-utilities" (OuterVolumeSpecName: "utilities") pod "4ab2676e-c9f9-4e1e-aa90-59535d490e5d" (UID: "4ab2676e-c9f9-4e1e-aa90-59535d490e5d"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 15:37:56 crc kubenswrapper[5050]: I1123 15:37:56.184739 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4ab2676e-c9f9-4e1e-aa90-59535d490e5d-kube-api-access-ch99k" (OuterVolumeSpecName: "kube-api-access-ch99k") pod "4ab2676e-c9f9-4e1e-aa90-59535d490e5d" (UID: "4ab2676e-c9f9-4e1e-aa90-59535d490e5d"). InnerVolumeSpecName "kube-api-access-ch99k". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:37:56 crc kubenswrapper[5050]: I1123 15:37:56.197329 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4ab2676e-c9f9-4e1e-aa90-59535d490e5d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4ab2676e-c9f9-4e1e-aa90-59535d490e5d" (UID: "4ab2676e-c9f9-4e1e-aa90-59535d490e5d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 15:37:56 crc kubenswrapper[5050]: I1123 15:37:56.275734 5050 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4ab2676e-c9f9-4e1e-aa90-59535d490e5d-utilities\") on node \"crc\" DevicePath \"\"" Nov 23 15:37:56 crc kubenswrapper[5050]: I1123 15:37:56.275770 5050 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4ab2676e-c9f9-4e1e-aa90-59535d490e5d-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 23 15:37:56 crc kubenswrapper[5050]: I1123 15:37:56.275783 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ch99k\" (UniqueName: \"kubernetes.io/projected/4ab2676e-c9f9-4e1e-aa90-59535d490e5d-kube-api-access-ch99k\") on node \"crc\" DevicePath \"\"" Nov 23 15:37:56 crc kubenswrapper[5050]: I1123 15:37:56.628473 5050 generic.go:334] "Generic (PLEG): container finished" podID="4ab2676e-c9f9-4e1e-aa90-59535d490e5d" containerID="af7b5bcaa970c6c27a16d95bf5d79eedfe593c1402f0904744a31b1e63b9ad04" exitCode=0 Nov 23 15:37:56 crc kubenswrapper[5050]: I1123 15:37:56.628581 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-pthjc" Nov 23 15:37:56 crc kubenswrapper[5050]: I1123 15:37:56.628555 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pthjc" event={"ID":"4ab2676e-c9f9-4e1e-aa90-59535d490e5d","Type":"ContainerDied","Data":"af7b5bcaa970c6c27a16d95bf5d79eedfe593c1402f0904744a31b1e63b9ad04"} Nov 23 15:37:56 crc kubenswrapper[5050]: I1123 15:37:56.628688 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pthjc" event={"ID":"4ab2676e-c9f9-4e1e-aa90-59535d490e5d","Type":"ContainerDied","Data":"ddca526d013efd72a1caa92d6a35822efae124a4c55878f812f7a80a4e6d14f1"} Nov 23 15:37:56 crc kubenswrapper[5050]: I1123 15:37:56.628727 5050 scope.go:117] "RemoveContainer" containerID="af7b5bcaa970c6c27a16d95bf5d79eedfe593c1402f0904744a31b1e63b9ad04" Nov 23 15:37:56 crc kubenswrapper[5050]: I1123 15:37:56.666902 5050 scope.go:117] "RemoveContainer" containerID="d4c3dc31fc3c1a3c475f9c60c7526adf0783cbdd2a0bd03c683868497c60e4d5" Nov 23 15:37:56 crc kubenswrapper[5050]: I1123 15:37:56.688356 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-pthjc"] Nov 23 15:37:56 crc kubenswrapper[5050]: I1123 15:37:56.700210 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-pthjc"] Nov 23 15:37:56 crc kubenswrapper[5050]: I1123 15:37:56.706478 5050 scope.go:117] "RemoveContainer" containerID="83ed5e83e2b2495ed86d5d7cd824c7ec39117900f8c0ce6fe227091de6087823" Nov 23 15:37:56 crc kubenswrapper[5050]: I1123 15:37:56.746077 5050 scope.go:117] "RemoveContainer" containerID="af7b5bcaa970c6c27a16d95bf5d79eedfe593c1402f0904744a31b1e63b9ad04" Nov 23 15:37:56 crc kubenswrapper[5050]: E1123 15:37:56.748159 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"af7b5bcaa970c6c27a16d95bf5d79eedfe593c1402f0904744a31b1e63b9ad04\": container with ID starting with af7b5bcaa970c6c27a16d95bf5d79eedfe593c1402f0904744a31b1e63b9ad04 not found: ID does not exist" containerID="af7b5bcaa970c6c27a16d95bf5d79eedfe593c1402f0904744a31b1e63b9ad04" Nov 23 15:37:56 crc kubenswrapper[5050]: I1123 15:37:56.748208 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"af7b5bcaa970c6c27a16d95bf5d79eedfe593c1402f0904744a31b1e63b9ad04"} err="failed to get container status \"af7b5bcaa970c6c27a16d95bf5d79eedfe593c1402f0904744a31b1e63b9ad04\": rpc error: code = NotFound desc = could not find container \"af7b5bcaa970c6c27a16d95bf5d79eedfe593c1402f0904744a31b1e63b9ad04\": container with ID starting with af7b5bcaa970c6c27a16d95bf5d79eedfe593c1402f0904744a31b1e63b9ad04 not found: ID does not exist" Nov 23 15:37:56 crc kubenswrapper[5050]: I1123 15:37:56.748242 5050 scope.go:117] "RemoveContainer" containerID="d4c3dc31fc3c1a3c475f9c60c7526adf0783cbdd2a0bd03c683868497c60e4d5" Nov 23 15:37:56 crc kubenswrapper[5050]: E1123 15:37:56.748933 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d4c3dc31fc3c1a3c475f9c60c7526adf0783cbdd2a0bd03c683868497c60e4d5\": container with ID starting with d4c3dc31fc3c1a3c475f9c60c7526adf0783cbdd2a0bd03c683868497c60e4d5 not found: ID does not exist" containerID="d4c3dc31fc3c1a3c475f9c60c7526adf0783cbdd2a0bd03c683868497c60e4d5" Nov 23 15:37:56 crc kubenswrapper[5050]: I1123 15:37:56.748968 5050 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d4c3dc31fc3c1a3c475f9c60c7526adf0783cbdd2a0bd03c683868497c60e4d5"} err="failed to get container status \"d4c3dc31fc3c1a3c475f9c60c7526adf0783cbdd2a0bd03c683868497c60e4d5\": rpc error: code = NotFound desc = could not find container \"d4c3dc31fc3c1a3c475f9c60c7526adf0783cbdd2a0bd03c683868497c60e4d5\": container with ID starting with d4c3dc31fc3c1a3c475f9c60c7526adf0783cbdd2a0bd03c683868497c60e4d5 not found: ID does not exist" Nov 23 15:37:56 crc kubenswrapper[5050]: I1123 15:37:56.748995 5050 scope.go:117] "RemoveContainer" containerID="83ed5e83e2b2495ed86d5d7cd824c7ec39117900f8c0ce6fe227091de6087823" Nov 23 15:37:56 crc kubenswrapper[5050]: E1123 15:37:56.749605 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"83ed5e83e2b2495ed86d5d7cd824c7ec39117900f8c0ce6fe227091de6087823\": container with ID starting with 83ed5e83e2b2495ed86d5d7cd824c7ec39117900f8c0ce6fe227091de6087823 not found: ID does not exist" containerID="83ed5e83e2b2495ed86d5d7cd824c7ec39117900f8c0ce6fe227091de6087823" Nov 23 15:37:56 crc kubenswrapper[5050]: I1123 15:37:56.749644 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"83ed5e83e2b2495ed86d5d7cd824c7ec39117900f8c0ce6fe227091de6087823"} err="failed to get container status \"83ed5e83e2b2495ed86d5d7cd824c7ec39117900f8c0ce6fe227091de6087823\": rpc error: code = NotFound desc = could not find container \"83ed5e83e2b2495ed86d5d7cd824c7ec39117900f8c0ce6fe227091de6087823\": container with ID starting with 83ed5e83e2b2495ed86d5d7cd824c7ec39117900f8c0ce6fe227091de6087823 not found: ID does not exist" Nov 23 15:37:57 crc kubenswrapper[5050]: I1123 15:37:57.560422 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4ab2676e-c9f9-4e1e-aa90-59535d490e5d" path="/var/lib/kubelet/pods/4ab2676e-c9f9-4e1e-aa90-59535d490e5d/volumes" Nov 23 15:39:29 crc kubenswrapper[5050]: I1123 15:39:29.224759 5050 patch_prober.go:28] interesting pod/machine-config-daemon-hlrlq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 23 15:39:29 crc kubenswrapper[5050]: I1123 15:39:29.225744 5050 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 23 15:39:59 crc kubenswrapper[5050]: I1123 15:39:59.225044 5050 patch_prober.go:28] interesting pod/machine-config-daemon-hlrlq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 23 15:39:59 crc kubenswrapper[5050]: I1123 15:39:59.226183 5050 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 23 15:40:29 crc kubenswrapper[5050]: I1123 
15:40:29.224876 5050 patch_prober.go:28] interesting pod/machine-config-daemon-hlrlq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 23 15:40:29 crc kubenswrapper[5050]: I1123 15:40:29.226590 5050 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 23 15:40:29 crc kubenswrapper[5050]: I1123 15:40:29.226684 5050 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" Nov 23 15:40:29 crc kubenswrapper[5050]: I1123 15:40:29.227798 5050 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"e80820057d7c13424a9a485b7e6bd85579aafd40a2db7241b0ec47e8ef9054e7"} pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 23 15:40:29 crc kubenswrapper[5050]: I1123 15:40:29.227889 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" containerID="cri-o://e80820057d7c13424a9a485b7e6bd85579aafd40a2db7241b0ec47e8ef9054e7" gracePeriod=600 Nov 23 15:40:29 crc kubenswrapper[5050]: E1123 15:40:29.361545 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 15:40:29 crc kubenswrapper[5050]: I1123 15:40:29.680333 5050 generic.go:334] "Generic (PLEG): container finished" podID="1d998909-9470-47ef-87e8-b34f0473682f" containerID="e80820057d7c13424a9a485b7e6bd85579aafd40a2db7241b0ec47e8ef9054e7" exitCode=0 Nov 23 15:40:29 crc kubenswrapper[5050]: I1123 15:40:29.680475 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" event={"ID":"1d998909-9470-47ef-87e8-b34f0473682f","Type":"ContainerDied","Data":"e80820057d7c13424a9a485b7e6bd85579aafd40a2db7241b0ec47e8ef9054e7"} Nov 23 15:40:29 crc kubenswrapper[5050]: I1123 15:40:29.680589 5050 scope.go:117] "RemoveContainer" containerID="71c5dc3e8bad64d51a2b5da9ac65dbad97d43525614f201dc994bac805fae17d" Nov 23 15:40:29 crc kubenswrapper[5050]: I1123 15:40:29.681700 5050 scope.go:117] "RemoveContainer" containerID="e80820057d7c13424a9a485b7e6bd85579aafd40a2db7241b0ec47e8ef9054e7" Nov 23 15:40:29 crc kubenswrapper[5050]: E1123 15:40:29.682118 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 15:40:43 crc kubenswrapper[5050]: I1123 15:40:43.549933 5050 scope.go:117] "RemoveContainer" containerID="e80820057d7c13424a9a485b7e6bd85579aafd40a2db7241b0ec47e8ef9054e7" Nov 23 15:40:43 crc kubenswrapper[5050]: E1123 15:40:43.551169 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 15:40:58 crc kubenswrapper[5050]: I1123 15:40:58.549059 5050 scope.go:117] "RemoveContainer" containerID="e80820057d7c13424a9a485b7e6bd85579aafd40a2db7241b0ec47e8ef9054e7" Nov 23 15:40:58 crc kubenswrapper[5050]: E1123 15:40:58.550296 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 15:41:09 crc kubenswrapper[5050]: I1123 15:41:09.549154 5050 scope.go:117] "RemoveContainer" containerID="e80820057d7c13424a9a485b7e6bd85579aafd40a2db7241b0ec47e8ef9054e7" Nov 23 15:41:09 crc kubenswrapper[5050]: E1123 15:41:09.550426 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 15:41:22 crc kubenswrapper[5050]: I1123 15:41:22.549068 5050 scope.go:117] "RemoveContainer" containerID="e80820057d7c13424a9a485b7e6bd85579aafd40a2db7241b0ec47e8ef9054e7" Nov 23 15:41:22 crc kubenswrapper[5050]: E1123 15:41:22.550155 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 15:41:37 crc kubenswrapper[5050]: I1123 15:41:37.549900 5050 scope.go:117] "RemoveContainer" containerID="e80820057d7c13424a9a485b7e6bd85579aafd40a2db7241b0ec47e8ef9054e7" Nov 23 15:41:37 crc kubenswrapper[5050]: E1123 15:41:37.551541 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 15:41:52 crc kubenswrapper[5050]: I1123 15:41:52.549011 5050 
scope.go:117] "RemoveContainer" containerID="e80820057d7c13424a9a485b7e6bd85579aafd40a2db7241b0ec47e8ef9054e7" Nov 23 15:41:52 crc kubenswrapper[5050]: E1123 15:41:52.550075 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 15:42:04 crc kubenswrapper[5050]: I1123 15:42:04.549097 5050 scope.go:117] "RemoveContainer" containerID="e80820057d7c13424a9a485b7e6bd85579aafd40a2db7241b0ec47e8ef9054e7" Nov 23 15:42:04 crc kubenswrapper[5050]: E1123 15:42:04.550014 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 15:42:15 crc kubenswrapper[5050]: I1123 15:42:15.555559 5050 scope.go:117] "RemoveContainer" containerID="e80820057d7c13424a9a485b7e6bd85579aafd40a2db7241b0ec47e8ef9054e7" Nov 23 15:42:15 crc kubenswrapper[5050]: E1123 15:42:15.558382 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 15:42:30 crc kubenswrapper[5050]: I1123 15:42:30.549261 5050 scope.go:117] "RemoveContainer" containerID="e80820057d7c13424a9a485b7e6bd85579aafd40a2db7241b0ec47e8ef9054e7" Nov 23 15:42:30 crc kubenswrapper[5050]: E1123 15:42:30.550316 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 15:42:42 crc kubenswrapper[5050]: I1123 15:42:42.549166 5050 scope.go:117] "RemoveContainer" containerID="e80820057d7c13424a9a485b7e6bd85579aafd40a2db7241b0ec47e8ef9054e7" Nov 23 15:42:42 crc kubenswrapper[5050]: E1123 15:42:42.550405 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 15:42:57 crc kubenswrapper[5050]: I1123 15:42:57.548977 5050 scope.go:117] "RemoveContainer" containerID="e80820057d7c13424a9a485b7e6bd85579aafd40a2db7241b0ec47e8ef9054e7" Nov 23 15:42:57 crc kubenswrapper[5050]: E1123 15:42:57.550391 5050 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 15:43:10 crc kubenswrapper[5050]: I1123 15:43:10.550777 5050 scope.go:117] "RemoveContainer" containerID="e80820057d7c13424a9a485b7e6bd85579aafd40a2db7241b0ec47e8ef9054e7" Nov 23 15:43:10 crc kubenswrapper[5050]: E1123 15:43:10.552346 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 15:43:25 crc kubenswrapper[5050]: I1123 15:43:25.557348 5050 scope.go:117] "RemoveContainer" containerID="e80820057d7c13424a9a485b7e6bd85579aafd40a2db7241b0ec47e8ef9054e7" Nov 23 15:43:25 crc kubenswrapper[5050]: E1123 15:43:25.558865 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 15:43:38 crc kubenswrapper[5050]: I1123 15:43:38.549659 5050 scope.go:117] "RemoveContainer" containerID="e80820057d7c13424a9a485b7e6bd85579aafd40a2db7241b0ec47e8ef9054e7" Nov 23 15:43:38 crc kubenswrapper[5050]: E1123 15:43:38.550675 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 15:43:50 crc kubenswrapper[5050]: I1123 15:43:50.548589 5050 scope.go:117] "RemoveContainer" containerID="e80820057d7c13424a9a485b7e6bd85579aafd40a2db7241b0ec47e8ef9054e7" Nov 23 15:43:50 crc kubenswrapper[5050]: E1123 15:43:50.549470 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 15:44:01 crc kubenswrapper[5050]: I1123 15:44:01.549998 5050 scope.go:117] "RemoveContainer" containerID="e80820057d7c13424a9a485b7e6bd85579aafd40a2db7241b0ec47e8ef9054e7" Nov 23 15:44:01 crc kubenswrapper[5050]: E1123 15:44:01.552203 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 15:44:12 crc kubenswrapper[5050]: I1123 15:44:12.549141 5050 scope.go:117] "RemoveContainer" containerID="e80820057d7c13424a9a485b7e6bd85579aafd40a2db7241b0ec47e8ef9054e7" Nov 23 15:44:12 crc kubenswrapper[5050]: E1123 15:44:12.550159 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 15:44:24 crc kubenswrapper[5050]: I1123 15:44:24.551206 5050 scope.go:117] "RemoveContainer" containerID="e80820057d7c13424a9a485b7e6bd85579aafd40a2db7241b0ec47e8ef9054e7" Nov 23 15:44:24 crc kubenswrapper[5050]: E1123 15:44:24.552804 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 15:44:39 crc kubenswrapper[5050]: I1123 15:44:39.549732 5050 scope.go:117] "RemoveContainer" containerID="e80820057d7c13424a9a485b7e6bd85579aafd40a2db7241b0ec47e8ef9054e7" Nov 23 15:44:39 crc kubenswrapper[5050]: E1123 15:44:39.550828 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 15:44:52 crc kubenswrapper[5050]: I1123 15:44:52.549762 5050 scope.go:117] "RemoveContainer" containerID="e80820057d7c13424a9a485b7e6bd85579aafd40a2db7241b0ec47e8ef9054e7" Nov 23 15:44:52 crc kubenswrapper[5050]: E1123 15:44:52.550974 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 15:45:00 crc kubenswrapper[5050]: I1123 15:45:00.199800 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29398545-p8x8h"] Nov 23 15:45:00 crc kubenswrapper[5050]: E1123 15:45:00.201047 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4ab2676e-c9f9-4e1e-aa90-59535d490e5d" containerName="registry-server" Nov 23 15:45:00 crc kubenswrapper[5050]: I1123 15:45:00.201065 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="4ab2676e-c9f9-4e1e-aa90-59535d490e5d" containerName="registry-server" Nov 23 15:45:00 crc 
kubenswrapper[5050]: E1123 15:45:00.201081 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4ab2676e-c9f9-4e1e-aa90-59535d490e5d" containerName="extract-content" Nov 23 15:45:00 crc kubenswrapper[5050]: I1123 15:45:00.201121 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="4ab2676e-c9f9-4e1e-aa90-59535d490e5d" containerName="extract-content" Nov 23 15:45:00 crc kubenswrapper[5050]: E1123 15:45:00.201135 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4ab2676e-c9f9-4e1e-aa90-59535d490e5d" containerName="extract-utilities" Nov 23 15:45:00 crc kubenswrapper[5050]: I1123 15:45:00.201143 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="4ab2676e-c9f9-4e1e-aa90-59535d490e5d" containerName="extract-utilities" Nov 23 15:45:00 crc kubenswrapper[5050]: I1123 15:45:00.201462 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="4ab2676e-c9f9-4e1e-aa90-59535d490e5d" containerName="registry-server" Nov 23 15:45:00 crc kubenswrapper[5050]: I1123 15:45:00.204493 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29398545-p8x8h" Nov 23 15:45:00 crc kubenswrapper[5050]: I1123 15:45:00.210009 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 23 15:45:00 crc kubenswrapper[5050]: I1123 15:45:00.211727 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 23 15:45:00 crc kubenswrapper[5050]: I1123 15:45:00.217345 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29398545-p8x8h"] Nov 23 15:45:00 crc kubenswrapper[5050]: I1123 15:45:00.367646 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tqgft\" (UniqueName: \"kubernetes.io/projected/0d9c1450-0c73-427d-be1c-18f7ff013f00-kube-api-access-tqgft\") pod \"collect-profiles-29398545-p8x8h\" (UID: \"0d9c1450-0c73-427d-be1c-18f7ff013f00\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29398545-p8x8h" Nov 23 15:45:00 crc kubenswrapper[5050]: I1123 15:45:00.368614 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0d9c1450-0c73-427d-be1c-18f7ff013f00-config-volume\") pod \"collect-profiles-29398545-p8x8h\" (UID: \"0d9c1450-0c73-427d-be1c-18f7ff013f00\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29398545-p8x8h" Nov 23 15:45:00 crc kubenswrapper[5050]: I1123 15:45:00.368855 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0d9c1450-0c73-427d-be1c-18f7ff013f00-secret-volume\") pod \"collect-profiles-29398545-p8x8h\" (UID: \"0d9c1450-0c73-427d-be1c-18f7ff013f00\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29398545-p8x8h" Nov 23 15:45:00 crc kubenswrapper[5050]: I1123 15:45:00.471170 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0d9c1450-0c73-427d-be1c-18f7ff013f00-config-volume\") pod \"collect-profiles-29398545-p8x8h\" (UID: \"0d9c1450-0c73-427d-be1c-18f7ff013f00\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29398545-p8x8h" Nov 23 
15:45:00 crc kubenswrapper[5050]: I1123 15:45:00.471383 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0d9c1450-0c73-427d-be1c-18f7ff013f00-secret-volume\") pod \"collect-profiles-29398545-p8x8h\" (UID: \"0d9c1450-0c73-427d-be1c-18f7ff013f00\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29398545-p8x8h" Nov 23 15:45:00 crc kubenswrapper[5050]: I1123 15:45:00.471518 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tqgft\" (UniqueName: \"kubernetes.io/projected/0d9c1450-0c73-427d-be1c-18f7ff013f00-kube-api-access-tqgft\") pod \"collect-profiles-29398545-p8x8h\" (UID: \"0d9c1450-0c73-427d-be1c-18f7ff013f00\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29398545-p8x8h" Nov 23 15:45:00 crc kubenswrapper[5050]: I1123 15:45:00.472770 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0d9c1450-0c73-427d-be1c-18f7ff013f00-config-volume\") pod \"collect-profiles-29398545-p8x8h\" (UID: \"0d9c1450-0c73-427d-be1c-18f7ff013f00\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29398545-p8x8h" Nov 23 15:45:00 crc kubenswrapper[5050]: I1123 15:45:00.486091 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0d9c1450-0c73-427d-be1c-18f7ff013f00-secret-volume\") pod \"collect-profiles-29398545-p8x8h\" (UID: \"0d9c1450-0c73-427d-be1c-18f7ff013f00\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29398545-p8x8h" Nov 23 15:45:00 crc kubenswrapper[5050]: I1123 15:45:00.508384 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tqgft\" (UniqueName: \"kubernetes.io/projected/0d9c1450-0c73-427d-be1c-18f7ff013f00-kube-api-access-tqgft\") pod \"collect-profiles-29398545-p8x8h\" (UID: \"0d9c1450-0c73-427d-be1c-18f7ff013f00\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29398545-p8x8h" Nov 23 15:45:00 crc kubenswrapper[5050]: I1123 15:45:00.537176 5050 util.go:30] "No sandbox for pod can be found. 
Nov 23 15:45:00 crc kubenswrapper[5050]: I1123 15:45:00.787849 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29398545-p8x8h"]
Nov 23 15:45:01 crc kubenswrapper[5050]: I1123 15:45:01.555233 5050 generic.go:334] "Generic (PLEG): container finished" podID="0d9c1450-0c73-427d-be1c-18f7ff013f00" containerID="9fe892806dd3eb11fe3379fd174be6aa9514b32ac2d48fea80d79a8b04cf4575" exitCode=0
Nov 23 15:45:01 crc kubenswrapper[5050]: I1123 15:45:01.560260 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29398545-p8x8h" event={"ID":"0d9c1450-0c73-427d-be1c-18f7ff013f00","Type":"ContainerDied","Data":"9fe892806dd3eb11fe3379fd174be6aa9514b32ac2d48fea80d79a8b04cf4575"}
Nov 23 15:45:01 crc kubenswrapper[5050]: I1123 15:45:01.560494 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29398545-p8x8h" event={"ID":"0d9c1450-0c73-427d-be1c-18f7ff013f00","Type":"ContainerStarted","Data":"1529fcddd2c79fb2820575f74db844829867061fc6c54f7c92214cb68b7b16b1"}
Nov 23 15:45:02 crc kubenswrapper[5050]: I1123 15:45:02.933938 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29398545-p8x8h"
Nov 23 15:45:03 crc kubenswrapper[5050]: I1123 15:45:03.122939 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0d9c1450-0c73-427d-be1c-18f7ff013f00-secret-volume\") pod \"0d9c1450-0c73-427d-be1c-18f7ff013f00\" (UID: \"0d9c1450-0c73-427d-be1c-18f7ff013f00\") "
Nov 23 15:45:03 crc kubenswrapper[5050]: I1123 15:45:03.123680 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0d9c1450-0c73-427d-be1c-18f7ff013f00-config-volume\") pod \"0d9c1450-0c73-427d-be1c-18f7ff013f00\" (UID: \"0d9c1450-0c73-427d-be1c-18f7ff013f00\") "
Nov 23 15:45:03 crc kubenswrapper[5050]: I1123 15:45:03.123761 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tqgft\" (UniqueName: \"kubernetes.io/projected/0d9c1450-0c73-427d-be1c-18f7ff013f00-kube-api-access-tqgft\") pod \"0d9c1450-0c73-427d-be1c-18f7ff013f00\" (UID: \"0d9c1450-0c73-427d-be1c-18f7ff013f00\") "
Nov 23 15:45:03 crc kubenswrapper[5050]: I1123 15:45:03.124809 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0d9c1450-0c73-427d-be1c-18f7ff013f00-config-volume" (OuterVolumeSpecName: "config-volume") pod "0d9c1450-0c73-427d-be1c-18f7ff013f00" (UID: "0d9c1450-0c73-427d-be1c-18f7ff013f00"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 23 15:45:03 crc kubenswrapper[5050]: I1123 15:45:03.132538 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0d9c1450-0c73-427d-be1c-18f7ff013f00-kube-api-access-tqgft" (OuterVolumeSpecName: "kube-api-access-tqgft") pod "0d9c1450-0c73-427d-be1c-18f7ff013f00" (UID: "0d9c1450-0c73-427d-be1c-18f7ff013f00"). InnerVolumeSpecName "kube-api-access-tqgft". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 23 15:45:03 crc kubenswrapper[5050]: I1123 15:45:03.133162 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0d9c1450-0c73-427d-be1c-18f7ff013f00-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "0d9c1450-0c73-427d-be1c-18f7ff013f00" (UID: "0d9c1450-0c73-427d-be1c-18f7ff013f00"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 23 15:45:03 crc kubenswrapper[5050]: I1123 15:45:03.225408 5050 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0d9c1450-0c73-427d-be1c-18f7ff013f00-secret-volume\") on node \"crc\" DevicePath \"\""
Nov 23 15:45:03 crc kubenswrapper[5050]: I1123 15:45:03.225478 5050 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0d9c1450-0c73-427d-be1c-18f7ff013f00-config-volume\") on node \"crc\" DevicePath \"\""
Nov 23 15:45:03 crc kubenswrapper[5050]: I1123 15:45:03.225496 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tqgft\" (UniqueName: \"kubernetes.io/projected/0d9c1450-0c73-427d-be1c-18f7ff013f00-kube-api-access-tqgft\") on node \"crc\" DevicePath \"\""
Nov 23 15:45:03 crc kubenswrapper[5050]: I1123 15:45:03.549756 5050 scope.go:117] "RemoveContainer" containerID="e80820057d7c13424a9a485b7e6bd85579aafd40a2db7241b0ec47e8ef9054e7"
Nov 23 15:45:03 crc kubenswrapper[5050]: E1123 15:45:03.550573 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f"
Nov 23 15:45:03 crc kubenswrapper[5050]: I1123 15:45:03.582130 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29398545-p8x8h" event={"ID":"0d9c1450-0c73-427d-be1c-18f7ff013f00","Type":"ContainerDied","Data":"1529fcddd2c79fb2820575f74db844829867061fc6c54f7c92214cb68b7b16b1"}
Nov 23 15:45:03 crc kubenswrapper[5050]: I1123 15:45:03.582196 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29398545-p8x8h"
Nov 23 15:45:03 crc kubenswrapper[5050]: I1123 15:45:03.582206 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1529fcddd2c79fb2820575f74db844829867061fc6c54f7c92214cb68b7b16b1"
Nov 23 15:45:04 crc kubenswrapper[5050]: I1123 15:45:04.040128 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29398500-97j52"]
Nov 23 15:45:04 crc kubenswrapper[5050]: I1123 15:45:04.049870 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29398500-97j52"]
Nov 23 15:45:05 crc kubenswrapper[5050]: I1123 15:45:05.563388 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0e3888fc-521a-4f7d-9f93-74ca490639a2" path="/var/lib/kubelet/pods/0e3888fc-521a-4f7d-9f93-74ca490639a2/volumes"
Nov 23 15:45:17 crc kubenswrapper[5050]: I1123 15:45:17.548903 5050 scope.go:117] "RemoveContainer" containerID="e80820057d7c13424a9a485b7e6bd85579aafd40a2db7241b0ec47e8ef9054e7"
Nov 23 15:45:17 crc kubenswrapper[5050]: E1123 15:45:17.550139 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f"
Nov 23 15:45:28 crc kubenswrapper[5050]: I1123 15:45:28.549126 5050 scope.go:117] "RemoveContainer" containerID="e80820057d7c13424a9a485b7e6bd85579aafd40a2db7241b0ec47e8ef9054e7"
Nov 23 15:45:28 crc kubenswrapper[5050]: E1123 15:45:28.550496 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f"
Nov 23 15:45:43 crc kubenswrapper[5050]: I1123 15:45:43.549185 5050 scope.go:117] "RemoveContainer" containerID="e80820057d7c13424a9a485b7e6bd85579aafd40a2db7241b0ec47e8ef9054e7"
Nov 23 15:45:44 crc kubenswrapper[5050]: I1123 15:45:44.003936 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" event={"ID":"1d998909-9470-47ef-87e8-b34f0473682f","Type":"ContainerStarted","Data":"c018858fba0ccf920c591a468e0dc80298af3eae62c5b69c8c55966a7a98893b"}
Nov 23 15:46:05 crc kubenswrapper[5050]: I1123 15:46:05.462181 5050 scope.go:117] "RemoveContainer" containerID="138037c713ab0fa4192e9ebda67a28af602733a323c753d428a4d54cafb4b619"
Nov 23 15:47:44 crc kubenswrapper[5050]: I1123 15:47:44.427406 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-7f7q8"]
Nov 23 15:47:44 crc kubenswrapper[5050]: E1123 15:47:44.429025 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0d9c1450-0c73-427d-be1c-18f7ff013f00" containerName="collect-profiles"
Nov 23 15:47:44 crc kubenswrapper[5050]: I1123 15:47:44.429054 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="0d9c1450-0c73-427d-be1c-18f7ff013f00" containerName="collect-profiles"
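The machine-config-daemon retries above (15:45:03, 15:45:17, 15:45:28) are sync attempts rejected while the crash back-off is still running; by 15:45:43 the window has expired, so the RemoveContainer is followed by a ContainerStarted at 15:45:44. Kubelet's restart delay is commonly described as a capped doubling; a sketch of that shape, with the 10s base and 2x factor assumed rather than taken from this log (only the 5m0s cap appears in the messages):

package main

import (
	"fmt"
	"time"
)

// restartDelay models the commonly cited kubelet crash-loop back-off:
// start at 10s, double per crash, cap at 5m (the "back-off 5m0s" above).
func restartDelay(crashes int) time.Duration {
	d := 10 * time.Second
	for i := 1; i < crashes; i++ {
		d *= 2
		if d >= 5*time.Minute {
			return 5 * time.Minute
		}
	}
	return d
}

func main() {
	for n := 1; n <= 7; n++ {
		fmt.Printf("crash %d -> wait %v\n", n, restartDelay(n))
	}
	// From the 6th crash onward the delay hits the 5m0s ceiling seen above.
}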
Nov 23 15:47:44 crc kubenswrapper[5050]: I1123 15:47:44.429360 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="0d9c1450-0c73-427d-be1c-18f7ff013f00" containerName="collect-profiles"
Nov 23 15:47:44 crc kubenswrapper[5050]: I1123 15:47:44.432725 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-7f7q8"
Nov 23 15:47:44 crc kubenswrapper[5050]: I1123 15:47:44.455220 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-7f7q8"]
Nov 23 15:47:44 crc kubenswrapper[5050]: I1123 15:47:44.586787 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/96f5f380-d9c0-423d-9128-29adc764043d-catalog-content\") pod \"redhat-operators-7f7q8\" (UID: \"96f5f380-d9c0-423d-9128-29adc764043d\") " pod="openshift-marketplace/redhat-operators-7f7q8"
Nov 23 15:47:44 crc kubenswrapper[5050]: I1123 15:47:44.586850 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/96f5f380-d9c0-423d-9128-29adc764043d-utilities\") pod \"redhat-operators-7f7q8\" (UID: \"96f5f380-d9c0-423d-9128-29adc764043d\") " pod="openshift-marketplace/redhat-operators-7f7q8"
Nov 23 15:47:44 crc kubenswrapper[5050]: I1123 15:47:44.587977 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f57hn\" (UniqueName: \"kubernetes.io/projected/96f5f380-d9c0-423d-9128-29adc764043d-kube-api-access-f57hn\") pod \"redhat-operators-7f7q8\" (UID: \"96f5f380-d9c0-423d-9128-29adc764043d\") " pod="openshift-marketplace/redhat-operators-7f7q8"
Nov 23 15:47:44 crc kubenswrapper[5050]: I1123 15:47:44.690458 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f57hn\" (UniqueName: \"kubernetes.io/projected/96f5f380-d9c0-423d-9128-29adc764043d-kube-api-access-f57hn\") pod \"redhat-operators-7f7q8\" (UID: \"96f5f380-d9c0-423d-9128-29adc764043d\") " pod="openshift-marketplace/redhat-operators-7f7q8"
Nov 23 15:47:44 crc kubenswrapper[5050]: I1123 15:47:44.690544 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/96f5f380-d9c0-423d-9128-29adc764043d-catalog-content\") pod \"redhat-operators-7f7q8\" (UID: \"96f5f380-d9c0-423d-9128-29adc764043d\") " pod="openshift-marketplace/redhat-operators-7f7q8"
Nov 23 15:47:44 crc kubenswrapper[5050]: I1123 15:47:44.690567 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/96f5f380-d9c0-423d-9128-29adc764043d-utilities\") pod \"redhat-operators-7f7q8\" (UID: \"96f5f380-d9c0-423d-9128-29adc764043d\") " pod="openshift-marketplace/redhat-operators-7f7q8"
Nov 23 15:47:44 crc kubenswrapper[5050]: I1123 15:47:44.691074 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/96f5f380-d9c0-423d-9128-29adc764043d-utilities\") pod \"redhat-operators-7f7q8\" (UID: \"96f5f380-d9c0-423d-9128-29adc764043d\") " pod="openshift-marketplace/redhat-operators-7f7q8"
Nov 23 15:47:44 crc kubenswrapper[5050]: I1123 15:47:44.691423 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/96f5f380-d9c0-423d-9128-29adc764043d-catalog-content\") pod \"redhat-operators-7f7q8\" (UID: \"96f5f380-d9c0-423d-9128-29adc764043d\") " pod="openshift-marketplace/redhat-operators-7f7q8"
Nov 23 15:47:44 crc kubenswrapper[5050]: I1123 15:47:44.715325 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f57hn\" (UniqueName: \"kubernetes.io/projected/96f5f380-d9c0-423d-9128-29adc764043d-kube-api-access-f57hn\") pod \"redhat-operators-7f7q8\" (UID: \"96f5f380-d9c0-423d-9128-29adc764043d\") " pod="openshift-marketplace/redhat-operators-7f7q8"
Nov 23 15:47:44 crc kubenswrapper[5050]: I1123 15:47:44.764074 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-7f7q8"
Nov 23 15:47:45 crc kubenswrapper[5050]: I1123 15:47:45.236788 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-7f7q8"]
Nov 23 15:47:45 crc kubenswrapper[5050]: I1123 15:47:45.377974 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7f7q8" event={"ID":"96f5f380-d9c0-423d-9128-29adc764043d","Type":"ContainerStarted","Data":"ca985e3f398ffd3c8c25ca30bec42872df7ca3d97e3dd1816abbad0407821689"}
Nov 23 15:47:46 crc kubenswrapper[5050]: I1123 15:47:46.390495 5050 generic.go:334] "Generic (PLEG): container finished" podID="96f5f380-d9c0-423d-9128-29adc764043d" containerID="a01529763b65c20d14eea80140eaea0ffae9c9232646034f333ca90ba6540c7b" exitCode=0
Nov 23 15:47:46 crc kubenswrapper[5050]: I1123 15:47:46.390578 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7f7q8" event={"ID":"96f5f380-d9c0-423d-9128-29adc764043d","Type":"ContainerDied","Data":"a01529763b65c20d14eea80140eaea0ffae9c9232646034f333ca90ba6540c7b"}
Nov 23 15:47:46 crc kubenswrapper[5050]: I1123 15:47:46.393956 5050 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Nov 23 15:47:47 crc kubenswrapper[5050]: I1123 15:47:47.402023 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7f7q8" event={"ID":"96f5f380-d9c0-423d-9128-29adc764043d","Type":"ContainerStarted","Data":"e4bc5f625b9cf514d10bcfc46708502c38f6aaff1d606ac3d7d9e19701da0e82"}
Nov 23 15:47:48 crc kubenswrapper[5050]: I1123 15:47:48.413309 5050 generic.go:334] "Generic (PLEG): container finished" podID="96f5f380-d9c0-423d-9128-29adc764043d" containerID="e4bc5f625b9cf514d10bcfc46708502c38f6aaff1d606ac3d7d9e19701da0e82" exitCode=0
Nov 23 15:47:48 crc kubenswrapper[5050]: I1123 15:47:48.413463 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7f7q8" event={"ID":"96f5f380-d9c0-423d-9128-29adc764043d","Type":"ContainerDied","Data":"e4bc5f625b9cf514d10bcfc46708502c38f6aaff1d606ac3d7d9e19701da0e82"}
Nov 23 15:47:48 crc kubenswrapper[5050]: I1123 15:47:48.589053 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-2mw2q"]
Nov 23 15:47:48 crc kubenswrapper[5050]: I1123 15:47:48.590870 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-2mw2q"
Nov 23 15:47:48 crc kubenswrapper[5050]: I1123 15:47:48.611517 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-2mw2q"]
Nov 23 15:47:48 crc kubenswrapper[5050]: I1123 15:47:48.760526 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nt7hb\" (UniqueName: \"kubernetes.io/projected/c431a122-1b66-4a0b-aaf3-895a1575665d-kube-api-access-nt7hb\") pod \"certified-operators-2mw2q\" (UID: \"c431a122-1b66-4a0b-aaf3-895a1575665d\") " pod="openshift-marketplace/certified-operators-2mw2q"
Nov 23 15:47:48 crc kubenswrapper[5050]: I1123 15:47:48.760618 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c431a122-1b66-4a0b-aaf3-895a1575665d-utilities\") pod \"certified-operators-2mw2q\" (UID: \"c431a122-1b66-4a0b-aaf3-895a1575665d\") " pod="openshift-marketplace/certified-operators-2mw2q"
Nov 23 15:47:48 crc kubenswrapper[5050]: I1123 15:47:48.760640 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c431a122-1b66-4a0b-aaf3-895a1575665d-catalog-content\") pod \"certified-operators-2mw2q\" (UID: \"c431a122-1b66-4a0b-aaf3-895a1575665d\") " pod="openshift-marketplace/certified-operators-2mw2q"
Nov 23 15:47:48 crc kubenswrapper[5050]: I1123 15:47:48.862916 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nt7hb\" (UniqueName: \"kubernetes.io/projected/c431a122-1b66-4a0b-aaf3-895a1575665d-kube-api-access-nt7hb\") pod \"certified-operators-2mw2q\" (UID: \"c431a122-1b66-4a0b-aaf3-895a1575665d\") " pod="openshift-marketplace/certified-operators-2mw2q"
Nov 23 15:47:48 crc kubenswrapper[5050]: I1123 15:47:48.863052 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c431a122-1b66-4a0b-aaf3-895a1575665d-utilities\") pod \"certified-operators-2mw2q\" (UID: \"c431a122-1b66-4a0b-aaf3-895a1575665d\") " pod="openshift-marketplace/certified-operators-2mw2q"
Nov 23 15:47:48 crc kubenswrapper[5050]: I1123 15:47:48.863089 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c431a122-1b66-4a0b-aaf3-895a1575665d-catalog-content\") pod \"certified-operators-2mw2q\" (UID: \"c431a122-1b66-4a0b-aaf3-895a1575665d\") " pod="openshift-marketplace/certified-operators-2mw2q"
Nov 23 15:47:48 crc kubenswrapper[5050]: I1123 15:47:48.864101 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c431a122-1b66-4a0b-aaf3-895a1575665d-utilities\") pod \"certified-operators-2mw2q\" (UID: \"c431a122-1b66-4a0b-aaf3-895a1575665d\") " pod="openshift-marketplace/certified-operators-2mw2q"
Nov 23 15:47:48 crc kubenswrapper[5050]: I1123 15:47:48.864195 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c431a122-1b66-4a0b-aaf3-895a1575665d-catalog-content\") pod \"certified-operators-2mw2q\" (UID: \"c431a122-1b66-4a0b-aaf3-895a1575665d\") " pod="openshift-marketplace/certified-operators-2mw2q"
Nov 23 15:47:48 crc kubenswrapper[5050]: I1123 15:47:48.899173 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nt7hb\" (UniqueName: \"kubernetes.io/projected/c431a122-1b66-4a0b-aaf3-895a1575665d-kube-api-access-nt7hb\") pod \"certified-operators-2mw2q\" (UID: \"c431a122-1b66-4a0b-aaf3-895a1575665d\") " pod="openshift-marketplace/certified-operators-2mw2q"
Nov 23 15:47:48 crc kubenswrapper[5050]: I1123 15:47:48.912665 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-2mw2q"
Nov 23 15:47:49 crc kubenswrapper[5050]: I1123 15:47:49.423369 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7f7q8" event={"ID":"96f5f380-d9c0-423d-9128-29adc764043d","Type":"ContainerStarted","Data":"8e69672a5d3d739bfd5f3b43c51cb9f632e31b6f99f201ce6c0ea8b4556594f0"}
Nov 23 15:47:49 crc kubenswrapper[5050]: I1123 15:47:49.461286 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-7f7q8" podStartSLOduration=2.9592609359999997 podStartE2EDuration="5.461259724s" podCreationTimestamp="2025-11-23 15:47:44 +0000 UTC" firstStartedPulling="2025-11-23 15:47:46.393627906 +0000 UTC m=+3961.560624391" lastFinishedPulling="2025-11-23 15:47:48.895626694 +0000 UTC m=+3964.062623179" observedRunningTime="2025-11-23 15:47:49.456289913 +0000 UTC m=+3964.623286398" watchObservedRunningTime="2025-11-23 15:47:49.461259724 +0000 UTC m=+3964.628256209"
Nov 23 15:47:49 crc kubenswrapper[5050]: W1123 15:47:49.469271 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc431a122_1b66_4a0b_aaf3_895a1575665d.slice/crio-200a152f0e9c18369e41fc3019284ef0aca02b5c5db7532f2de6480718cc1d7f WatchSource:0}: Error finding container 200a152f0e9c18369e41fc3019284ef0aca02b5c5db7532f2de6480718cc1d7f: Status 404 returned error can't find the container with id 200a152f0e9c18369e41fc3019284ef0aca02b5c5db7532f2de6480718cc1d7f
Nov 23 15:47:49 crc kubenswrapper[5050]: I1123 15:47:49.488884 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-2mw2q"]
Nov 23 15:47:50 crc kubenswrapper[5050]: I1123 15:47:50.431881 5050 generic.go:334] "Generic (PLEG): container finished" podID="c431a122-1b66-4a0b-aaf3-895a1575665d" containerID="88babc02387315c72f04f6fecc1f77472cd967e67152decaec4d5c0d2d17c471" exitCode=0
Nov 23 15:47:50 crc kubenswrapper[5050]: I1123 15:47:50.431966 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2mw2q" event={"ID":"c431a122-1b66-4a0b-aaf3-895a1575665d","Type":"ContainerDied","Data":"88babc02387315c72f04f6fecc1f77472cd967e67152decaec4d5c0d2d17c471"}
Nov 23 15:47:50 crc kubenswrapper[5050]: I1123 15:47:50.432032 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2mw2q" event={"ID":"c431a122-1b66-4a0b-aaf3-895a1575665d","Type":"ContainerStarted","Data":"200a152f0e9c18369e41fc3019284ef0aca02b5c5db7532f2de6480718cc1d7f"}
Nov 23 15:47:51 crc kubenswrapper[5050]: I1123 15:47:51.444221 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2mw2q" event={"ID":"c431a122-1b66-4a0b-aaf3-895a1575665d","Type":"ContainerStarted","Data":"876cd4814419cfc61230c38796aa2b2f261c3d0d7e31d6e502241fdbdd59c5de"}
Nov 23 15:47:52 crc kubenswrapper[5050]: I1123 15:47:52.455355 5050 generic.go:334] "Generic (PLEG): container finished" podID="c431a122-1b66-4a0b-aaf3-895a1575665d" containerID="876cd4814419cfc61230c38796aa2b2f261c3d0d7e31d6e502241fdbdd59c5de" exitCode=0
Nov 23 15:47:52 crc kubenswrapper[5050]: I1123 15:47:52.455412 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2mw2q" event={"ID":"c431a122-1b66-4a0b-aaf3-895a1575665d","Type":"ContainerDied","Data":"876cd4814419cfc61230c38796aa2b2f261c3d0d7e31d6e502241fdbdd59c5de"}
Nov 23 15:47:53 crc kubenswrapper[5050]: I1123 15:47:53.467826 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2mw2q" event={"ID":"c431a122-1b66-4a0b-aaf3-895a1575665d","Type":"ContainerStarted","Data":"da278d55a472f9608509c8e1b170d57e3cb633c49f2d082bb43a369bfa63e758"}
Nov 23 15:47:53 crc kubenswrapper[5050]: I1123 15:47:53.499388 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-2mw2q" podStartSLOduration=3.078402918 podStartE2EDuration="5.499354457s" podCreationTimestamp="2025-11-23 15:47:48 +0000 UTC" firstStartedPulling="2025-11-23 15:47:50.433711498 +0000 UTC m=+3965.600707983" lastFinishedPulling="2025-11-23 15:47:52.854663037 +0000 UTC m=+3968.021659522" observedRunningTime="2025-11-23 15:47:53.493970074 +0000 UTC m=+3968.660966559" watchObservedRunningTime="2025-11-23 15:47:53.499354457 +0000 UTC m=+3968.666350972"
Nov 23 15:47:54 crc kubenswrapper[5050]: I1123 15:47:54.764976 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-7f7q8"
Nov 23 15:47:54 crc kubenswrapper[5050]: I1123 15:47:54.765035 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-7f7q8"
Nov 23 15:47:55 crc kubenswrapper[5050]: I1123 15:47:55.832424 5050 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-7f7q8" podUID="96f5f380-d9c0-423d-9128-29adc764043d" containerName="registry-server" probeResult="failure" output=<
Nov 23 15:47:55 crc kubenswrapper[5050]: timeout: failed to connect service ":50051" within 1s
Nov 23 15:47:55 crc kubenswrapper[5050]: >
Nov 23 15:47:58 crc kubenswrapper[5050]: I1123 15:47:58.914091 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-2mw2q"
Nov 23 15:47:58 crc kubenswrapper[5050]: I1123 15:47:58.914708 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-2mw2q"
Nov 23 15:47:58 crc kubenswrapper[5050]: I1123 15:47:58.988713 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-2mw2q"
Nov 23 15:47:59 crc kubenswrapper[5050]: I1123 15:47:59.224221 5050 patch_prober.go:28] interesting pod/machine-config-daemon-hlrlq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 23 15:47:59 crc kubenswrapper[5050]: I1123 15:47:59.224336 5050 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
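The startup probe output above ("timeout: failed to connect service \":50051\" within 1s") matches a grpc_health_probe-style check against the registry-server's gRPC endpoint on :50051 with a 1s budget. A minimal equivalent client in Go, assuming the server implements the standard gRPC health service:

package main

import (
	"context"
	"fmt"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	healthpb "google.golang.org/grpc/health/grpc_health_v1"
)

func main() {
	// Mirror the probe's 1s budget for both connect and check.
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	conn, err := grpc.DialContext(ctx, "localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithBlock())
	if err != nil {
		fmt.Println("probe failure:", err) // what the kubelet logged above
		return
	}
	defer conn.Close()

	resp, err := healthpb.NewHealthClient(conn).Check(ctx, &healthpb.HealthCheckRequest{})
	if err != nil || resp.GetStatus() != healthpb.HealthCheckResponse_SERVING {
		fmt.Println("probe failure:", err, resp.GetStatus())
		return
	}
	fmt.Println("probe success: SERVING")
}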
Nov 23 15:47:59 crc kubenswrapper[5050]: I1123 15:47:59.604612 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-2mw2q"
Nov 23 15:47:59 crc kubenswrapper[5050]: I1123 15:47:59.676221 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-2mw2q"]
Nov 23 15:48:01 crc kubenswrapper[5050]: I1123 15:48:01.545587 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-2mw2q" podUID="c431a122-1b66-4a0b-aaf3-895a1575665d" containerName="registry-server" containerID="cri-o://da278d55a472f9608509c8e1b170d57e3cb633c49f2d082bb43a369bfa63e758" gracePeriod=2
Nov 23 15:48:02 crc kubenswrapper[5050]: I1123 15:48:02.558736 5050 generic.go:334] "Generic (PLEG): container finished" podID="c431a122-1b66-4a0b-aaf3-895a1575665d" containerID="da278d55a472f9608509c8e1b170d57e3cb633c49f2d082bb43a369bfa63e758" exitCode=0
Nov 23 15:48:02 crc kubenswrapper[5050]: I1123 15:48:02.558805 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2mw2q" event={"ID":"c431a122-1b66-4a0b-aaf3-895a1575665d","Type":"ContainerDied","Data":"da278d55a472f9608509c8e1b170d57e3cb633c49f2d082bb43a369bfa63e758"}
Nov 23 15:48:02 crc kubenswrapper[5050]: I1123 15:48:02.558874 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2mw2q" event={"ID":"c431a122-1b66-4a0b-aaf3-895a1575665d","Type":"ContainerDied","Data":"200a152f0e9c18369e41fc3019284ef0aca02b5c5db7532f2de6480718cc1d7f"}
Nov 23 15:48:02 crc kubenswrapper[5050]: I1123 15:48:02.558896 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="200a152f0e9c18369e41fc3019284ef0aca02b5c5db7532f2de6480718cc1d7f"
Nov 23 15:48:02 crc kubenswrapper[5050]: I1123 15:48:02.581951 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-2mw2q"
Nov 23 15:48:02 crc kubenswrapper[5050]: I1123 15:48:02.725552 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c431a122-1b66-4a0b-aaf3-895a1575665d-utilities\") pod \"c431a122-1b66-4a0b-aaf3-895a1575665d\" (UID: \"c431a122-1b66-4a0b-aaf3-895a1575665d\") "
Nov 23 15:48:02 crc kubenswrapper[5050]: I1123 15:48:02.726213 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nt7hb\" (UniqueName: \"kubernetes.io/projected/c431a122-1b66-4a0b-aaf3-895a1575665d-kube-api-access-nt7hb\") pod \"c431a122-1b66-4a0b-aaf3-895a1575665d\" (UID: \"c431a122-1b66-4a0b-aaf3-895a1575665d\") "
Nov 23 15:48:02 crc kubenswrapper[5050]: I1123 15:48:02.726532 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c431a122-1b66-4a0b-aaf3-895a1575665d-catalog-content\") pod \"c431a122-1b66-4a0b-aaf3-895a1575665d\" (UID: \"c431a122-1b66-4a0b-aaf3-895a1575665d\") "
Nov 23 15:48:02 crc kubenswrapper[5050]: I1123 15:48:02.727142 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c431a122-1b66-4a0b-aaf3-895a1575665d-utilities" (OuterVolumeSpecName: "utilities") pod "c431a122-1b66-4a0b-aaf3-895a1575665d" (UID: "c431a122-1b66-4a0b-aaf3-895a1575665d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 23 15:48:02 crc kubenswrapper[5050]: I1123 15:48:02.737611 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c431a122-1b66-4a0b-aaf3-895a1575665d-kube-api-access-nt7hb" (OuterVolumeSpecName: "kube-api-access-nt7hb") pod "c431a122-1b66-4a0b-aaf3-895a1575665d" (UID: "c431a122-1b66-4a0b-aaf3-895a1575665d"). InnerVolumeSpecName "kube-api-access-nt7hb". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 23 15:48:02 crc kubenswrapper[5050]: I1123 15:48:02.778133 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c431a122-1b66-4a0b-aaf3-895a1575665d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c431a122-1b66-4a0b-aaf3-895a1575665d" (UID: "c431a122-1b66-4a0b-aaf3-895a1575665d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 23 15:48:02 crc kubenswrapper[5050]: I1123 15:48:02.829397 5050 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c431a122-1b66-4a0b-aaf3-895a1575665d-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 23 15:48:02 crc kubenswrapper[5050]: I1123 15:48:02.829498 5050 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c431a122-1b66-4a0b-aaf3-895a1575665d-utilities\") on node \"crc\" DevicePath \"\""
Nov 23 15:48:02 crc kubenswrapper[5050]: I1123 15:48:02.829521 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nt7hb\" (UniqueName: \"kubernetes.io/projected/c431a122-1b66-4a0b-aaf3-895a1575665d-kube-api-access-nt7hb\") on node \"crc\" DevicePath \"\""
Nov 23 15:48:03 crc kubenswrapper[5050]: I1123 15:48:03.566746 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-2mw2q"
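The certified-operators-2mw2q teardown above follows the fixed order: API DELETE, kill with the pod's grace period (gracePeriod=2 here, presumably the pod spec's terminationGracePeriodSeconds), ContainerDied events, then UnmountVolume/TearDown and finally "Volume detached". A sketch of issuing such a delete with client-go; the kubeconfig path is a placeholder, and passing the grace period explicitly is optional since the spec's value applies by default:

package main

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", "/root/.kube/config") // placeholder path
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	grace := int64(2) // matches gracePeriod=2 in the log above
	if err := cs.CoreV1().Pods("openshift-marketplace").Delete(
		context.Background(),
		"certified-operators-2mw2q",
		metav1.DeleteOptions{GracePeriodSeconds: &grace},
	); err != nil {
		panic(err)
	}
}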
Nov 23 15:48:03 crc kubenswrapper[5050]: I1123 15:48:03.621908 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-2mw2q"]
Nov 23 15:48:03 crc kubenswrapper[5050]: I1123 15:48:03.632123 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-2mw2q"]
Nov 23 15:48:04 crc kubenswrapper[5050]: I1123 15:48:04.838132 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-7f7q8"
Nov 23 15:48:04 crc kubenswrapper[5050]: I1123 15:48:04.930193 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-7f7q8"
Nov 23 15:48:05 crc kubenswrapper[5050]: I1123 15:48:05.569654 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c431a122-1b66-4a0b-aaf3-895a1575665d" path="/var/lib/kubelet/pods/c431a122-1b66-4a0b-aaf3-895a1575665d/volumes"
Nov 23 15:48:05 crc kubenswrapper[5050]: I1123 15:48:05.827528 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-7f7q8"]
Nov 23 15:48:06 crc kubenswrapper[5050]: I1123 15:48:06.598299 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-7f7q8" podUID="96f5f380-d9c0-423d-9128-29adc764043d" containerName="registry-server" containerID="cri-o://8e69672a5d3d739bfd5f3b43c51cb9f632e31b6f99f201ce6c0ea8b4556594f0" gracePeriod=2
Nov 23 15:48:07 crc kubenswrapper[5050]: I1123 15:48:07.123931 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-7f7q8"
Nov 23 15:48:07 crc kubenswrapper[5050]: I1123 15:48:07.210167 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f57hn\" (UniqueName: \"kubernetes.io/projected/96f5f380-d9c0-423d-9128-29adc764043d-kube-api-access-f57hn\") pod \"96f5f380-d9c0-423d-9128-29adc764043d\" (UID: \"96f5f380-d9c0-423d-9128-29adc764043d\") "
Nov 23 15:48:07 crc kubenswrapper[5050]: I1123 15:48:07.210265 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/96f5f380-d9c0-423d-9128-29adc764043d-utilities\") pod \"96f5f380-d9c0-423d-9128-29adc764043d\" (UID: \"96f5f380-d9c0-423d-9128-29adc764043d\") "
Nov 23 15:48:07 crc kubenswrapper[5050]: I1123 15:48:07.210341 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/96f5f380-d9c0-423d-9128-29adc764043d-catalog-content\") pod \"96f5f380-d9c0-423d-9128-29adc764043d\" (UID: \"96f5f380-d9c0-423d-9128-29adc764043d\") "
Nov 23 15:48:07 crc kubenswrapper[5050]: I1123 15:48:07.211589 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/96f5f380-d9c0-423d-9128-29adc764043d-utilities" (OuterVolumeSpecName: "utilities") pod "96f5f380-d9c0-423d-9128-29adc764043d" (UID: "96f5f380-d9c0-423d-9128-29adc764043d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 23 15:48:07 crc kubenswrapper[5050]: I1123 15:48:07.220943 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/96f5f380-d9c0-423d-9128-29adc764043d-kube-api-access-f57hn" (OuterVolumeSpecName: "kube-api-access-f57hn") pod "96f5f380-d9c0-423d-9128-29adc764043d" (UID: "96f5f380-d9c0-423d-9128-29adc764043d"). InnerVolumeSpecName "kube-api-access-f57hn". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 23 15:48:07 crc kubenswrapper[5050]: I1123 15:48:07.313109 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f57hn\" (UniqueName: \"kubernetes.io/projected/96f5f380-d9c0-423d-9128-29adc764043d-kube-api-access-f57hn\") on node \"crc\" DevicePath \"\""
Nov 23 15:48:07 crc kubenswrapper[5050]: I1123 15:48:07.313184 5050 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/96f5f380-d9c0-423d-9128-29adc764043d-utilities\") on node \"crc\" DevicePath \"\""
Nov 23 15:48:07 crc kubenswrapper[5050]: I1123 15:48:07.333757 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/96f5f380-d9c0-423d-9128-29adc764043d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "96f5f380-d9c0-423d-9128-29adc764043d" (UID: "96f5f380-d9c0-423d-9128-29adc764043d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 23 15:48:07 crc kubenswrapper[5050]: I1123 15:48:07.415213 5050 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/96f5f380-d9c0-423d-9128-29adc764043d-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 23 15:48:07 crc kubenswrapper[5050]: I1123 15:48:07.613814 5050 generic.go:334] "Generic (PLEG): container finished" podID="96f5f380-d9c0-423d-9128-29adc764043d" containerID="8e69672a5d3d739bfd5f3b43c51cb9f632e31b6f99f201ce6c0ea8b4556594f0" exitCode=0
Nov 23 15:48:07 crc kubenswrapper[5050]: I1123 15:48:07.613851 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7f7q8" event={"ID":"96f5f380-d9c0-423d-9128-29adc764043d","Type":"ContainerDied","Data":"8e69672a5d3d739bfd5f3b43c51cb9f632e31b6f99f201ce6c0ea8b4556594f0"}
Nov 23 15:48:07 crc kubenswrapper[5050]: I1123 15:48:07.614013 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7f7q8" event={"ID":"96f5f380-d9c0-423d-9128-29adc764043d","Type":"ContainerDied","Data":"ca985e3f398ffd3c8c25ca30bec42872df7ca3d97e3dd1816abbad0407821689"}
Nov 23 15:48:07 crc kubenswrapper[5050]: I1123 15:48:07.613882 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-7f7q8"
Nov 23 15:48:07 crc kubenswrapper[5050]: I1123 15:48:07.614064 5050 scope.go:117] "RemoveContainer" containerID="8e69672a5d3d739bfd5f3b43c51cb9f632e31b6f99f201ce6c0ea8b4556594f0"
Nov 23 15:48:07 crc kubenswrapper[5050]: I1123 15:48:07.658387 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-7f7q8"]
Nov 23 15:48:07 crc kubenswrapper[5050]: I1123 15:48:07.668104 5050 scope.go:117] "RemoveContainer" containerID="e4bc5f625b9cf514d10bcfc46708502c38f6aaff1d606ac3d7d9e19701da0e82"
Nov 23 15:48:07 crc kubenswrapper[5050]: I1123 15:48:07.669005 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-7f7q8"]
Nov 23 15:48:07 crc kubenswrapper[5050]: I1123 15:48:07.722360 5050 scope.go:117] "RemoveContainer" containerID="a01529763b65c20d14eea80140eaea0ffae9c9232646034f333ca90ba6540c7b"
Nov 23 15:48:07 crc kubenswrapper[5050]: I1123 15:48:07.770126 5050 scope.go:117] "RemoveContainer" containerID="8e69672a5d3d739bfd5f3b43c51cb9f632e31b6f99f201ce6c0ea8b4556594f0"
Nov 23 15:48:07 crc kubenswrapper[5050]: E1123 15:48:07.771289 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8e69672a5d3d739bfd5f3b43c51cb9f632e31b6f99f201ce6c0ea8b4556594f0\": container with ID starting with 8e69672a5d3d739bfd5f3b43c51cb9f632e31b6f99f201ce6c0ea8b4556594f0 not found: ID does not exist" containerID="8e69672a5d3d739bfd5f3b43c51cb9f632e31b6f99f201ce6c0ea8b4556594f0"
Nov 23 15:48:07 crc kubenswrapper[5050]: I1123 15:48:07.771390 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8e69672a5d3d739bfd5f3b43c51cb9f632e31b6f99f201ce6c0ea8b4556594f0"} err="failed to get container status \"8e69672a5d3d739bfd5f3b43c51cb9f632e31b6f99f201ce6c0ea8b4556594f0\": rpc error: code = NotFound desc = could not find container \"8e69672a5d3d739bfd5f3b43c51cb9f632e31b6f99f201ce6c0ea8b4556594f0\": container with ID starting with 8e69672a5d3d739bfd5f3b43c51cb9f632e31b6f99f201ce6c0ea8b4556594f0 not found: ID does not exist"
Nov 23 15:48:07 crc kubenswrapper[5050]: I1123 15:48:07.771921 5050 scope.go:117] "RemoveContainer" containerID="e4bc5f625b9cf514d10bcfc46708502c38f6aaff1d606ac3d7d9e19701da0e82"
Nov 23 15:48:07 crc kubenswrapper[5050]: E1123 15:48:07.773346 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e4bc5f625b9cf514d10bcfc46708502c38f6aaff1d606ac3d7d9e19701da0e82\": container with ID starting with e4bc5f625b9cf514d10bcfc46708502c38f6aaff1d606ac3d7d9e19701da0e82 not found: ID does not exist" containerID="e4bc5f625b9cf514d10bcfc46708502c38f6aaff1d606ac3d7d9e19701da0e82"
Nov 23 15:48:07 crc kubenswrapper[5050]: I1123 15:48:07.773417 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e4bc5f625b9cf514d10bcfc46708502c38f6aaff1d606ac3d7d9e19701da0e82"} err="failed to get container status \"e4bc5f625b9cf514d10bcfc46708502c38f6aaff1d606ac3d7d9e19701da0e82\": rpc error: code = NotFound desc = could not find container \"e4bc5f625b9cf514d10bcfc46708502c38f6aaff1d606ac3d7d9e19701da0e82\": container with ID starting with e4bc5f625b9cf514d10bcfc46708502c38f6aaff1d606ac3d7d9e19701da0e82 not found: ID does not exist"
Nov 23 15:48:07 crc kubenswrapper[5050]: I1123 15:48:07.773495 5050 scope.go:117] "RemoveContainer" containerID="a01529763b65c20d14eea80140eaea0ffae9c9232646034f333ca90ba6540c7b"
Nov 23 15:48:07 crc kubenswrapper[5050]: E1123 15:48:07.774142 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a01529763b65c20d14eea80140eaea0ffae9c9232646034f333ca90ba6540c7b\": container with ID starting with a01529763b65c20d14eea80140eaea0ffae9c9232646034f333ca90ba6540c7b not found: ID does not exist" containerID="a01529763b65c20d14eea80140eaea0ffae9c9232646034f333ca90ba6540c7b"
Nov 23 15:48:07 crc kubenswrapper[5050]: I1123 15:48:07.774198 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a01529763b65c20d14eea80140eaea0ffae9c9232646034f333ca90ba6540c7b"} err="failed to get container status \"a01529763b65c20d14eea80140eaea0ffae9c9232646034f333ca90ba6540c7b\": rpc error: code = NotFound desc = could not find container \"a01529763b65c20d14eea80140eaea0ffae9c9232646034f333ca90ba6540c7b\": container with ID starting with a01529763b65c20d14eea80140eaea0ffae9c9232646034f333ca90ba6540c7b not found: ID does not exist"
Nov 23 15:48:09 crc kubenswrapper[5050]: I1123 15:48:09.563855 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="96f5f380-d9c0-423d-9128-29adc764043d" path="/var/lib/kubelet/pods/96f5f380-d9c0-423d-9128-29adc764043d/volumes"
Nov 23 15:48:21 crc kubenswrapper[5050]: I1123 15:48:21.541865 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-j7rp8"]
Nov 23 15:48:21 crc kubenswrapper[5050]: E1123 15:48:21.543659 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c431a122-1b66-4a0b-aaf3-895a1575665d" containerName="extract-utilities"
Nov 23 15:48:21 crc kubenswrapper[5050]: I1123 15:48:21.543699 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="c431a122-1b66-4a0b-aaf3-895a1575665d" containerName="extract-utilities"
Nov 23 15:48:21 crc kubenswrapper[5050]: E1123 15:48:21.543739 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="96f5f380-d9c0-423d-9128-29adc764043d" containerName="extract-utilities"
Nov 23 15:48:21 crc kubenswrapper[5050]: I1123 15:48:21.543762 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="96f5f380-d9c0-423d-9128-29adc764043d" containerName="extract-utilities"
Nov 23 15:48:21 crc kubenswrapper[5050]: E1123 15:48:21.543811 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="96f5f380-d9c0-423d-9128-29adc764043d" containerName="registry-server"
Nov 23 15:48:21 crc kubenswrapper[5050]: I1123 15:48:21.543833 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="96f5f380-d9c0-423d-9128-29adc764043d" containerName="registry-server"
Nov 23 15:48:21 crc kubenswrapper[5050]: E1123 15:48:21.543866 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="96f5f380-d9c0-423d-9128-29adc764043d" containerName="extract-content"
Nov 23 15:48:21 crc kubenswrapper[5050]: I1123 15:48:21.543884 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="96f5f380-d9c0-423d-9128-29adc764043d" containerName="extract-content"
Nov 23 15:48:21 crc kubenswrapper[5050]: E1123 15:48:21.543920 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c431a122-1b66-4a0b-aaf3-895a1575665d" containerName="registry-server"
Nov 23 15:48:21 crc kubenswrapper[5050]: I1123 15:48:21.543939 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="c431a122-1b66-4a0b-aaf3-895a1575665d" containerName="registry-server"
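Despite the E severity, the RemoveStaleState lines above and below are routine housekeeping: before admitting the new redhat-marketplace pod, the CPU and memory managers drop checkpoint entries belonging to the two catalog pods just deleted, paired with I-level "Deleted CPUSet assignment" confirmations. The CPU assignments live in a small JSON checkpoint on the node; a sketch that dumps it, with the conventional path and field names assumed rather than taken from this log:

package main

import (
	"encoding/json"
	"fmt"
	"os"
)

func main() {
	// Conventional kubelet CPU-manager checkpoint location (assumption).
	raw, err := os.ReadFile("/var/lib/kubelet/cpu_manager_state")
	if err != nil {
		panic(err)
	}
	// Typical top-level fields: policyName, defaultCpuSet, entries.
	var state map[string]any
	if err := json.Unmarshal(raw, &state); err != nil {
		panic(err)
	}
	pretty, _ := json.MarshalIndent(state, "", "  ")
	fmt.Println(string(pretty))
}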
Nov 23 15:48:21 crc kubenswrapper[5050]: E1123 15:48:21.543973 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c431a122-1b66-4a0b-aaf3-895a1575665d" containerName="extract-content"
Nov 23 15:48:21 crc kubenswrapper[5050]: I1123 15:48:21.543992 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="c431a122-1b66-4a0b-aaf3-895a1575665d" containerName="extract-content"
Nov 23 15:48:21 crc kubenswrapper[5050]: I1123 15:48:21.544424 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="96f5f380-d9c0-423d-9128-29adc764043d" containerName="registry-server"
Nov 23 15:48:21 crc kubenswrapper[5050]: I1123 15:48:21.544490 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="c431a122-1b66-4a0b-aaf3-895a1575665d" containerName="registry-server"
Nov 23 15:48:21 crc kubenswrapper[5050]: I1123 15:48:21.546420 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-j7rp8"
Nov 23 15:48:21 crc kubenswrapper[5050]: I1123 15:48:21.565279 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-j7rp8"]
Nov 23 15:48:21 crc kubenswrapper[5050]: I1123 15:48:21.664286 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6xtzq\" (UniqueName: \"kubernetes.io/projected/656b241f-da87-4616-ad93-a22876a5f5e0-kube-api-access-6xtzq\") pod \"redhat-marketplace-j7rp8\" (UID: \"656b241f-da87-4616-ad93-a22876a5f5e0\") " pod="openshift-marketplace/redhat-marketplace-j7rp8"
Nov 23 15:48:21 crc kubenswrapper[5050]: I1123 15:48:21.664343 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/656b241f-da87-4616-ad93-a22876a5f5e0-utilities\") pod \"redhat-marketplace-j7rp8\" (UID: \"656b241f-da87-4616-ad93-a22876a5f5e0\") " pod="openshift-marketplace/redhat-marketplace-j7rp8"
Nov 23 15:48:21 crc kubenswrapper[5050]: I1123 15:48:21.664426 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/656b241f-da87-4616-ad93-a22876a5f5e0-catalog-content\") pod \"redhat-marketplace-j7rp8\" (UID: \"656b241f-da87-4616-ad93-a22876a5f5e0\") " pod="openshift-marketplace/redhat-marketplace-j7rp8"
Nov 23 15:48:21 crc kubenswrapper[5050]: I1123 15:48:21.766836 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/656b241f-da87-4616-ad93-a22876a5f5e0-catalog-content\") pod \"redhat-marketplace-j7rp8\" (UID: \"656b241f-da87-4616-ad93-a22876a5f5e0\") " pod="openshift-marketplace/redhat-marketplace-j7rp8"
Nov 23 15:48:21 crc kubenswrapper[5050]: I1123 15:48:21.766223 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/656b241f-da87-4616-ad93-a22876a5f5e0-catalog-content\") pod \"redhat-marketplace-j7rp8\" (UID: \"656b241f-da87-4616-ad93-a22876a5f5e0\") " pod="openshift-marketplace/redhat-marketplace-j7rp8"
Nov 23 15:48:21 crc kubenswrapper[5050]: I1123 15:48:21.767005 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6xtzq\" (UniqueName: \"kubernetes.io/projected/656b241f-da87-4616-ad93-a22876a5f5e0-kube-api-access-6xtzq\") pod \"redhat-marketplace-j7rp8\" (UID: \"656b241f-da87-4616-ad93-a22876a5f5e0\") " pod="openshift-marketplace/redhat-marketplace-j7rp8"
Nov 23 15:48:21 crc kubenswrapper[5050]: I1123 15:48:21.767025 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/656b241f-da87-4616-ad93-a22876a5f5e0-utilities\") pod \"redhat-marketplace-j7rp8\" (UID: \"656b241f-da87-4616-ad93-a22876a5f5e0\") " pod="openshift-marketplace/redhat-marketplace-j7rp8"
Nov 23 15:48:21 crc kubenswrapper[5050]: I1123 15:48:21.767876 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/656b241f-da87-4616-ad93-a22876a5f5e0-utilities\") pod \"redhat-marketplace-j7rp8\" (UID: \"656b241f-da87-4616-ad93-a22876a5f5e0\") " pod="openshift-marketplace/redhat-marketplace-j7rp8"
Nov 23 15:48:21 crc kubenswrapper[5050]: I1123 15:48:21.794913 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6xtzq\" (UniqueName: \"kubernetes.io/projected/656b241f-da87-4616-ad93-a22876a5f5e0-kube-api-access-6xtzq\") pod \"redhat-marketplace-j7rp8\" (UID: \"656b241f-da87-4616-ad93-a22876a5f5e0\") " pod="openshift-marketplace/redhat-marketplace-j7rp8"
Nov 23 15:48:21 crc kubenswrapper[5050]: I1123 15:48:21.889972 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-j7rp8"
Nov 23 15:48:22 crc kubenswrapper[5050]: I1123 15:48:22.165048 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-j7rp8"]
Nov 23 15:48:22 crc kubenswrapper[5050]: I1123 15:48:22.763565 5050 generic.go:334] "Generic (PLEG): container finished" podID="656b241f-da87-4616-ad93-a22876a5f5e0" containerID="2deb69cf118e06d61a12dabe983a0a3204a7967f1ecb581797eb5b25b927f735" exitCode=0
Nov 23 15:48:22 crc kubenswrapper[5050]: I1123 15:48:22.763676 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-j7rp8" event={"ID":"656b241f-da87-4616-ad93-a22876a5f5e0","Type":"ContainerDied","Data":"2deb69cf118e06d61a12dabe983a0a3204a7967f1ecb581797eb5b25b927f735"}
Nov 23 15:48:22 crc kubenswrapper[5050]: I1123 15:48:22.763784 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-j7rp8" event={"ID":"656b241f-da87-4616-ad93-a22876a5f5e0","Type":"ContainerStarted","Data":"fbc6bbc370e1261b1d3b484687b96f2c86e52ec4161d8d14881be32936e04e5a"}
Nov 23 15:48:23 crc kubenswrapper[5050]: I1123 15:48:23.778118 5050 generic.go:334] "Generic (PLEG): container finished" podID="656b241f-da87-4616-ad93-a22876a5f5e0" containerID="9dc9d3df3ff0fee66236c6e0e8fff343bdef9d8e2aae505eb9a78f0cbf232def" exitCode=0
Nov 23 15:48:23 crc kubenswrapper[5050]: I1123 15:48:23.778241 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-j7rp8" event={"ID":"656b241f-da87-4616-ad93-a22876a5f5e0","Type":"ContainerDied","Data":"9dc9d3df3ff0fee66236c6e0e8fff343bdef9d8e2aae505eb9a78f0cbf232def"}
Nov 23 15:48:24 crc kubenswrapper[5050]: I1123 15:48:24.798906 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-j7rp8" event={"ID":"656b241f-da87-4616-ad93-a22876a5f5e0","Type":"ContainerStarted","Data":"a56148089dc384dbf07ce8bffff3c3efc4a14b9f930f9e528c10c96149b70ca2"}
Nov 23 15:48:24 crc kubenswrapper[5050]: I1123 15:48:24.833930 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-j7rp8" podStartSLOduration=2.34120914 podStartE2EDuration="3.833904578s" podCreationTimestamp="2025-11-23 15:48:21 +0000 UTC" firstStartedPulling="2025-11-23 15:48:22.768086768 +0000 UTC m=+3997.935083283" lastFinishedPulling="2025-11-23 15:48:24.260782196 +0000 UTC m=+3999.427778721" observedRunningTime="2025-11-23 15:48:24.830672256 +0000 UTC m=+3999.997668771" watchObservedRunningTime="2025-11-23 15:48:24.833904578 +0000 UTC m=+4000.000901093"
Nov 23 15:48:29 crc kubenswrapper[5050]: I1123 15:48:29.225057 5050 patch_prober.go:28] interesting pod/machine-config-daemon-hlrlq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 23 15:48:29 crc kubenswrapper[5050]: I1123 15:48:29.226309 5050 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 23 15:48:31 crc kubenswrapper[5050]: I1123 15:48:31.931543 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-j7rp8"
Nov 23 15:48:31 crc kubenswrapper[5050]: I1123 15:48:31.932672 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-j7rp8"
Nov 23 15:48:32 crc kubenswrapper[5050]: I1123 15:48:32.019007 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-j7rp8"
Nov 23 15:48:32 crc kubenswrapper[5050]: I1123 15:48:32.962968 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-j7rp8"
Nov 23 15:48:33 crc kubenswrapper[5050]: I1123 15:48:33.040482 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-j7rp8"]
Nov 23 15:48:34 crc kubenswrapper[5050]: I1123 15:48:34.903275 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-j7rp8" podUID="656b241f-da87-4616-ad93-a22876a5f5e0" containerName="registry-server" containerID="cri-o://a56148089dc384dbf07ce8bffff3c3efc4a14b9f930f9e528c10c96149b70ca2" gracePeriod=2
Nov 23 15:48:35 crc kubenswrapper[5050]: I1123 15:48:35.415315 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-j7rp8"
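In the startup-duration entry for redhat-marketplace-j7rp8 above, podStartE2EDuration (3.833904578s) spans pod creation to observed running, while podStartSLOduration (2.34120914s) excludes image pulling; the difference should equal lastFinishedPulling minus firstStartedPulling. A quick check in Go against the logged timestamps:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Layout matching Go's default time.Time string form used in the log.
	const layout = "2006-01-02 15:04:05.999999999 -0700 MST"
	started, _ := time.Parse(layout, "2025-11-23 15:48:22.768086768 +0000 UTC")
	finished, _ := time.Parse(layout, "2025-11-23 15:48:24.260782196 +0000 UTC")

	pull := finished.Sub(started)
	e2e := 3833904578 * time.Nanosecond // podStartE2EDuration="3.833904578s"
	slo := 2341209140 * time.Nanosecond // podStartSLOduration=2.34120914

	fmt.Println("image pull took:", pull)   // ~1.492695428s
	fmt.Println("e2e minus slo: ", e2e-slo) // ~1.492695438s, i.e. the pull time
}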
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-j7rp8" Nov 23 15:48:35 crc kubenswrapper[5050]: I1123 15:48:35.525593 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/656b241f-da87-4616-ad93-a22876a5f5e0-utilities\") pod \"656b241f-da87-4616-ad93-a22876a5f5e0\" (UID: \"656b241f-da87-4616-ad93-a22876a5f5e0\") " Nov 23 15:48:35 crc kubenswrapper[5050]: I1123 15:48:35.525995 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6xtzq\" (UniqueName: \"kubernetes.io/projected/656b241f-da87-4616-ad93-a22876a5f5e0-kube-api-access-6xtzq\") pod \"656b241f-da87-4616-ad93-a22876a5f5e0\" (UID: \"656b241f-da87-4616-ad93-a22876a5f5e0\") " Nov 23 15:48:35 crc kubenswrapper[5050]: I1123 15:48:35.526086 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/656b241f-da87-4616-ad93-a22876a5f5e0-catalog-content\") pod \"656b241f-da87-4616-ad93-a22876a5f5e0\" (UID: \"656b241f-da87-4616-ad93-a22876a5f5e0\") " Nov 23 15:48:35 crc kubenswrapper[5050]: I1123 15:48:35.527670 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/656b241f-da87-4616-ad93-a22876a5f5e0-utilities" (OuterVolumeSpecName: "utilities") pod "656b241f-da87-4616-ad93-a22876a5f5e0" (UID: "656b241f-da87-4616-ad93-a22876a5f5e0"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 15:48:35 crc kubenswrapper[5050]: I1123 15:48:35.531175 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/656b241f-da87-4616-ad93-a22876a5f5e0-kube-api-access-6xtzq" (OuterVolumeSpecName: "kube-api-access-6xtzq") pod "656b241f-da87-4616-ad93-a22876a5f5e0" (UID: "656b241f-da87-4616-ad93-a22876a5f5e0"). InnerVolumeSpecName "kube-api-access-6xtzq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:48:35 crc kubenswrapper[5050]: I1123 15:48:35.558801 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/656b241f-da87-4616-ad93-a22876a5f5e0-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "656b241f-da87-4616-ad93-a22876a5f5e0" (UID: "656b241f-da87-4616-ad93-a22876a5f5e0"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 15:48:35 crc kubenswrapper[5050]: I1123 15:48:35.628507 5050 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/656b241f-da87-4616-ad93-a22876a5f5e0-utilities\") on node \"crc\" DevicePath \"\"" Nov 23 15:48:35 crc kubenswrapper[5050]: I1123 15:48:35.628597 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6xtzq\" (UniqueName: \"kubernetes.io/projected/656b241f-da87-4616-ad93-a22876a5f5e0-kube-api-access-6xtzq\") on node \"crc\" DevicePath \"\"" Nov 23 15:48:35 crc kubenswrapper[5050]: I1123 15:48:35.628612 5050 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/656b241f-da87-4616-ad93-a22876a5f5e0-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 23 15:48:35 crc kubenswrapper[5050]: I1123 15:48:35.916585 5050 generic.go:334] "Generic (PLEG): container finished" podID="656b241f-da87-4616-ad93-a22876a5f5e0" containerID="a56148089dc384dbf07ce8bffff3c3efc4a14b9f930f9e528c10c96149b70ca2" exitCode=0 Nov 23 15:48:35 crc kubenswrapper[5050]: I1123 15:48:35.916708 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-j7rp8" Nov 23 15:48:35 crc kubenswrapper[5050]: I1123 15:48:35.916689 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-j7rp8" event={"ID":"656b241f-da87-4616-ad93-a22876a5f5e0","Type":"ContainerDied","Data":"a56148089dc384dbf07ce8bffff3c3efc4a14b9f930f9e528c10c96149b70ca2"} Nov 23 15:48:35 crc kubenswrapper[5050]: I1123 15:48:35.916914 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-j7rp8" event={"ID":"656b241f-da87-4616-ad93-a22876a5f5e0","Type":"ContainerDied","Data":"fbc6bbc370e1261b1d3b484687b96f2c86e52ec4161d8d14881be32936e04e5a"} Nov 23 15:48:35 crc kubenswrapper[5050]: I1123 15:48:35.916952 5050 scope.go:117] "RemoveContainer" containerID="a56148089dc384dbf07ce8bffff3c3efc4a14b9f930f9e528c10c96149b70ca2" Nov 23 15:48:35 crc kubenswrapper[5050]: I1123 15:48:35.959732 5050 scope.go:117] "RemoveContainer" containerID="9dc9d3df3ff0fee66236c6e0e8fff343bdef9d8e2aae505eb9a78f0cbf232def" Nov 23 15:48:35 crc kubenswrapper[5050]: I1123 15:48:35.967060 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-j7rp8"] Nov 23 15:48:35 crc kubenswrapper[5050]: I1123 15:48:35.976177 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-j7rp8"] Nov 23 15:48:35 crc kubenswrapper[5050]: I1123 15:48:35.995580 5050 scope.go:117] "RemoveContainer" containerID="2deb69cf118e06d61a12dabe983a0a3204a7967f1ecb581797eb5b25b927f735" Nov 23 15:48:36 crc kubenswrapper[5050]: I1123 15:48:36.031750 5050 scope.go:117] "RemoveContainer" containerID="a56148089dc384dbf07ce8bffff3c3efc4a14b9f930f9e528c10c96149b70ca2" Nov 23 15:48:36 crc kubenswrapper[5050]: E1123 15:48:36.032436 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a56148089dc384dbf07ce8bffff3c3efc4a14b9f930f9e528c10c96149b70ca2\": container with ID starting with a56148089dc384dbf07ce8bffff3c3efc4a14b9f930f9e528c10c96149b70ca2 not found: ID does not exist" containerID="a56148089dc384dbf07ce8bffff3c3efc4a14b9f930f9e528c10c96149b70ca2" Nov 23 15:48:36 crc kubenswrapper[5050]: I1123 15:48:36.032493 5050 
Nov 23 15:48:36 crc kubenswrapper[5050]: I1123 15:48:36.032493 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a56148089dc384dbf07ce8bffff3c3efc4a14b9f930f9e528c10c96149b70ca2"} err="failed to get container status \"a56148089dc384dbf07ce8bffff3c3efc4a14b9f930f9e528c10c96149b70ca2\": rpc error: code = NotFound desc = could not find container \"a56148089dc384dbf07ce8bffff3c3efc4a14b9f930f9e528c10c96149b70ca2\": container with ID starting with a56148089dc384dbf07ce8bffff3c3efc4a14b9f930f9e528c10c96149b70ca2 not found: ID does not exist"
Nov 23 15:48:36 crc kubenswrapper[5050]: I1123 15:48:36.032524 5050 scope.go:117] "RemoveContainer" containerID="9dc9d3df3ff0fee66236c6e0e8fff343bdef9d8e2aae505eb9a78f0cbf232def"
Nov 23 15:48:36 crc kubenswrapper[5050]: E1123 15:48:36.033132 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9dc9d3df3ff0fee66236c6e0e8fff343bdef9d8e2aae505eb9a78f0cbf232def\": container with ID starting with 9dc9d3df3ff0fee66236c6e0e8fff343bdef9d8e2aae505eb9a78f0cbf232def not found: ID does not exist" containerID="9dc9d3df3ff0fee66236c6e0e8fff343bdef9d8e2aae505eb9a78f0cbf232def"
Nov 23 15:48:36 crc kubenswrapper[5050]: I1123 15:48:36.033351 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9dc9d3df3ff0fee66236c6e0e8fff343bdef9d8e2aae505eb9a78f0cbf232def"} err="failed to get container status \"9dc9d3df3ff0fee66236c6e0e8fff343bdef9d8e2aae505eb9a78f0cbf232def\": rpc error: code = NotFound desc = could not find container \"9dc9d3df3ff0fee66236c6e0e8fff343bdef9d8e2aae505eb9a78f0cbf232def\": container with ID starting with 9dc9d3df3ff0fee66236c6e0e8fff343bdef9d8e2aae505eb9a78f0cbf232def not found: ID does not exist"
Nov 23 15:48:36 crc kubenswrapper[5050]: I1123 15:48:36.033410 5050 scope.go:117] "RemoveContainer" containerID="2deb69cf118e06d61a12dabe983a0a3204a7967f1ecb581797eb5b25b927f735"
Nov 23 15:48:36 crc kubenswrapper[5050]: E1123 15:48:36.033837 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2deb69cf118e06d61a12dabe983a0a3204a7967f1ecb581797eb5b25b927f735\": container with ID starting with 2deb69cf118e06d61a12dabe983a0a3204a7967f1ecb581797eb5b25b927f735 not found: ID does not exist" containerID="2deb69cf118e06d61a12dabe983a0a3204a7967f1ecb581797eb5b25b927f735"
Nov 23 15:48:36 crc kubenswrapper[5050]: I1123 15:48:36.033871 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2deb69cf118e06d61a12dabe983a0a3204a7967f1ecb581797eb5b25b927f735"} err="failed to get container status \"2deb69cf118e06d61a12dabe983a0a3204a7967f1ecb581797eb5b25b927f735\": rpc error: code = NotFound desc = could not find container \"2deb69cf118e06d61a12dabe983a0a3204a7967f1ecb581797eb5b25b927f735\": container with ID starting with 2deb69cf118e06d61a12dabe983a0a3204a7967f1ecb581797eb5b25b927f735 not found: ID does not exist"
Nov 23 15:48:37 crc kubenswrapper[5050]: I1123 15:48:37.564204 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="656b241f-da87-4616-ad93-a22876a5f5e0" path="/var/lib/kubelet/pods/656b241f-da87-4616-ad93-a22876a5f5e0/volumes"
Nov 23 15:48:45 crc kubenswrapper[5050]: I1123 15:48:45.085932 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-6xslw"]
podUID="656b241f-da87-4616-ad93-a22876a5f5e0" containerName="extract-content" Nov 23 15:48:45 crc kubenswrapper[5050]: I1123 15:48:45.087694 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="656b241f-da87-4616-ad93-a22876a5f5e0" containerName="extract-content" Nov 23 15:48:45 crc kubenswrapper[5050]: E1123 15:48:45.087725 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="656b241f-da87-4616-ad93-a22876a5f5e0" containerName="registry-server" Nov 23 15:48:45 crc kubenswrapper[5050]: I1123 15:48:45.087738 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="656b241f-da87-4616-ad93-a22876a5f5e0" containerName="registry-server" Nov 23 15:48:45 crc kubenswrapper[5050]: E1123 15:48:45.087790 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="656b241f-da87-4616-ad93-a22876a5f5e0" containerName="extract-utilities" Nov 23 15:48:45 crc kubenswrapper[5050]: I1123 15:48:45.087807 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="656b241f-da87-4616-ad93-a22876a5f5e0" containerName="extract-utilities" Nov 23 15:48:45 crc kubenswrapper[5050]: I1123 15:48:45.088067 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="656b241f-da87-4616-ad93-a22876a5f5e0" containerName="registry-server" Nov 23 15:48:45 crc kubenswrapper[5050]: I1123 15:48:45.090560 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-6xslw" Nov 23 15:48:45 crc kubenswrapper[5050]: I1123 15:48:45.120384 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7a68616e-b43e-42b6-89bf-cd875ce46118-utilities\") pod \"community-operators-6xslw\" (UID: \"7a68616e-b43e-42b6-89bf-cd875ce46118\") " pod="openshift-marketplace/community-operators-6xslw" Nov 23 15:48:45 crc kubenswrapper[5050]: I1123 15:48:45.120523 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7a68616e-b43e-42b6-89bf-cd875ce46118-catalog-content\") pod \"community-operators-6xslw\" (UID: \"7a68616e-b43e-42b6-89bf-cd875ce46118\") " pod="openshift-marketplace/community-operators-6xslw" Nov 23 15:48:45 crc kubenswrapper[5050]: I1123 15:48:45.120583 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c6mkq\" (UniqueName: \"kubernetes.io/projected/7a68616e-b43e-42b6-89bf-cd875ce46118-kube-api-access-c6mkq\") pod \"community-operators-6xslw\" (UID: \"7a68616e-b43e-42b6-89bf-cd875ce46118\") " pod="openshift-marketplace/community-operators-6xslw" Nov 23 15:48:45 crc kubenswrapper[5050]: I1123 15:48:45.124376 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-6xslw"] Nov 23 15:48:45 crc kubenswrapper[5050]: I1123 15:48:45.222864 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7a68616e-b43e-42b6-89bf-cd875ce46118-catalog-content\") pod \"community-operators-6xslw\" (UID: \"7a68616e-b43e-42b6-89bf-cd875ce46118\") " pod="openshift-marketplace/community-operators-6xslw" Nov 23 15:48:45 crc kubenswrapper[5050]: I1123 15:48:45.222926 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c6mkq\" (UniqueName: \"kubernetes.io/projected/7a68616e-b43e-42b6-89bf-cd875ce46118-kube-api-access-c6mkq\") 
pod \"community-operators-6xslw\" (UID: \"7a68616e-b43e-42b6-89bf-cd875ce46118\") " pod="openshift-marketplace/community-operators-6xslw" Nov 23 15:48:45 crc kubenswrapper[5050]: I1123 15:48:45.223059 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7a68616e-b43e-42b6-89bf-cd875ce46118-utilities\") pod \"community-operators-6xslw\" (UID: \"7a68616e-b43e-42b6-89bf-cd875ce46118\") " pod="openshift-marketplace/community-operators-6xslw" Nov 23 15:48:45 crc kubenswrapper[5050]: I1123 15:48:45.223823 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7a68616e-b43e-42b6-89bf-cd875ce46118-utilities\") pod \"community-operators-6xslw\" (UID: \"7a68616e-b43e-42b6-89bf-cd875ce46118\") " pod="openshift-marketplace/community-operators-6xslw" Nov 23 15:48:45 crc kubenswrapper[5050]: I1123 15:48:45.223851 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7a68616e-b43e-42b6-89bf-cd875ce46118-catalog-content\") pod \"community-operators-6xslw\" (UID: \"7a68616e-b43e-42b6-89bf-cd875ce46118\") " pod="openshift-marketplace/community-operators-6xslw" Nov 23 15:48:45 crc kubenswrapper[5050]: I1123 15:48:45.251020 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c6mkq\" (UniqueName: \"kubernetes.io/projected/7a68616e-b43e-42b6-89bf-cd875ce46118-kube-api-access-c6mkq\") pod \"community-operators-6xslw\" (UID: \"7a68616e-b43e-42b6-89bf-cd875ce46118\") " pod="openshift-marketplace/community-operators-6xslw" Nov 23 15:48:45 crc kubenswrapper[5050]: I1123 15:48:45.438516 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-6xslw" Nov 23 15:48:45 crc kubenswrapper[5050]: I1123 15:48:45.979552 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-6xslw"] Nov 23 15:48:46 crc kubenswrapper[5050]: I1123 15:48:46.031718 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6xslw" event={"ID":"7a68616e-b43e-42b6-89bf-cd875ce46118","Type":"ContainerStarted","Data":"d92e5e304ae2140f1e647c6b2ae06a527da82a22f8f3919ae2e518474fe1cf44"} Nov 23 15:48:47 crc kubenswrapper[5050]: I1123 15:48:47.041870 5050 generic.go:334] "Generic (PLEG): container finished" podID="7a68616e-b43e-42b6-89bf-cd875ce46118" containerID="e1c95f03aeca628f898a01c421e3e168a48c4356206312af5d853deedda5544a" exitCode=0 Nov 23 15:48:47 crc kubenswrapper[5050]: I1123 15:48:47.041925 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6xslw" event={"ID":"7a68616e-b43e-42b6-89bf-cd875ce46118","Type":"ContainerDied","Data":"e1c95f03aeca628f898a01c421e3e168a48c4356206312af5d853deedda5544a"} Nov 23 15:48:48 crc kubenswrapper[5050]: I1123 15:48:48.076701 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6xslw" event={"ID":"7a68616e-b43e-42b6-89bf-cd875ce46118","Type":"ContainerStarted","Data":"0de48d801d968ade7552146a4c5bd6513501e382d1cea211425bdc91bc9d7513"} Nov 23 15:48:49 crc kubenswrapper[5050]: I1123 15:48:49.088382 5050 generic.go:334] "Generic (PLEG): container finished" podID="7a68616e-b43e-42b6-89bf-cd875ce46118" containerID="0de48d801d968ade7552146a4c5bd6513501e382d1cea211425bdc91bc9d7513" exitCode=0 Nov 23 15:48:49 crc kubenswrapper[5050]: I1123 15:48:49.088600 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6xslw" event={"ID":"7a68616e-b43e-42b6-89bf-cd875ce46118","Type":"ContainerDied","Data":"0de48d801d968ade7552146a4c5bd6513501e382d1cea211425bdc91bc9d7513"} Nov 23 15:48:50 crc kubenswrapper[5050]: I1123 15:48:50.101608 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6xslw" event={"ID":"7a68616e-b43e-42b6-89bf-cd875ce46118","Type":"ContainerStarted","Data":"02f6f23e2631fbd8b3303599e39748d4cca626ba6e8aec6b156b591421554697"} Nov 23 15:48:50 crc kubenswrapper[5050]: I1123 15:48:50.129230 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-6xslw" podStartSLOduration=2.668400375 podStartE2EDuration="5.129199675s" podCreationTimestamp="2025-11-23 15:48:45 +0000 UTC" firstStartedPulling="2025-11-23 15:48:47.044100222 +0000 UTC m=+4022.211096697" lastFinishedPulling="2025-11-23 15:48:49.504899502 +0000 UTC m=+4024.671895997" observedRunningTime="2025-11-23 15:48:50.121297861 +0000 UTC m=+4025.288294386" watchObservedRunningTime="2025-11-23 15:48:50.129199675 +0000 UTC m=+4025.296196200" Nov 23 15:48:55 crc kubenswrapper[5050]: I1123 15:48:55.439403 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-6xslw" Nov 23 15:48:55 crc kubenswrapper[5050]: I1123 15:48:55.440125 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-6xslw" Nov 23 15:48:55 crc kubenswrapper[5050]: I1123 15:48:55.516656 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" 
pod="openshift-marketplace/community-operators-6xslw" Nov 23 15:48:56 crc kubenswrapper[5050]: I1123 15:48:56.235574 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-6xslw" Nov 23 15:48:57 crc kubenswrapper[5050]: I1123 15:48:57.264505 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-6xslw"] Nov 23 15:48:58 crc kubenswrapper[5050]: I1123 15:48:58.191315 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-6xslw" podUID="7a68616e-b43e-42b6-89bf-cd875ce46118" containerName="registry-server" containerID="cri-o://02f6f23e2631fbd8b3303599e39748d4cca626ba6e8aec6b156b591421554697" gracePeriod=2 Nov 23 15:48:58 crc kubenswrapper[5050]: I1123 15:48:58.630233 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-6xslw" Nov 23 15:48:58 crc kubenswrapper[5050]: I1123 15:48:58.653355 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c6mkq\" (UniqueName: \"kubernetes.io/projected/7a68616e-b43e-42b6-89bf-cd875ce46118-kube-api-access-c6mkq\") pod \"7a68616e-b43e-42b6-89bf-cd875ce46118\" (UID: \"7a68616e-b43e-42b6-89bf-cd875ce46118\") " Nov 23 15:48:58 crc kubenswrapper[5050]: I1123 15:48:58.653525 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7a68616e-b43e-42b6-89bf-cd875ce46118-utilities\") pod \"7a68616e-b43e-42b6-89bf-cd875ce46118\" (UID: \"7a68616e-b43e-42b6-89bf-cd875ce46118\") " Nov 23 15:48:58 crc kubenswrapper[5050]: I1123 15:48:58.653588 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7a68616e-b43e-42b6-89bf-cd875ce46118-catalog-content\") pod \"7a68616e-b43e-42b6-89bf-cd875ce46118\" (UID: \"7a68616e-b43e-42b6-89bf-cd875ce46118\") " Nov 23 15:48:58 crc kubenswrapper[5050]: I1123 15:48:58.655797 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7a68616e-b43e-42b6-89bf-cd875ce46118-utilities" (OuterVolumeSpecName: "utilities") pod "7a68616e-b43e-42b6-89bf-cd875ce46118" (UID: "7a68616e-b43e-42b6-89bf-cd875ce46118"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 15:48:58 crc kubenswrapper[5050]: I1123 15:48:58.666864 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7a68616e-b43e-42b6-89bf-cd875ce46118-kube-api-access-c6mkq" (OuterVolumeSpecName: "kube-api-access-c6mkq") pod "7a68616e-b43e-42b6-89bf-cd875ce46118" (UID: "7a68616e-b43e-42b6-89bf-cd875ce46118"). InnerVolumeSpecName "kube-api-access-c6mkq". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:48:58 crc kubenswrapper[5050]: I1123 15:48:58.755187 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c6mkq\" (UniqueName: \"kubernetes.io/projected/7a68616e-b43e-42b6-89bf-cd875ce46118-kube-api-access-c6mkq\") on node \"crc\" DevicePath \"\"" Nov 23 15:48:58 crc kubenswrapper[5050]: I1123 15:48:58.755252 5050 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7a68616e-b43e-42b6-89bf-cd875ce46118-utilities\") on node \"crc\" DevicePath \"\"" Nov 23 15:48:58 crc kubenswrapper[5050]: I1123 15:48:58.822207 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7a68616e-b43e-42b6-89bf-cd875ce46118-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "7a68616e-b43e-42b6-89bf-cd875ce46118" (UID: "7a68616e-b43e-42b6-89bf-cd875ce46118"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 15:48:58 crc kubenswrapper[5050]: I1123 15:48:58.855867 5050 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7a68616e-b43e-42b6-89bf-cd875ce46118-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 23 15:48:59 crc kubenswrapper[5050]: I1123 15:48:59.217712 5050 generic.go:334] "Generic (PLEG): container finished" podID="7a68616e-b43e-42b6-89bf-cd875ce46118" containerID="02f6f23e2631fbd8b3303599e39748d4cca626ba6e8aec6b156b591421554697" exitCode=0 Nov 23 15:48:59 crc kubenswrapper[5050]: I1123 15:48:59.217793 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-6xslw" Nov 23 15:48:59 crc kubenswrapper[5050]: I1123 15:48:59.217817 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6xslw" event={"ID":"7a68616e-b43e-42b6-89bf-cd875ce46118","Type":"ContainerDied","Data":"02f6f23e2631fbd8b3303599e39748d4cca626ba6e8aec6b156b591421554697"} Nov 23 15:48:59 crc kubenswrapper[5050]: I1123 15:48:59.218403 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6xslw" event={"ID":"7a68616e-b43e-42b6-89bf-cd875ce46118","Type":"ContainerDied","Data":"d92e5e304ae2140f1e647c6b2ae06a527da82a22f8f3919ae2e518474fe1cf44"} Nov 23 15:48:59 crc kubenswrapper[5050]: I1123 15:48:59.218503 5050 scope.go:117] "RemoveContainer" containerID="02f6f23e2631fbd8b3303599e39748d4cca626ba6e8aec6b156b591421554697" Nov 23 15:48:59 crc kubenswrapper[5050]: I1123 15:48:59.225025 5050 patch_prober.go:28] interesting pod/machine-config-daemon-hlrlq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 23 15:48:59 crc kubenswrapper[5050]: I1123 15:48:59.225131 5050 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 23 15:48:59 crc kubenswrapper[5050]: I1123 15:48:59.225215 5050 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" Nov 23 
Nov 23 15:48:59 crc kubenswrapper[5050]: I1123 15:48:59.226664 5050 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"c018858fba0ccf920c591a468e0dc80298af3eae62c5b69c8c55966a7a98893b"} pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 23 15:48:59 crc kubenswrapper[5050]: I1123 15:48:59.226813 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" containerID="cri-o://c018858fba0ccf920c591a468e0dc80298af3eae62c5b69c8c55966a7a98893b" gracePeriod=600
Nov 23 15:48:59 crc kubenswrapper[5050]: I1123 15:48:59.273506 5050 scope.go:117] "RemoveContainer" containerID="0de48d801d968ade7552146a4c5bd6513501e382d1cea211425bdc91bc9d7513"
Nov 23 15:48:59 crc kubenswrapper[5050]: I1123 15:48:59.292155 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-6xslw"]
Nov 23 15:48:59 crc kubenswrapper[5050]: I1123 15:48:59.302727 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-6xslw"]
Nov 23 15:48:59 crc kubenswrapper[5050]: I1123 15:48:59.309893 5050 scope.go:117] "RemoveContainer" containerID="e1c95f03aeca628f898a01c421e3e168a48c4356206312af5d853deedda5544a"
Nov 23 15:48:59 crc kubenswrapper[5050]: I1123 15:48:59.339634 5050 scope.go:117] "RemoveContainer" containerID="02f6f23e2631fbd8b3303599e39748d4cca626ba6e8aec6b156b591421554697"
Nov 23 15:48:59 crc kubenswrapper[5050]: E1123 15:48:59.340567 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"02f6f23e2631fbd8b3303599e39748d4cca626ba6e8aec6b156b591421554697\": container with ID starting with 02f6f23e2631fbd8b3303599e39748d4cca626ba6e8aec6b156b591421554697 not found: ID does not exist" containerID="02f6f23e2631fbd8b3303599e39748d4cca626ba6e8aec6b156b591421554697"
Nov 23 15:48:59 crc kubenswrapper[5050]: I1123 15:48:59.340638 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"02f6f23e2631fbd8b3303599e39748d4cca626ba6e8aec6b156b591421554697"} err="failed to get container status \"02f6f23e2631fbd8b3303599e39748d4cca626ba6e8aec6b156b591421554697\": rpc error: code = NotFound desc = could not find container \"02f6f23e2631fbd8b3303599e39748d4cca626ba6e8aec6b156b591421554697\": container with ID starting with 02f6f23e2631fbd8b3303599e39748d4cca626ba6e8aec6b156b591421554697 not found: ID does not exist"
Nov 23 15:48:59 crc kubenswrapper[5050]: I1123 15:48:59.340686 5050 scope.go:117] "RemoveContainer" containerID="0de48d801d968ade7552146a4c5bd6513501e382d1cea211425bdc91bc9d7513"
Nov 23 15:48:59 crc kubenswrapper[5050]: E1123 15:48:59.341135 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0de48d801d968ade7552146a4c5bd6513501e382d1cea211425bdc91bc9d7513\": container with ID starting with 0de48d801d968ade7552146a4c5bd6513501e382d1cea211425bdc91bc9d7513 not found: ID does not exist" containerID="0de48d801d968ade7552146a4c5bd6513501e382d1cea211425bdc91bc9d7513"
containerID={"Type":"cri-o","ID":"0de48d801d968ade7552146a4c5bd6513501e382d1cea211425bdc91bc9d7513"} err="failed to get container status \"0de48d801d968ade7552146a4c5bd6513501e382d1cea211425bdc91bc9d7513\": rpc error: code = NotFound desc = could not find container \"0de48d801d968ade7552146a4c5bd6513501e382d1cea211425bdc91bc9d7513\": container with ID starting with 0de48d801d968ade7552146a4c5bd6513501e382d1cea211425bdc91bc9d7513 not found: ID does not exist" Nov 23 15:48:59 crc kubenswrapper[5050]: I1123 15:48:59.341215 5050 scope.go:117] "RemoveContainer" containerID="e1c95f03aeca628f898a01c421e3e168a48c4356206312af5d853deedda5544a" Nov 23 15:48:59 crc kubenswrapper[5050]: E1123 15:48:59.341951 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e1c95f03aeca628f898a01c421e3e168a48c4356206312af5d853deedda5544a\": container with ID starting with e1c95f03aeca628f898a01c421e3e168a48c4356206312af5d853deedda5544a not found: ID does not exist" containerID="e1c95f03aeca628f898a01c421e3e168a48c4356206312af5d853deedda5544a" Nov 23 15:48:59 crc kubenswrapper[5050]: I1123 15:48:59.341997 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e1c95f03aeca628f898a01c421e3e168a48c4356206312af5d853deedda5544a"} err="failed to get container status \"e1c95f03aeca628f898a01c421e3e168a48c4356206312af5d853deedda5544a\": rpc error: code = NotFound desc = could not find container \"e1c95f03aeca628f898a01c421e3e168a48c4356206312af5d853deedda5544a\": container with ID starting with e1c95f03aeca628f898a01c421e3e168a48c4356206312af5d853deedda5544a not found: ID does not exist" Nov 23 15:48:59 crc kubenswrapper[5050]: I1123 15:48:59.564706 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7a68616e-b43e-42b6-89bf-cd875ce46118" path="/var/lib/kubelet/pods/7a68616e-b43e-42b6-89bf-cd875ce46118/volumes" Nov 23 15:49:00 crc kubenswrapper[5050]: I1123 15:49:00.231854 5050 generic.go:334] "Generic (PLEG): container finished" podID="1d998909-9470-47ef-87e8-b34f0473682f" containerID="c018858fba0ccf920c591a468e0dc80298af3eae62c5b69c8c55966a7a98893b" exitCode=0 Nov 23 15:49:00 crc kubenswrapper[5050]: I1123 15:49:00.231989 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" event={"ID":"1d998909-9470-47ef-87e8-b34f0473682f","Type":"ContainerDied","Data":"c018858fba0ccf920c591a468e0dc80298af3eae62c5b69c8c55966a7a98893b"} Nov 23 15:49:00 crc kubenswrapper[5050]: I1123 15:49:00.232598 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" event={"ID":"1d998909-9470-47ef-87e8-b34f0473682f","Type":"ContainerStarted","Data":"084b3acf48b431cddb110c9e00f2544e092eca446a3ace8e3ff269a7e06eedfb"} Nov 23 15:49:00 crc kubenswrapper[5050]: I1123 15:49:00.232637 5050 scope.go:117] "RemoveContainer" containerID="e80820057d7c13424a9a485b7e6bd85579aafd40a2db7241b0ec47e8ef9054e7" Nov 23 15:50:59 crc kubenswrapper[5050]: I1123 15:50:59.224577 5050 patch_prober.go:28] interesting pod/machine-config-daemon-hlrlq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 23 15:50:59 crc kubenswrapper[5050]: I1123 15:50:59.225515 5050 prober.go:107] "Probe failed" probeType="Liveness" 
pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 23 15:51:29 crc kubenswrapper[5050]: I1123 15:51:29.229157 5050 patch_prober.go:28] interesting pod/machine-config-daemon-hlrlq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 23 15:51:29 crc kubenswrapper[5050]: I1123 15:51:29.230282 5050 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 23 15:51:59 crc kubenswrapper[5050]: I1123 15:51:59.225036 5050 patch_prober.go:28] interesting pod/machine-config-daemon-hlrlq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 23 15:51:59 crc kubenswrapper[5050]: I1123 15:51:59.226027 5050 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 23 15:51:59 crc kubenswrapper[5050]: I1123 15:51:59.226108 5050 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" Nov 23 15:51:59 crc kubenswrapper[5050]: I1123 15:51:59.226931 5050 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"084b3acf48b431cddb110c9e00f2544e092eca446a3ace8e3ff269a7e06eedfb"} pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 23 15:51:59 crc kubenswrapper[5050]: I1123 15:51:59.227038 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" containerID="cri-o://084b3acf48b431cddb110c9e00f2544e092eca446a3ace8e3ff269a7e06eedfb" gracePeriod=600 Nov 23 15:51:59 crc kubenswrapper[5050]: E1123 15:51:59.361420 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 15:51:59 crc kubenswrapper[5050]: I1123 15:51:59.377231 5050 generic.go:334] "Generic (PLEG): container finished" podID="1d998909-9470-47ef-87e8-b34f0473682f" containerID="084b3acf48b431cddb110c9e00f2544e092eca446a3ace8e3ff269a7e06eedfb" exitCode=0 Nov 
Nov 23 15:51:59 crc kubenswrapper[5050]: I1123 15:51:59.377296 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" event={"ID":"1d998909-9470-47ef-87e8-b34f0473682f","Type":"ContainerDied","Data":"084b3acf48b431cddb110c9e00f2544e092eca446a3ace8e3ff269a7e06eedfb"}
Nov 23 15:51:59 crc kubenswrapper[5050]: I1123 15:51:59.377345 5050 scope.go:117] "RemoveContainer" containerID="c018858fba0ccf920c591a468e0dc80298af3eae62c5b69c8c55966a7a98893b"
Nov 23 15:51:59 crc kubenswrapper[5050]: I1123 15:51:59.379586 5050 scope.go:117] "RemoveContainer" containerID="084b3acf48b431cddb110c9e00f2544e092eca446a3ace8e3ff269a7e06eedfb"
Nov 23 15:51:59 crc kubenswrapper[5050]: E1123 15:51:59.380243 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f"
Nov 23 15:52:13 crc kubenswrapper[5050]: I1123 15:52:13.549195 5050 scope.go:117] "RemoveContainer" containerID="084b3acf48b431cddb110c9e00f2544e092eca446a3ace8e3ff269a7e06eedfb"
Nov 23 15:52:13 crc kubenswrapper[5050]: E1123 15:52:13.550659 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f"
Nov 23 15:52:28 crc kubenswrapper[5050]: I1123 15:52:28.549061 5050 scope.go:117] "RemoveContainer" containerID="084b3acf48b431cddb110c9e00f2544e092eca446a3ace8e3ff269a7e06eedfb"
Nov 23 15:52:28 crc kubenswrapper[5050]: E1123 15:52:28.550744 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f"
Nov 23 15:52:40 crc kubenswrapper[5050]: I1123 15:52:40.550260 5050 scope.go:117] "RemoveContainer" containerID="084b3acf48b431cddb110c9e00f2544e092eca446a3ace8e3ff269a7e06eedfb"
Nov 23 15:52:40 crc kubenswrapper[5050]: E1123 15:52:40.551531 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f"
Nov 23 15:52:51 crc kubenswrapper[5050]: I1123 15:52:51.550949 5050 scope.go:117] "RemoveContainer" containerID="084b3acf48b431cddb110c9e00f2544e092eca446a3ace8e3ff269a7e06eedfb"
\"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 15:53:04 crc kubenswrapper[5050]: I1123 15:53:04.548959 5050 scope.go:117] "RemoveContainer" containerID="084b3acf48b431cddb110c9e00f2544e092eca446a3ace8e3ff269a7e06eedfb" Nov 23 15:53:04 crc kubenswrapper[5050]: E1123 15:53:04.550206 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 15:53:15 crc kubenswrapper[5050]: I1123 15:53:15.557589 5050 scope.go:117] "RemoveContainer" containerID="084b3acf48b431cddb110c9e00f2544e092eca446a3ace8e3ff269a7e06eedfb" Nov 23 15:53:15 crc kubenswrapper[5050]: E1123 15:53:15.558661 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 15:53:28 crc kubenswrapper[5050]: I1123 15:53:28.548856 5050 scope.go:117] "RemoveContainer" containerID="084b3acf48b431cddb110c9e00f2544e092eca446a3ace8e3ff269a7e06eedfb" Nov 23 15:53:28 crc kubenswrapper[5050]: E1123 15:53:28.550367 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 15:53:41 crc kubenswrapper[5050]: I1123 15:53:41.549654 5050 scope.go:117] "RemoveContainer" containerID="084b3acf48b431cddb110c9e00f2544e092eca446a3ace8e3ff269a7e06eedfb" Nov 23 15:53:41 crc kubenswrapper[5050]: E1123 15:53:41.550952 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 15:53:52 crc kubenswrapper[5050]: I1123 15:53:52.549690 5050 scope.go:117] "RemoveContainer" containerID="084b3acf48b431cddb110c9e00f2544e092eca446a3ace8e3ff269a7e06eedfb" Nov 23 15:53:52 crc kubenswrapper[5050]: E1123 15:53:52.551053 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
Nov 23 15:53:52 crc kubenswrapper[5050]: E1123 15:53:52.551053 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f"
Nov 23 15:54:05 crc kubenswrapper[5050]: I1123 15:54:05.760592 5050 scope.go:117] "RemoveContainer" containerID="876cd4814419cfc61230c38796aa2b2f261c3d0d7e31d6e502241fdbdd59c5de"
Nov 23 15:54:05 crc kubenswrapper[5050]: I1123 15:54:05.807775 5050 scope.go:117] "RemoveContainer" containerID="88babc02387315c72f04f6fecc1f77472cd967e67152decaec4d5c0d2d17c471"
Nov 23 15:54:05 crc kubenswrapper[5050]: I1123 15:54:05.854855 5050 scope.go:117] "RemoveContainer" containerID="da278d55a472f9608509c8e1b170d57e3cb633c49f2d082bb43a369bfa63e758"
Nov 23 15:54:06 crc kubenswrapper[5050]: I1123 15:54:06.549575 5050 scope.go:117] "RemoveContainer" containerID="084b3acf48b431cddb110c9e00f2544e092eca446a3ace8e3ff269a7e06eedfb"
Nov 23 15:54:06 crc kubenswrapper[5050]: E1123 15:54:06.550247 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f"
Nov 23 15:54:19 crc kubenswrapper[5050]: I1123 15:54:19.548559 5050 scope.go:117] "RemoveContainer" containerID="084b3acf48b431cddb110c9e00f2544e092eca446a3ace8e3ff269a7e06eedfb"
Nov 23 15:54:19 crc kubenswrapper[5050]: E1123 15:54:19.549662 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f"
Nov 23 15:54:31 crc kubenswrapper[5050]: I1123 15:54:31.549149 5050 scope.go:117] "RemoveContainer" containerID="084b3acf48b431cddb110c9e00f2544e092eca446a3ace8e3ff269a7e06eedfb"
Nov 23 15:54:31 crc kubenswrapper[5050]: E1123 15:54:31.550491 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f"
Nov 23 15:54:44 crc kubenswrapper[5050]: I1123 15:54:44.549403 5050 scope.go:117] "RemoveContainer" containerID="084b3acf48b431cddb110c9e00f2544e092eca446a3ace8e3ff269a7e06eedfb"
Nov 23 15:54:44 crc kubenswrapper[5050]: E1123 15:54:44.550635 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f"
containerID="084b3acf48b431cddb110c9e00f2544e092eca446a3ace8e3ff269a7e06eedfb" Nov 23 15:54:55 crc kubenswrapper[5050]: E1123 15:54:55.559619 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 15:55:07 crc kubenswrapper[5050]: I1123 15:55:07.549305 5050 scope.go:117] "RemoveContainer" containerID="084b3acf48b431cddb110c9e00f2544e092eca446a3ace8e3ff269a7e06eedfb" Nov 23 15:55:07 crc kubenswrapper[5050]: E1123 15:55:07.550489 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 15:55:18 crc kubenswrapper[5050]: I1123 15:55:18.548955 5050 scope.go:117] "RemoveContainer" containerID="084b3acf48b431cddb110c9e00f2544e092eca446a3ace8e3ff269a7e06eedfb" Nov 23 15:55:18 crc kubenswrapper[5050]: E1123 15:55:18.550111 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 15:55:30 crc kubenswrapper[5050]: I1123 15:55:30.548866 5050 scope.go:117] "RemoveContainer" containerID="084b3acf48b431cddb110c9e00f2544e092eca446a3ace8e3ff269a7e06eedfb" Nov 23 15:55:30 crc kubenswrapper[5050]: E1123 15:55:30.550499 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 15:55:32 crc kubenswrapper[5050]: I1123 15:55:32.190907 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["crc-storage/crc-storage-crc-cnd8p"] Nov 23 15:55:32 crc kubenswrapper[5050]: I1123 15:55:32.201758 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["crc-storage/crc-storage-crc-cnd8p"] Nov 23 15:55:32 crc kubenswrapper[5050]: I1123 15:55:32.349440 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["crc-storage/crc-storage-crc-bc9sb"] Nov 23 15:55:32 crc kubenswrapper[5050]: E1123 15:55:32.350105 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7a68616e-b43e-42b6-89bf-cd875ce46118" containerName="extract-content" Nov 23 15:55:32 crc kubenswrapper[5050]: I1123 15:55:32.350147 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="7a68616e-b43e-42b6-89bf-cd875ce46118" containerName="extract-content" Nov 23 15:55:32 crc kubenswrapper[5050]: E1123 15:55:32.350199 5050 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="7a68616e-b43e-42b6-89bf-cd875ce46118" containerName="registry-server" Nov 23 15:55:32 crc kubenswrapper[5050]: I1123 15:55:32.350217 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="7a68616e-b43e-42b6-89bf-cd875ce46118" containerName="registry-server" Nov 23 15:55:32 crc kubenswrapper[5050]: E1123 15:55:32.350250 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7a68616e-b43e-42b6-89bf-cd875ce46118" containerName="extract-utilities" Nov 23 15:55:32 crc kubenswrapper[5050]: I1123 15:55:32.350268 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="7a68616e-b43e-42b6-89bf-cd875ce46118" containerName="extract-utilities" Nov 23 15:55:32 crc kubenswrapper[5050]: I1123 15:55:32.350706 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="7a68616e-b43e-42b6-89bf-cd875ce46118" containerName="registry-server" Nov 23 15:55:32 crc kubenswrapper[5050]: I1123 15:55:32.351832 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-bc9sb" Nov 23 15:55:32 crc kubenswrapper[5050]: I1123 15:55:32.357996 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"crc-storage" Nov 23 15:55:32 crc kubenswrapper[5050]: I1123 15:55:32.359138 5050 reflector.go:368] Caches populated for *v1.Secret from object-"crc-storage"/"crc-storage-dockercfg-c7dg6" Nov 23 15:55:32 crc kubenswrapper[5050]: I1123 15:55:32.359718 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"openshift-service-ca.crt" Nov 23 15:55:32 crc kubenswrapper[5050]: I1123 15:55:32.359747 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"kube-root-ca.crt" Nov 23 15:55:32 crc kubenswrapper[5050]: I1123 15:55:32.361268 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["crc-storage/crc-storage-crc-bc9sb"] Nov 23 15:55:32 crc kubenswrapper[5050]: I1123 15:55:32.507631 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/cf4736a0-f0b8-414d-a15b-8de6b46ed9d3-crc-storage\") pod \"crc-storage-crc-bc9sb\" (UID: \"cf4736a0-f0b8-414d-a15b-8de6b46ed9d3\") " pod="crc-storage/crc-storage-crc-bc9sb" Nov 23 15:55:32 crc kubenswrapper[5050]: I1123 15:55:32.507732 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4jld7\" (UniqueName: \"kubernetes.io/projected/cf4736a0-f0b8-414d-a15b-8de6b46ed9d3-kube-api-access-4jld7\") pod \"crc-storage-crc-bc9sb\" (UID: \"cf4736a0-f0b8-414d-a15b-8de6b46ed9d3\") " pod="crc-storage/crc-storage-crc-bc9sb" Nov 23 15:55:32 crc kubenswrapper[5050]: I1123 15:55:32.507822 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/cf4736a0-f0b8-414d-a15b-8de6b46ed9d3-node-mnt\") pod \"crc-storage-crc-bc9sb\" (UID: \"cf4736a0-f0b8-414d-a15b-8de6b46ed9d3\") " pod="crc-storage/crc-storage-crc-bc9sb" Nov 23 15:55:32 crc kubenswrapper[5050]: I1123 15:55:32.610307 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/cf4736a0-f0b8-414d-a15b-8de6b46ed9d3-crc-storage\") pod \"crc-storage-crc-bc9sb\" (UID: \"cf4736a0-f0b8-414d-a15b-8de6b46ed9d3\") " pod="crc-storage/crc-storage-crc-bc9sb" Nov 23 15:55:32 crc kubenswrapper[5050]: I1123 15:55:32.610436 5050 
Nov 23 15:55:32 crc kubenswrapper[5050]: I1123 15:55:32.610436 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4jld7\" (UniqueName: \"kubernetes.io/projected/cf4736a0-f0b8-414d-a15b-8de6b46ed9d3-kube-api-access-4jld7\") pod \"crc-storage-crc-bc9sb\" (UID: \"cf4736a0-f0b8-414d-a15b-8de6b46ed9d3\") " pod="crc-storage/crc-storage-crc-bc9sb"
Nov 23 15:55:32 crc kubenswrapper[5050]: I1123 15:55:32.610524 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/cf4736a0-f0b8-414d-a15b-8de6b46ed9d3-node-mnt\") pod \"crc-storage-crc-bc9sb\" (UID: \"cf4736a0-f0b8-414d-a15b-8de6b46ed9d3\") " pod="crc-storage/crc-storage-crc-bc9sb"
Nov 23 15:55:32 crc kubenswrapper[5050]: I1123 15:55:32.611135 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/cf4736a0-f0b8-414d-a15b-8de6b46ed9d3-node-mnt\") pod \"crc-storage-crc-bc9sb\" (UID: \"cf4736a0-f0b8-414d-a15b-8de6b46ed9d3\") " pod="crc-storage/crc-storage-crc-bc9sb"
Nov 23 15:55:32 crc kubenswrapper[5050]: I1123 15:55:32.611722 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/cf4736a0-f0b8-414d-a15b-8de6b46ed9d3-crc-storage\") pod \"crc-storage-crc-bc9sb\" (UID: \"cf4736a0-f0b8-414d-a15b-8de6b46ed9d3\") " pod="crc-storage/crc-storage-crc-bc9sb"
Nov 23 15:55:32 crc kubenswrapper[5050]: I1123 15:55:32.651839 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4jld7\" (UniqueName: \"kubernetes.io/projected/cf4736a0-f0b8-414d-a15b-8de6b46ed9d3-kube-api-access-4jld7\") pod \"crc-storage-crc-bc9sb\" (UID: \"cf4736a0-f0b8-414d-a15b-8de6b46ed9d3\") " pod="crc-storage/crc-storage-crc-bc9sb"
Nov 23 15:55:32 crc kubenswrapper[5050]: I1123 15:55:32.691878 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-bc9sb"
Nov 23 15:55:33 crc kubenswrapper[5050]: I1123 15:55:33.204592 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["crc-storage/crc-storage-crc-bc9sb"]
Nov 23 15:55:33 crc kubenswrapper[5050]: I1123 15:55:33.216354 5050 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Nov 23 15:55:33 crc kubenswrapper[5050]: I1123 15:55:33.567395 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a413b4e9-0e2b-4026-a585-cc7931dc856d" path="/var/lib/kubelet/pods/a413b4e9-0e2b-4026-a585-cc7931dc856d/volumes"
Nov 23 15:55:33 crc kubenswrapper[5050]: I1123 15:55:33.724463 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-bc9sb" event={"ID":"cf4736a0-f0b8-414d-a15b-8de6b46ed9d3","Type":"ContainerStarted","Data":"a64f8f9d8b0448a0feee8a87bb42c859dace3a2c537ca5132726f8178ef2621d"}
Nov 23 15:55:35 crc kubenswrapper[5050]: I1123 15:55:35.751612 5050 generic.go:334] "Generic (PLEG): container finished" podID="cf4736a0-f0b8-414d-a15b-8de6b46ed9d3" containerID="cf148f11c2448ed887db614fa53c16566f4b8284694e122f0d289638f341b499" exitCode=0
Nov 23 15:55:35 crc kubenswrapper[5050]: I1123 15:55:35.752224 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-bc9sb" event={"ID":"cf4736a0-f0b8-414d-a15b-8de6b46ed9d3","Type":"ContainerDied","Data":"cf148f11c2448ed887db614fa53c16566f4b8284694e122f0d289638f341b499"}
Need to start a new one" pod="crc-storage/crc-storage-crc-bc9sb" Nov 23 15:55:37 crc kubenswrapper[5050]: I1123 15:55:37.318808 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/cf4736a0-f0b8-414d-a15b-8de6b46ed9d3-crc-storage\") pod \"cf4736a0-f0b8-414d-a15b-8de6b46ed9d3\" (UID: \"cf4736a0-f0b8-414d-a15b-8de6b46ed9d3\") " Nov 23 15:55:37 crc kubenswrapper[5050]: I1123 15:55:37.319020 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4jld7\" (UniqueName: \"kubernetes.io/projected/cf4736a0-f0b8-414d-a15b-8de6b46ed9d3-kube-api-access-4jld7\") pod \"cf4736a0-f0b8-414d-a15b-8de6b46ed9d3\" (UID: \"cf4736a0-f0b8-414d-a15b-8de6b46ed9d3\") " Nov 23 15:55:37 crc kubenswrapper[5050]: I1123 15:55:37.319089 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/cf4736a0-f0b8-414d-a15b-8de6b46ed9d3-node-mnt\") pod \"cf4736a0-f0b8-414d-a15b-8de6b46ed9d3\" (UID: \"cf4736a0-f0b8-414d-a15b-8de6b46ed9d3\") " Nov 23 15:55:37 crc kubenswrapper[5050]: I1123 15:55:37.319849 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/cf4736a0-f0b8-414d-a15b-8de6b46ed9d3-node-mnt" (OuterVolumeSpecName: "node-mnt") pod "cf4736a0-f0b8-414d-a15b-8de6b46ed9d3" (UID: "cf4736a0-f0b8-414d-a15b-8de6b46ed9d3"). InnerVolumeSpecName "node-mnt". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 23 15:55:37 crc kubenswrapper[5050]: I1123 15:55:37.320112 5050 reconciler_common.go:293] "Volume detached for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/cf4736a0-f0b8-414d-a15b-8de6b46ed9d3-node-mnt\") on node \"crc\" DevicePath \"\"" Nov 23 15:55:37 crc kubenswrapper[5050]: I1123 15:55:37.328670 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cf4736a0-f0b8-414d-a15b-8de6b46ed9d3-kube-api-access-4jld7" (OuterVolumeSpecName: "kube-api-access-4jld7") pod "cf4736a0-f0b8-414d-a15b-8de6b46ed9d3" (UID: "cf4736a0-f0b8-414d-a15b-8de6b46ed9d3"). InnerVolumeSpecName "kube-api-access-4jld7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:55:37 crc kubenswrapper[5050]: I1123 15:55:37.341395 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cf4736a0-f0b8-414d-a15b-8de6b46ed9d3-crc-storage" (OuterVolumeSpecName: "crc-storage") pod "cf4736a0-f0b8-414d-a15b-8de6b46ed9d3" (UID: "cf4736a0-f0b8-414d-a15b-8de6b46ed9d3"). InnerVolumeSpecName "crc-storage". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 15:55:37 crc kubenswrapper[5050]: I1123 15:55:37.422905 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4jld7\" (UniqueName: \"kubernetes.io/projected/cf4736a0-f0b8-414d-a15b-8de6b46ed9d3-kube-api-access-4jld7\") on node \"crc\" DevicePath \"\"" Nov 23 15:55:37 crc kubenswrapper[5050]: I1123 15:55:37.422962 5050 reconciler_common.go:293] "Volume detached for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/cf4736a0-f0b8-414d-a15b-8de6b46ed9d3-crc-storage\") on node \"crc\" DevicePath \"\"" Nov 23 15:55:37 crc kubenswrapper[5050]: I1123 15:55:37.776432 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-bc9sb" event={"ID":"cf4736a0-f0b8-414d-a15b-8de6b46ed9d3","Type":"ContainerDied","Data":"a64f8f9d8b0448a0feee8a87bb42c859dace3a2c537ca5132726f8178ef2621d"} Nov 23 15:55:37 crc kubenswrapper[5050]: I1123 15:55:37.776590 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a64f8f9d8b0448a0feee8a87bb42c859dace3a2c537ca5132726f8178ef2621d" Nov 23 15:55:37 crc kubenswrapper[5050]: I1123 15:55:37.776739 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-bc9sb" Nov 23 15:55:39 crc kubenswrapper[5050]: I1123 15:55:39.808523 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["crc-storage/crc-storage-crc-bc9sb"] Nov 23 15:55:39 crc kubenswrapper[5050]: I1123 15:55:39.816861 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["crc-storage/crc-storage-crc-bc9sb"] Nov 23 15:55:39 crc kubenswrapper[5050]: I1123 15:55:39.935192 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["crc-storage/crc-storage-crc-76dcc"] Nov 23 15:55:39 crc kubenswrapper[5050]: E1123 15:55:39.935879 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cf4736a0-f0b8-414d-a15b-8de6b46ed9d3" containerName="storage" Nov 23 15:55:39 crc kubenswrapper[5050]: I1123 15:55:39.935913 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="cf4736a0-f0b8-414d-a15b-8de6b46ed9d3" containerName="storage" Nov 23 15:55:39 crc kubenswrapper[5050]: I1123 15:55:39.938204 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="cf4736a0-f0b8-414d-a15b-8de6b46ed9d3" containerName="storage" Nov 23 15:55:39 crc kubenswrapper[5050]: I1123 15:55:39.939357 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="crc-storage/crc-storage-crc-76dcc" Nov 23 15:55:39 crc kubenswrapper[5050]: I1123 15:55:39.942130 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"crc-storage" Nov 23 15:55:39 crc kubenswrapper[5050]: I1123 15:55:39.942916 5050 reflector.go:368] Caches populated for *v1.Secret from object-"crc-storage"/"crc-storage-dockercfg-c7dg6" Nov 23 15:55:39 crc kubenswrapper[5050]: I1123 15:55:39.943158 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"openshift-service-ca.crt" Nov 23 15:55:39 crc kubenswrapper[5050]: I1123 15:55:39.943859 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"kube-root-ca.crt" Nov 23 15:55:39 crc kubenswrapper[5050]: I1123 15:55:39.953614 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["crc-storage/crc-storage-crc-76dcc"] Nov 23 15:55:39 crc kubenswrapper[5050]: I1123 15:55:39.966856 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/b7f5bf7f-538e-4e68-89eb-d6a0ff9e336c-crc-storage\") pod \"crc-storage-crc-76dcc\" (UID: \"b7f5bf7f-538e-4e68-89eb-d6a0ff9e336c\") " pod="crc-storage/crc-storage-crc-76dcc" Nov 23 15:55:39 crc kubenswrapper[5050]: I1123 15:55:39.966980 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/b7f5bf7f-538e-4e68-89eb-d6a0ff9e336c-node-mnt\") pod \"crc-storage-crc-76dcc\" (UID: \"b7f5bf7f-538e-4e68-89eb-d6a0ff9e336c\") " pod="crc-storage/crc-storage-crc-76dcc" Nov 23 15:55:39 crc kubenswrapper[5050]: I1123 15:55:39.967033 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-phw26\" (UniqueName: \"kubernetes.io/projected/b7f5bf7f-538e-4e68-89eb-d6a0ff9e336c-kube-api-access-phw26\") pod \"crc-storage-crc-76dcc\" (UID: \"b7f5bf7f-538e-4e68-89eb-d6a0ff9e336c\") " pod="crc-storage/crc-storage-crc-76dcc" Nov 23 15:55:40 crc kubenswrapper[5050]: I1123 15:55:40.069106 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/b7f5bf7f-538e-4e68-89eb-d6a0ff9e336c-node-mnt\") pod \"crc-storage-crc-76dcc\" (UID: \"b7f5bf7f-538e-4e68-89eb-d6a0ff9e336c\") " pod="crc-storage/crc-storage-crc-76dcc" Nov 23 15:55:40 crc kubenswrapper[5050]: I1123 15:55:40.069208 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-phw26\" (UniqueName: \"kubernetes.io/projected/b7f5bf7f-538e-4e68-89eb-d6a0ff9e336c-kube-api-access-phw26\") pod \"crc-storage-crc-76dcc\" (UID: \"b7f5bf7f-538e-4e68-89eb-d6a0ff9e336c\") " pod="crc-storage/crc-storage-crc-76dcc" Nov 23 15:55:40 crc kubenswrapper[5050]: I1123 15:55:40.069400 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/b7f5bf7f-538e-4e68-89eb-d6a0ff9e336c-crc-storage\") pod \"crc-storage-crc-76dcc\" (UID: \"b7f5bf7f-538e-4e68-89eb-d6a0ff9e336c\") " pod="crc-storage/crc-storage-crc-76dcc" Nov 23 15:55:40 crc kubenswrapper[5050]: I1123 15:55:40.069554 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/b7f5bf7f-538e-4e68-89eb-d6a0ff9e336c-node-mnt\") pod \"crc-storage-crc-76dcc\" (UID: \"b7f5bf7f-538e-4e68-89eb-d6a0ff9e336c\") " 
pod="crc-storage/crc-storage-crc-76dcc" Nov 23 15:55:40 crc kubenswrapper[5050]: I1123 15:55:40.070886 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/b7f5bf7f-538e-4e68-89eb-d6a0ff9e336c-crc-storage\") pod \"crc-storage-crc-76dcc\" (UID: \"b7f5bf7f-538e-4e68-89eb-d6a0ff9e336c\") " pod="crc-storage/crc-storage-crc-76dcc" Nov 23 15:55:40 crc kubenswrapper[5050]: I1123 15:55:40.094933 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-phw26\" (UniqueName: \"kubernetes.io/projected/b7f5bf7f-538e-4e68-89eb-d6a0ff9e336c-kube-api-access-phw26\") pod \"crc-storage-crc-76dcc\" (UID: \"b7f5bf7f-538e-4e68-89eb-d6a0ff9e336c\") " pod="crc-storage/crc-storage-crc-76dcc" Nov 23 15:55:40 crc kubenswrapper[5050]: I1123 15:55:40.271752 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-76dcc" Nov 23 15:55:40 crc kubenswrapper[5050]: I1123 15:55:40.749963 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["crc-storage/crc-storage-crc-76dcc"] Nov 23 15:55:40 crc kubenswrapper[5050]: I1123 15:55:40.818322 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-76dcc" event={"ID":"b7f5bf7f-538e-4e68-89eb-d6a0ff9e336c","Type":"ContainerStarted","Data":"d23f2ffd4b98bc3892ca6c332e4662b85702ceaaf97a0ad8382223f1a2616b5e"} Nov 23 15:55:41 crc kubenswrapper[5050]: I1123 15:55:41.564432 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cf4736a0-f0b8-414d-a15b-8de6b46ed9d3" path="/var/lib/kubelet/pods/cf4736a0-f0b8-414d-a15b-8de6b46ed9d3/volumes" Nov 23 15:55:41 crc kubenswrapper[5050]: I1123 15:55:41.832666 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-76dcc" event={"ID":"b7f5bf7f-538e-4e68-89eb-d6a0ff9e336c","Type":"ContainerStarted","Data":"593da474dd445d278f18aabf00e4b1ac3c6c3577f611d45e32c49d1a24983995"} Nov 23 15:55:41 crc kubenswrapper[5050]: I1123 15:55:41.861731 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="crc-storage/crc-storage-crc-76dcc" podStartSLOduration=2.44057848 podStartE2EDuration="2.86170079s" podCreationTimestamp="2025-11-23 15:55:39 +0000 UTC" firstStartedPulling="2025-11-23 15:55:40.764415186 +0000 UTC m=+4435.931411681" lastFinishedPulling="2025-11-23 15:55:41.185537456 +0000 UTC m=+4436.352533991" observedRunningTime="2025-11-23 15:55:41.859320933 +0000 UTC m=+4437.026317458" watchObservedRunningTime="2025-11-23 15:55:41.86170079 +0000 UTC m=+4437.028697305" Nov 23 15:55:42 crc kubenswrapper[5050]: I1123 15:55:42.845156 5050 generic.go:334] "Generic (PLEG): container finished" podID="b7f5bf7f-538e-4e68-89eb-d6a0ff9e336c" containerID="593da474dd445d278f18aabf00e4b1ac3c6c3577f611d45e32c49d1a24983995" exitCode=0 Nov 23 15:55:42 crc kubenswrapper[5050]: I1123 15:55:42.845222 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-76dcc" event={"ID":"b7f5bf7f-538e-4e68-89eb-d6a0ff9e336c","Type":"ContainerDied","Data":"593da474dd445d278f18aabf00e4b1ac3c6c3577f611d45e32c49d1a24983995"} Nov 23 15:55:44 crc kubenswrapper[5050]: I1123 15:55:44.548579 5050 scope.go:117] "RemoveContainer" containerID="084b3acf48b431cddb110c9e00f2544e092eca446a3ace8e3ff269a7e06eedfb" Nov 23 15:55:44 crc kubenswrapper[5050]: E1123 15:55:44.549546 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with 
CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 15:55:44 crc kubenswrapper[5050]: I1123 15:55:44.935780 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-76dcc" Nov 23 15:55:44 crc kubenswrapper[5050]: I1123 15:55:44.964607 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/b7f5bf7f-538e-4e68-89eb-d6a0ff9e336c-node-mnt\") pod \"b7f5bf7f-538e-4e68-89eb-d6a0ff9e336c\" (UID: \"b7f5bf7f-538e-4e68-89eb-d6a0ff9e336c\") " Nov 23 15:55:44 crc kubenswrapper[5050]: I1123 15:55:44.964782 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/b7f5bf7f-538e-4e68-89eb-d6a0ff9e336c-crc-storage\") pod \"b7f5bf7f-538e-4e68-89eb-d6a0ff9e336c\" (UID: \"b7f5bf7f-538e-4e68-89eb-d6a0ff9e336c\") " Nov 23 15:55:44 crc kubenswrapper[5050]: I1123 15:55:44.964796 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/b7f5bf7f-538e-4e68-89eb-d6a0ff9e336c-node-mnt" (OuterVolumeSpecName: "node-mnt") pod "b7f5bf7f-538e-4e68-89eb-d6a0ff9e336c" (UID: "b7f5bf7f-538e-4e68-89eb-d6a0ff9e336c"). InnerVolumeSpecName "node-mnt". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 23 15:55:44 crc kubenswrapper[5050]: I1123 15:55:44.964921 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-phw26\" (UniqueName: \"kubernetes.io/projected/b7f5bf7f-538e-4e68-89eb-d6a0ff9e336c-kube-api-access-phw26\") pod \"b7f5bf7f-538e-4e68-89eb-d6a0ff9e336c\" (UID: \"b7f5bf7f-538e-4e68-89eb-d6a0ff9e336c\") " Nov 23 15:55:44 crc kubenswrapper[5050]: I1123 15:55:44.965478 5050 reconciler_common.go:293] "Volume detached for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/b7f5bf7f-538e-4e68-89eb-d6a0ff9e336c-node-mnt\") on node \"crc\" DevicePath \"\"" Nov 23 15:55:45 crc kubenswrapper[5050]: I1123 15:55:45.003343 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b7f5bf7f-538e-4e68-89eb-d6a0ff9e336c-kube-api-access-phw26" (OuterVolumeSpecName: "kube-api-access-phw26") pod "b7f5bf7f-538e-4e68-89eb-d6a0ff9e336c" (UID: "b7f5bf7f-538e-4e68-89eb-d6a0ff9e336c"). InnerVolumeSpecName "kube-api-access-phw26". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:55:45 crc kubenswrapper[5050]: I1123 15:55:45.005198 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b7f5bf7f-538e-4e68-89eb-d6a0ff9e336c-crc-storage" (OuterVolumeSpecName: "crc-storage") pod "b7f5bf7f-538e-4e68-89eb-d6a0ff9e336c" (UID: "b7f5bf7f-538e-4e68-89eb-d6a0ff9e336c"). InnerVolumeSpecName "crc-storage". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 15:55:45 crc kubenswrapper[5050]: I1123 15:55:45.068671 5050 reconciler_common.go:293] "Volume detached for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/b7f5bf7f-538e-4e68-89eb-d6a0ff9e336c-crc-storage\") on node \"crc\" DevicePath \"\"" Nov 23 15:55:45 crc kubenswrapper[5050]: I1123 15:55:45.068748 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-phw26\" (UniqueName: \"kubernetes.io/projected/b7f5bf7f-538e-4e68-89eb-d6a0ff9e336c-kube-api-access-phw26\") on node \"crc\" DevicePath \"\"" Nov 23 15:55:45 crc kubenswrapper[5050]: I1123 15:55:45.879002 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-76dcc" event={"ID":"b7f5bf7f-538e-4e68-89eb-d6a0ff9e336c","Type":"ContainerDied","Data":"d23f2ffd4b98bc3892ca6c332e4662b85702ceaaf97a0ad8382223f1a2616b5e"} Nov 23 15:55:45 crc kubenswrapper[5050]: I1123 15:55:45.879438 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d23f2ffd4b98bc3892ca6c332e4662b85702ceaaf97a0ad8382223f1a2616b5e" Nov 23 15:55:45 crc kubenswrapper[5050]: I1123 15:55:45.879324 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-76dcc" Nov 23 15:55:55 crc kubenswrapper[5050]: I1123 15:55:55.554064 5050 scope.go:117] "RemoveContainer" containerID="084b3acf48b431cddb110c9e00f2544e092eca446a3ace8e3ff269a7e06eedfb" Nov 23 15:55:55 crc kubenswrapper[5050]: E1123 15:55:55.555196 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 15:56:05 crc kubenswrapper[5050]: I1123 15:56:05.956034 5050 scope.go:117] "RemoveContainer" containerID="7d37866eed4c545e2928239320928a33397fe13f24921be0726982094684866e" Nov 23 15:56:08 crc kubenswrapper[5050]: I1123 15:56:08.549711 5050 scope.go:117] "RemoveContainer" containerID="084b3acf48b431cddb110c9e00f2544e092eca446a3ace8e3ff269a7e06eedfb" Nov 23 15:56:08 crc kubenswrapper[5050]: E1123 15:56:08.550405 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 15:56:19 crc kubenswrapper[5050]: I1123 15:56:19.549972 5050 scope.go:117] "RemoveContainer" containerID="084b3acf48b431cddb110c9e00f2544e092eca446a3ace8e3ff269a7e06eedfb" Nov 23 15:56:19 crc kubenswrapper[5050]: E1123 15:56:19.550982 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 15:56:33 crc kubenswrapper[5050]: I1123 
15:56:33.550660 5050 scope.go:117] "RemoveContainer" containerID="084b3acf48b431cddb110c9e00f2544e092eca446a3ace8e3ff269a7e06eedfb" Nov 23 15:56:33 crc kubenswrapper[5050]: E1123 15:56:33.551707 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 15:56:45 crc kubenswrapper[5050]: I1123 15:56:45.563803 5050 scope.go:117] "RemoveContainer" containerID="084b3acf48b431cddb110c9e00f2544e092eca446a3ace8e3ff269a7e06eedfb" Nov 23 15:56:45 crc kubenswrapper[5050]: E1123 15:56:45.565062 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 15:56:57 crc kubenswrapper[5050]: I1123 15:56:57.549072 5050 scope.go:117] "RemoveContainer" containerID="084b3acf48b431cddb110c9e00f2544e092eca446a3ace8e3ff269a7e06eedfb" Nov 23 15:56:57 crc kubenswrapper[5050]: E1123 15:56:57.549981 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 15:57:09 crc kubenswrapper[5050]: I1123 15:57:09.548708 5050 scope.go:117] "RemoveContainer" containerID="084b3acf48b431cddb110c9e00f2544e092eca446a3ace8e3ff269a7e06eedfb" Nov 23 15:57:10 crc kubenswrapper[5050]: I1123 15:57:10.702972 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" event={"ID":"1d998909-9470-47ef-87e8-b34f0473682f","Type":"ContainerStarted","Data":"c18d191ff51dd31497250d2492a5d834c9d4fb3c3d8d531c739172043c30b828"} Nov 23 15:57:44 crc kubenswrapper[5050]: I1123 15:57:44.873244 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-5ffb9"] Nov 23 15:57:44 crc kubenswrapper[5050]: E1123 15:57:44.874489 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b7f5bf7f-538e-4e68-89eb-d6a0ff9e336c" containerName="storage" Nov 23 15:57:44 crc kubenswrapper[5050]: I1123 15:57:44.874505 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="b7f5bf7f-538e-4e68-89eb-d6a0ff9e336c" containerName="storage" Nov 23 15:57:44 crc kubenswrapper[5050]: I1123 15:57:44.874733 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="b7f5bf7f-538e-4e68-89eb-d6a0ff9e336c" containerName="storage" Nov 23 15:57:44 crc kubenswrapper[5050]: I1123 15:57:44.878918 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-5ffb9" Nov 23 15:57:44 crc kubenswrapper[5050]: I1123 15:57:44.908192 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-5ffb9"] Nov 23 15:57:44 crc kubenswrapper[5050]: I1123 15:57:44.937232 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d08d0ea3-36c3-4d31-ab45-50054765ff11-catalog-content\") pod \"redhat-operators-5ffb9\" (UID: \"d08d0ea3-36c3-4d31-ab45-50054765ff11\") " pod="openshift-marketplace/redhat-operators-5ffb9" Nov 23 15:57:44 crc kubenswrapper[5050]: I1123 15:57:44.937651 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h6wz9\" (UniqueName: \"kubernetes.io/projected/d08d0ea3-36c3-4d31-ab45-50054765ff11-kube-api-access-h6wz9\") pod \"redhat-operators-5ffb9\" (UID: \"d08d0ea3-36c3-4d31-ab45-50054765ff11\") " pod="openshift-marketplace/redhat-operators-5ffb9" Nov 23 15:57:44 crc kubenswrapper[5050]: I1123 15:57:44.937817 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d08d0ea3-36c3-4d31-ab45-50054765ff11-utilities\") pod \"redhat-operators-5ffb9\" (UID: \"d08d0ea3-36c3-4d31-ab45-50054765ff11\") " pod="openshift-marketplace/redhat-operators-5ffb9" Nov 23 15:57:45 crc kubenswrapper[5050]: I1123 15:57:45.039004 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h6wz9\" (UniqueName: \"kubernetes.io/projected/d08d0ea3-36c3-4d31-ab45-50054765ff11-kube-api-access-h6wz9\") pod \"redhat-operators-5ffb9\" (UID: \"d08d0ea3-36c3-4d31-ab45-50054765ff11\") " pod="openshift-marketplace/redhat-operators-5ffb9" Nov 23 15:57:45 crc kubenswrapper[5050]: I1123 15:57:45.039071 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d08d0ea3-36c3-4d31-ab45-50054765ff11-utilities\") pod \"redhat-operators-5ffb9\" (UID: \"d08d0ea3-36c3-4d31-ab45-50054765ff11\") " pod="openshift-marketplace/redhat-operators-5ffb9" Nov 23 15:57:45 crc kubenswrapper[5050]: I1123 15:57:45.039139 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d08d0ea3-36c3-4d31-ab45-50054765ff11-catalog-content\") pod \"redhat-operators-5ffb9\" (UID: \"d08d0ea3-36c3-4d31-ab45-50054765ff11\") " pod="openshift-marketplace/redhat-operators-5ffb9" Nov 23 15:57:45 crc kubenswrapper[5050]: I1123 15:57:45.039768 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d08d0ea3-36c3-4d31-ab45-50054765ff11-catalog-content\") pod \"redhat-operators-5ffb9\" (UID: \"d08d0ea3-36c3-4d31-ab45-50054765ff11\") " pod="openshift-marketplace/redhat-operators-5ffb9" Nov 23 15:57:45 crc kubenswrapper[5050]: I1123 15:57:45.040002 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d08d0ea3-36c3-4d31-ab45-50054765ff11-utilities\") pod \"redhat-operators-5ffb9\" (UID: \"d08d0ea3-36c3-4d31-ab45-50054765ff11\") " pod="openshift-marketplace/redhat-operators-5ffb9" Nov 23 15:57:45 crc kubenswrapper[5050]: I1123 15:57:45.079345 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-h6wz9\" (UniqueName: \"kubernetes.io/projected/d08d0ea3-36c3-4d31-ab45-50054765ff11-kube-api-access-h6wz9\") pod \"redhat-operators-5ffb9\" (UID: \"d08d0ea3-36c3-4d31-ab45-50054765ff11\") " pod="openshift-marketplace/redhat-operators-5ffb9" Nov 23 15:57:45 crc kubenswrapper[5050]: I1123 15:57:45.215669 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-5ffb9" Nov 23 15:57:45 crc kubenswrapper[5050]: I1123 15:57:45.709584 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-5ffb9"] Nov 23 15:57:46 crc kubenswrapper[5050]: I1123 15:57:46.098186 5050 generic.go:334] "Generic (PLEG): container finished" podID="d08d0ea3-36c3-4d31-ab45-50054765ff11" containerID="e7637b6e3006791ee9545c57a19489145626bf4ad0baed9702315ef7c1d5aad0" exitCode=0 Nov 23 15:57:46 crc kubenswrapper[5050]: I1123 15:57:46.098349 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5ffb9" event={"ID":"d08d0ea3-36c3-4d31-ab45-50054765ff11","Type":"ContainerDied","Data":"e7637b6e3006791ee9545c57a19489145626bf4ad0baed9702315ef7c1d5aad0"} Nov 23 15:57:46 crc kubenswrapper[5050]: I1123 15:57:46.098627 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5ffb9" event={"ID":"d08d0ea3-36c3-4d31-ab45-50054765ff11","Type":"ContainerStarted","Data":"420c522d799beaa64309341166af942943d60ccae1942f8c5acbf9925dfa66a0"} Nov 23 15:57:47 crc kubenswrapper[5050]: I1123 15:57:47.112025 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5ffb9" event={"ID":"d08d0ea3-36c3-4d31-ab45-50054765ff11","Type":"ContainerStarted","Data":"a3bb22c02df4e02329a5556689eeac00f0772a83b722ab0186243677d48581a6"} Nov 23 15:57:48 crc kubenswrapper[5050]: I1123 15:57:48.126436 5050 generic.go:334] "Generic (PLEG): container finished" podID="d08d0ea3-36c3-4d31-ab45-50054765ff11" containerID="a3bb22c02df4e02329a5556689eeac00f0772a83b722ab0186243677d48581a6" exitCode=0 Nov 23 15:57:48 crc kubenswrapper[5050]: I1123 15:57:48.126539 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5ffb9" event={"ID":"d08d0ea3-36c3-4d31-ab45-50054765ff11","Type":"ContainerDied","Data":"a3bb22c02df4e02329a5556689eeac00f0772a83b722ab0186243677d48581a6"} Nov 23 15:57:49 crc kubenswrapper[5050]: I1123 15:57:49.138435 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5ffb9" event={"ID":"d08d0ea3-36c3-4d31-ab45-50054765ff11","Type":"ContainerStarted","Data":"f6a9375b4846ee4b9127bd3b3192d904d9b4d940b21d0e35f8dee80c1cba4e60"} Nov 23 15:57:49 crc kubenswrapper[5050]: I1123 15:57:49.171750 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-5ffb9" podStartSLOduration=2.707019558 podStartE2EDuration="5.171716998s" podCreationTimestamp="2025-11-23 15:57:44 +0000 UTC" firstStartedPulling="2025-11-23 15:57:46.100940359 +0000 UTC m=+4561.267936844" lastFinishedPulling="2025-11-23 15:57:48.565637759 +0000 UTC m=+4563.732634284" observedRunningTime="2025-11-23 15:57:49.171289966 +0000 UTC m=+4564.338286511" watchObservedRunningTime="2025-11-23 15:57:49.171716998 +0000 UTC m=+4564.338713523" Nov 23 15:57:52 crc kubenswrapper[5050]: I1123 15:57:52.245096 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-66v2t"] Nov 23 15:57:52 crc 
kubenswrapper[5050]: I1123 15:57:52.247401 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-66v2t" Nov 23 15:57:52 crc kubenswrapper[5050]: I1123 15:57:52.274402 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-66v2t"] Nov 23 15:57:52 crc kubenswrapper[5050]: I1123 15:57:52.297209 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3ea779d8-101f-4ddb-9590-c05ea86749b6-utilities\") pod \"certified-operators-66v2t\" (UID: \"3ea779d8-101f-4ddb-9590-c05ea86749b6\") " pod="openshift-marketplace/certified-operators-66v2t" Nov 23 15:57:52 crc kubenswrapper[5050]: I1123 15:57:52.297399 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8t2d4\" (UniqueName: \"kubernetes.io/projected/3ea779d8-101f-4ddb-9590-c05ea86749b6-kube-api-access-8t2d4\") pod \"certified-operators-66v2t\" (UID: \"3ea779d8-101f-4ddb-9590-c05ea86749b6\") " pod="openshift-marketplace/certified-operators-66v2t" Nov 23 15:57:52 crc kubenswrapper[5050]: I1123 15:57:52.297628 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3ea779d8-101f-4ddb-9590-c05ea86749b6-catalog-content\") pod \"certified-operators-66v2t\" (UID: \"3ea779d8-101f-4ddb-9590-c05ea86749b6\") " pod="openshift-marketplace/certified-operators-66v2t" Nov 23 15:57:52 crc kubenswrapper[5050]: I1123 15:57:52.399041 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3ea779d8-101f-4ddb-9590-c05ea86749b6-utilities\") pod \"certified-operators-66v2t\" (UID: \"3ea779d8-101f-4ddb-9590-c05ea86749b6\") " pod="openshift-marketplace/certified-operators-66v2t" Nov 23 15:57:52 crc kubenswrapper[5050]: I1123 15:57:52.399151 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8t2d4\" (UniqueName: \"kubernetes.io/projected/3ea779d8-101f-4ddb-9590-c05ea86749b6-kube-api-access-8t2d4\") pod \"certified-operators-66v2t\" (UID: \"3ea779d8-101f-4ddb-9590-c05ea86749b6\") " pod="openshift-marketplace/certified-operators-66v2t" Nov 23 15:57:52 crc kubenswrapper[5050]: I1123 15:57:52.399237 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3ea779d8-101f-4ddb-9590-c05ea86749b6-catalog-content\") pod \"certified-operators-66v2t\" (UID: \"3ea779d8-101f-4ddb-9590-c05ea86749b6\") " pod="openshift-marketplace/certified-operators-66v2t" Nov 23 15:57:52 crc kubenswrapper[5050]: I1123 15:57:52.399905 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3ea779d8-101f-4ddb-9590-c05ea86749b6-utilities\") pod \"certified-operators-66v2t\" (UID: \"3ea779d8-101f-4ddb-9590-c05ea86749b6\") " pod="openshift-marketplace/certified-operators-66v2t" Nov 23 15:57:52 crc kubenswrapper[5050]: I1123 15:57:52.400030 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3ea779d8-101f-4ddb-9590-c05ea86749b6-catalog-content\") pod \"certified-operators-66v2t\" (UID: \"3ea779d8-101f-4ddb-9590-c05ea86749b6\") " pod="openshift-marketplace/certified-operators-66v2t" Nov 
23 15:57:52 crc kubenswrapper[5050]: I1123 15:57:52.427817 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8t2d4\" (UniqueName: \"kubernetes.io/projected/3ea779d8-101f-4ddb-9590-c05ea86749b6-kube-api-access-8t2d4\") pod \"certified-operators-66v2t\" (UID: \"3ea779d8-101f-4ddb-9590-c05ea86749b6\") " pod="openshift-marketplace/certified-operators-66v2t" Nov 23 15:57:52 crc kubenswrapper[5050]: I1123 15:57:52.610484 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-66v2t" Nov 23 15:57:53 crc kubenswrapper[5050]: I1123 15:57:53.120940 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-66v2t"] Nov 23 15:57:53 crc kubenswrapper[5050]: I1123 15:57:53.176479 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-66v2t" event={"ID":"3ea779d8-101f-4ddb-9590-c05ea86749b6","Type":"ContainerStarted","Data":"fcdb7cbb3c434c356a5060ec16ad5fea5ecd0f43695ea024029aba5d729f8847"} Nov 23 15:57:54 crc kubenswrapper[5050]: I1123 15:57:54.197930 5050 generic.go:334] "Generic (PLEG): container finished" podID="3ea779d8-101f-4ddb-9590-c05ea86749b6" containerID="1bf01272b28c34bda46df2c969aae624b7c4d6f9240ed0a262eeb2ee97f6ba6e" exitCode=0 Nov 23 15:57:54 crc kubenswrapper[5050]: I1123 15:57:54.198197 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-66v2t" event={"ID":"3ea779d8-101f-4ddb-9590-c05ea86749b6","Type":"ContainerDied","Data":"1bf01272b28c34bda46df2c969aae624b7c4d6f9240ed0a262eeb2ee97f6ba6e"} Nov 23 15:57:55 crc kubenswrapper[5050]: I1123 15:57:55.216394 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-5ffb9" Nov 23 15:57:55 crc kubenswrapper[5050]: I1123 15:57:55.216915 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-5ffb9" Nov 23 15:57:56 crc kubenswrapper[5050]: I1123 15:57:56.223659 5050 generic.go:334] "Generic (PLEG): container finished" podID="3ea779d8-101f-4ddb-9590-c05ea86749b6" containerID="4ac233fdb650d985592afa100bb855a6a41a71613b7a18bbb8c40ae56b25d8af" exitCode=0 Nov 23 15:57:56 crc kubenswrapper[5050]: I1123 15:57:56.223744 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-66v2t" event={"ID":"3ea779d8-101f-4ddb-9590-c05ea86749b6","Type":"ContainerDied","Data":"4ac233fdb650d985592afa100bb855a6a41a71613b7a18bbb8c40ae56b25d8af"} Nov 23 15:57:56 crc kubenswrapper[5050]: I1123 15:57:56.282118 5050 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-5ffb9" podUID="d08d0ea3-36c3-4d31-ab45-50054765ff11" containerName="registry-server" probeResult="failure" output=< Nov 23 15:57:56 crc kubenswrapper[5050]: timeout: failed to connect service ":50051" within 1s Nov 23 15:57:56 crc kubenswrapper[5050]: > Nov 23 15:57:57 crc kubenswrapper[5050]: I1123 15:57:57.239705 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-66v2t" event={"ID":"3ea779d8-101f-4ddb-9590-c05ea86749b6","Type":"ContainerStarted","Data":"d96442e79d90b9cc96d70e3785861572f77fbb3286d5b249b43e7c8f2b881ffd"} Nov 23 15:57:57 crc kubenswrapper[5050]: I1123 15:57:57.269956 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-66v2t" 
Nov 23 15:58:02 crc kubenswrapper[5050]: I1123 15:58:02.611393 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-66v2t"
Nov 23 15:58:02 crc kubenswrapper[5050]: I1123 15:58:02.612049 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-66v2t"
Nov 23 15:58:02 crc kubenswrapper[5050]: I1123 15:58:02.680610 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-66v2t"
Nov 23 15:58:03 crc kubenswrapper[5050]: I1123 15:58:03.382873 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-66v2t"
Nov 23 15:58:03 crc kubenswrapper[5050]: I1123 15:58:03.460664 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-66v2t"]
Nov 23 15:58:05 crc kubenswrapper[5050]: I1123 15:58:05.275834 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-5ffb9"
Nov 23 15:58:05 crc kubenswrapper[5050]: I1123 15:58:05.327721 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-66v2t" podUID="3ea779d8-101f-4ddb-9590-c05ea86749b6" containerName="registry-server" containerID="cri-o://d96442e79d90b9cc96d70e3785861572f77fbb3286d5b249b43e7c8f2b881ffd" gracePeriod=2
Nov 23 15:58:05 crc kubenswrapper[5050]: I1123 15:58:05.369592 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-5ffb9"
Nov 23 15:58:05 crc kubenswrapper[5050]: I1123 15:58:05.957749 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-66v2t"
Nov 23 15:58:06 crc kubenswrapper[5050]: I1123 15:58:06.081659 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3ea779d8-101f-4ddb-9590-c05ea86749b6-utilities\") pod \"3ea779d8-101f-4ddb-9590-c05ea86749b6\" (UID: \"3ea779d8-101f-4ddb-9590-c05ea86749b6\") "
Nov 23 15:58:06 crc kubenswrapper[5050]: I1123 15:58:06.082168 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8t2d4\" (UniqueName: \"kubernetes.io/projected/3ea779d8-101f-4ddb-9590-c05ea86749b6-kube-api-access-8t2d4\") pod \"3ea779d8-101f-4ddb-9590-c05ea86749b6\" (UID: \"3ea779d8-101f-4ddb-9590-c05ea86749b6\") "
Nov 23 15:58:06 crc kubenswrapper[5050]: I1123 15:58:06.082192 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3ea779d8-101f-4ddb-9590-c05ea86749b6-catalog-content\") pod \"3ea779d8-101f-4ddb-9590-c05ea86749b6\" (UID: \"3ea779d8-101f-4ddb-9590-c05ea86749b6\") "
Nov 23 15:58:06 crc kubenswrapper[5050]: I1123 15:58:06.084770 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3ea779d8-101f-4ddb-9590-c05ea86749b6-utilities" (OuterVolumeSpecName: "utilities") pod "3ea779d8-101f-4ddb-9590-c05ea86749b6" (UID: "3ea779d8-101f-4ddb-9590-c05ea86749b6"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 23 15:58:06 crc kubenswrapper[5050]: I1123 15:58:06.092382 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ea779d8-101f-4ddb-9590-c05ea86749b6-kube-api-access-8t2d4" (OuterVolumeSpecName: "kube-api-access-8t2d4") pod "3ea779d8-101f-4ddb-9590-c05ea86749b6" (UID: "3ea779d8-101f-4ddb-9590-c05ea86749b6"). InnerVolumeSpecName "kube-api-access-8t2d4". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 23 15:58:06 crc kubenswrapper[5050]: I1123 15:58:06.154276 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3ea779d8-101f-4ddb-9590-c05ea86749b6-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "3ea779d8-101f-4ddb-9590-c05ea86749b6" (UID: "3ea779d8-101f-4ddb-9590-c05ea86749b6"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 23 15:58:06 crc kubenswrapper[5050]: I1123 15:58:06.184723 5050 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3ea779d8-101f-4ddb-9590-c05ea86749b6-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 23 15:58:06 crc kubenswrapper[5050]: I1123 15:58:06.184778 5050 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3ea779d8-101f-4ddb-9590-c05ea86749b6-utilities\") on node \"crc\" DevicePath \"\""
Nov 23 15:58:06 crc kubenswrapper[5050]: I1123 15:58:06.184803 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8t2d4\" (UniqueName: \"kubernetes.io/projected/3ea779d8-101f-4ddb-9590-c05ea86749b6-kube-api-access-8t2d4\") on node \"crc\" DevicePath \"\""
Nov 23 15:58:06 crc kubenswrapper[5050]: I1123 15:58:06.342106 5050 generic.go:334] "Generic (PLEG): container finished" podID="3ea779d8-101f-4ddb-9590-c05ea86749b6" containerID="d96442e79d90b9cc96d70e3785861572f77fbb3286d5b249b43e7c8f2b881ffd" exitCode=0
Nov 23 15:58:06 crc kubenswrapper[5050]: I1123 15:58:06.342252 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-66v2t"
Nov 23 15:58:06 crc kubenswrapper[5050]: I1123 15:58:06.342225 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-66v2t" event={"ID":"3ea779d8-101f-4ddb-9590-c05ea86749b6","Type":"ContainerDied","Data":"d96442e79d90b9cc96d70e3785861572f77fbb3286d5b249b43e7c8f2b881ffd"}
Nov 23 15:58:06 crc kubenswrapper[5050]: I1123 15:58:06.342546 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-66v2t" event={"ID":"3ea779d8-101f-4ddb-9590-c05ea86749b6","Type":"ContainerDied","Data":"fcdb7cbb3c434c356a5060ec16ad5fea5ecd0f43695ea024029aba5d729f8847"}
Nov 23 15:58:06 crc kubenswrapper[5050]: I1123 15:58:06.342629 5050 scope.go:117] "RemoveContainer" containerID="d96442e79d90b9cc96d70e3785861572f77fbb3286d5b249b43e7c8f2b881ffd"
Nov 23 15:58:06 crc kubenswrapper[5050]: I1123 15:58:06.395162 5050 scope.go:117] "RemoveContainer" containerID="4ac233fdb650d985592afa100bb855a6a41a71613b7a18bbb8c40ae56b25d8af"
Nov 23 15:58:06 crc kubenswrapper[5050]: I1123 15:58:06.404301 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-66v2t"]
Nov 23 15:58:06 crc kubenswrapper[5050]: I1123 15:58:06.417402 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-66v2t"]
Nov 23 15:58:06 crc kubenswrapper[5050]: I1123 15:58:06.428024 5050 scope.go:117] "RemoveContainer" containerID="1bf01272b28c34bda46df2c969aae624b7c4d6f9240ed0a262eeb2ee97f6ba6e"
Nov 23 15:58:06 crc kubenswrapper[5050]: I1123 15:58:06.470347 5050 scope.go:117] "RemoveContainer" containerID="d96442e79d90b9cc96d70e3785861572f77fbb3286d5b249b43e7c8f2b881ffd"
Nov 23 15:58:06 crc kubenswrapper[5050]: E1123 15:58:06.473126 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d96442e79d90b9cc96d70e3785861572f77fbb3286d5b249b43e7c8f2b881ffd\": container with ID starting with d96442e79d90b9cc96d70e3785861572f77fbb3286d5b249b43e7c8f2b881ffd not found: ID does not exist" containerID="d96442e79d90b9cc96d70e3785861572f77fbb3286d5b249b43e7c8f2b881ffd"
Nov 23 15:58:06 crc kubenswrapper[5050]: I1123 15:58:06.473218 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d96442e79d90b9cc96d70e3785861572f77fbb3286d5b249b43e7c8f2b881ffd"} err="failed to get container status \"d96442e79d90b9cc96d70e3785861572f77fbb3286d5b249b43e7c8f2b881ffd\": rpc error: code = NotFound desc = could not find container \"d96442e79d90b9cc96d70e3785861572f77fbb3286d5b249b43e7c8f2b881ffd\": container with ID starting with d96442e79d90b9cc96d70e3785861572f77fbb3286d5b249b43e7c8f2b881ffd not found: ID does not exist"
Nov 23 15:58:06 crc kubenswrapper[5050]: I1123 15:58:06.473268 5050 scope.go:117] "RemoveContainer" containerID="4ac233fdb650d985592afa100bb855a6a41a71613b7a18bbb8c40ae56b25d8af"
Nov 23 15:58:06 crc kubenswrapper[5050]: E1123 15:58:06.473871 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4ac233fdb650d985592afa100bb855a6a41a71613b7a18bbb8c40ae56b25d8af\": container with ID starting with 4ac233fdb650d985592afa100bb855a6a41a71613b7a18bbb8c40ae56b25d8af not found: ID does not exist" containerID="4ac233fdb650d985592afa100bb855a6a41a71613b7a18bbb8c40ae56b25d8af"
Nov 23 15:58:06 crc kubenswrapper[5050]: I1123 15:58:06.473993 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4ac233fdb650d985592afa100bb855a6a41a71613b7a18bbb8c40ae56b25d8af"} err="failed to get container status \"4ac233fdb650d985592afa100bb855a6a41a71613b7a18bbb8c40ae56b25d8af\": rpc error: code = NotFound desc = could not find container \"4ac233fdb650d985592afa100bb855a6a41a71613b7a18bbb8c40ae56b25d8af\": container with ID starting with 4ac233fdb650d985592afa100bb855a6a41a71613b7a18bbb8c40ae56b25d8af not found: ID does not exist"
Nov 23 15:58:06 crc kubenswrapper[5050]: I1123 15:58:06.474068 5050 scope.go:117] "RemoveContainer" containerID="1bf01272b28c34bda46df2c969aae624b7c4d6f9240ed0a262eeb2ee97f6ba6e"
Nov 23 15:58:06 crc kubenswrapper[5050]: E1123 15:58:06.474571 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1bf01272b28c34bda46df2c969aae624b7c4d6f9240ed0a262eeb2ee97f6ba6e\": container with ID starting with 1bf01272b28c34bda46df2c969aae624b7c4d6f9240ed0a262eeb2ee97f6ba6e not found: ID does not exist" containerID="1bf01272b28c34bda46df2c969aae624b7c4d6f9240ed0a262eeb2ee97f6ba6e"
Nov 23 15:58:06 crc kubenswrapper[5050]: I1123 15:58:06.474604 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1bf01272b28c34bda46df2c969aae624b7c4d6f9240ed0a262eeb2ee97f6ba6e"} err="failed to get container status \"1bf01272b28c34bda46df2c969aae624b7c4d6f9240ed0a262eeb2ee97f6ba6e\": rpc error: code = NotFound desc = could not find container \"1bf01272b28c34bda46df2c969aae624b7c4d6f9240ed0a262eeb2ee97f6ba6e\": container with ID starting with 1bf01272b28c34bda46df2c969aae624b7c4d6f9240ed0a262eeb2ee97f6ba6e not found: ID does not exist"
Nov 23 15:58:06 crc kubenswrapper[5050]: I1123 15:58:06.535209 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-5ffb9"]
Nov 23 15:58:06 crc kubenswrapper[5050]: I1123 15:58:06.535527 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-5ffb9" podUID="d08d0ea3-36c3-4d31-ab45-50054765ff11" containerName="registry-server" containerID="cri-o://f6a9375b4846ee4b9127bd3b3192d904d9b4d940b21d0e35f8dee80c1cba4e60" gracePeriod=2
Nov 23 15:58:07 crc kubenswrapper[5050]: I1123 15:58:07.356398 5050 generic.go:334] "Generic (PLEG): container finished" podID="d08d0ea3-36c3-4d31-ab45-50054765ff11" containerID="f6a9375b4846ee4b9127bd3b3192d904d9b4d940b21d0e35f8dee80c1cba4e60" exitCode=0
Nov 23 15:58:07 crc kubenswrapper[5050]: I1123 15:58:07.356529 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5ffb9" event={"ID":"d08d0ea3-36c3-4d31-ab45-50054765ff11","Type":"ContainerDied","Data":"f6a9375b4846ee4b9127bd3b3192d904d9b4d940b21d0e35f8dee80c1cba4e60"}
Nov 23 15:58:07 crc kubenswrapper[5050]: I1123 15:58:07.524593 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-5ffb9"
Nov 23 15:58:07 crc kubenswrapper[5050]: I1123 15:58:07.578058 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ea779d8-101f-4ddb-9590-c05ea86749b6" path="/var/lib/kubelet/pods/3ea779d8-101f-4ddb-9590-c05ea86749b6/volumes"
Nov 23 15:58:07 crc kubenswrapper[5050]: I1123 15:58:07.615408 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d08d0ea3-36c3-4d31-ab45-50054765ff11-catalog-content\") pod \"d08d0ea3-36c3-4d31-ab45-50054765ff11\" (UID: \"d08d0ea3-36c3-4d31-ab45-50054765ff11\") "
Nov 23 15:58:07 crc kubenswrapper[5050]: I1123 15:58:07.615504 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d08d0ea3-36c3-4d31-ab45-50054765ff11-utilities\") pod \"d08d0ea3-36c3-4d31-ab45-50054765ff11\" (UID: \"d08d0ea3-36c3-4d31-ab45-50054765ff11\") "
Nov 23 15:58:07 crc kubenswrapper[5050]: I1123 15:58:07.615563 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h6wz9\" (UniqueName: \"kubernetes.io/projected/d08d0ea3-36c3-4d31-ab45-50054765ff11-kube-api-access-h6wz9\") pod \"d08d0ea3-36c3-4d31-ab45-50054765ff11\" (UID: \"d08d0ea3-36c3-4d31-ab45-50054765ff11\") "
Nov 23 15:58:07 crc kubenswrapper[5050]: I1123 15:58:07.616785 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d08d0ea3-36c3-4d31-ab45-50054765ff11-utilities" (OuterVolumeSpecName: "utilities") pod "d08d0ea3-36c3-4d31-ab45-50054765ff11" (UID: "d08d0ea3-36c3-4d31-ab45-50054765ff11"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 23 15:58:07 crc kubenswrapper[5050]: I1123 15:58:07.617327 5050 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d08d0ea3-36c3-4d31-ab45-50054765ff11-utilities\") on node \"crc\" DevicePath \"\""
Nov 23 15:58:07 crc kubenswrapper[5050]: I1123 15:58:07.625669 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d08d0ea3-36c3-4d31-ab45-50054765ff11-kube-api-access-h6wz9" (OuterVolumeSpecName: "kube-api-access-h6wz9") pod "d08d0ea3-36c3-4d31-ab45-50054765ff11" (UID: "d08d0ea3-36c3-4d31-ab45-50054765ff11"). InnerVolumeSpecName "kube-api-access-h6wz9". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 23 15:58:07 crc kubenswrapper[5050]: I1123 15:58:07.720621 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h6wz9\" (UniqueName: \"kubernetes.io/projected/d08d0ea3-36c3-4d31-ab45-50054765ff11-kube-api-access-h6wz9\") on node \"crc\" DevicePath \"\""
Nov 23 15:58:07 crc kubenswrapper[5050]: I1123 15:58:07.749944 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d08d0ea3-36c3-4d31-ab45-50054765ff11-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d08d0ea3-36c3-4d31-ab45-50054765ff11" (UID: "d08d0ea3-36c3-4d31-ab45-50054765ff11"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 23 15:58:07 crc kubenswrapper[5050]: I1123 15:58:07.823422 5050 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d08d0ea3-36c3-4d31-ab45-50054765ff11-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 23 15:58:08 crc kubenswrapper[5050]: I1123 15:58:08.375321 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5ffb9" event={"ID":"d08d0ea3-36c3-4d31-ab45-50054765ff11","Type":"ContainerDied","Data":"420c522d799beaa64309341166af942943d60ccae1942f8c5acbf9925dfa66a0"}
Nov 23 15:58:08 crc kubenswrapper[5050]: I1123 15:58:08.375415 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-5ffb9"
Nov 23 15:58:08 crc kubenswrapper[5050]: I1123 15:58:08.375466 5050 scope.go:117] "RemoveContainer" containerID="f6a9375b4846ee4b9127bd3b3192d904d9b4d940b21d0e35f8dee80c1cba4e60"
Nov 23 15:58:08 crc kubenswrapper[5050]: I1123 15:58:08.404382 5050 scope.go:117] "RemoveContainer" containerID="a3bb22c02df4e02329a5556689eeac00f0772a83b722ab0186243677d48581a6"
Nov 23 15:58:08 crc kubenswrapper[5050]: I1123 15:58:08.427871 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-5ffb9"]
Nov 23 15:58:08 crc kubenswrapper[5050]: I1123 15:58:08.435770 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-5ffb9"]
Nov 23 15:58:08 crc kubenswrapper[5050]: I1123 15:58:08.441703 5050 scope.go:117] "RemoveContainer" containerID="e7637b6e3006791ee9545c57a19489145626bf4ad0baed9702315ef7c1d5aad0"
Nov 23 15:58:09 crc kubenswrapper[5050]: I1123 15:58:09.567635 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d08d0ea3-36c3-4d31-ab45-50054765ff11" path="/var/lib/kubelet/pods/d08d0ea3-36c3-4d31-ab45-50054765ff11/volumes"
Nov 23 15:59:06 crc kubenswrapper[5050]: I1123 15:59:06.265121 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5d7b5456f5-fs5x5"]
Nov 23 15:59:06 crc kubenswrapper[5050]: E1123 15:59:06.266228 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d08d0ea3-36c3-4d31-ab45-50054765ff11" containerName="registry-server"
Nov 23 15:59:06 crc kubenswrapper[5050]: I1123 15:59:06.266246 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="d08d0ea3-36c3-4d31-ab45-50054765ff11" containerName="registry-server"
Nov 23 15:59:06 crc kubenswrapper[5050]: E1123 15:59:06.266284 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d08d0ea3-36c3-4d31-ab45-50054765ff11" containerName="extract-content"
Nov 23 15:59:06 crc kubenswrapper[5050]: I1123 15:59:06.266290 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="d08d0ea3-36c3-4d31-ab45-50054765ff11" containerName="extract-content"
CPUSet assignment" podUID="d08d0ea3-36c3-4d31-ab45-50054765ff11" containerName="extract-content" Nov 23 15:59:06 crc kubenswrapper[5050]: E1123 15:59:06.266305 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3ea779d8-101f-4ddb-9590-c05ea86749b6" containerName="extract-content" Nov 23 15:59:06 crc kubenswrapper[5050]: I1123 15:59:06.266312 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="3ea779d8-101f-4ddb-9590-c05ea86749b6" containerName="extract-content" Nov 23 15:59:06 crc kubenswrapper[5050]: E1123 15:59:06.266323 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3ea779d8-101f-4ddb-9590-c05ea86749b6" containerName="registry-server" Nov 23 15:59:06 crc kubenswrapper[5050]: I1123 15:59:06.266328 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="3ea779d8-101f-4ddb-9590-c05ea86749b6" containerName="registry-server" Nov 23 15:59:06 crc kubenswrapper[5050]: E1123 15:59:06.266338 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d08d0ea3-36c3-4d31-ab45-50054765ff11" containerName="extract-utilities" Nov 23 15:59:06 crc kubenswrapper[5050]: I1123 15:59:06.266344 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="d08d0ea3-36c3-4d31-ab45-50054765ff11" containerName="extract-utilities" Nov 23 15:59:06 crc kubenswrapper[5050]: E1123 15:59:06.266358 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3ea779d8-101f-4ddb-9590-c05ea86749b6" containerName="extract-utilities" Nov 23 15:59:06 crc kubenswrapper[5050]: I1123 15:59:06.266364 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="3ea779d8-101f-4ddb-9590-c05ea86749b6" containerName="extract-utilities" Nov 23 15:59:06 crc kubenswrapper[5050]: I1123 15:59:06.266522 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="d08d0ea3-36c3-4d31-ab45-50054765ff11" containerName="registry-server" Nov 23 15:59:06 crc kubenswrapper[5050]: I1123 15:59:06.266547 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="3ea779d8-101f-4ddb-9590-c05ea86749b6" containerName="registry-server" Nov 23 15:59:06 crc kubenswrapper[5050]: I1123 15:59:06.267382 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5d7b5456f5-fs5x5" Nov 23 15:59:06 crc kubenswrapper[5050]: I1123 15:59:06.269068 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-svc" Nov 23 15:59:06 crc kubenswrapper[5050]: I1123 15:59:06.269559 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openshift-service-ca.crt" Nov 23 15:59:06 crc kubenswrapper[5050]: I1123 15:59:06.271929 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"kube-root-ca.crt" Nov 23 15:59:06 crc kubenswrapper[5050]: I1123 15:59:06.272142 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dnsmasq-dns-dockercfg-lcx8m" Nov 23 15:59:06 crc kubenswrapper[5050]: I1123 15:59:06.272890 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns" Nov 23 15:59:06 crc kubenswrapper[5050]: I1123 15:59:06.283166 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5d7b5456f5-fs5x5"] Nov 23 15:59:06 crc kubenswrapper[5050]: I1123 15:59:06.313640 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cae208aa-722b-4bf1-9566-1b82203eeb6e-config\") pod \"dnsmasq-dns-5d7b5456f5-fs5x5\" (UID: \"cae208aa-722b-4bf1-9566-1b82203eeb6e\") " pod="openstack/dnsmasq-dns-5d7b5456f5-fs5x5" Nov 23 15:59:06 crc kubenswrapper[5050]: I1123 15:59:06.313737 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cae208aa-722b-4bf1-9566-1b82203eeb6e-dns-svc\") pod \"dnsmasq-dns-5d7b5456f5-fs5x5\" (UID: \"cae208aa-722b-4bf1-9566-1b82203eeb6e\") " pod="openstack/dnsmasq-dns-5d7b5456f5-fs5x5" Nov 23 15:59:06 crc kubenswrapper[5050]: I1123 15:59:06.313788 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5vwq5\" (UniqueName: \"kubernetes.io/projected/cae208aa-722b-4bf1-9566-1b82203eeb6e-kube-api-access-5vwq5\") pod \"dnsmasq-dns-5d7b5456f5-fs5x5\" (UID: \"cae208aa-722b-4bf1-9566-1b82203eeb6e\") " pod="openstack/dnsmasq-dns-5d7b5456f5-fs5x5" Nov 23 15:59:06 crc kubenswrapper[5050]: I1123 15:59:06.415381 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5vwq5\" (UniqueName: \"kubernetes.io/projected/cae208aa-722b-4bf1-9566-1b82203eeb6e-kube-api-access-5vwq5\") pod \"dnsmasq-dns-5d7b5456f5-fs5x5\" (UID: \"cae208aa-722b-4bf1-9566-1b82203eeb6e\") " pod="openstack/dnsmasq-dns-5d7b5456f5-fs5x5" Nov 23 15:59:06 crc kubenswrapper[5050]: I1123 15:59:06.416574 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cae208aa-722b-4bf1-9566-1b82203eeb6e-config\") pod \"dnsmasq-dns-5d7b5456f5-fs5x5\" (UID: \"cae208aa-722b-4bf1-9566-1b82203eeb6e\") " pod="openstack/dnsmasq-dns-5d7b5456f5-fs5x5" Nov 23 15:59:06 crc kubenswrapper[5050]: I1123 15:59:06.417565 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cae208aa-722b-4bf1-9566-1b82203eeb6e-config\") pod \"dnsmasq-dns-5d7b5456f5-fs5x5\" (UID: \"cae208aa-722b-4bf1-9566-1b82203eeb6e\") " pod="openstack/dnsmasq-dns-5d7b5456f5-fs5x5" Nov 23 15:59:06 crc kubenswrapper[5050]: I1123 15:59:06.417726 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" 
(UniqueName: \"kubernetes.io/configmap/cae208aa-722b-4bf1-9566-1b82203eeb6e-dns-svc\") pod \"dnsmasq-dns-5d7b5456f5-fs5x5\" (UID: \"cae208aa-722b-4bf1-9566-1b82203eeb6e\") " pod="openstack/dnsmasq-dns-5d7b5456f5-fs5x5" Nov 23 15:59:06 crc kubenswrapper[5050]: I1123 15:59:06.418334 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cae208aa-722b-4bf1-9566-1b82203eeb6e-dns-svc\") pod \"dnsmasq-dns-5d7b5456f5-fs5x5\" (UID: \"cae208aa-722b-4bf1-9566-1b82203eeb6e\") " pod="openstack/dnsmasq-dns-5d7b5456f5-fs5x5" Nov 23 15:59:06 crc kubenswrapper[5050]: I1123 15:59:06.437735 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5vwq5\" (UniqueName: \"kubernetes.io/projected/cae208aa-722b-4bf1-9566-1b82203eeb6e-kube-api-access-5vwq5\") pod \"dnsmasq-dns-5d7b5456f5-fs5x5\" (UID: \"cae208aa-722b-4bf1-9566-1b82203eeb6e\") " pod="openstack/dnsmasq-dns-5d7b5456f5-fs5x5" Nov 23 15:59:06 crc kubenswrapper[5050]: I1123 15:59:06.533309 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-98ddfc8f-5mxx6"] Nov 23 15:59:06 crc kubenswrapper[5050]: I1123 15:59:06.534856 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-98ddfc8f-5mxx6" Nov 23 15:59:06 crc kubenswrapper[5050]: I1123 15:59:06.551370 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-98ddfc8f-5mxx6"] Nov 23 15:59:06 crc kubenswrapper[5050]: I1123 15:59:06.596027 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5d7b5456f5-fs5x5" Nov 23 15:59:06 crc kubenswrapper[5050]: I1123 15:59:06.622015 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/db2c1781-f656-442a-87db-97e53578c8aa-config\") pod \"dnsmasq-dns-98ddfc8f-5mxx6\" (UID: \"db2c1781-f656-442a-87db-97e53578c8aa\") " pod="openstack/dnsmasq-dns-98ddfc8f-5mxx6" Nov 23 15:59:06 crc kubenswrapper[5050]: I1123 15:59:06.622139 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vzk5w\" (UniqueName: \"kubernetes.io/projected/db2c1781-f656-442a-87db-97e53578c8aa-kube-api-access-vzk5w\") pod \"dnsmasq-dns-98ddfc8f-5mxx6\" (UID: \"db2c1781-f656-442a-87db-97e53578c8aa\") " pod="openstack/dnsmasq-dns-98ddfc8f-5mxx6" Nov 23 15:59:06 crc kubenswrapper[5050]: I1123 15:59:06.622192 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/db2c1781-f656-442a-87db-97e53578c8aa-dns-svc\") pod \"dnsmasq-dns-98ddfc8f-5mxx6\" (UID: \"db2c1781-f656-442a-87db-97e53578c8aa\") " pod="openstack/dnsmasq-dns-98ddfc8f-5mxx6" Nov 23 15:59:06 crc kubenswrapper[5050]: I1123 15:59:06.724252 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/db2c1781-f656-442a-87db-97e53578c8aa-config\") pod \"dnsmasq-dns-98ddfc8f-5mxx6\" (UID: \"db2c1781-f656-442a-87db-97e53578c8aa\") " pod="openstack/dnsmasq-dns-98ddfc8f-5mxx6" Nov 23 15:59:06 crc kubenswrapper[5050]: I1123 15:59:06.724332 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vzk5w\" (UniqueName: \"kubernetes.io/projected/db2c1781-f656-442a-87db-97e53578c8aa-kube-api-access-vzk5w\") pod \"dnsmasq-dns-98ddfc8f-5mxx6\" (UID: 
\"db2c1781-f656-442a-87db-97e53578c8aa\") " pod="openstack/dnsmasq-dns-98ddfc8f-5mxx6" Nov 23 15:59:06 crc kubenswrapper[5050]: I1123 15:59:06.724377 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/db2c1781-f656-442a-87db-97e53578c8aa-dns-svc\") pod \"dnsmasq-dns-98ddfc8f-5mxx6\" (UID: \"db2c1781-f656-442a-87db-97e53578c8aa\") " pod="openstack/dnsmasq-dns-98ddfc8f-5mxx6" Nov 23 15:59:06 crc kubenswrapper[5050]: I1123 15:59:06.725476 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/db2c1781-f656-442a-87db-97e53578c8aa-config\") pod \"dnsmasq-dns-98ddfc8f-5mxx6\" (UID: \"db2c1781-f656-442a-87db-97e53578c8aa\") " pod="openstack/dnsmasq-dns-98ddfc8f-5mxx6" Nov 23 15:59:06 crc kubenswrapper[5050]: I1123 15:59:06.725562 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/db2c1781-f656-442a-87db-97e53578c8aa-dns-svc\") pod \"dnsmasq-dns-98ddfc8f-5mxx6\" (UID: \"db2c1781-f656-442a-87db-97e53578c8aa\") " pod="openstack/dnsmasq-dns-98ddfc8f-5mxx6" Nov 23 15:59:06 crc kubenswrapper[5050]: I1123 15:59:06.744567 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vzk5w\" (UniqueName: \"kubernetes.io/projected/db2c1781-f656-442a-87db-97e53578c8aa-kube-api-access-vzk5w\") pod \"dnsmasq-dns-98ddfc8f-5mxx6\" (UID: \"db2c1781-f656-442a-87db-97e53578c8aa\") " pod="openstack/dnsmasq-dns-98ddfc8f-5mxx6" Nov 23 15:59:06 crc kubenswrapper[5050]: I1123 15:59:06.857356 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-98ddfc8f-5mxx6" Nov 23 15:59:07 crc kubenswrapper[5050]: I1123 15:59:07.114693 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5d7b5456f5-fs5x5"] Nov 23 15:59:07 crc kubenswrapper[5050]: I1123 15:59:07.401074 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-98ddfc8f-5mxx6"] Nov 23 15:59:07 crc kubenswrapper[5050]: W1123 15:59:07.408705 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddb2c1781_f656_442a_87db_97e53578c8aa.slice/crio-4019cd677e9a9e758da2f12eb8286b34e8d671a3621d34f9b4202a91c6027557 WatchSource:0}: Error finding container 4019cd677e9a9e758da2f12eb8286b34e8d671a3621d34f9b4202a91c6027557: Status 404 returned error can't find the container with id 4019cd677e9a9e758da2f12eb8286b34e8d671a3621d34f9b4202a91c6027557 Nov 23 15:59:07 crc kubenswrapper[5050]: I1123 15:59:07.412851 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Nov 23 15:59:07 crc kubenswrapper[5050]: I1123 15:59:07.414153 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 23 15:59:07 crc kubenswrapper[5050]: I1123 15:59:07.417006 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-5glpx" Nov 23 15:59:07 crc kubenswrapper[5050]: I1123 15:59:07.418811 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Nov 23 15:59:07 crc kubenswrapper[5050]: I1123 15:59:07.418886 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Nov 23 15:59:07 crc kubenswrapper[5050]: I1123 15:59:07.419018 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Nov 23 15:59:07 crc kubenswrapper[5050]: I1123 15:59:07.419311 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Nov 23 15:59:07 crc kubenswrapper[5050]: I1123 15:59:07.435354 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 23 15:59:07 crc kubenswrapper[5050]: I1123 15:59:07.537234 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jv9k5\" (UniqueName: \"kubernetes.io/projected/60070b93-3461-4644-8bf3-93f3d6dea993-kube-api-access-jv9k5\") pod \"rabbitmq-server-0\" (UID: \"60070b93-3461-4644-8bf3-93f3d6dea993\") " pod="openstack/rabbitmq-server-0" Nov 23 15:59:07 crc kubenswrapper[5050]: I1123 15:59:07.537518 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/60070b93-3461-4644-8bf3-93f3d6dea993-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"60070b93-3461-4644-8bf3-93f3d6dea993\") " pod="openstack/rabbitmq-server-0" Nov 23 15:59:07 crc kubenswrapper[5050]: I1123 15:59:07.537594 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/60070b93-3461-4644-8bf3-93f3d6dea993-server-conf\") pod \"rabbitmq-server-0\" (UID: \"60070b93-3461-4644-8bf3-93f3d6dea993\") " pod="openstack/rabbitmq-server-0" Nov 23 15:59:07 crc kubenswrapper[5050]: I1123 15:59:07.537862 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/60070b93-3461-4644-8bf3-93f3d6dea993-pod-info\") pod \"rabbitmq-server-0\" (UID: \"60070b93-3461-4644-8bf3-93f3d6dea993\") " pod="openstack/rabbitmq-server-0" Nov 23 15:59:07 crc kubenswrapper[5050]: I1123 15:59:07.538092 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/60070b93-3461-4644-8bf3-93f3d6dea993-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"60070b93-3461-4644-8bf3-93f3d6dea993\") " pod="openstack/rabbitmq-server-0" Nov 23 15:59:07 crc kubenswrapper[5050]: I1123 15:59:07.538131 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/60070b93-3461-4644-8bf3-93f3d6dea993-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"60070b93-3461-4644-8bf3-93f3d6dea993\") " pod="openstack/rabbitmq-server-0" Nov 23 15:59:07 crc kubenswrapper[5050]: I1123 15:59:07.538201 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"pvc-39327384-20b7-4807-b511-7257eb36c2b9\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-39327384-20b7-4807-b511-7257eb36c2b9\") pod \"rabbitmq-server-0\" (UID: \"60070b93-3461-4644-8bf3-93f3d6dea993\") " pod="openstack/rabbitmq-server-0" Nov 23 15:59:07 crc kubenswrapper[5050]: I1123 15:59:07.538378 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/60070b93-3461-4644-8bf3-93f3d6dea993-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"60070b93-3461-4644-8bf3-93f3d6dea993\") " pod="openstack/rabbitmq-server-0" Nov 23 15:59:07 crc kubenswrapper[5050]: I1123 15:59:07.538476 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/60070b93-3461-4644-8bf3-93f3d6dea993-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"60070b93-3461-4644-8bf3-93f3d6dea993\") " pod="openstack/rabbitmq-server-0" Nov 23 15:59:07 crc kubenswrapper[5050]: I1123 15:59:07.640409 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jv9k5\" (UniqueName: \"kubernetes.io/projected/60070b93-3461-4644-8bf3-93f3d6dea993-kube-api-access-jv9k5\") pod \"rabbitmq-server-0\" (UID: \"60070b93-3461-4644-8bf3-93f3d6dea993\") " pod="openstack/rabbitmq-server-0" Nov 23 15:59:07 crc kubenswrapper[5050]: I1123 15:59:07.640515 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/60070b93-3461-4644-8bf3-93f3d6dea993-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"60070b93-3461-4644-8bf3-93f3d6dea993\") " pod="openstack/rabbitmq-server-0" Nov 23 15:59:07 crc kubenswrapper[5050]: I1123 15:59:07.640552 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/60070b93-3461-4644-8bf3-93f3d6dea993-server-conf\") pod \"rabbitmq-server-0\" (UID: \"60070b93-3461-4644-8bf3-93f3d6dea993\") " pod="openstack/rabbitmq-server-0" Nov 23 15:59:07 crc kubenswrapper[5050]: I1123 15:59:07.640633 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/60070b93-3461-4644-8bf3-93f3d6dea993-pod-info\") pod \"rabbitmq-server-0\" (UID: \"60070b93-3461-4644-8bf3-93f3d6dea993\") " pod="openstack/rabbitmq-server-0" Nov 23 15:59:07 crc kubenswrapper[5050]: I1123 15:59:07.640692 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/60070b93-3461-4644-8bf3-93f3d6dea993-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"60070b93-3461-4644-8bf3-93f3d6dea993\") " pod="openstack/rabbitmq-server-0" Nov 23 15:59:07 crc kubenswrapper[5050]: I1123 15:59:07.640718 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/60070b93-3461-4644-8bf3-93f3d6dea993-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"60070b93-3461-4644-8bf3-93f3d6dea993\") " pod="openstack/rabbitmq-server-0" Nov 23 15:59:07 crc kubenswrapper[5050]: I1123 15:59:07.640746 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-39327384-20b7-4807-b511-7257eb36c2b9\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-39327384-20b7-4807-b511-7257eb36c2b9\") pod \"rabbitmq-server-0\" (UID: \"60070b93-3461-4644-8bf3-93f3d6dea993\") " pod="openstack/rabbitmq-server-0" Nov 23 15:59:07 crc kubenswrapper[5050]: I1123 15:59:07.640827 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/60070b93-3461-4644-8bf3-93f3d6dea993-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"60070b93-3461-4644-8bf3-93f3d6dea993\") " pod="openstack/rabbitmq-server-0" Nov 23 15:59:07 crc kubenswrapper[5050]: I1123 15:59:07.640858 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/60070b93-3461-4644-8bf3-93f3d6dea993-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"60070b93-3461-4644-8bf3-93f3d6dea993\") " pod="openstack/rabbitmq-server-0" Nov 23 15:59:07 crc kubenswrapper[5050]: I1123 15:59:07.641733 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/60070b93-3461-4644-8bf3-93f3d6dea993-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"60070b93-3461-4644-8bf3-93f3d6dea993\") " pod="openstack/rabbitmq-server-0" Nov 23 15:59:07 crc kubenswrapper[5050]: I1123 15:59:07.642792 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/60070b93-3461-4644-8bf3-93f3d6dea993-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"60070b93-3461-4644-8bf3-93f3d6dea993\") " pod="openstack/rabbitmq-server-0" Nov 23 15:59:07 crc kubenswrapper[5050]: I1123 15:59:07.642811 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/60070b93-3461-4644-8bf3-93f3d6dea993-server-conf\") pod \"rabbitmq-server-0\" (UID: \"60070b93-3461-4644-8bf3-93f3d6dea993\") " pod="openstack/rabbitmq-server-0" Nov 23 15:59:07 crc kubenswrapper[5050]: I1123 15:59:07.642945 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/60070b93-3461-4644-8bf3-93f3d6dea993-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"60070b93-3461-4644-8bf3-93f3d6dea993\") " pod="openstack/rabbitmq-server-0" Nov 23 15:59:07 crc kubenswrapper[5050]: I1123 15:59:07.648332 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/60070b93-3461-4644-8bf3-93f3d6dea993-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"60070b93-3461-4644-8bf3-93f3d6dea993\") " pod="openstack/rabbitmq-server-0" Nov 23 15:59:07 crc kubenswrapper[5050]: I1123 15:59:07.648783 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/60070b93-3461-4644-8bf3-93f3d6dea993-pod-info\") pod \"rabbitmq-server-0\" (UID: \"60070b93-3461-4644-8bf3-93f3d6dea993\") " pod="openstack/rabbitmq-server-0" Nov 23 15:59:07 crc kubenswrapper[5050]: I1123 15:59:07.649734 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/60070b93-3461-4644-8bf3-93f3d6dea993-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"60070b93-3461-4644-8bf3-93f3d6dea993\") " pod="openstack/rabbitmq-server-0" Nov 23 15:59:07 crc kubenswrapper[5050]: I1123 15:59:07.650625 5050 
csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Nov 23 15:59:07 crc kubenswrapper[5050]: I1123 15:59:07.650674 5050 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-39327384-20b7-4807-b511-7257eb36c2b9\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-39327384-20b7-4807-b511-7257eb36c2b9\") pod \"rabbitmq-server-0\" (UID: \"60070b93-3461-4644-8bf3-93f3d6dea993\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/ef6518b86a463462e0da94e1b3cf57057e957a5269228fea4d68c973e0c23cae/globalmount\"" pod="openstack/rabbitmq-server-0" Nov 23 15:59:07 crc kubenswrapper[5050]: I1123 15:59:07.661797 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jv9k5\" (UniqueName: \"kubernetes.io/projected/60070b93-3461-4644-8bf3-93f3d6dea993-kube-api-access-jv9k5\") pod \"rabbitmq-server-0\" (UID: \"60070b93-3461-4644-8bf3-93f3d6dea993\") " pod="openstack/rabbitmq-server-0" Nov 23 15:59:07 crc kubenswrapper[5050]: I1123 15:59:07.700913 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 23 15:59:07 crc kubenswrapper[5050]: I1123 15:59:07.702750 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 23 15:59:07 crc kubenswrapper[5050]: I1123 15:59:07.704706 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-39327384-20b7-4807-b511-7257eb36c2b9\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-39327384-20b7-4807-b511-7257eb36c2b9\") pod \"rabbitmq-server-0\" (UID: \"60070b93-3461-4644-8bf3-93f3d6dea993\") " pod="openstack/rabbitmq-server-0" Nov 23 15:59:07 crc kubenswrapper[5050]: I1123 15:59:07.708120 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Nov 23 15:59:07 crc kubenswrapper[5050]: I1123 15:59:07.708363 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Nov 23 15:59:07 crc kubenswrapper[5050]: I1123 15:59:07.708402 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Nov 23 15:59:07 crc kubenswrapper[5050]: I1123 15:59:07.708654 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Nov 23 15:59:07 crc kubenswrapper[5050]: I1123 15:59:07.710425 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-x9bzf" Nov 23 15:59:07 crc kubenswrapper[5050]: I1123 15:59:07.726424 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 23 15:59:07 crc kubenswrapper[5050]: I1123 15:59:07.794460 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 23 15:59:07 crc kubenswrapper[5050]: I1123 15:59:07.847284 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/e72bc364-dad9-4403-ba3a-270c07097ba6-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"e72bc364-dad9-4403-ba3a-270c07097ba6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 23 15:59:07 crc kubenswrapper[5050]: I1123 15:59:07.847373 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-chq84\" (UniqueName: \"kubernetes.io/projected/e72bc364-dad9-4403-ba3a-270c07097ba6-kube-api-access-chq84\") pod \"rabbitmq-cell1-server-0\" (UID: \"e72bc364-dad9-4403-ba3a-270c07097ba6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 23 15:59:07 crc kubenswrapper[5050]: I1123 15:59:07.847418 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/e72bc364-dad9-4403-ba3a-270c07097ba6-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"e72bc364-dad9-4403-ba3a-270c07097ba6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 23 15:59:07 crc kubenswrapper[5050]: I1123 15:59:07.847484 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/e72bc364-dad9-4403-ba3a-270c07097ba6-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"e72bc364-dad9-4403-ba3a-270c07097ba6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 23 15:59:07 crc kubenswrapper[5050]: I1123 15:59:07.847703 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/e72bc364-dad9-4403-ba3a-270c07097ba6-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"e72bc364-dad9-4403-ba3a-270c07097ba6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 23 15:59:07 crc kubenswrapper[5050]: I1123 15:59:07.847885 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/e72bc364-dad9-4403-ba3a-270c07097ba6-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"e72bc364-dad9-4403-ba3a-270c07097ba6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 23 15:59:07 crc kubenswrapper[5050]: I1123 15:59:07.847992 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/e72bc364-dad9-4403-ba3a-270c07097ba6-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"e72bc364-dad9-4403-ba3a-270c07097ba6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 23 15:59:07 crc kubenswrapper[5050]: I1123 15:59:07.848168 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-dc43326e-5e1d-4a0d-9c93-1a2004118335\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-dc43326e-5e1d-4a0d-9c93-1a2004118335\") pod \"rabbitmq-cell1-server-0\" (UID: \"e72bc364-dad9-4403-ba3a-270c07097ba6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 23 15:59:07 crc kubenswrapper[5050]: I1123 15:59:07.848274 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: 
\"kubernetes.io/empty-dir/e72bc364-dad9-4403-ba3a-270c07097ba6-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"e72bc364-dad9-4403-ba3a-270c07097ba6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 23 15:59:07 crc kubenswrapper[5050]: I1123 15:59:07.949089 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-chq84\" (UniqueName: \"kubernetes.io/projected/e72bc364-dad9-4403-ba3a-270c07097ba6-kube-api-access-chq84\") pod \"rabbitmq-cell1-server-0\" (UID: \"e72bc364-dad9-4403-ba3a-270c07097ba6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 23 15:59:07 crc kubenswrapper[5050]: I1123 15:59:07.949154 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/e72bc364-dad9-4403-ba3a-270c07097ba6-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"e72bc364-dad9-4403-ba3a-270c07097ba6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 23 15:59:07 crc kubenswrapper[5050]: I1123 15:59:07.949179 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/e72bc364-dad9-4403-ba3a-270c07097ba6-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"e72bc364-dad9-4403-ba3a-270c07097ba6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 23 15:59:07 crc kubenswrapper[5050]: I1123 15:59:07.949218 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/e72bc364-dad9-4403-ba3a-270c07097ba6-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"e72bc364-dad9-4403-ba3a-270c07097ba6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 23 15:59:07 crc kubenswrapper[5050]: I1123 15:59:07.949250 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/e72bc364-dad9-4403-ba3a-270c07097ba6-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"e72bc364-dad9-4403-ba3a-270c07097ba6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 23 15:59:07 crc kubenswrapper[5050]: I1123 15:59:07.949277 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/e72bc364-dad9-4403-ba3a-270c07097ba6-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"e72bc364-dad9-4403-ba3a-270c07097ba6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 23 15:59:07 crc kubenswrapper[5050]: I1123 15:59:07.949315 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-dc43326e-5e1d-4a0d-9c93-1a2004118335\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-dc43326e-5e1d-4a0d-9c93-1a2004118335\") pod \"rabbitmq-cell1-server-0\" (UID: \"e72bc364-dad9-4403-ba3a-270c07097ba6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 23 15:59:07 crc kubenswrapper[5050]: I1123 15:59:07.949341 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/e72bc364-dad9-4403-ba3a-270c07097ba6-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"e72bc364-dad9-4403-ba3a-270c07097ba6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 23 15:59:07 crc kubenswrapper[5050]: I1123 15:59:07.949374 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: 
\"kubernetes.io/secret/e72bc364-dad9-4403-ba3a-270c07097ba6-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"e72bc364-dad9-4403-ba3a-270c07097ba6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 23 15:59:07 crc kubenswrapper[5050]: I1123 15:59:07.950345 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/e72bc364-dad9-4403-ba3a-270c07097ba6-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"e72bc364-dad9-4403-ba3a-270c07097ba6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 23 15:59:07 crc kubenswrapper[5050]: I1123 15:59:07.951284 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/e72bc364-dad9-4403-ba3a-270c07097ba6-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"e72bc364-dad9-4403-ba3a-270c07097ba6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 23 15:59:07 crc kubenswrapper[5050]: I1123 15:59:07.968280 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/e72bc364-dad9-4403-ba3a-270c07097ba6-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"e72bc364-dad9-4403-ba3a-270c07097ba6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 23 15:59:07 crc kubenswrapper[5050]: I1123 15:59:07.978190 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-chq84\" (UniqueName: \"kubernetes.io/projected/e72bc364-dad9-4403-ba3a-270c07097ba6-kube-api-access-chq84\") pod \"rabbitmq-cell1-server-0\" (UID: \"e72bc364-dad9-4403-ba3a-270c07097ba6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 23 15:59:07 crc kubenswrapper[5050]: I1123 15:59:07.978724 5050 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Nov 23 15:59:07 crc kubenswrapper[5050]: I1123 15:59:07.978747 5050 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-dc43326e-5e1d-4a0d-9c93-1a2004118335\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-dc43326e-5e1d-4a0d-9c93-1a2004118335\") pod \"rabbitmq-cell1-server-0\" (UID: \"e72bc364-dad9-4403-ba3a-270c07097ba6\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/8690a45c9e8dae91a044e5db2b518f175247cdaecdc65ad047b3d8ea8b074528/globalmount\"" pod="openstack/rabbitmq-cell1-server-0" Nov 23 15:59:07 crc kubenswrapper[5050]: I1123 15:59:07.989305 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/e72bc364-dad9-4403-ba3a-270c07097ba6-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"e72bc364-dad9-4403-ba3a-270c07097ba6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 23 15:59:07 crc kubenswrapper[5050]: I1123 15:59:07.991658 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/e72bc364-dad9-4403-ba3a-270c07097ba6-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"e72bc364-dad9-4403-ba3a-270c07097ba6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 23 15:59:08 crc kubenswrapper[5050]: I1123 15:59:08.010825 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/e72bc364-dad9-4403-ba3a-270c07097ba6-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"e72bc364-dad9-4403-ba3a-270c07097ba6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 23 15:59:08 crc kubenswrapper[5050]: I1123 15:59:08.013286 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/e72bc364-dad9-4403-ba3a-270c07097ba6-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"e72bc364-dad9-4403-ba3a-270c07097ba6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 23 15:59:08 crc kubenswrapper[5050]: I1123 15:59:08.068979 5050 generic.go:334] "Generic (PLEG): container finished" podID="db2c1781-f656-442a-87db-97e53578c8aa" containerID="e437fa45a347b26055ce885d618baaec215c51f486c12306ab5c13801ae21f85" exitCode=0 Nov 23 15:59:08 crc kubenswrapper[5050]: I1123 15:59:08.069558 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-98ddfc8f-5mxx6" event={"ID":"db2c1781-f656-442a-87db-97e53578c8aa","Type":"ContainerDied","Data":"e437fa45a347b26055ce885d618baaec215c51f486c12306ab5c13801ae21f85"} Nov 23 15:59:08 crc kubenswrapper[5050]: I1123 15:59:08.069609 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-98ddfc8f-5mxx6" event={"ID":"db2c1781-f656-442a-87db-97e53578c8aa","Type":"ContainerStarted","Data":"4019cd677e9a9e758da2f12eb8286b34e8d671a3621d34f9b4202a91c6027557"} Nov 23 15:59:08 crc kubenswrapper[5050]: I1123 15:59:08.091097 5050 generic.go:334] "Generic (PLEG): container finished" podID="cae208aa-722b-4bf1-9566-1b82203eeb6e" containerID="a224cf3cb8e73e96abd996f802ec732b6c0c96244b79ba2d4b704016da0a18d9" exitCode=0 Nov 23 15:59:08 crc kubenswrapper[5050]: I1123 15:59:08.091364 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5d7b5456f5-fs5x5" event={"ID":"cae208aa-722b-4bf1-9566-1b82203eeb6e","Type":"ContainerDied","Data":"a224cf3cb8e73e96abd996f802ec732b6c0c96244b79ba2d4b704016da0a18d9"} Nov 23 
15:59:08 crc kubenswrapper[5050]: I1123 15:59:08.091518 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5d7b5456f5-fs5x5" event={"ID":"cae208aa-722b-4bf1-9566-1b82203eeb6e","Type":"ContainerStarted","Data":"5a4c57dc499f779f665e5777e45344b07ceb0d0cb7cbed9794b8de25fcfdd8cc"} Nov 23 15:59:08 crc kubenswrapper[5050]: I1123 15:59:08.261903 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 23 15:59:08 crc kubenswrapper[5050]: I1123 15:59:08.333201 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-tm256"] Nov 23 15:59:08 crc kubenswrapper[5050]: I1123 15:59:08.345929 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-tm256" Nov 23 15:59:08 crc kubenswrapper[5050]: I1123 15:59:08.377080 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-dc43326e-5e1d-4a0d-9c93-1a2004118335\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-dc43326e-5e1d-4a0d-9c93-1a2004118335\") pod \"rabbitmq-cell1-server-0\" (UID: \"e72bc364-dad9-4403-ba3a-270c07097ba6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 23 15:59:08 crc kubenswrapper[5050]: I1123 15:59:08.377229 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-tm256"] Nov 23 15:59:08 crc kubenswrapper[5050]: I1123 15:59:08.477981 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-grkzn\" (UniqueName: \"kubernetes.io/projected/e44f7cce-e33d-4ad8-b6bb-3be89f3fda53-kube-api-access-grkzn\") pod \"community-operators-tm256\" (UID: \"e44f7cce-e33d-4ad8-b6bb-3be89f3fda53\") " pod="openshift-marketplace/community-operators-tm256" Nov 23 15:59:08 crc kubenswrapper[5050]: I1123 15:59:08.478580 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e44f7cce-e33d-4ad8-b6bb-3be89f3fda53-utilities\") pod \"community-operators-tm256\" (UID: \"e44f7cce-e33d-4ad8-b6bb-3be89f3fda53\") " pod="openshift-marketplace/community-operators-tm256" Nov 23 15:59:08 crc kubenswrapper[5050]: I1123 15:59:08.478611 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e44f7cce-e33d-4ad8-b6bb-3be89f3fda53-catalog-content\") pod \"community-operators-tm256\" (UID: \"e44f7cce-e33d-4ad8-b6bb-3be89f3fda53\") " pod="openshift-marketplace/community-operators-tm256" Nov 23 15:59:08 crc kubenswrapper[5050]: E1123 15:59:08.504796 5050 log.go:32] "CreateContainer in sandbox from runtime service failed" err=< Nov 23 15:59:08 crc kubenswrapper[5050]: rpc error: code = Unknown desc = container create failed: mount `/var/lib/kubelet/pods/cae208aa-722b-4bf1-9566-1b82203eeb6e/volume-subpaths/dns-svc/dnsmasq-dns/1` to `etc/dnsmasq.d/hosts/dns-svc`: No such file or directory Nov 23 15:59:08 crc kubenswrapper[5050]: > podSandboxID="5a4c57dc499f779f665e5777e45344b07ceb0d0cb7cbed9794b8de25fcfdd8cc" Nov 23 15:59:08 crc kubenswrapper[5050]: E1123 15:59:08.505006 5050 kuberuntime_manager.go:1274] "Unhandled Error" err=< Nov 23 15:59:08 crc kubenswrapper[5050]: container &Container{Name:dnsmasq-dns,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d 
--hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n8chc6h5bh56fh546hb7hc8h67h5bchffh577h697h5b5h5bdh59bhf6hf4h558hb5h578h595h5cchfbh644h59ch7fh654h547h587h5cbh5d5h8fq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-5vwq5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:nil,TCPSocket:&TCPSocketAction{Port:{0 5353 },Host:,},GRPC:nil,},InitialDelaySeconds:3,TimeoutSeconds:5,PeriodSeconds:3,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:nil,TCPSocket:&TCPSocketAction{Port:{0 5353 },Host:,},GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-5d7b5456f5-fs5x5_openstack(cae208aa-722b-4bf1-9566-1b82203eeb6e): CreateContainerError: container create failed: mount `/var/lib/kubelet/pods/cae208aa-722b-4bf1-9566-1b82203eeb6e/volume-subpaths/dns-svc/dnsmasq-dns/1` to `etc/dnsmasq.d/hosts/dns-svc`: No such file or directory Nov 23 15:59:08 crc kubenswrapper[5050]: > logger="UnhandledError" Nov 23 15:59:08 crc kubenswrapper[5050]: E1123 15:59:08.506654 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"dnsmasq-dns\" with CreateContainerError: \"container create failed: mount `/var/lib/kubelet/pods/cae208aa-722b-4bf1-9566-1b82203eeb6e/volume-subpaths/dns-svc/dnsmasq-dns/1` to `etc/dnsmasq.d/hosts/dns-svc`: No such file or directory\\n\"" pod="openstack/dnsmasq-dns-5d7b5456f5-fs5x5" podUID="cae208aa-722b-4bf1-9566-1b82203eeb6e" Nov 23 15:59:08 crc kubenswrapper[5050]: I1123 15:59:08.580154 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e44f7cce-e33d-4ad8-b6bb-3be89f3fda53-utilities\") pod \"community-operators-tm256\" (UID: 
\"e44f7cce-e33d-4ad8-b6bb-3be89f3fda53\") " pod="openshift-marketplace/community-operators-tm256" Nov 23 15:59:08 crc kubenswrapper[5050]: I1123 15:59:08.580217 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e44f7cce-e33d-4ad8-b6bb-3be89f3fda53-catalog-content\") pod \"community-operators-tm256\" (UID: \"e44f7cce-e33d-4ad8-b6bb-3be89f3fda53\") " pod="openshift-marketplace/community-operators-tm256" Nov 23 15:59:08 crc kubenswrapper[5050]: I1123 15:59:08.580264 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-grkzn\" (UniqueName: \"kubernetes.io/projected/e44f7cce-e33d-4ad8-b6bb-3be89f3fda53-kube-api-access-grkzn\") pod \"community-operators-tm256\" (UID: \"e44f7cce-e33d-4ad8-b6bb-3be89f3fda53\") " pod="openshift-marketplace/community-operators-tm256" Nov 23 15:59:08 crc kubenswrapper[5050]: I1123 15:59:08.580892 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e44f7cce-e33d-4ad8-b6bb-3be89f3fda53-utilities\") pod \"community-operators-tm256\" (UID: \"e44f7cce-e33d-4ad8-b6bb-3be89f3fda53\") " pod="openshift-marketplace/community-operators-tm256" Nov 23 15:59:08 crc kubenswrapper[5050]: I1123 15:59:08.580994 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e44f7cce-e33d-4ad8-b6bb-3be89f3fda53-catalog-content\") pod \"community-operators-tm256\" (UID: \"e44f7cce-e33d-4ad8-b6bb-3be89f3fda53\") " pod="openshift-marketplace/community-operators-tm256" Nov 23 15:59:08 crc kubenswrapper[5050]: I1123 15:59:08.600816 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-grkzn\" (UniqueName: \"kubernetes.io/projected/e44f7cce-e33d-4ad8-b6bb-3be89f3fda53-kube-api-access-grkzn\") pod \"community-operators-tm256\" (UID: \"e44f7cce-e33d-4ad8-b6bb-3be89f3fda53\") " pod="openshift-marketplace/community-operators-tm256" Nov 23 15:59:08 crc kubenswrapper[5050]: I1123 15:59:08.643777 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 23 15:59:08 crc kubenswrapper[5050]: I1123 15:59:08.691234 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-tm256" Nov 23 15:59:08 crc kubenswrapper[5050]: I1123 15:59:08.958870 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-galera-0"] Nov 23 15:59:08 crc kubenswrapper[5050]: I1123 15:59:08.961899 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-galera-0" Nov 23 15:59:08 crc kubenswrapper[5050]: I1123 15:59:08.972636 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config-data" Nov 23 15:59:08 crc kubenswrapper[5050]: I1123 15:59:08.972897 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-scripts" Nov 23 15:59:08 crc kubenswrapper[5050]: I1123 15:59:08.973070 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-svc" Nov 23 15:59:08 crc kubenswrapper[5050]: I1123 15:59:08.975810 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-dockercfg-vjwrg" Nov 23 15:59:08 crc kubenswrapper[5050]: I1123 15:59:08.978845 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"combined-ca-bundle" Nov 23 15:59:08 crc kubenswrapper[5050]: I1123 15:59:08.991931 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Nov 23 15:59:09 crc kubenswrapper[5050]: I1123 15:59:09.097948 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/4ec299fe-6b4a-4fe5-a401-ebc1d8dc8511-config-data-generated\") pod \"openstack-galera-0\" (UID: \"4ec299fe-6b4a-4fe5-a401-ebc1d8dc8511\") " pod="openstack/openstack-galera-0" Nov 23 15:59:09 crc kubenswrapper[5050]: I1123 15:59:09.098012 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5s586\" (UniqueName: \"kubernetes.io/projected/4ec299fe-6b4a-4fe5-a401-ebc1d8dc8511-kube-api-access-5s586\") pod \"openstack-galera-0\" (UID: \"4ec299fe-6b4a-4fe5-a401-ebc1d8dc8511\") " pod="openstack/openstack-galera-0" Nov 23 15:59:09 crc kubenswrapper[5050]: I1123 15:59:09.098034 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4ec299fe-6b4a-4fe5-a401-ebc1d8dc8511-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"4ec299fe-6b4a-4fe5-a401-ebc1d8dc8511\") " pod="openstack/openstack-galera-0" Nov 23 15:59:09 crc kubenswrapper[5050]: I1123 15:59:09.098076 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/4ec299fe-6b4a-4fe5-a401-ebc1d8dc8511-config-data-default\") pod \"openstack-galera-0\" (UID: \"4ec299fe-6b4a-4fe5-a401-ebc1d8dc8511\") " pod="openstack/openstack-galera-0" Nov 23 15:59:09 crc kubenswrapper[5050]: I1123 15:59:09.098117 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4ec299fe-6b4a-4fe5-a401-ebc1d8dc8511-operator-scripts\") pod \"openstack-galera-0\" (UID: \"4ec299fe-6b4a-4fe5-a401-ebc1d8dc8511\") " pod="openstack/openstack-galera-0" Nov 23 15:59:09 crc kubenswrapper[5050]: I1123 15:59:09.098141 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/4ec299fe-6b4a-4fe5-a401-ebc1d8dc8511-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"4ec299fe-6b4a-4fe5-a401-ebc1d8dc8511\") " pod="openstack/openstack-galera-0" Nov 23 15:59:09 crc kubenswrapper[5050]: I1123 15:59:09.098161 5050 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/4ec299fe-6b4a-4fe5-a401-ebc1d8dc8511-kolla-config\") pod \"openstack-galera-0\" (UID: \"4ec299fe-6b4a-4fe5-a401-ebc1d8dc8511\") " pod="openstack/openstack-galera-0" Nov 23 15:59:09 crc kubenswrapper[5050]: I1123 15:59:09.098213 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-32a59a3a-223b-4445-bedb-6e4526952b62\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-32a59a3a-223b-4445-bedb-6e4526952b62\") pod \"openstack-galera-0\" (UID: \"4ec299fe-6b4a-4fe5-a401-ebc1d8dc8511\") " pod="openstack/openstack-galera-0" Nov 23 15:59:09 crc kubenswrapper[5050]: I1123 15:59:09.102799 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-98ddfc8f-5mxx6" event={"ID":"db2c1781-f656-442a-87db-97e53578c8aa","Type":"ContainerStarted","Data":"2fae4240262da61acb11468a7b0e66a1d938004d20822955473b00911e841d91"} Nov 23 15:59:09 crc kubenswrapper[5050]: I1123 15:59:09.102880 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-98ddfc8f-5mxx6" Nov 23 15:59:09 crc kubenswrapper[5050]: I1123 15:59:09.105984 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"60070b93-3461-4644-8bf3-93f3d6dea993","Type":"ContainerStarted","Data":"3b12c6f29940eb8cc3df979f76d8b97a05ae7f2c085e5e8123a987dc207c267a"} Nov 23 15:59:09 crc kubenswrapper[5050]: I1123 15:59:09.136184 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-98ddfc8f-5mxx6" podStartSLOduration=3.136153066 podStartE2EDuration="3.136153066s" podCreationTimestamp="2025-11-23 15:59:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 15:59:09.128158929 +0000 UTC m=+4644.295155424" watchObservedRunningTime="2025-11-23 15:59:09.136153066 +0000 UTC m=+4644.303149541" Nov 23 15:59:09 crc kubenswrapper[5050]: I1123 15:59:09.198036 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 23 15:59:09 crc kubenswrapper[5050]: I1123 15:59:09.199806 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-32a59a3a-223b-4445-bedb-6e4526952b62\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-32a59a3a-223b-4445-bedb-6e4526952b62\") pod \"openstack-galera-0\" (UID: \"4ec299fe-6b4a-4fe5-a401-ebc1d8dc8511\") " pod="openstack/openstack-galera-0" Nov 23 15:59:09 crc kubenswrapper[5050]: I1123 15:59:09.199910 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/4ec299fe-6b4a-4fe5-a401-ebc1d8dc8511-config-data-generated\") pod \"openstack-galera-0\" (UID: \"4ec299fe-6b4a-4fe5-a401-ebc1d8dc8511\") " pod="openstack/openstack-galera-0" Nov 23 15:59:09 crc kubenswrapper[5050]: I1123 15:59:09.199941 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5s586\" (UniqueName: \"kubernetes.io/projected/4ec299fe-6b4a-4fe5-a401-ebc1d8dc8511-kube-api-access-5s586\") pod \"openstack-galera-0\" (UID: \"4ec299fe-6b4a-4fe5-a401-ebc1d8dc8511\") " pod="openstack/openstack-galera-0" Nov 23 15:59:09 crc kubenswrapper[5050]: I1123 15:59:09.199964 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4ec299fe-6b4a-4fe5-a401-ebc1d8dc8511-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"4ec299fe-6b4a-4fe5-a401-ebc1d8dc8511\") " pod="openstack/openstack-galera-0" Nov 23 15:59:09 crc kubenswrapper[5050]: I1123 15:59:09.199981 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/4ec299fe-6b4a-4fe5-a401-ebc1d8dc8511-config-data-default\") pod \"openstack-galera-0\" (UID: \"4ec299fe-6b4a-4fe5-a401-ebc1d8dc8511\") " pod="openstack/openstack-galera-0" Nov 23 15:59:09 crc kubenswrapper[5050]: I1123 15:59:09.200027 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4ec299fe-6b4a-4fe5-a401-ebc1d8dc8511-operator-scripts\") pod \"openstack-galera-0\" (UID: \"4ec299fe-6b4a-4fe5-a401-ebc1d8dc8511\") " pod="openstack/openstack-galera-0" Nov 23 15:59:09 crc kubenswrapper[5050]: I1123 15:59:09.200056 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/4ec299fe-6b4a-4fe5-a401-ebc1d8dc8511-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"4ec299fe-6b4a-4fe5-a401-ebc1d8dc8511\") " pod="openstack/openstack-galera-0" Nov 23 15:59:09 crc kubenswrapper[5050]: I1123 15:59:09.200091 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/4ec299fe-6b4a-4fe5-a401-ebc1d8dc8511-kolla-config\") pod \"openstack-galera-0\" (UID: \"4ec299fe-6b4a-4fe5-a401-ebc1d8dc8511\") " pod="openstack/openstack-galera-0" Nov 23 15:59:09 crc kubenswrapper[5050]: I1123 15:59:09.201748 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/4ec299fe-6b4a-4fe5-a401-ebc1d8dc8511-config-data-generated\") pod \"openstack-galera-0\" (UID: \"4ec299fe-6b4a-4fe5-a401-ebc1d8dc8511\") " pod="openstack/openstack-galera-0" Nov 23 15:59:09 crc kubenswrapper[5050]: I1123 15:59:09.202663 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4ec299fe-6b4a-4fe5-a401-ebc1d8dc8511-operator-scripts\") pod \"openstack-galera-0\" (UID: \"4ec299fe-6b4a-4fe5-a401-ebc1d8dc8511\") " pod="openstack/openstack-galera-0" Nov 23 15:59:09 crc kubenswrapper[5050]: I1123 15:59:09.202875 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/4ec299fe-6b4a-4fe5-a401-ebc1d8dc8511-config-data-default\") pod \"openstack-galera-0\" (UID: \"4ec299fe-6b4a-4fe5-a401-ebc1d8dc8511\") " pod="openstack/openstack-galera-0" Nov 23 15:59:09 crc kubenswrapper[5050]: I1123 15:59:09.203286 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/4ec299fe-6b4a-4fe5-a401-ebc1d8dc8511-kolla-config\") pod \"openstack-galera-0\" (UID: \"4ec299fe-6b4a-4fe5-a401-ebc1d8dc8511\") " pod="openstack/openstack-galera-0" Nov 23 15:59:09 crc kubenswrapper[5050]: I1123 15:59:09.208905 5050 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Nov 23 15:59:09 crc kubenswrapper[5050]: I1123 15:59:09.209057 5050 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-32a59a3a-223b-4445-bedb-6e4526952b62\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-32a59a3a-223b-4445-bedb-6e4526952b62\") pod \"openstack-galera-0\" (UID: \"4ec299fe-6b4a-4fe5-a401-ebc1d8dc8511\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/8ab18983687069dea3ab919430c6bdedb4b79a2a810ab71a6e0bd01b12915f00/globalmount\"" pod="openstack/openstack-galera-0" Nov 23 15:59:09 crc kubenswrapper[5050]: I1123 15:59:09.266643 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4ec299fe-6b4a-4fe5-a401-ebc1d8dc8511-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"4ec299fe-6b4a-4fe5-a401-ebc1d8dc8511\") " pod="openstack/openstack-galera-0" Nov 23 15:59:09 crc kubenswrapper[5050]: I1123 15:59:09.266660 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/4ec299fe-6b4a-4fe5-a401-ebc1d8dc8511-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"4ec299fe-6b4a-4fe5-a401-ebc1d8dc8511\") " pod="openstack/openstack-galera-0" Nov 23 15:59:09 crc kubenswrapper[5050]: I1123 15:59:09.269504 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5s586\" (UniqueName: \"kubernetes.io/projected/4ec299fe-6b4a-4fe5-a401-ebc1d8dc8511-kube-api-access-5s586\") pod \"openstack-galera-0\" (UID: \"4ec299fe-6b4a-4fe5-a401-ebc1d8dc8511\") " pod="openstack/openstack-galera-0" Nov 23 15:59:09 crc kubenswrapper[5050]: I1123 15:59:09.281783 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-tm256"] Nov 23 15:59:09 crc kubenswrapper[5050]: W1123 15:59:09.288876 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode44f7cce_e33d_4ad8_b6bb_3be89f3fda53.slice/crio-9465d27ffadd576b4fcf23738c6b2261790b01d8cebad6a1097abe0329624870 WatchSource:0}: Error finding container 9465d27ffadd576b4fcf23738c6b2261790b01d8cebad6a1097abe0329624870: Status 404 returned error can't find the container with id 9465d27ffadd576b4fcf23738c6b2261790b01d8cebad6a1097abe0329624870 Nov 23 15:59:09 crc kubenswrapper[5050]: I1123 15:59:09.302747 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-32a59a3a-223b-4445-bedb-6e4526952b62\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-32a59a3a-223b-4445-bedb-6e4526952b62\") pod \"openstack-galera-0\" (UID: \"4ec299fe-6b4a-4fe5-a401-ebc1d8dc8511\") " pod="openstack/openstack-galera-0" Nov 23 15:59:09 crc kubenswrapper[5050]: I1123 15:59:09.445936 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/memcached-0"] Nov 23 15:59:09 crc kubenswrapper[5050]: I1123 15:59:09.447398 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/memcached-0" Nov 23 15:59:09 crc kubenswrapper[5050]: I1123 15:59:09.452822 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"memcached-config-data" Nov 23 15:59:09 crc kubenswrapper[5050]: I1123 15:59:09.453370 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"memcached-memcached-dockercfg-22tn6" Nov 23 15:59:09 crc kubenswrapper[5050]: I1123 15:59:09.457793 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Nov 23 15:59:09 crc kubenswrapper[5050]: I1123 15:59:09.596078 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0" Nov 23 15:59:09 crc kubenswrapper[5050]: I1123 15:59:09.618534 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rhftq\" (UniqueName: \"kubernetes.io/projected/a1ca158f-7aa6-4339-a321-81831457d2b0-kube-api-access-rhftq\") pod \"memcached-0\" (UID: \"a1ca158f-7aa6-4339-a321-81831457d2b0\") " pod="openstack/memcached-0" Nov 23 15:59:09 crc kubenswrapper[5050]: I1123 15:59:09.618641 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/a1ca158f-7aa6-4339-a321-81831457d2b0-config-data\") pod \"memcached-0\" (UID: \"a1ca158f-7aa6-4339-a321-81831457d2b0\") " pod="openstack/memcached-0" Nov 23 15:59:09 crc kubenswrapper[5050]: I1123 15:59:09.618666 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/a1ca158f-7aa6-4339-a321-81831457d2b0-kolla-config\") pod \"memcached-0\" (UID: \"a1ca158f-7aa6-4339-a321-81831457d2b0\") " pod="openstack/memcached-0" Nov 23 15:59:09 crc kubenswrapper[5050]: I1123 15:59:09.719966 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/a1ca158f-7aa6-4339-a321-81831457d2b0-config-data\") pod \"memcached-0\" (UID: \"a1ca158f-7aa6-4339-a321-81831457d2b0\") " pod="openstack/memcached-0" Nov 23 15:59:09 crc kubenswrapper[5050]: I1123 15:59:09.720023 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/a1ca158f-7aa6-4339-a321-81831457d2b0-kolla-config\") pod \"memcached-0\" (UID: \"a1ca158f-7aa6-4339-a321-81831457d2b0\") " pod="openstack/memcached-0" Nov 23 15:59:09 crc kubenswrapper[5050]: I1123 15:59:09.720171 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rhftq\" (UniqueName: \"kubernetes.io/projected/a1ca158f-7aa6-4339-a321-81831457d2b0-kube-api-access-rhftq\") pod \"memcached-0\" (UID: \"a1ca158f-7aa6-4339-a321-81831457d2b0\") " pod="openstack/memcached-0" Nov 23 15:59:09 crc kubenswrapper[5050]: I1123 15:59:09.721095 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/a1ca158f-7aa6-4339-a321-81831457d2b0-config-data\") pod \"memcached-0\" (UID: \"a1ca158f-7aa6-4339-a321-81831457d2b0\") " pod="openstack/memcached-0" Nov 23 15:59:09 crc kubenswrapper[5050]: I1123 15:59:09.721124 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/a1ca158f-7aa6-4339-a321-81831457d2b0-kolla-config\") pod \"memcached-0\" (UID: 
\"a1ca158f-7aa6-4339-a321-81831457d2b0\") " pod="openstack/memcached-0" Nov 23 15:59:09 crc kubenswrapper[5050]: I1123 15:59:09.760735 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rhftq\" (UniqueName: \"kubernetes.io/projected/a1ca158f-7aa6-4339-a321-81831457d2b0-kube-api-access-rhftq\") pod \"memcached-0\" (UID: \"a1ca158f-7aa6-4339-a321-81831457d2b0\") " pod="openstack/memcached-0" Nov 23 15:59:09 crc kubenswrapper[5050]: I1123 15:59:09.838741 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Nov 23 15:59:10 crc kubenswrapper[5050]: I1123 15:59:10.122937 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5d7b5456f5-fs5x5" event={"ID":"cae208aa-722b-4bf1-9566-1b82203eeb6e","Type":"ContainerStarted","Data":"52912b997d891f0bb27923a310644d781a02032169675a0d95dcd7669c61e38c"} Nov 23 15:59:10 crc kubenswrapper[5050]: I1123 15:59:10.123686 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5d7b5456f5-fs5x5" Nov 23 15:59:10 crc kubenswrapper[5050]: I1123 15:59:10.125410 5050 generic.go:334] "Generic (PLEG): container finished" podID="e44f7cce-e33d-4ad8-b6bb-3be89f3fda53" containerID="0c726452578748764cc60853adb96153ce3aaf3e0ed4ed9f74d6b73d9b16c9c8" exitCode=0 Nov 23 15:59:10 crc kubenswrapper[5050]: I1123 15:59:10.125539 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tm256" event={"ID":"e44f7cce-e33d-4ad8-b6bb-3be89f3fda53","Type":"ContainerDied","Data":"0c726452578748764cc60853adb96153ce3aaf3e0ed4ed9f74d6b73d9b16c9c8"} Nov 23 15:59:10 crc kubenswrapper[5050]: I1123 15:59:10.125581 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tm256" event={"ID":"e44f7cce-e33d-4ad8-b6bb-3be89f3fda53","Type":"ContainerStarted","Data":"9465d27ffadd576b4fcf23738c6b2261790b01d8cebad6a1097abe0329624870"} Nov 23 15:59:10 crc kubenswrapper[5050]: I1123 15:59:10.126950 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"e72bc364-dad9-4403-ba3a-270c07097ba6","Type":"ContainerStarted","Data":"86aea6927a9cb32d249fbd74ff6520c53ea2fc366431b6ff3e1888123d05fea8"} Nov 23 15:59:10 crc kubenswrapper[5050]: I1123 15:59:10.153594 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5d7b5456f5-fs5x5" podStartSLOduration=4.153570317 podStartE2EDuration="4.153570317s" podCreationTimestamp="2025-11-23 15:59:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 15:59:10.147523026 +0000 UTC m=+4645.314519511" watchObservedRunningTime="2025-11-23 15:59:10.153570317 +0000 UTC m=+4645.320566802" Nov 23 15:59:10 crc kubenswrapper[5050]: I1123 15:59:10.199408 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Nov 23 15:59:10 crc kubenswrapper[5050]: W1123 15:59:10.207308 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4ec299fe_6b4a_4fe5_a401_ebc1d8dc8511.slice/crio-387e7730f8d84e7810416531774add17236236edae8aa05ffde4ad66a165dad4 WatchSource:0}: Error finding container 387e7730f8d84e7810416531774add17236236edae8aa05ffde4ad66a165dad4: Status 404 returned error can't find the container with id 
387e7730f8d84e7810416531774add17236236edae8aa05ffde4ad66a165dad4 Nov 23 15:59:10 crc kubenswrapper[5050]: I1123 15:59:10.286237 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Nov 23 15:59:10 crc kubenswrapper[5050]: W1123 15:59:10.303529 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda1ca158f_7aa6_4339_a321_81831457d2b0.slice/crio-18a11a639579a1929574fa57e63dfb57cbe55e672e37657f1af5df9b41c8c1e0 WatchSource:0}: Error finding container 18a11a639579a1929574fa57e63dfb57cbe55e672e37657f1af5df9b41c8c1e0: Status 404 returned error can't find the container with id 18a11a639579a1929574fa57e63dfb57cbe55e672e37657f1af5df9b41c8c1e0 Nov 23 15:59:10 crc kubenswrapper[5050]: I1123 15:59:10.681076 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 23 15:59:10 crc kubenswrapper[5050]: I1123 15:59:10.682758 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Nov 23 15:59:10 crc kubenswrapper[5050]: I1123 15:59:10.686656 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-scripts" Nov 23 15:59:10 crc kubenswrapper[5050]: I1123 15:59:10.686923 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-cell1-svc" Nov 23 15:59:10 crc kubenswrapper[5050]: I1123 15:59:10.687127 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-config-data" Nov 23 15:59:10 crc kubenswrapper[5050]: I1123 15:59:10.687330 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-cell1-dockercfg-t682m" Nov 23 15:59:10 crc kubenswrapper[5050]: I1123 15:59:10.735792 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 23 15:59:10 crc kubenswrapper[5050]: I1123 15:59:10.847966 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8ssdx\" (UniqueName: \"kubernetes.io/projected/fd4aea72-077d-4db3-85ee-3ecf19b1f7a9-kube-api-access-8ssdx\") pod \"openstack-cell1-galera-0\" (UID: \"fd4aea72-077d-4db3-85ee-3ecf19b1f7a9\") " pod="openstack/openstack-cell1-galera-0" Nov 23 15:59:10 crc kubenswrapper[5050]: I1123 15:59:10.848025 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/fd4aea72-077d-4db3-85ee-3ecf19b1f7a9-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"fd4aea72-077d-4db3-85ee-3ecf19b1f7a9\") " pod="openstack/openstack-cell1-galera-0" Nov 23 15:59:10 crc kubenswrapper[5050]: I1123 15:59:10.848068 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/fd4aea72-077d-4db3-85ee-3ecf19b1f7a9-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"fd4aea72-077d-4db3-85ee-3ecf19b1f7a9\") " pod="openstack/openstack-cell1-galera-0" Nov 23 15:59:10 crc kubenswrapper[5050]: I1123 15:59:10.848117 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/fd4aea72-077d-4db3-85ee-3ecf19b1f7a9-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"fd4aea72-077d-4db3-85ee-3ecf19b1f7a9\") " 
pod="openstack/openstack-cell1-galera-0" Nov 23 15:59:10 crc kubenswrapper[5050]: I1123 15:59:10.848151 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/fd4aea72-077d-4db3-85ee-3ecf19b1f7a9-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"fd4aea72-077d-4db3-85ee-3ecf19b1f7a9\") " pod="openstack/openstack-cell1-galera-0" Nov 23 15:59:10 crc kubenswrapper[5050]: I1123 15:59:10.848384 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd4aea72-077d-4db3-85ee-3ecf19b1f7a9-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"fd4aea72-077d-4db3-85ee-3ecf19b1f7a9\") " pod="openstack/openstack-cell1-galera-0" Nov 23 15:59:10 crc kubenswrapper[5050]: I1123 15:59:10.848648 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fd4aea72-077d-4db3-85ee-3ecf19b1f7a9-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"fd4aea72-077d-4db3-85ee-3ecf19b1f7a9\") " pod="openstack/openstack-cell1-galera-0" Nov 23 15:59:10 crc kubenswrapper[5050]: I1123 15:59:10.848756 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-838880fe-9d68-4003-aea3-424a7f2a8f9d\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-838880fe-9d68-4003-aea3-424a7f2a8f9d\") pod \"openstack-cell1-galera-0\" (UID: \"fd4aea72-077d-4db3-85ee-3ecf19b1f7a9\") " pod="openstack/openstack-cell1-galera-0" Nov 23 15:59:10 crc kubenswrapper[5050]: I1123 15:59:10.950706 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/fd4aea72-077d-4db3-85ee-3ecf19b1f7a9-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"fd4aea72-077d-4db3-85ee-3ecf19b1f7a9\") " pod="openstack/openstack-cell1-galera-0" Nov 23 15:59:10 crc kubenswrapper[5050]: I1123 15:59:10.950814 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/fd4aea72-077d-4db3-85ee-3ecf19b1f7a9-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"fd4aea72-077d-4db3-85ee-3ecf19b1f7a9\") " pod="openstack/openstack-cell1-galera-0" Nov 23 15:59:10 crc kubenswrapper[5050]: I1123 15:59:10.950856 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/fd4aea72-077d-4db3-85ee-3ecf19b1f7a9-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"fd4aea72-077d-4db3-85ee-3ecf19b1f7a9\") " pod="openstack/openstack-cell1-galera-0" Nov 23 15:59:10 crc kubenswrapper[5050]: I1123 15:59:10.950885 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd4aea72-077d-4db3-85ee-3ecf19b1f7a9-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"fd4aea72-077d-4db3-85ee-3ecf19b1f7a9\") " pod="openstack/openstack-cell1-galera-0" Nov 23 15:59:10 crc kubenswrapper[5050]: I1123 15:59:10.950919 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fd4aea72-077d-4db3-85ee-3ecf19b1f7a9-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: 
\"fd4aea72-077d-4db3-85ee-3ecf19b1f7a9\") " pod="openstack/openstack-cell1-galera-0" Nov 23 15:59:10 crc kubenswrapper[5050]: I1123 15:59:10.950983 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-838880fe-9d68-4003-aea3-424a7f2a8f9d\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-838880fe-9d68-4003-aea3-424a7f2a8f9d\") pod \"openstack-cell1-galera-0\" (UID: \"fd4aea72-077d-4db3-85ee-3ecf19b1f7a9\") " pod="openstack/openstack-cell1-galera-0" Nov 23 15:59:10 crc kubenswrapper[5050]: I1123 15:59:10.951053 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8ssdx\" (UniqueName: \"kubernetes.io/projected/fd4aea72-077d-4db3-85ee-3ecf19b1f7a9-kube-api-access-8ssdx\") pod \"openstack-cell1-galera-0\" (UID: \"fd4aea72-077d-4db3-85ee-3ecf19b1f7a9\") " pod="openstack/openstack-cell1-galera-0" Nov 23 15:59:10 crc kubenswrapper[5050]: I1123 15:59:10.951077 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/fd4aea72-077d-4db3-85ee-3ecf19b1f7a9-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"fd4aea72-077d-4db3-85ee-3ecf19b1f7a9\") " pod="openstack/openstack-cell1-galera-0" Nov 23 15:59:10 crc kubenswrapper[5050]: I1123 15:59:10.951599 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/fd4aea72-077d-4db3-85ee-3ecf19b1f7a9-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"fd4aea72-077d-4db3-85ee-3ecf19b1f7a9\") " pod="openstack/openstack-cell1-galera-0" Nov 23 15:59:10 crc kubenswrapper[5050]: I1123 15:59:10.953071 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/fd4aea72-077d-4db3-85ee-3ecf19b1f7a9-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"fd4aea72-077d-4db3-85ee-3ecf19b1f7a9\") " pod="openstack/openstack-cell1-galera-0" Nov 23 15:59:10 crc kubenswrapper[5050]: I1123 15:59:10.953436 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/fd4aea72-077d-4db3-85ee-3ecf19b1f7a9-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"fd4aea72-077d-4db3-85ee-3ecf19b1f7a9\") " pod="openstack/openstack-cell1-galera-0" Nov 23 15:59:10 crc kubenswrapper[5050]: I1123 15:59:10.954107 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fd4aea72-077d-4db3-85ee-3ecf19b1f7a9-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"fd4aea72-077d-4db3-85ee-3ecf19b1f7a9\") " pod="openstack/openstack-cell1-galera-0" Nov 23 15:59:10 crc kubenswrapper[5050]: I1123 15:59:10.956681 5050 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Nov 23 15:59:10 crc kubenswrapper[5050]: I1123 15:59:10.956723 5050 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-838880fe-9d68-4003-aea3-424a7f2a8f9d\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-838880fe-9d68-4003-aea3-424a7f2a8f9d\") pod \"openstack-cell1-galera-0\" (UID: \"fd4aea72-077d-4db3-85ee-3ecf19b1f7a9\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/cc66f2ee1caba9cd4453af79af43a76a00bfffb98fcf7eecb1cc79fc62848750/globalmount\"" pod="openstack/openstack-cell1-galera-0" Nov 23 15:59:10 crc kubenswrapper[5050]: I1123 15:59:10.961801 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/fd4aea72-077d-4db3-85ee-3ecf19b1f7a9-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"fd4aea72-077d-4db3-85ee-3ecf19b1f7a9\") " pod="openstack/openstack-cell1-galera-0" Nov 23 15:59:10 crc kubenswrapper[5050]: I1123 15:59:10.961920 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd4aea72-077d-4db3-85ee-3ecf19b1f7a9-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"fd4aea72-077d-4db3-85ee-3ecf19b1f7a9\") " pod="openstack/openstack-cell1-galera-0" Nov 23 15:59:11 crc kubenswrapper[5050]: I1123 15:59:11.139562 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"a1ca158f-7aa6-4339-a321-81831457d2b0","Type":"ContainerStarted","Data":"27ce0d28bcfc20b811dc43c3cd1bef7dbc78a0634a8ab79c553ec199f4ce76b6"} Nov 23 15:59:11 crc kubenswrapper[5050]: I1123 15:59:11.139634 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"a1ca158f-7aa6-4339-a321-81831457d2b0","Type":"ContainerStarted","Data":"18a11a639579a1929574fa57e63dfb57cbe55e672e37657f1af5df9b41c8c1e0"} Nov 23 15:59:11 crc kubenswrapper[5050]: I1123 15:59:11.139684 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/memcached-0" Nov 23 15:59:11 crc kubenswrapper[5050]: I1123 15:59:11.142750 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"4ec299fe-6b4a-4fe5-a401-ebc1d8dc8511","Type":"ContainerStarted","Data":"dc7aa839907445328865a093b59ca69fc5ffeb2d34fa00b55c5f5f814b7dd571"} Nov 23 15:59:11 crc kubenswrapper[5050]: I1123 15:59:11.142847 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"4ec299fe-6b4a-4fe5-a401-ebc1d8dc8511","Type":"ContainerStarted","Data":"387e7730f8d84e7810416531774add17236236edae8aa05ffde4ad66a165dad4"} Nov 23 15:59:11 crc kubenswrapper[5050]: I1123 15:59:11.144527 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"60070b93-3461-4644-8bf3-93f3d6dea993","Type":"ContainerStarted","Data":"8dbffce1d84fbf64ab72e001bf96bb4dbaf7216e0beb5ac29fe96cce16e6a767"} Nov 23 15:59:11 crc kubenswrapper[5050]: I1123 15:59:11.146995 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"e72bc364-dad9-4403-ba3a-270c07097ba6","Type":"ContainerStarted","Data":"d991713587d0ad7254c335799a1919b5ef93e56583e55a919d2db7813fb583b5"} Nov 23 15:59:11 crc kubenswrapper[5050]: I1123 15:59:11.170003 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/memcached-0" podStartSLOduration=2.16997107 
podStartE2EDuration="2.16997107s" podCreationTimestamp="2025-11-23 15:59:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 15:59:11.169127366 +0000 UTC m=+4646.336123891" watchObservedRunningTime="2025-11-23 15:59:11.16997107 +0000 UTC m=+4646.336967565" Nov 23 15:59:11 crc kubenswrapper[5050]: I1123 15:59:11.461309 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8ssdx\" (UniqueName: \"kubernetes.io/projected/fd4aea72-077d-4db3-85ee-3ecf19b1f7a9-kube-api-access-8ssdx\") pod \"openstack-cell1-galera-0\" (UID: \"fd4aea72-077d-4db3-85ee-3ecf19b1f7a9\") " pod="openstack/openstack-cell1-galera-0" Nov 23 15:59:11 crc kubenswrapper[5050]: I1123 15:59:11.500532 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-838880fe-9d68-4003-aea3-424a7f2a8f9d\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-838880fe-9d68-4003-aea3-424a7f2a8f9d\") pod \"openstack-cell1-galera-0\" (UID: \"fd4aea72-077d-4db3-85ee-3ecf19b1f7a9\") " pod="openstack/openstack-cell1-galera-0" Nov 23 15:59:11 crc kubenswrapper[5050]: I1123 15:59:11.663406 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Nov 23 15:59:12 crc kubenswrapper[5050]: I1123 15:59:12.159228 5050 generic.go:334] "Generic (PLEG): container finished" podID="e44f7cce-e33d-4ad8-b6bb-3be89f3fda53" containerID="8b8d8dab119d63d79b9a0a24d7dc81792f0203bfd65795301797e16c477fd8ad" exitCode=0 Nov 23 15:59:12 crc kubenswrapper[5050]: I1123 15:59:12.159324 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tm256" event={"ID":"e44f7cce-e33d-4ad8-b6bb-3be89f3fda53","Type":"ContainerDied","Data":"8b8d8dab119d63d79b9a0a24d7dc81792f0203bfd65795301797e16c477fd8ad"} Nov 23 15:59:12 crc kubenswrapper[5050]: I1123 15:59:12.222561 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 23 15:59:13 crc kubenswrapper[5050]: I1123 15:59:13.172842 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tm256" event={"ID":"e44f7cce-e33d-4ad8-b6bb-3be89f3fda53","Type":"ContainerStarted","Data":"5909062e2af7d98a12afa85a72c53f66558fab7a302bb6ea54882d0ad2d19038"} Nov 23 15:59:13 crc kubenswrapper[5050]: I1123 15:59:13.176324 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"fd4aea72-077d-4db3-85ee-3ecf19b1f7a9","Type":"ContainerStarted","Data":"f6c74623da09b2c1eea7cced51a06aec15fcc130a960e0480487368f9395af29"} Nov 23 15:59:13 crc kubenswrapper[5050]: I1123 15:59:13.176362 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"fd4aea72-077d-4db3-85ee-3ecf19b1f7a9","Type":"ContainerStarted","Data":"ac3e4dd203baad72cf135615c56723228b8418fbb1ef65268865abc31ac7e28b"} Nov 23 15:59:13 crc kubenswrapper[5050]: I1123 15:59:13.200626 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-tm256" podStartSLOduration=2.653250491 podStartE2EDuration="5.200583044s" podCreationTimestamp="2025-11-23 15:59:08 +0000 UTC" firstStartedPulling="2025-11-23 15:59:10.127357054 +0000 UTC m=+4645.294353539" lastFinishedPulling="2025-11-23 15:59:12.674689607 +0000 UTC m=+4647.841686092" observedRunningTime="2025-11-23 15:59:13.197702852 +0000 UTC 
m=+4648.364699357" watchObservedRunningTime="2025-11-23 15:59:13.200583044 +0000 UTC m=+4648.367579519" Nov 23 15:59:15 crc kubenswrapper[5050]: I1123 15:59:15.198618 5050 generic.go:334] "Generic (PLEG): container finished" podID="4ec299fe-6b4a-4fe5-a401-ebc1d8dc8511" containerID="dc7aa839907445328865a093b59ca69fc5ffeb2d34fa00b55c5f5f814b7dd571" exitCode=0 Nov 23 15:59:15 crc kubenswrapper[5050]: I1123 15:59:15.198714 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"4ec299fe-6b4a-4fe5-a401-ebc1d8dc8511","Type":"ContainerDied","Data":"dc7aa839907445328865a093b59ca69fc5ffeb2d34fa00b55c5f5f814b7dd571"} Nov 23 15:59:16 crc kubenswrapper[5050]: I1123 15:59:16.212726 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"4ec299fe-6b4a-4fe5-a401-ebc1d8dc8511","Type":"ContainerStarted","Data":"33275cd04d50f3d8466695bef610c0507f143ab0058cc392bc915551a120613f"} Nov 23 15:59:16 crc kubenswrapper[5050]: I1123 15:59:16.219485 5050 generic.go:334] "Generic (PLEG): container finished" podID="fd4aea72-077d-4db3-85ee-3ecf19b1f7a9" containerID="f6c74623da09b2c1eea7cced51a06aec15fcc130a960e0480487368f9395af29" exitCode=0 Nov 23 15:59:16 crc kubenswrapper[5050]: I1123 15:59:16.219565 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"fd4aea72-077d-4db3-85ee-3ecf19b1f7a9","Type":"ContainerDied","Data":"f6c74623da09b2c1eea7cced51a06aec15fcc130a960e0480487368f9395af29"} Nov 23 15:59:16 crc kubenswrapper[5050]: I1123 15:59:16.254899 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-galera-0" podStartSLOduration=9.254863497 podStartE2EDuration="9.254863497s" podCreationTimestamp="2025-11-23 15:59:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 15:59:16.250497673 +0000 UTC m=+4651.417494168" watchObservedRunningTime="2025-11-23 15:59:16.254863497 +0000 UTC m=+4651.421860022" Nov 23 15:59:16 crc kubenswrapper[5050]: I1123 15:59:16.598750 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5d7b5456f5-fs5x5" Nov 23 15:59:16 crc kubenswrapper[5050]: I1123 15:59:16.858713 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-98ddfc8f-5mxx6" Nov 23 15:59:16 crc kubenswrapper[5050]: I1123 15:59:16.930709 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5d7b5456f5-fs5x5"] Nov 23 15:59:17 crc kubenswrapper[5050]: I1123 15:59:17.230538 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5d7b5456f5-fs5x5" podUID="cae208aa-722b-4bf1-9566-1b82203eeb6e" containerName="dnsmasq-dns" containerID="cri-o://52912b997d891f0bb27923a310644d781a02032169675a0d95dcd7669c61e38c" gracePeriod=10 Nov 23 15:59:17 crc kubenswrapper[5050]: I1123 15:59:17.230862 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"fd4aea72-077d-4db3-85ee-3ecf19b1f7a9","Type":"ContainerStarted","Data":"f1abfb7dd19636ad1cb52f0fb227af4ef710d9da549503b44884451f15a67a99"} Nov 23 15:59:17 crc kubenswrapper[5050]: I1123 15:59:17.269705 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-cell1-galera-0" podStartSLOduration=8.269683883999999 podStartE2EDuration="8.269683884s" 
podCreationTimestamp="2025-11-23 15:59:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 15:59:17.263475269 +0000 UTC m=+4652.430471764" watchObservedRunningTime="2025-11-23 15:59:17.269683884 +0000 UTC m=+4652.436680389" Nov 23 15:59:17 crc kubenswrapper[5050]: I1123 15:59:17.720575 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5d7b5456f5-fs5x5" Nov 23 15:59:17 crc kubenswrapper[5050]: I1123 15:59:17.888565 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cae208aa-722b-4bf1-9566-1b82203eeb6e-dns-svc\") pod \"cae208aa-722b-4bf1-9566-1b82203eeb6e\" (UID: \"cae208aa-722b-4bf1-9566-1b82203eeb6e\") " Nov 23 15:59:17 crc kubenswrapper[5050]: I1123 15:59:17.888670 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cae208aa-722b-4bf1-9566-1b82203eeb6e-config\") pod \"cae208aa-722b-4bf1-9566-1b82203eeb6e\" (UID: \"cae208aa-722b-4bf1-9566-1b82203eeb6e\") " Nov 23 15:59:17 crc kubenswrapper[5050]: I1123 15:59:17.888708 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5vwq5\" (UniqueName: \"kubernetes.io/projected/cae208aa-722b-4bf1-9566-1b82203eeb6e-kube-api-access-5vwq5\") pod \"cae208aa-722b-4bf1-9566-1b82203eeb6e\" (UID: \"cae208aa-722b-4bf1-9566-1b82203eeb6e\") " Nov 23 15:59:17 crc kubenswrapper[5050]: I1123 15:59:17.896619 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cae208aa-722b-4bf1-9566-1b82203eeb6e-kube-api-access-5vwq5" (OuterVolumeSpecName: "kube-api-access-5vwq5") pod "cae208aa-722b-4bf1-9566-1b82203eeb6e" (UID: "cae208aa-722b-4bf1-9566-1b82203eeb6e"). InnerVolumeSpecName "kube-api-access-5vwq5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:59:17 crc kubenswrapper[5050]: I1123 15:59:17.945641 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cae208aa-722b-4bf1-9566-1b82203eeb6e-config" (OuterVolumeSpecName: "config") pod "cae208aa-722b-4bf1-9566-1b82203eeb6e" (UID: "cae208aa-722b-4bf1-9566-1b82203eeb6e"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 15:59:17 crc kubenswrapper[5050]: I1123 15:59:17.956506 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cae208aa-722b-4bf1-9566-1b82203eeb6e-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "cae208aa-722b-4bf1-9566-1b82203eeb6e" (UID: "cae208aa-722b-4bf1-9566-1b82203eeb6e"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 15:59:17 crc kubenswrapper[5050]: I1123 15:59:17.991084 5050 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cae208aa-722b-4bf1-9566-1b82203eeb6e-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 23 15:59:17 crc kubenswrapper[5050]: I1123 15:59:17.991530 5050 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cae208aa-722b-4bf1-9566-1b82203eeb6e-config\") on node \"crc\" DevicePath \"\"" Nov 23 15:59:17 crc kubenswrapper[5050]: I1123 15:59:17.991555 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5vwq5\" (UniqueName: \"kubernetes.io/projected/cae208aa-722b-4bf1-9566-1b82203eeb6e-kube-api-access-5vwq5\") on node \"crc\" DevicePath \"\"" Nov 23 15:59:18 crc kubenswrapper[5050]: I1123 15:59:18.241127 5050 generic.go:334] "Generic (PLEG): container finished" podID="cae208aa-722b-4bf1-9566-1b82203eeb6e" containerID="52912b997d891f0bb27923a310644d781a02032169675a0d95dcd7669c61e38c" exitCode=0 Nov 23 15:59:18 crc kubenswrapper[5050]: I1123 15:59:18.241185 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5d7b5456f5-fs5x5" event={"ID":"cae208aa-722b-4bf1-9566-1b82203eeb6e","Type":"ContainerDied","Data":"52912b997d891f0bb27923a310644d781a02032169675a0d95dcd7669c61e38c"} Nov 23 15:59:18 crc kubenswrapper[5050]: I1123 15:59:18.241226 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5d7b5456f5-fs5x5" event={"ID":"cae208aa-722b-4bf1-9566-1b82203eeb6e","Type":"ContainerDied","Data":"5a4c57dc499f779f665e5777e45344b07ceb0d0cb7cbed9794b8de25fcfdd8cc"} Nov 23 15:59:18 crc kubenswrapper[5050]: I1123 15:59:18.241251 5050 scope.go:117] "RemoveContainer" containerID="52912b997d891f0bb27923a310644d781a02032169675a0d95dcd7669c61e38c" Nov 23 15:59:18 crc kubenswrapper[5050]: I1123 15:59:18.241375 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5d7b5456f5-fs5x5" Nov 23 15:59:18 crc kubenswrapper[5050]: I1123 15:59:18.268377 5050 scope.go:117] "RemoveContainer" containerID="a224cf3cb8e73e96abd996f802ec732b6c0c96244b79ba2d4b704016da0a18d9" Nov 23 15:59:18 crc kubenswrapper[5050]: I1123 15:59:18.292383 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5d7b5456f5-fs5x5"] Nov 23 15:59:18 crc kubenswrapper[5050]: I1123 15:59:18.300829 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5d7b5456f5-fs5x5"] Nov 23 15:59:18 crc kubenswrapper[5050]: I1123 15:59:18.315155 5050 scope.go:117] "RemoveContainer" containerID="52912b997d891f0bb27923a310644d781a02032169675a0d95dcd7669c61e38c" Nov 23 15:59:18 crc kubenswrapper[5050]: E1123 15:59:18.318690 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"52912b997d891f0bb27923a310644d781a02032169675a0d95dcd7669c61e38c\": container with ID starting with 52912b997d891f0bb27923a310644d781a02032169675a0d95dcd7669c61e38c not found: ID does not exist" containerID="52912b997d891f0bb27923a310644d781a02032169675a0d95dcd7669c61e38c" Nov 23 15:59:18 crc kubenswrapper[5050]: I1123 15:59:18.318762 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"52912b997d891f0bb27923a310644d781a02032169675a0d95dcd7669c61e38c"} err="failed to get container status \"52912b997d891f0bb27923a310644d781a02032169675a0d95dcd7669c61e38c\": rpc error: code = NotFound desc = could not find container \"52912b997d891f0bb27923a310644d781a02032169675a0d95dcd7669c61e38c\": container with ID starting with 52912b997d891f0bb27923a310644d781a02032169675a0d95dcd7669c61e38c not found: ID does not exist" Nov 23 15:59:18 crc kubenswrapper[5050]: I1123 15:59:18.318802 5050 scope.go:117] "RemoveContainer" containerID="a224cf3cb8e73e96abd996f802ec732b6c0c96244b79ba2d4b704016da0a18d9" Nov 23 15:59:18 crc kubenswrapper[5050]: E1123 15:59:18.319694 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a224cf3cb8e73e96abd996f802ec732b6c0c96244b79ba2d4b704016da0a18d9\": container with ID starting with a224cf3cb8e73e96abd996f802ec732b6c0c96244b79ba2d4b704016da0a18d9 not found: ID does not exist" containerID="a224cf3cb8e73e96abd996f802ec732b6c0c96244b79ba2d4b704016da0a18d9" Nov 23 15:59:18 crc kubenswrapper[5050]: I1123 15:59:18.319749 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a224cf3cb8e73e96abd996f802ec732b6c0c96244b79ba2d4b704016da0a18d9"} err="failed to get container status \"a224cf3cb8e73e96abd996f802ec732b6c0c96244b79ba2d4b704016da0a18d9\": rpc error: code = NotFound desc = could not find container \"a224cf3cb8e73e96abd996f802ec732b6c0c96244b79ba2d4b704016da0a18d9\": container with ID starting with a224cf3cb8e73e96abd996f802ec732b6c0c96244b79ba2d4b704016da0a18d9 not found: ID does not exist" Nov 23 15:59:18 crc kubenswrapper[5050]: I1123 15:59:18.692477 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-tm256" Nov 23 15:59:18 crc kubenswrapper[5050]: I1123 15:59:18.692562 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-tm256" Nov 23 15:59:18 crc kubenswrapper[5050]: I1123 15:59:18.776829 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" 
status="started" pod="openshift-marketplace/community-operators-tm256" Nov 23 15:59:18 crc kubenswrapper[5050]: E1123 15:59:18.906884 5050 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.32:46436->38.102.83.32:33067: write tcp 38.102.83.32:46436->38.102.83.32:33067: write: broken pipe Nov 23 15:59:19 crc kubenswrapper[5050]: I1123 15:59:19.330087 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-tm256" Nov 23 15:59:19 crc kubenswrapper[5050]: I1123 15:59:19.387193 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-tm256"] Nov 23 15:59:19 crc kubenswrapper[5050]: I1123 15:59:19.565583 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cae208aa-722b-4bf1-9566-1b82203eeb6e" path="/var/lib/kubelet/pods/cae208aa-722b-4bf1-9566-1b82203eeb6e/volumes" Nov 23 15:59:19 crc kubenswrapper[5050]: I1123 15:59:19.596887 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-galera-0" Nov 23 15:59:19 crc kubenswrapper[5050]: I1123 15:59:19.596961 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-galera-0" Nov 23 15:59:19 crc kubenswrapper[5050]: I1123 15:59:19.841837 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/memcached-0" Nov 23 15:59:19 crc kubenswrapper[5050]: I1123 15:59:19.924077 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-galera-0" Nov 23 15:59:20 crc kubenswrapper[5050]: I1123 15:59:20.369642 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-galera-0" Nov 23 15:59:21 crc kubenswrapper[5050]: I1123 15:59:21.270527 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-tm256" podUID="e44f7cce-e33d-4ad8-b6bb-3be89f3fda53" containerName="registry-server" containerID="cri-o://5909062e2af7d98a12afa85a72c53f66558fab7a302bb6ea54882d0ad2d19038" gracePeriod=2 Nov 23 15:59:21 crc kubenswrapper[5050]: I1123 15:59:21.664492 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-cell1-galera-0" Nov 23 15:59:21 crc kubenswrapper[5050]: I1123 15:59:21.666269 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-cell1-galera-0" Nov 23 15:59:22 crc kubenswrapper[5050]: I1123 15:59:22.145615 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-tm256" Nov 23 15:59:22 crc kubenswrapper[5050]: I1123 15:59:22.207030 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-cell1-galera-0" Nov 23 15:59:22 crc kubenswrapper[5050]: I1123 15:59:22.278053 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e44f7cce-e33d-4ad8-b6bb-3be89f3fda53-utilities\") pod \"e44f7cce-e33d-4ad8-b6bb-3be89f3fda53\" (UID: \"e44f7cce-e33d-4ad8-b6bb-3be89f3fda53\") " Nov 23 15:59:22 crc kubenswrapper[5050]: I1123 15:59:22.278189 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e44f7cce-e33d-4ad8-b6bb-3be89f3fda53-catalog-content\") pod \"e44f7cce-e33d-4ad8-b6bb-3be89f3fda53\" (UID: \"e44f7cce-e33d-4ad8-b6bb-3be89f3fda53\") " Nov 23 15:59:22 crc kubenswrapper[5050]: I1123 15:59:22.278328 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-grkzn\" (UniqueName: \"kubernetes.io/projected/e44f7cce-e33d-4ad8-b6bb-3be89f3fda53-kube-api-access-grkzn\") pod \"e44f7cce-e33d-4ad8-b6bb-3be89f3fda53\" (UID: \"e44f7cce-e33d-4ad8-b6bb-3be89f3fda53\") " Nov 23 15:59:22 crc kubenswrapper[5050]: I1123 15:59:22.278920 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e44f7cce-e33d-4ad8-b6bb-3be89f3fda53-utilities" (OuterVolumeSpecName: "utilities") pod "e44f7cce-e33d-4ad8-b6bb-3be89f3fda53" (UID: "e44f7cce-e33d-4ad8-b6bb-3be89f3fda53"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 15:59:22 crc kubenswrapper[5050]: I1123 15:59:22.286889 5050 generic.go:334] "Generic (PLEG): container finished" podID="e44f7cce-e33d-4ad8-b6bb-3be89f3fda53" containerID="5909062e2af7d98a12afa85a72c53f66558fab7a302bb6ea54882d0ad2d19038" exitCode=0 Nov 23 15:59:22 crc kubenswrapper[5050]: I1123 15:59:22.286975 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-tm256" Nov 23 15:59:22 crc kubenswrapper[5050]: I1123 15:59:22.287009 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tm256" event={"ID":"e44f7cce-e33d-4ad8-b6bb-3be89f3fda53","Type":"ContainerDied","Data":"5909062e2af7d98a12afa85a72c53f66558fab7a302bb6ea54882d0ad2d19038"} Nov 23 15:59:22 crc kubenswrapper[5050]: I1123 15:59:22.287508 5050 scope.go:117] "RemoveContainer" containerID="5909062e2af7d98a12afa85a72c53f66558fab7a302bb6ea54882d0ad2d19038" Nov 23 15:59:22 crc kubenswrapper[5050]: I1123 15:59:22.287376 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tm256" event={"ID":"e44f7cce-e33d-4ad8-b6bb-3be89f3fda53","Type":"ContainerDied","Data":"9465d27ffadd576b4fcf23738c6b2261790b01d8cebad6a1097abe0329624870"} Nov 23 15:59:22 crc kubenswrapper[5050]: I1123 15:59:22.289938 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e44f7cce-e33d-4ad8-b6bb-3be89f3fda53-kube-api-access-grkzn" (OuterVolumeSpecName: "kube-api-access-grkzn") pod "e44f7cce-e33d-4ad8-b6bb-3be89f3fda53" (UID: "e44f7cce-e33d-4ad8-b6bb-3be89f3fda53"). InnerVolumeSpecName "kube-api-access-grkzn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 15:59:22 crc kubenswrapper[5050]: I1123 15:59:22.332736 5050 scope.go:117] "RemoveContainer" containerID="8b8d8dab119d63d79b9a0a24d7dc81792f0203bfd65795301797e16c477fd8ad" Nov 23 15:59:22 crc kubenswrapper[5050]: I1123 15:59:22.362873 5050 scope.go:117] "RemoveContainer" containerID="0c726452578748764cc60853adb96153ce3aaf3e0ed4ed9f74d6b73d9b16c9c8" Nov 23 15:59:22 crc kubenswrapper[5050]: I1123 15:59:22.381660 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-cell1-galera-0" Nov 23 15:59:22 crc kubenswrapper[5050]: I1123 15:59:22.382385 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-grkzn\" (UniqueName: \"kubernetes.io/projected/e44f7cce-e33d-4ad8-b6bb-3be89f3fda53-kube-api-access-grkzn\") on node \"crc\" DevicePath \"\"" Nov 23 15:59:22 crc kubenswrapper[5050]: I1123 15:59:22.382755 5050 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e44f7cce-e33d-4ad8-b6bb-3be89f3fda53-utilities\") on node \"crc\" DevicePath \"\"" Nov 23 15:59:22 crc kubenswrapper[5050]: I1123 15:59:22.388573 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e44f7cce-e33d-4ad8-b6bb-3be89f3fda53-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e44f7cce-e33d-4ad8-b6bb-3be89f3fda53" (UID: "e44f7cce-e33d-4ad8-b6bb-3be89f3fda53"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 15:59:22 crc kubenswrapper[5050]: I1123 15:59:22.392536 5050 scope.go:117] "RemoveContainer" containerID="5909062e2af7d98a12afa85a72c53f66558fab7a302bb6ea54882d0ad2d19038" Nov 23 15:59:22 crc kubenswrapper[5050]: E1123 15:59:22.393050 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5909062e2af7d98a12afa85a72c53f66558fab7a302bb6ea54882d0ad2d19038\": container with ID starting with 5909062e2af7d98a12afa85a72c53f66558fab7a302bb6ea54882d0ad2d19038 not found: ID does not exist" containerID="5909062e2af7d98a12afa85a72c53f66558fab7a302bb6ea54882d0ad2d19038" Nov 23 15:59:22 crc kubenswrapper[5050]: I1123 15:59:22.393089 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5909062e2af7d98a12afa85a72c53f66558fab7a302bb6ea54882d0ad2d19038"} err="failed to get container status \"5909062e2af7d98a12afa85a72c53f66558fab7a302bb6ea54882d0ad2d19038\": rpc error: code = NotFound desc = could not find container \"5909062e2af7d98a12afa85a72c53f66558fab7a302bb6ea54882d0ad2d19038\": container with ID starting with 5909062e2af7d98a12afa85a72c53f66558fab7a302bb6ea54882d0ad2d19038 not found: ID does not exist" Nov 23 15:59:22 crc kubenswrapper[5050]: I1123 15:59:22.393130 5050 scope.go:117] "RemoveContainer" containerID="8b8d8dab119d63d79b9a0a24d7dc81792f0203bfd65795301797e16c477fd8ad" Nov 23 15:59:22 crc kubenswrapper[5050]: E1123 15:59:22.393427 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8b8d8dab119d63d79b9a0a24d7dc81792f0203bfd65795301797e16c477fd8ad\": container with ID starting with 8b8d8dab119d63d79b9a0a24d7dc81792f0203bfd65795301797e16c477fd8ad not found: ID does not exist" containerID="8b8d8dab119d63d79b9a0a24d7dc81792f0203bfd65795301797e16c477fd8ad" Nov 23 15:59:22 crc kubenswrapper[5050]: I1123 15:59:22.393471 5050 pod_container_deletor.go:53] 
"DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8b8d8dab119d63d79b9a0a24d7dc81792f0203bfd65795301797e16c477fd8ad"} err="failed to get container status \"8b8d8dab119d63d79b9a0a24d7dc81792f0203bfd65795301797e16c477fd8ad\": rpc error: code = NotFound desc = could not find container \"8b8d8dab119d63d79b9a0a24d7dc81792f0203bfd65795301797e16c477fd8ad\": container with ID starting with 8b8d8dab119d63d79b9a0a24d7dc81792f0203bfd65795301797e16c477fd8ad not found: ID does not exist" Nov 23 15:59:22 crc kubenswrapper[5050]: I1123 15:59:22.393497 5050 scope.go:117] "RemoveContainer" containerID="0c726452578748764cc60853adb96153ce3aaf3e0ed4ed9f74d6b73d9b16c9c8" Nov 23 15:59:22 crc kubenswrapper[5050]: E1123 15:59:22.393778 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0c726452578748764cc60853adb96153ce3aaf3e0ed4ed9f74d6b73d9b16c9c8\": container with ID starting with 0c726452578748764cc60853adb96153ce3aaf3e0ed4ed9f74d6b73d9b16c9c8 not found: ID does not exist" containerID="0c726452578748764cc60853adb96153ce3aaf3e0ed4ed9f74d6b73d9b16c9c8" Nov 23 15:59:22 crc kubenswrapper[5050]: I1123 15:59:22.393825 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0c726452578748764cc60853adb96153ce3aaf3e0ed4ed9f74d6b73d9b16c9c8"} err="failed to get container status \"0c726452578748764cc60853adb96153ce3aaf3e0ed4ed9f74d6b73d9b16c9c8\": rpc error: code = NotFound desc = could not find container \"0c726452578748764cc60853adb96153ce3aaf3e0ed4ed9f74d6b73d9b16c9c8\": container with ID starting with 0c726452578748764cc60853adb96153ce3aaf3e0ed4ed9f74d6b73d9b16c9c8 not found: ID does not exist" Nov 23 15:59:22 crc kubenswrapper[5050]: I1123 15:59:22.485064 5050 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e44f7cce-e33d-4ad8-b6bb-3be89f3fda53-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 23 15:59:22 crc kubenswrapper[5050]: I1123 15:59:22.626989 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-tm256"] Nov 23 15:59:22 crc kubenswrapper[5050]: I1123 15:59:22.636959 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-tm256"] Nov 23 15:59:23 crc kubenswrapper[5050]: I1123 15:59:23.565328 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e44f7cce-e33d-4ad8-b6bb-3be89f3fda53" path="/var/lib/kubelet/pods/e44f7cce-e33d-4ad8-b6bb-3be89f3fda53/volumes" Nov 23 15:59:29 crc kubenswrapper[5050]: I1123 15:59:29.224619 5050 patch_prober.go:28] interesting pod/machine-config-daemon-hlrlq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 23 15:59:29 crc kubenswrapper[5050]: I1123 15:59:29.225464 5050 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 23 15:59:43 crc kubenswrapper[5050]: I1123 15:59:43.527996 5050 generic.go:334] "Generic (PLEG): container finished" podID="60070b93-3461-4644-8bf3-93f3d6dea993" 
containerID="8dbffce1d84fbf64ab72e001bf96bb4dbaf7216e0beb5ac29fe96cce16e6a767" exitCode=0 Nov 23 15:59:43 crc kubenswrapper[5050]: I1123 15:59:43.528141 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"60070b93-3461-4644-8bf3-93f3d6dea993","Type":"ContainerDied","Data":"8dbffce1d84fbf64ab72e001bf96bb4dbaf7216e0beb5ac29fe96cce16e6a767"} Nov 23 15:59:44 crc kubenswrapper[5050]: I1123 15:59:44.542072 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"60070b93-3461-4644-8bf3-93f3d6dea993","Type":"ContainerStarted","Data":"2383b3c5973bd1bcafe22515306281fbd6b9cc09a1d301862448276ae76a3b4e"} Nov 23 15:59:44 crc kubenswrapper[5050]: I1123 15:59:44.542976 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Nov 23 15:59:44 crc kubenswrapper[5050]: I1123 15:59:44.545102 5050 generic.go:334] "Generic (PLEG): container finished" podID="e72bc364-dad9-4403-ba3a-270c07097ba6" containerID="d991713587d0ad7254c335799a1919b5ef93e56583e55a919d2db7813fb583b5" exitCode=0 Nov 23 15:59:44 crc kubenswrapper[5050]: I1123 15:59:44.545174 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"e72bc364-dad9-4403-ba3a-270c07097ba6","Type":"ContainerDied","Data":"d991713587d0ad7254c335799a1919b5ef93e56583e55a919d2db7813fb583b5"} Nov 23 15:59:44 crc kubenswrapper[5050]: I1123 15:59:44.584764 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=38.584739552 podStartE2EDuration="38.584739552s" podCreationTimestamp="2025-11-23 15:59:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 15:59:44.579168964 +0000 UTC m=+4679.746165469" watchObservedRunningTime="2025-11-23 15:59:44.584739552 +0000 UTC m=+4679.751736037" Nov 23 15:59:45 crc kubenswrapper[5050]: I1123 15:59:45.561332 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"e72bc364-dad9-4403-ba3a-270c07097ba6","Type":"ContainerStarted","Data":"501bf1710c9d435ae568e7ecea292d55ae606c630dcf50a982fdebd3ab054f13"} Nov 23 15:59:45 crc kubenswrapper[5050]: I1123 15:59:45.562132 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Nov 23 15:59:45 crc kubenswrapper[5050]: I1123 15:59:45.605904 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=39.605873368 podStartE2EDuration="39.605873368s" podCreationTimestamp="2025-11-23 15:59:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 15:59:45.597172612 +0000 UTC m=+4680.764169147" watchObservedRunningTime="2025-11-23 15:59:45.605873368 +0000 UTC m=+4680.772869903" Nov 23 15:59:57 crc kubenswrapper[5050]: I1123 15:59:57.797180 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Nov 23 15:59:58 crc kubenswrapper[5050]: I1123 15:59:58.647891 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Nov 23 15:59:59 crc kubenswrapper[5050]: I1123 15:59:59.224512 5050 patch_prober.go:28] interesting pod/machine-config-daemon-hlrlq container/machine-config-daemon 
namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 23 15:59:59 crc kubenswrapper[5050]: I1123 15:59:59.225030 5050 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 23 16:00:00 crc kubenswrapper[5050]: I1123 16:00:00.171514 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29398560-6q4s6"] Nov 23 16:00:00 crc kubenswrapper[5050]: E1123 16:00:00.172427 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cae208aa-722b-4bf1-9566-1b82203eeb6e" containerName="dnsmasq-dns" Nov 23 16:00:00 crc kubenswrapper[5050]: I1123 16:00:00.172588 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="cae208aa-722b-4bf1-9566-1b82203eeb6e" containerName="dnsmasq-dns" Nov 23 16:00:00 crc kubenswrapper[5050]: E1123 16:00:00.172717 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e44f7cce-e33d-4ad8-b6bb-3be89f3fda53" containerName="extract-content" Nov 23 16:00:00 crc kubenswrapper[5050]: I1123 16:00:00.172849 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="e44f7cce-e33d-4ad8-b6bb-3be89f3fda53" containerName="extract-content" Nov 23 16:00:00 crc kubenswrapper[5050]: E1123 16:00:00.172976 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cae208aa-722b-4bf1-9566-1b82203eeb6e" containerName="init" Nov 23 16:00:00 crc kubenswrapper[5050]: I1123 16:00:00.173137 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="cae208aa-722b-4bf1-9566-1b82203eeb6e" containerName="init" Nov 23 16:00:00 crc kubenswrapper[5050]: E1123 16:00:00.173291 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e44f7cce-e33d-4ad8-b6bb-3be89f3fda53" containerName="registry-server" Nov 23 16:00:00 crc kubenswrapper[5050]: I1123 16:00:00.173429 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="e44f7cce-e33d-4ad8-b6bb-3be89f3fda53" containerName="registry-server" Nov 23 16:00:00 crc kubenswrapper[5050]: E1123 16:00:00.173621 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e44f7cce-e33d-4ad8-b6bb-3be89f3fda53" containerName="extract-utilities" Nov 23 16:00:00 crc kubenswrapper[5050]: I1123 16:00:00.173747 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="e44f7cce-e33d-4ad8-b6bb-3be89f3fda53" containerName="extract-utilities" Nov 23 16:00:00 crc kubenswrapper[5050]: I1123 16:00:00.174173 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="e44f7cce-e33d-4ad8-b6bb-3be89f3fda53" containerName="registry-server" Nov 23 16:00:00 crc kubenswrapper[5050]: I1123 16:00:00.174325 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="cae208aa-722b-4bf1-9566-1b82203eeb6e" containerName="dnsmasq-dns" Nov 23 16:00:00 crc kubenswrapper[5050]: I1123 16:00:00.175364 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29398560-6q4s6" Nov 23 16:00:00 crc kubenswrapper[5050]: I1123 16:00:00.178977 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 23 16:00:00 crc kubenswrapper[5050]: I1123 16:00:00.180855 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29398560-6q4s6"] Nov 23 16:00:00 crc kubenswrapper[5050]: I1123 16:00:00.187810 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 23 16:00:00 crc kubenswrapper[5050]: I1123 16:00:00.306418 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hnpwz\" (UniqueName: \"kubernetes.io/projected/6a742030-c300-4bc7-b673-8c6411f3c837-kube-api-access-hnpwz\") pod \"collect-profiles-29398560-6q4s6\" (UID: \"6a742030-c300-4bc7-b673-8c6411f3c837\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29398560-6q4s6" Nov 23 16:00:00 crc kubenswrapper[5050]: I1123 16:00:00.306603 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/6a742030-c300-4bc7-b673-8c6411f3c837-secret-volume\") pod \"collect-profiles-29398560-6q4s6\" (UID: \"6a742030-c300-4bc7-b673-8c6411f3c837\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29398560-6q4s6" Nov 23 16:00:00 crc kubenswrapper[5050]: I1123 16:00:00.306687 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/6a742030-c300-4bc7-b673-8c6411f3c837-config-volume\") pod \"collect-profiles-29398560-6q4s6\" (UID: \"6a742030-c300-4bc7-b673-8c6411f3c837\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29398560-6q4s6" Nov 23 16:00:00 crc kubenswrapper[5050]: I1123 16:00:00.408689 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/6a742030-c300-4bc7-b673-8c6411f3c837-config-volume\") pod \"collect-profiles-29398560-6q4s6\" (UID: \"6a742030-c300-4bc7-b673-8c6411f3c837\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29398560-6q4s6" Nov 23 16:00:00 crc kubenswrapper[5050]: I1123 16:00:00.410977 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hnpwz\" (UniqueName: \"kubernetes.io/projected/6a742030-c300-4bc7-b673-8c6411f3c837-kube-api-access-hnpwz\") pod \"collect-profiles-29398560-6q4s6\" (UID: \"6a742030-c300-4bc7-b673-8c6411f3c837\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29398560-6q4s6" Nov 23 16:00:00 crc kubenswrapper[5050]: I1123 16:00:00.412378 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/6a742030-c300-4bc7-b673-8c6411f3c837-secret-volume\") pod \"collect-profiles-29398560-6q4s6\" (UID: \"6a742030-c300-4bc7-b673-8c6411f3c837\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29398560-6q4s6" Nov 23 16:00:00 crc kubenswrapper[5050]: I1123 16:00:00.410887 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/6a742030-c300-4bc7-b673-8c6411f3c837-config-volume\") pod 
\"collect-profiles-29398560-6q4s6\" (UID: \"6a742030-c300-4bc7-b673-8c6411f3c837\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29398560-6q4s6" Nov 23 16:00:00 crc kubenswrapper[5050]: I1123 16:00:00.424891 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/6a742030-c300-4bc7-b673-8c6411f3c837-secret-volume\") pod \"collect-profiles-29398560-6q4s6\" (UID: \"6a742030-c300-4bc7-b673-8c6411f3c837\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29398560-6q4s6" Nov 23 16:00:00 crc kubenswrapper[5050]: I1123 16:00:00.436052 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hnpwz\" (UniqueName: \"kubernetes.io/projected/6a742030-c300-4bc7-b673-8c6411f3c837-kube-api-access-hnpwz\") pod \"collect-profiles-29398560-6q4s6\" (UID: \"6a742030-c300-4bc7-b673-8c6411f3c837\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29398560-6q4s6" Nov 23 16:00:00 crc kubenswrapper[5050]: I1123 16:00:00.503304 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29398560-6q4s6" Nov 23 16:00:00 crc kubenswrapper[5050]: I1123 16:00:00.993763 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29398560-6q4s6"] Nov 23 16:00:01 crc kubenswrapper[5050]: I1123 16:00:01.719922 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29398560-6q4s6" event={"ID":"6a742030-c300-4bc7-b673-8c6411f3c837","Type":"ContainerStarted","Data":"73357890db28dd98f4f807fe52d57d93b84a87a0529d25b2261e7e7b6c764d36"} Nov 23 16:00:01 crc kubenswrapper[5050]: I1123 16:00:01.719985 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29398560-6q4s6" event={"ID":"6a742030-c300-4bc7-b673-8c6411f3c837","Type":"ContainerStarted","Data":"cc48e19b0995f13973b647fc34b5c86b382f929a295d06cb370228265cb78401"} Nov 23 16:00:01 crc kubenswrapper[5050]: I1123 16:00:01.751395 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29398560-6q4s6" podStartSLOduration=1.751368143 podStartE2EDuration="1.751368143s" podCreationTimestamp="2025-11-23 16:00:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 16:00:01.741315928 +0000 UTC m=+4696.908312433" watchObservedRunningTime="2025-11-23 16:00:01.751368143 +0000 UTC m=+4696.918364648" Nov 23 16:00:02 crc kubenswrapper[5050]: I1123 16:00:02.734925 5050 generic.go:334] "Generic (PLEG): container finished" podID="6a742030-c300-4bc7-b673-8c6411f3c837" containerID="73357890db28dd98f4f807fe52d57d93b84a87a0529d25b2261e7e7b6c764d36" exitCode=0 Nov 23 16:00:02 crc kubenswrapper[5050]: I1123 16:00:02.735046 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29398560-6q4s6" event={"ID":"6a742030-c300-4bc7-b673-8c6411f3c837","Type":"ContainerDied","Data":"73357890db28dd98f4f807fe52d57d93b84a87a0529d25b2261e7e7b6c764d36"} Nov 23 16:00:04 crc kubenswrapper[5050]: I1123 16:00:04.237875 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29398560-6q4s6" Nov 23 16:00:04 crc kubenswrapper[5050]: I1123 16:00:04.288897 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hnpwz\" (UniqueName: \"kubernetes.io/projected/6a742030-c300-4bc7-b673-8c6411f3c837-kube-api-access-hnpwz\") pod \"6a742030-c300-4bc7-b673-8c6411f3c837\" (UID: \"6a742030-c300-4bc7-b673-8c6411f3c837\") " Nov 23 16:00:04 crc kubenswrapper[5050]: I1123 16:00:04.290946 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/6a742030-c300-4bc7-b673-8c6411f3c837-config-volume\") pod \"6a742030-c300-4bc7-b673-8c6411f3c837\" (UID: \"6a742030-c300-4bc7-b673-8c6411f3c837\") " Nov 23 16:00:04 crc kubenswrapper[5050]: I1123 16:00:04.291062 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/6a742030-c300-4bc7-b673-8c6411f3c837-secret-volume\") pod \"6a742030-c300-4bc7-b673-8c6411f3c837\" (UID: \"6a742030-c300-4bc7-b673-8c6411f3c837\") " Nov 23 16:00:04 crc kubenswrapper[5050]: I1123 16:00:04.292922 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6a742030-c300-4bc7-b673-8c6411f3c837-config-volume" (OuterVolumeSpecName: "config-volume") pod "6a742030-c300-4bc7-b673-8c6411f3c837" (UID: "6a742030-c300-4bc7-b673-8c6411f3c837"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 16:00:04 crc kubenswrapper[5050]: I1123 16:00:04.313797 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6a742030-c300-4bc7-b673-8c6411f3c837-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "6a742030-c300-4bc7-b673-8c6411f3c837" (UID: "6a742030-c300-4bc7-b673-8c6411f3c837"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:00:04 crc kubenswrapper[5050]: I1123 16:00:04.313970 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6a742030-c300-4bc7-b673-8c6411f3c837-kube-api-access-hnpwz" (OuterVolumeSpecName: "kube-api-access-hnpwz") pod "6a742030-c300-4bc7-b673-8c6411f3c837" (UID: "6a742030-c300-4bc7-b673-8c6411f3c837"). InnerVolumeSpecName "kube-api-access-hnpwz". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 16:00:04 crc kubenswrapper[5050]: I1123 16:00:04.393819 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hnpwz\" (UniqueName: \"kubernetes.io/projected/6a742030-c300-4bc7-b673-8c6411f3c837-kube-api-access-hnpwz\") on node \"crc\" DevicePath \"\"" Nov 23 16:00:04 crc kubenswrapper[5050]: I1123 16:00:04.394155 5050 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/6a742030-c300-4bc7-b673-8c6411f3c837-config-volume\") on node \"crc\" DevicePath \"\"" Nov 23 16:00:04 crc kubenswrapper[5050]: I1123 16:00:04.394222 5050 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/6a742030-c300-4bc7-b673-8c6411f3c837-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 23 16:00:04 crc kubenswrapper[5050]: I1123 16:00:04.437009 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5b7946d7b9-7gb49"] Nov 23 16:00:04 crc kubenswrapper[5050]: E1123 16:00:04.437675 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6a742030-c300-4bc7-b673-8c6411f3c837" containerName="collect-profiles" Nov 23 16:00:04 crc kubenswrapper[5050]: I1123 16:00:04.437792 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="6a742030-c300-4bc7-b673-8c6411f3c837" containerName="collect-profiles" Nov 23 16:00:04 crc kubenswrapper[5050]: I1123 16:00:04.438022 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="6a742030-c300-4bc7-b673-8c6411f3c837" containerName="collect-profiles" Nov 23 16:00:04 crc kubenswrapper[5050]: I1123 16:00:04.438984 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5b7946d7b9-7gb49" Nov 23 16:00:04 crc kubenswrapper[5050]: I1123 16:00:04.460255 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5b7946d7b9-7gb49"] Nov 23 16:00:04 crc kubenswrapper[5050]: I1123 16:00:04.495613 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c7e0522c-5cea-4c2a-b6eb-9e59a873c18f-dns-svc\") pod \"dnsmasq-dns-5b7946d7b9-7gb49\" (UID: \"c7e0522c-5cea-4c2a-b6eb-9e59a873c18f\") " pod="openstack/dnsmasq-dns-5b7946d7b9-7gb49" Nov 23 16:00:04 crc kubenswrapper[5050]: I1123 16:00:04.495741 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lcp2p\" (UniqueName: \"kubernetes.io/projected/c7e0522c-5cea-4c2a-b6eb-9e59a873c18f-kube-api-access-lcp2p\") pod \"dnsmasq-dns-5b7946d7b9-7gb49\" (UID: \"c7e0522c-5cea-4c2a-b6eb-9e59a873c18f\") " pod="openstack/dnsmasq-dns-5b7946d7b9-7gb49" Nov 23 16:00:04 crc kubenswrapper[5050]: I1123 16:00:04.495792 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c7e0522c-5cea-4c2a-b6eb-9e59a873c18f-config\") pod \"dnsmasq-dns-5b7946d7b9-7gb49\" (UID: \"c7e0522c-5cea-4c2a-b6eb-9e59a873c18f\") " pod="openstack/dnsmasq-dns-5b7946d7b9-7gb49" Nov 23 16:00:04 crc kubenswrapper[5050]: I1123 16:00:04.597575 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c7e0522c-5cea-4c2a-b6eb-9e59a873c18f-dns-svc\") pod \"dnsmasq-dns-5b7946d7b9-7gb49\" (UID: \"c7e0522c-5cea-4c2a-b6eb-9e59a873c18f\") " 
pod="openstack/dnsmasq-dns-5b7946d7b9-7gb49" Nov 23 16:00:04 crc kubenswrapper[5050]: I1123 16:00:04.597713 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lcp2p\" (UniqueName: \"kubernetes.io/projected/c7e0522c-5cea-4c2a-b6eb-9e59a873c18f-kube-api-access-lcp2p\") pod \"dnsmasq-dns-5b7946d7b9-7gb49\" (UID: \"c7e0522c-5cea-4c2a-b6eb-9e59a873c18f\") " pod="openstack/dnsmasq-dns-5b7946d7b9-7gb49" Nov 23 16:00:04 crc kubenswrapper[5050]: I1123 16:00:04.597776 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c7e0522c-5cea-4c2a-b6eb-9e59a873c18f-config\") pod \"dnsmasq-dns-5b7946d7b9-7gb49\" (UID: \"c7e0522c-5cea-4c2a-b6eb-9e59a873c18f\") " pod="openstack/dnsmasq-dns-5b7946d7b9-7gb49" Nov 23 16:00:04 crc kubenswrapper[5050]: I1123 16:00:04.600380 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c7e0522c-5cea-4c2a-b6eb-9e59a873c18f-dns-svc\") pod \"dnsmasq-dns-5b7946d7b9-7gb49\" (UID: \"c7e0522c-5cea-4c2a-b6eb-9e59a873c18f\") " pod="openstack/dnsmasq-dns-5b7946d7b9-7gb49" Nov 23 16:00:04 crc kubenswrapper[5050]: I1123 16:00:04.600495 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c7e0522c-5cea-4c2a-b6eb-9e59a873c18f-config\") pod \"dnsmasq-dns-5b7946d7b9-7gb49\" (UID: \"c7e0522c-5cea-4c2a-b6eb-9e59a873c18f\") " pod="openstack/dnsmasq-dns-5b7946d7b9-7gb49" Nov 23 16:00:04 crc kubenswrapper[5050]: I1123 16:00:04.618645 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lcp2p\" (UniqueName: \"kubernetes.io/projected/c7e0522c-5cea-4c2a-b6eb-9e59a873c18f-kube-api-access-lcp2p\") pod \"dnsmasq-dns-5b7946d7b9-7gb49\" (UID: \"c7e0522c-5cea-4c2a-b6eb-9e59a873c18f\") " pod="openstack/dnsmasq-dns-5b7946d7b9-7gb49" Nov 23 16:00:04 crc kubenswrapper[5050]: I1123 16:00:04.759808 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5b7946d7b9-7gb49" Nov 23 16:00:04 crc kubenswrapper[5050]: I1123 16:00:04.760607 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29398560-6q4s6" event={"ID":"6a742030-c300-4bc7-b673-8c6411f3c837","Type":"ContainerDied","Data":"cc48e19b0995f13973b647fc34b5c86b382f929a295d06cb370228265cb78401"} Nov 23 16:00:04 crc kubenswrapper[5050]: I1123 16:00:04.760677 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="cc48e19b0995f13973b647fc34b5c86b382f929a295d06cb370228265cb78401" Nov 23 16:00:04 crc kubenswrapper[5050]: I1123 16:00:04.760704 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29398560-6q4s6" Nov 23 16:00:04 crc kubenswrapper[5050]: I1123 16:00:04.863719 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29398515-8ncgq"] Nov 23 16:00:04 crc kubenswrapper[5050]: I1123 16:00:04.874524 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29398515-8ncgq"] Nov 23 16:00:05 crc kubenswrapper[5050]: I1123 16:00:05.267281 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 23 16:00:05 crc kubenswrapper[5050]: I1123 16:00:05.348856 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5b7946d7b9-7gb49"] Nov 23 16:00:05 crc kubenswrapper[5050]: I1123 16:00:05.560523 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="72f0e4c1-31ac-4791-b22c-c315b6ce4b11" path="/var/lib/kubelet/pods/72f0e4c1-31ac-4791-b22c-c315b6ce4b11/volumes" Nov 23 16:00:05 crc kubenswrapper[5050]: I1123 16:00:05.769912 5050 generic.go:334] "Generic (PLEG): container finished" podID="c7e0522c-5cea-4c2a-b6eb-9e59a873c18f" containerID="68b6acdc53f59c581882f49b239119e0ba7ea72f0083b0ad3cc9808876190547" exitCode=0 Nov 23 16:00:05 crc kubenswrapper[5050]: I1123 16:00:05.769975 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b7946d7b9-7gb49" event={"ID":"c7e0522c-5cea-4c2a-b6eb-9e59a873c18f","Type":"ContainerDied","Data":"68b6acdc53f59c581882f49b239119e0ba7ea72f0083b0ad3cc9808876190547"} Nov 23 16:00:05 crc kubenswrapper[5050]: I1123 16:00:05.770003 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b7946d7b9-7gb49" event={"ID":"c7e0522c-5cea-4c2a-b6eb-9e59a873c18f","Type":"ContainerStarted","Data":"63b3723bfe87d99bd72cc676804c067369f8f4d5db2757d88b5a02978f72e803"} Nov 23 16:00:06 crc kubenswrapper[5050]: I1123 16:00:06.097548 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 23 16:00:06 crc kubenswrapper[5050]: I1123 16:00:06.105037 5050 scope.go:117] "RemoveContainer" containerID="fc2645d00d7fecac9e96af21a596fe4440367cf7a2d77f07770dc70bcf16cc72" Nov 23 16:00:06 crc kubenswrapper[5050]: I1123 16:00:06.778703 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b7946d7b9-7gb49" event={"ID":"c7e0522c-5cea-4c2a-b6eb-9e59a873c18f","Type":"ContainerStarted","Data":"81455a62b04c7d108b8480a60c8c52d6f28c14ce24d91e92f93e2b25091a52fc"} Nov 23 16:00:06 crc kubenswrapper[5050]: I1123 16:00:06.780538 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5b7946d7b9-7gb49" Nov 23 16:00:06 crc kubenswrapper[5050]: I1123 16:00:06.800824 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5b7946d7b9-7gb49" podStartSLOduration=2.800807036 podStartE2EDuration="2.800807036s" podCreationTimestamp="2025-11-23 16:00:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 16:00:06.799123378 +0000 UTC m=+4701.966119863" watchObservedRunningTime="2025-11-23 16:00:06.800807036 +0000 UTC m=+4701.967803521" Nov 23 16:00:07 crc kubenswrapper[5050]: I1123 16:00:07.187290 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-server-0" 
podUID="60070b93-3461-4644-8bf3-93f3d6dea993" containerName="rabbitmq" containerID="cri-o://2383b3c5973bd1bcafe22515306281fbd6b9cc09a1d301862448276ae76a3b4e" gracePeriod=604799 Nov 23 16:00:07 crc kubenswrapper[5050]: I1123 16:00:07.795754 5050 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" podUID="60070b93-3461-4644-8bf3-93f3d6dea993" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.238:5672: connect: connection refused" Nov 23 16:00:08 crc kubenswrapper[5050]: I1123 16:00:08.025679 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-cell1-server-0" podUID="e72bc364-dad9-4403-ba3a-270c07097ba6" containerName="rabbitmq" containerID="cri-o://501bf1710c9d435ae568e7ecea292d55ae606c630dcf50a982fdebd3ab054f13" gracePeriod=604799 Nov 23 16:00:08 crc kubenswrapper[5050]: I1123 16:00:08.644760 5050 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="e72bc364-dad9-4403-ba3a-270c07097ba6" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.239:5672: connect: connection refused" Nov 23 16:00:13 crc kubenswrapper[5050]: I1123 16:00:13.854068 5050 generic.go:334] "Generic (PLEG): container finished" podID="60070b93-3461-4644-8bf3-93f3d6dea993" containerID="2383b3c5973bd1bcafe22515306281fbd6b9cc09a1d301862448276ae76a3b4e" exitCode=0 Nov 23 16:00:13 crc kubenswrapper[5050]: I1123 16:00:13.854155 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"60070b93-3461-4644-8bf3-93f3d6dea993","Type":"ContainerDied","Data":"2383b3c5973bd1bcafe22515306281fbd6b9cc09a1d301862448276ae76a3b4e"} Nov 23 16:00:13 crc kubenswrapper[5050]: I1123 16:00:13.856898 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"60070b93-3461-4644-8bf3-93f3d6dea993","Type":"ContainerDied","Data":"3b12c6f29940eb8cc3df979f76d8b97a05ae7f2c085e5e8123a987dc207c267a"} Nov 23 16:00:13 crc kubenswrapper[5050]: I1123 16:00:13.857055 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3b12c6f29940eb8cc3df979f76d8b97a05ae7f2c085e5e8123a987dc207c267a" Nov 23 16:00:13 crc kubenswrapper[5050]: I1123 16:00:13.884180 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 23 16:00:13 crc kubenswrapper[5050]: I1123 16:00:13.997098 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/60070b93-3461-4644-8bf3-93f3d6dea993-rabbitmq-plugins\") pod \"60070b93-3461-4644-8bf3-93f3d6dea993\" (UID: \"60070b93-3461-4644-8bf3-93f3d6dea993\") " Nov 23 16:00:13 crc kubenswrapper[5050]: I1123 16:00:13.997398 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-39327384-20b7-4807-b511-7257eb36c2b9\") pod \"60070b93-3461-4644-8bf3-93f3d6dea993\" (UID: \"60070b93-3461-4644-8bf3-93f3d6dea993\") " Nov 23 16:00:13 crc kubenswrapper[5050]: I1123 16:00:13.997501 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jv9k5\" (UniqueName: \"kubernetes.io/projected/60070b93-3461-4644-8bf3-93f3d6dea993-kube-api-access-jv9k5\") pod \"60070b93-3461-4644-8bf3-93f3d6dea993\" (UID: \"60070b93-3461-4644-8bf3-93f3d6dea993\") " Nov 23 16:00:13 crc kubenswrapper[5050]: I1123 16:00:13.997558 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/60070b93-3461-4644-8bf3-93f3d6dea993-pod-info\") pod \"60070b93-3461-4644-8bf3-93f3d6dea993\" (UID: \"60070b93-3461-4644-8bf3-93f3d6dea993\") " Nov 23 16:00:13 crc kubenswrapper[5050]: I1123 16:00:13.997604 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/60070b93-3461-4644-8bf3-93f3d6dea993-rabbitmq-confd\") pod \"60070b93-3461-4644-8bf3-93f3d6dea993\" (UID: \"60070b93-3461-4644-8bf3-93f3d6dea993\") " Nov 23 16:00:13 crc kubenswrapper[5050]: I1123 16:00:13.997637 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/60070b93-3461-4644-8bf3-93f3d6dea993-erlang-cookie-secret\") pod \"60070b93-3461-4644-8bf3-93f3d6dea993\" (UID: \"60070b93-3461-4644-8bf3-93f3d6dea993\") " Nov 23 16:00:13 crc kubenswrapper[5050]: I1123 16:00:13.997668 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/60070b93-3461-4644-8bf3-93f3d6dea993-plugins-conf\") pod \"60070b93-3461-4644-8bf3-93f3d6dea993\" (UID: \"60070b93-3461-4644-8bf3-93f3d6dea993\") " Nov 23 16:00:13 crc kubenswrapper[5050]: I1123 16:00:13.997697 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/60070b93-3461-4644-8bf3-93f3d6dea993-rabbitmq-erlang-cookie\") pod \"60070b93-3461-4644-8bf3-93f3d6dea993\" (UID: \"60070b93-3461-4644-8bf3-93f3d6dea993\") " Nov 23 16:00:13 crc kubenswrapper[5050]: I1123 16:00:13.997721 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/60070b93-3461-4644-8bf3-93f3d6dea993-server-conf\") pod \"60070b93-3461-4644-8bf3-93f3d6dea993\" (UID: \"60070b93-3461-4644-8bf3-93f3d6dea993\") " Nov 23 16:00:13 crc kubenswrapper[5050]: I1123 16:00:13.999692 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/60070b93-3461-4644-8bf3-93f3d6dea993-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod 
"60070b93-3461-4644-8bf3-93f3d6dea993" (UID: "60070b93-3461-4644-8bf3-93f3d6dea993"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 16:00:13 crc kubenswrapper[5050]: I1123 16:00:13.999906 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/60070b93-3461-4644-8bf3-93f3d6dea993-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "60070b93-3461-4644-8bf3-93f3d6dea993" (UID: "60070b93-3461-4644-8bf3-93f3d6dea993"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 16:00:14 crc kubenswrapper[5050]: I1123 16:00:14.000015 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/60070b93-3461-4644-8bf3-93f3d6dea993-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "60070b93-3461-4644-8bf3-93f3d6dea993" (UID: "60070b93-3461-4644-8bf3-93f3d6dea993"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 16:00:14 crc kubenswrapper[5050]: I1123 16:00:14.004506 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/60070b93-3461-4644-8bf3-93f3d6dea993-kube-api-access-jv9k5" (OuterVolumeSpecName: "kube-api-access-jv9k5") pod "60070b93-3461-4644-8bf3-93f3d6dea993" (UID: "60070b93-3461-4644-8bf3-93f3d6dea993"). InnerVolumeSpecName "kube-api-access-jv9k5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 16:00:14 crc kubenswrapper[5050]: I1123 16:00:14.005625 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/60070b93-3461-4644-8bf3-93f3d6dea993-pod-info" (OuterVolumeSpecName: "pod-info") pod "60070b93-3461-4644-8bf3-93f3d6dea993" (UID: "60070b93-3461-4644-8bf3-93f3d6dea993"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Nov 23 16:00:14 crc kubenswrapper[5050]: I1123 16:00:14.006022 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/60070b93-3461-4644-8bf3-93f3d6dea993-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "60070b93-3461-4644-8bf3-93f3d6dea993" (UID: "60070b93-3461-4644-8bf3-93f3d6dea993"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:00:14 crc kubenswrapper[5050]: I1123 16:00:14.033666 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-39327384-20b7-4807-b511-7257eb36c2b9" (OuterVolumeSpecName: "persistence") pod "60070b93-3461-4644-8bf3-93f3d6dea993" (UID: "60070b93-3461-4644-8bf3-93f3d6dea993"). InnerVolumeSpecName "pvc-39327384-20b7-4807-b511-7257eb36c2b9". PluginName "kubernetes.io/csi", VolumeGidValue "" Nov 23 16:00:14 crc kubenswrapper[5050]: I1123 16:00:14.042250 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/60070b93-3461-4644-8bf3-93f3d6dea993-server-conf" (OuterVolumeSpecName: "server-conf") pod "60070b93-3461-4644-8bf3-93f3d6dea993" (UID: "60070b93-3461-4644-8bf3-93f3d6dea993"). InnerVolumeSpecName "server-conf". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 16:00:14 crc kubenswrapper[5050]: I1123 16:00:14.090742 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/60070b93-3461-4644-8bf3-93f3d6dea993-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "60070b93-3461-4644-8bf3-93f3d6dea993" (UID: "60070b93-3461-4644-8bf3-93f3d6dea993"). InnerVolumeSpecName "rabbitmq-confd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 16:00:14 crc kubenswrapper[5050]: I1123 16:00:14.099685 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jv9k5\" (UniqueName: \"kubernetes.io/projected/60070b93-3461-4644-8bf3-93f3d6dea993-kube-api-access-jv9k5\") on node \"crc\" DevicePath \"\"" Nov 23 16:00:14 crc kubenswrapper[5050]: I1123 16:00:14.099724 5050 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/60070b93-3461-4644-8bf3-93f3d6dea993-pod-info\") on node \"crc\" DevicePath \"\"" Nov 23 16:00:14 crc kubenswrapper[5050]: I1123 16:00:14.099738 5050 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/60070b93-3461-4644-8bf3-93f3d6dea993-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Nov 23 16:00:14 crc kubenswrapper[5050]: I1123 16:00:14.099748 5050 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/60070b93-3461-4644-8bf3-93f3d6dea993-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Nov 23 16:00:14 crc kubenswrapper[5050]: I1123 16:00:14.099758 5050 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/60070b93-3461-4644-8bf3-93f3d6dea993-plugins-conf\") on node \"crc\" DevicePath \"\"" Nov 23 16:00:14 crc kubenswrapper[5050]: I1123 16:00:14.099767 5050 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/60070b93-3461-4644-8bf3-93f3d6dea993-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Nov 23 16:00:14 crc kubenswrapper[5050]: I1123 16:00:14.099775 5050 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/60070b93-3461-4644-8bf3-93f3d6dea993-server-conf\") on node \"crc\" DevicePath \"\"" Nov 23 16:00:14 crc kubenswrapper[5050]: I1123 16:00:14.099786 5050 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/60070b93-3461-4644-8bf3-93f3d6dea993-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Nov 23 16:00:14 crc kubenswrapper[5050]: I1123 16:00:14.099849 5050 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-39327384-20b7-4807-b511-7257eb36c2b9\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-39327384-20b7-4807-b511-7257eb36c2b9\") on node \"crc\" " Nov 23 16:00:14 crc kubenswrapper[5050]: I1123 16:00:14.115764 5050 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice... 
Nov 23 16:00:14 crc kubenswrapper[5050]: I1123 16:00:14.115921 5050 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-39327384-20b7-4807-b511-7257eb36c2b9" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-39327384-20b7-4807-b511-7257eb36c2b9") on node "crc"
Nov 23 16:00:14 crc kubenswrapper[5050]: I1123 16:00:14.201365 5050 reconciler_common.go:293] "Volume detached for volume \"pvc-39327384-20b7-4807-b511-7257eb36c2b9\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-39327384-20b7-4807-b511-7257eb36c2b9\") on node \"crc\" DevicePath \"\""
Nov 23 16:00:14 crc kubenswrapper[5050]: I1123 16:00:14.762659 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5b7946d7b9-7gb49"
Nov 23 16:00:14 crc kubenswrapper[5050]: I1123 16:00:14.873885 5050 generic.go:334] "Generic (PLEG): container finished" podID="e72bc364-dad9-4403-ba3a-270c07097ba6" containerID="501bf1710c9d435ae568e7ecea292d55ae606c630dcf50a982fdebd3ab054f13" exitCode=0
Nov 23 16:00:14 crc kubenswrapper[5050]: I1123 16:00:14.873969 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"e72bc364-dad9-4403-ba3a-270c07097ba6","Type":"ContainerDied","Data":"501bf1710c9d435ae568e7ecea292d55ae606c630dcf50a982fdebd3ab054f13"}
Nov 23 16:00:14 crc kubenswrapper[5050]: I1123 16:00:14.873999 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0"
Nov 23 16:00:14 crc kubenswrapper[5050]: I1123 16:00:14.943944 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-98ddfc8f-5mxx6"]
Nov 23 16:00:14 crc kubenswrapper[5050]: I1123 16:00:14.944436 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-98ddfc8f-5mxx6" podUID="db2c1781-f656-442a-87db-97e53578c8aa" containerName="dnsmasq-dns" containerID="cri-o://2fae4240262da61acb11468a7b0e66a1d938004d20822955473b00911e841d91" gracePeriod=10
Nov 23 16:00:14 crc kubenswrapper[5050]: I1123 16:00:14.969257 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"]
Nov 23 16:00:14 crc kubenswrapper[5050]: I1123 16:00:14.993472 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-server-0"]
Nov 23 16:00:15 crc kubenswrapper[5050]: I1123 16:00:14.999331 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"]
Nov 23 16:00:15 crc kubenswrapper[5050]: E1123 16:00:14.999891 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="60070b93-3461-4644-8bf3-93f3d6dea993" containerName="setup-container"
Nov 23 16:00:15 crc kubenswrapper[5050]: I1123 16:00:14.999909 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="60070b93-3461-4644-8bf3-93f3d6dea993" containerName="setup-container"
Nov 23 16:00:15 crc kubenswrapper[5050]: E1123 16:00:14.999937 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="60070b93-3461-4644-8bf3-93f3d6dea993" containerName="rabbitmq"
Nov 23 16:00:15 crc kubenswrapper[5050]: I1123 16:00:14.999944 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="60070b93-3461-4644-8bf3-93f3d6dea993" containerName="rabbitmq"
Nov 23 16:00:15 crc kubenswrapper[5050]: I1123 16:00:15.000105 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="60070b93-3461-4644-8bf3-93f3d6dea993" containerName="rabbitmq"
Nov 23 16:00:15 crc kubenswrapper[5050]: I1123 16:00:15.000978 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0"
Nov 23 16:00:15 crc kubenswrapper[5050]: I1123 16:00:15.006479 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"]
Nov 23 16:00:15 crc kubenswrapper[5050]: I1123 16:00:15.006968 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf"
Nov 23 16:00:15 crc kubenswrapper[5050]: I1123 16:00:15.007223 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie"
Nov 23 16:00:15 crc kubenswrapper[5050]: I1123 16:00:15.007298 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-5glpx"
Nov 23 16:00:15 crc kubenswrapper[5050]: I1123 16:00:15.007493 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user"
Nov 23 16:00:15 crc kubenswrapper[5050]: I1123 16:00:15.007659 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf"
Nov 23 16:00:15 crc kubenswrapper[5050]: I1123 16:00:15.123827 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m4lwf\" (UniqueName: \"kubernetes.io/projected/624f99c9-a404-4437-9a6f-835971760d52-kube-api-access-m4lwf\") pod \"rabbitmq-server-0\" (UID: \"624f99c9-a404-4437-9a6f-835971760d52\") " pod="openstack/rabbitmq-server-0"
Nov 23 16:00:15 crc kubenswrapper[5050]: I1123 16:00:15.123890 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/624f99c9-a404-4437-9a6f-835971760d52-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"624f99c9-a404-4437-9a6f-835971760d52\") " pod="openstack/rabbitmq-server-0"
Nov 23 16:00:15 crc kubenswrapper[5050]: I1123 16:00:15.123936 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-39327384-20b7-4807-b511-7257eb36c2b9\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-39327384-20b7-4807-b511-7257eb36c2b9\") pod \"rabbitmq-server-0\" (UID: \"624f99c9-a404-4437-9a6f-835971760d52\") " pod="openstack/rabbitmq-server-0"
Nov 23 16:00:15 crc kubenswrapper[5050]: I1123 16:00:15.124010 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/624f99c9-a404-4437-9a6f-835971760d52-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"624f99c9-a404-4437-9a6f-835971760d52\") " pod="openstack/rabbitmq-server-0"
Nov 23 16:00:15 crc kubenswrapper[5050]: I1123 16:00:15.124054 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/624f99c9-a404-4437-9a6f-835971760d52-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"624f99c9-a404-4437-9a6f-835971760d52\") " pod="openstack/rabbitmq-server-0"
Nov 23 16:00:15 crc kubenswrapper[5050]: I1123 16:00:15.124074 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/624f99c9-a404-4437-9a6f-835971760d52-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"624f99c9-a404-4437-9a6f-835971760d52\") " pod="openstack/rabbitmq-server-0"
Nov 23 16:00:15 crc kubenswrapper[5050]: I1123 16:00:15.124118 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/624f99c9-a404-4437-9a6f-835971760d52-pod-info\") pod \"rabbitmq-server-0\" (UID: \"624f99c9-a404-4437-9a6f-835971760d52\") " pod="openstack/rabbitmq-server-0"
Nov 23 16:00:15 crc kubenswrapper[5050]: I1123 16:00:15.124151 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/624f99c9-a404-4437-9a6f-835971760d52-server-conf\") pod \"rabbitmq-server-0\" (UID: \"624f99c9-a404-4437-9a6f-835971760d52\") " pod="openstack/rabbitmq-server-0"
Nov 23 16:00:15 crc kubenswrapper[5050]: I1123 16:00:15.124171 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/624f99c9-a404-4437-9a6f-835971760d52-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"624f99c9-a404-4437-9a6f-835971760d52\") " pod="openstack/rabbitmq-server-0"
Nov 23 16:00:15 crc kubenswrapper[5050]: I1123 16:00:15.225625 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/624f99c9-a404-4437-9a6f-835971760d52-pod-info\") pod \"rabbitmq-server-0\" (UID: \"624f99c9-a404-4437-9a6f-835971760d52\") " pod="openstack/rabbitmq-server-0"
Nov 23 16:00:15 crc kubenswrapper[5050]: I1123 16:00:15.226219 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/624f99c9-a404-4437-9a6f-835971760d52-server-conf\") pod \"rabbitmq-server-0\" (UID: \"624f99c9-a404-4437-9a6f-835971760d52\") " pod="openstack/rabbitmq-server-0"
Nov 23 16:00:15 crc kubenswrapper[5050]: I1123 16:00:15.226250 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/624f99c9-a404-4437-9a6f-835971760d52-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"624f99c9-a404-4437-9a6f-835971760d52\") " pod="openstack/rabbitmq-server-0"
Nov 23 16:00:15 crc kubenswrapper[5050]: I1123 16:00:15.228137 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/624f99c9-a404-4437-9a6f-835971760d52-server-conf\") pod \"rabbitmq-server-0\" (UID: \"624f99c9-a404-4437-9a6f-835971760d52\") " pod="openstack/rabbitmq-server-0"
Nov 23 16:00:15 crc kubenswrapper[5050]: I1123 16:00:15.228241 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m4lwf\" (UniqueName: \"kubernetes.io/projected/624f99c9-a404-4437-9a6f-835971760d52-kube-api-access-m4lwf\") pod \"rabbitmq-server-0\" (UID: \"624f99c9-a404-4437-9a6f-835971760d52\") " pod="openstack/rabbitmq-server-0"
Nov 23 16:00:15 crc kubenswrapper[5050]: I1123 16:00:15.228298 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/624f99c9-a404-4437-9a6f-835971760d52-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"624f99c9-a404-4437-9a6f-835971760d52\") " pod="openstack/rabbitmq-server-0"
Nov 23 16:00:15 crc kubenswrapper[5050]: I1123 16:00:15.228334 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-39327384-20b7-4807-b511-7257eb36c2b9\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-39327384-20b7-4807-b511-7257eb36c2b9\") pod \"rabbitmq-server-0\" (UID: \"624f99c9-a404-4437-9a6f-835971760d52\") " pod="openstack/rabbitmq-server-0"
Nov 23 16:00:15 crc kubenswrapper[5050]: I1123 16:00:15.228476 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/624f99c9-a404-4437-9a6f-835971760d52-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"624f99c9-a404-4437-9a6f-835971760d52\") " pod="openstack/rabbitmq-server-0"
Nov 23 16:00:15 crc kubenswrapper[5050]: I1123 16:00:15.228545 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/624f99c9-a404-4437-9a6f-835971760d52-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"624f99c9-a404-4437-9a6f-835971760d52\") " pod="openstack/rabbitmq-server-0"
Nov 23 16:00:15 crc kubenswrapper[5050]: I1123 16:00:15.228563 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/624f99c9-a404-4437-9a6f-835971760d52-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"624f99c9-a404-4437-9a6f-835971760d52\") " pod="openstack/rabbitmq-server-0"
Nov 23 16:00:15 crc kubenswrapper[5050]: I1123 16:00:15.228945 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/624f99c9-a404-4437-9a6f-835971760d52-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"624f99c9-a404-4437-9a6f-835971760d52\") " pod="openstack/rabbitmq-server-0"
Nov 23 16:00:15 crc kubenswrapper[5050]: I1123 16:00:15.231514 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/624f99c9-a404-4437-9a6f-835971760d52-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"624f99c9-a404-4437-9a6f-835971760d52\") " pod="openstack/rabbitmq-server-0"
Nov 23 16:00:15 crc kubenswrapper[5050]: I1123 16:00:15.233404 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/624f99c9-a404-4437-9a6f-835971760d52-pod-info\") pod \"rabbitmq-server-0\" (UID: \"624f99c9-a404-4437-9a6f-835971760d52\") " pod="openstack/rabbitmq-server-0"
Nov 23 16:00:15 crc kubenswrapper[5050]: I1123 16:00:15.234176 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/624f99c9-a404-4437-9a6f-835971760d52-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"624f99c9-a404-4437-9a6f-835971760d52\") " pod="openstack/rabbitmq-server-0"
Nov 23 16:00:15 crc kubenswrapper[5050]: I1123 16:00:15.236168 5050 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
Nov 23 16:00:15 crc kubenswrapper[5050]: I1123 16:00:15.236203 5050 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-39327384-20b7-4807-b511-7257eb36c2b9\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-39327384-20b7-4807-b511-7257eb36c2b9\") pod \"rabbitmq-server-0\" (UID: \"624f99c9-a404-4437-9a6f-835971760d52\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/ef6518b86a463462e0da94e1b3cf57057e957a5269228fea4d68c973e0c23cae/globalmount\"" pod="openstack/rabbitmq-server-0"
Nov 23 16:00:15 crc kubenswrapper[5050]: I1123 16:00:15.237323 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/624f99c9-a404-4437-9a6f-835971760d52-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"624f99c9-a404-4437-9a6f-835971760d52\") " pod="openstack/rabbitmq-server-0"
Nov 23 16:00:15 crc kubenswrapper[5050]: I1123 16:00:15.239390 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/624f99c9-a404-4437-9a6f-835971760d52-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"624f99c9-a404-4437-9a6f-835971760d52\") " pod="openstack/rabbitmq-server-0"
Nov 23 16:00:15 crc kubenswrapper[5050]: I1123 16:00:15.264617 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m4lwf\" (UniqueName: \"kubernetes.io/projected/624f99c9-a404-4437-9a6f-835971760d52-kube-api-access-m4lwf\") pod \"rabbitmq-server-0\" (UID: \"624f99c9-a404-4437-9a6f-835971760d52\") " pod="openstack/rabbitmq-server-0"
Nov 23 16:00:15 crc kubenswrapper[5050]: I1123 16:00:15.283606 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0"
Nov 23 16:00:15 crc kubenswrapper[5050]: I1123 16:00:15.287345 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-39327384-20b7-4807-b511-7257eb36c2b9\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-39327384-20b7-4807-b511-7257eb36c2b9\") pod \"rabbitmq-server-0\" (UID: \"624f99c9-a404-4437-9a6f-835971760d52\") " pod="openstack/rabbitmq-server-0"
Nov 23 16:00:15 crc kubenswrapper[5050]: I1123 16:00:15.354263 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0"
Nov 23 16:00:15 crc kubenswrapper[5050]: I1123 16:00:15.371828 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-98ddfc8f-5mxx6"
Nov 23 16:00:15 crc kubenswrapper[5050]: I1123 16:00:15.432963 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/e72bc364-dad9-4403-ba3a-270c07097ba6-pod-info\") pod \"e72bc364-dad9-4403-ba3a-270c07097ba6\" (UID: \"e72bc364-dad9-4403-ba3a-270c07097ba6\") "
Nov 23 16:00:15 crc kubenswrapper[5050]: I1123 16:00:15.433053 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/e72bc364-dad9-4403-ba3a-270c07097ba6-rabbitmq-erlang-cookie\") pod \"e72bc364-dad9-4403-ba3a-270c07097ba6\" (UID: \"e72bc364-dad9-4403-ba3a-270c07097ba6\") "
Nov 23 16:00:15 crc kubenswrapper[5050]: I1123 16:00:15.433244 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-dc43326e-5e1d-4a0d-9c93-1a2004118335\") pod \"e72bc364-dad9-4403-ba3a-270c07097ba6\" (UID: \"e72bc364-dad9-4403-ba3a-270c07097ba6\") "
Nov 23 16:00:15 crc kubenswrapper[5050]: I1123 16:00:15.433309 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/e72bc364-dad9-4403-ba3a-270c07097ba6-plugins-conf\") pod \"e72bc364-dad9-4403-ba3a-270c07097ba6\" (UID: \"e72bc364-dad9-4403-ba3a-270c07097ba6\") "
Nov 23 16:00:15 crc kubenswrapper[5050]: I1123 16:00:15.433345 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/e72bc364-dad9-4403-ba3a-270c07097ba6-rabbitmq-plugins\") pod \"e72bc364-dad9-4403-ba3a-270c07097ba6\" (UID: \"e72bc364-dad9-4403-ba3a-270c07097ba6\") "
Nov 23 16:00:15 crc kubenswrapper[5050]: I1123 16:00:15.433371 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-chq84\" (UniqueName: \"kubernetes.io/projected/e72bc364-dad9-4403-ba3a-270c07097ba6-kube-api-access-chq84\") pod \"e72bc364-dad9-4403-ba3a-270c07097ba6\" (UID: \"e72bc364-dad9-4403-ba3a-270c07097ba6\") "
Nov 23 16:00:15 crc kubenswrapper[5050]: I1123 16:00:15.433414 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/e72bc364-dad9-4403-ba3a-270c07097ba6-erlang-cookie-secret\") pod \"e72bc364-dad9-4403-ba3a-270c07097ba6\" (UID: \"e72bc364-dad9-4403-ba3a-270c07097ba6\") "
Nov 23 16:00:15 crc kubenswrapper[5050]: I1123 16:00:15.433491 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/e72bc364-dad9-4403-ba3a-270c07097ba6-rabbitmq-confd\") pod \"e72bc364-dad9-4403-ba3a-270c07097ba6\" (UID: \"e72bc364-dad9-4403-ba3a-270c07097ba6\") "
Nov 23 16:00:15 crc kubenswrapper[5050]: I1123 16:00:15.433528 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/e72bc364-dad9-4403-ba3a-270c07097ba6-server-conf\") pod \"e72bc364-dad9-4403-ba3a-270c07097ba6\" (UID: \"e72bc364-dad9-4403-ba3a-270c07097ba6\") "
Nov 23 16:00:15 crc kubenswrapper[5050]: I1123 16:00:15.434689 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e72bc364-dad9-4403-ba3a-270c07097ba6-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "e72bc364-dad9-4403-ba3a-270c07097ba6" (UID: "e72bc364-dad9-4403-ba3a-270c07097ba6"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 23 16:00:15 crc kubenswrapper[5050]: I1123 16:00:15.439186 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e72bc364-dad9-4403-ba3a-270c07097ba6-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "e72bc364-dad9-4403-ba3a-270c07097ba6" (UID: "e72bc364-dad9-4403-ba3a-270c07097ba6"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 23 16:00:15 crc kubenswrapper[5050]: I1123 16:00:15.439550 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e72bc364-dad9-4403-ba3a-270c07097ba6-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "e72bc364-dad9-4403-ba3a-270c07097ba6" (UID: "e72bc364-dad9-4403-ba3a-270c07097ba6"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 23 16:00:15 crc kubenswrapper[5050]: I1123 16:00:15.443016 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/e72bc364-dad9-4403-ba3a-270c07097ba6-pod-info" (OuterVolumeSpecName: "pod-info") pod "e72bc364-dad9-4403-ba3a-270c07097ba6" (UID: "e72bc364-dad9-4403-ba3a-270c07097ba6"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue ""
Nov 23 16:00:15 crc kubenswrapper[5050]: I1123 16:00:15.443672 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e72bc364-dad9-4403-ba3a-270c07097ba6-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "e72bc364-dad9-4403-ba3a-270c07097ba6" (UID: "e72bc364-dad9-4403-ba3a-270c07097ba6"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 23 16:00:15 crc kubenswrapper[5050]: I1123 16:00:15.448312 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e72bc364-dad9-4403-ba3a-270c07097ba6-kube-api-access-chq84" (OuterVolumeSpecName: "kube-api-access-chq84") pod "e72bc364-dad9-4403-ba3a-270c07097ba6" (UID: "e72bc364-dad9-4403-ba3a-270c07097ba6"). InnerVolumeSpecName "kube-api-access-chq84". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 23 16:00:15 crc kubenswrapper[5050]: I1123 16:00:15.454043 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e72bc364-dad9-4403-ba3a-270c07097ba6-server-conf" (OuterVolumeSpecName: "server-conf") pod "e72bc364-dad9-4403-ba3a-270c07097ba6" (UID: "e72bc364-dad9-4403-ba3a-270c07097ba6"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 23 16:00:15 crc kubenswrapper[5050]: I1123 16:00:15.456983 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-dc43326e-5e1d-4a0d-9c93-1a2004118335" (OuterVolumeSpecName: "persistence") pod "e72bc364-dad9-4403-ba3a-270c07097ba6" (UID: "e72bc364-dad9-4403-ba3a-270c07097ba6"). InnerVolumeSpecName "pvc-dc43326e-5e1d-4a0d-9c93-1a2004118335". PluginName "kubernetes.io/csi", VolumeGidValue ""
Nov 23 16:00:15 crc kubenswrapper[5050]: I1123 16:00:15.535260 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vzk5w\" (UniqueName: \"kubernetes.io/projected/db2c1781-f656-442a-87db-97e53578c8aa-kube-api-access-vzk5w\") pod \"db2c1781-f656-442a-87db-97e53578c8aa\" (UID: \"db2c1781-f656-442a-87db-97e53578c8aa\") "
Nov 23 16:00:15 crc kubenswrapper[5050]: I1123 16:00:15.535352 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/db2c1781-f656-442a-87db-97e53578c8aa-config\") pod \"db2c1781-f656-442a-87db-97e53578c8aa\" (UID: \"db2c1781-f656-442a-87db-97e53578c8aa\") "
Nov 23 16:00:15 crc kubenswrapper[5050]: I1123 16:00:15.535411 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/db2c1781-f656-442a-87db-97e53578c8aa-dns-svc\") pod \"db2c1781-f656-442a-87db-97e53578c8aa\" (UID: \"db2c1781-f656-442a-87db-97e53578c8aa\") "
Nov 23 16:00:15 crc kubenswrapper[5050]: I1123 16:00:15.540330 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/db2c1781-f656-442a-87db-97e53578c8aa-kube-api-access-vzk5w" (OuterVolumeSpecName: "kube-api-access-vzk5w") pod "db2c1781-f656-442a-87db-97e53578c8aa" (UID: "db2c1781-f656-442a-87db-97e53578c8aa"). InnerVolumeSpecName "kube-api-access-vzk5w". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 23 16:00:15 crc kubenswrapper[5050]: I1123 16:00:15.541094 5050 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/e72bc364-dad9-4403-ba3a-270c07097ba6-plugins-conf\") on node \"crc\" DevicePath \"\""
Nov 23 16:00:15 crc kubenswrapper[5050]: I1123 16:00:15.542012 5050 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/e72bc364-dad9-4403-ba3a-270c07097ba6-rabbitmq-plugins\") on node \"crc\" DevicePath \"\""
Nov 23 16:00:15 crc kubenswrapper[5050]: I1123 16:00:15.542027 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-chq84\" (UniqueName: \"kubernetes.io/projected/e72bc364-dad9-4403-ba3a-270c07097ba6-kube-api-access-chq84\") on node \"crc\" DevicePath \"\""
Nov 23 16:00:15 crc kubenswrapper[5050]: I1123 16:00:15.542042 5050 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/e72bc364-dad9-4403-ba3a-270c07097ba6-erlang-cookie-secret\") on node \"crc\" DevicePath \"\""
Nov 23 16:00:15 crc kubenswrapper[5050]: I1123 16:00:15.542053 5050 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/e72bc364-dad9-4403-ba3a-270c07097ba6-server-conf\") on node \"crc\" DevicePath \"\""
Nov 23 16:00:15 crc kubenswrapper[5050]: I1123 16:00:15.542066 5050 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/e72bc364-dad9-4403-ba3a-270c07097ba6-pod-info\") on node \"crc\" DevicePath \"\""
Nov 23 16:00:15 crc kubenswrapper[5050]: I1123 16:00:15.542079 5050 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/e72bc364-dad9-4403-ba3a-270c07097ba6-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\""
Nov 23 16:00:15 crc kubenswrapper[5050]: I1123 16:00:15.542132 5050 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-dc43326e-5e1d-4a0d-9c93-1a2004118335\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-dc43326e-5e1d-4a0d-9c93-1a2004118335\") on node \"crc\" "
Nov 23 16:00:15 crc kubenswrapper[5050]: I1123 16:00:15.563104 5050 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice...
Nov 23 16:00:15 crc kubenswrapper[5050]: I1123 16:00:15.563319 5050 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-dc43326e-5e1d-4a0d-9c93-1a2004118335" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-dc43326e-5e1d-4a0d-9c93-1a2004118335") on node "crc"
Nov 23 16:00:15 crc kubenswrapper[5050]: I1123 16:00:15.566550 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="60070b93-3461-4644-8bf3-93f3d6dea993" path="/var/lib/kubelet/pods/60070b93-3461-4644-8bf3-93f3d6dea993/volumes"
Nov 23 16:00:15 crc kubenswrapper[5050]: I1123 16:00:15.569567 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e72bc364-dad9-4403-ba3a-270c07097ba6-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "e72bc364-dad9-4403-ba3a-270c07097ba6" (UID: "e72bc364-dad9-4403-ba3a-270c07097ba6"). InnerVolumeSpecName "rabbitmq-confd". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 23 16:00:15 crc kubenswrapper[5050]: I1123 16:00:15.591487 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/db2c1781-f656-442a-87db-97e53578c8aa-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "db2c1781-f656-442a-87db-97e53578c8aa" (UID: "db2c1781-f656-442a-87db-97e53578c8aa"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 23 16:00:15 crc kubenswrapper[5050]: I1123 16:00:15.592867 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/db2c1781-f656-442a-87db-97e53578c8aa-config" (OuterVolumeSpecName: "config") pod "db2c1781-f656-442a-87db-97e53578c8aa" (UID: "db2c1781-f656-442a-87db-97e53578c8aa"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 23 16:00:15 crc kubenswrapper[5050]: I1123 16:00:15.643620 5050 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/db2c1781-f656-442a-87db-97e53578c8aa-dns-svc\") on node \"crc\" DevicePath \"\""
Nov 23 16:00:15 crc kubenswrapper[5050]: I1123 16:00:15.643665 5050 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/e72bc364-dad9-4403-ba3a-270c07097ba6-rabbitmq-confd\") on node \"crc\" DevicePath \"\""
Nov 23 16:00:15 crc kubenswrapper[5050]: I1123 16:00:15.643677 5050 reconciler_common.go:293] "Volume detached for volume \"pvc-dc43326e-5e1d-4a0d-9c93-1a2004118335\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-dc43326e-5e1d-4a0d-9c93-1a2004118335\") on node \"crc\" DevicePath \"\""
Nov 23 16:00:15 crc kubenswrapper[5050]: I1123 16:00:15.643688 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vzk5w\" (UniqueName: \"kubernetes.io/projected/db2c1781-f656-442a-87db-97e53578c8aa-kube-api-access-vzk5w\") on node \"crc\" DevicePath \"\""
Nov 23 16:00:15 crc kubenswrapper[5050]: I1123 16:00:15.643702 5050 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/db2c1781-f656-442a-87db-97e53578c8aa-config\") on node \"crc\" DevicePath \"\""
Nov 23 16:00:15 crc kubenswrapper[5050]: I1123 16:00:15.842190 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"]
Nov 23 16:00:15 crc kubenswrapper[5050]: W1123 16:00:15.847126 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod624f99c9_a404_4437_9a6f_835971760d52.slice/crio-205b5f498cd31f2250c319e85af5fc9839dc45da1495a87eedc5b84be3c7e62e WatchSource:0}: Error finding container 205b5f498cd31f2250c319e85af5fc9839dc45da1495a87eedc5b84be3c7e62e: Status 404 returned error can't find the container with id 205b5f498cd31f2250c319e85af5fc9839dc45da1495a87eedc5b84be3c7e62e
Nov 23 16:00:15 crc kubenswrapper[5050]: I1123 16:00:15.886214 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"e72bc364-dad9-4403-ba3a-270c07097ba6","Type":"ContainerDied","Data":"86aea6927a9cb32d249fbd74ff6520c53ea2fc366431b6ff3e1888123d05fea8"}
Nov 23 16:00:15 crc kubenswrapper[5050]: I1123 16:00:15.886288 5050 scope.go:117] "RemoveContainer" containerID="501bf1710c9d435ae568e7ecea292d55ae606c630dcf50a982fdebd3ab054f13"
Nov 23 16:00:15 crc kubenswrapper[5050]: I1123 16:00:15.886479 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0"
Nov 23 16:00:15 crc kubenswrapper[5050]: I1123 16:00:15.898670 5050 generic.go:334] "Generic (PLEG): container finished" podID="db2c1781-f656-442a-87db-97e53578c8aa" containerID="2fae4240262da61acb11468a7b0e66a1d938004d20822955473b00911e841d91" exitCode=0
Nov 23 16:00:15 crc kubenswrapper[5050]: I1123 16:00:15.898773 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-98ddfc8f-5mxx6" event={"ID":"db2c1781-f656-442a-87db-97e53578c8aa","Type":"ContainerDied","Data":"2fae4240262da61acb11468a7b0e66a1d938004d20822955473b00911e841d91"}
Nov 23 16:00:15 crc kubenswrapper[5050]: I1123 16:00:15.898813 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-98ddfc8f-5mxx6" event={"ID":"db2c1781-f656-442a-87db-97e53578c8aa","Type":"ContainerDied","Data":"4019cd677e9a9e758da2f12eb8286b34e8d671a3621d34f9b4202a91c6027557"}
Nov 23 16:00:15 crc kubenswrapper[5050]: I1123 16:00:15.898913 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-98ddfc8f-5mxx6"
Nov 23 16:00:15 crc kubenswrapper[5050]: I1123 16:00:15.903900 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"624f99c9-a404-4437-9a6f-835971760d52","Type":"ContainerStarted","Data":"205b5f498cd31f2250c319e85af5fc9839dc45da1495a87eedc5b84be3c7e62e"}
Nov 23 16:00:15 crc kubenswrapper[5050]: I1123 16:00:15.929177 5050 scope.go:117] "RemoveContainer" containerID="d991713587d0ad7254c335799a1919b5ef93e56583e55a919d2db7813fb583b5"
Nov 23 16:00:15 crc kubenswrapper[5050]: I1123 16:00:15.939477 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-98ddfc8f-5mxx6"]
Nov 23 16:00:15 crc kubenswrapper[5050]: I1123 16:00:15.953846 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-98ddfc8f-5mxx6"]
Nov 23 16:00:15 crc kubenswrapper[5050]: I1123 16:00:15.970956 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Nov 23 16:00:15 crc kubenswrapper[5050]: I1123 16:00:15.978428 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Nov 23 16:00:15 crc kubenswrapper[5050]: I1123 16:00:15.984362 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Nov 23 16:00:15 crc kubenswrapper[5050]: E1123 16:00:15.984774 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="db2c1781-f656-442a-87db-97e53578c8aa" containerName="init"
Nov 23 16:00:15 crc kubenswrapper[5050]: I1123 16:00:15.984797 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="db2c1781-f656-442a-87db-97e53578c8aa" containerName="init"
Nov 23 16:00:15 crc kubenswrapper[5050]: E1123 16:00:15.984807 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="db2c1781-f656-442a-87db-97e53578c8aa" containerName="dnsmasq-dns"
Nov 23 16:00:15 crc kubenswrapper[5050]: I1123 16:00:15.984814 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="db2c1781-f656-442a-87db-97e53578c8aa" containerName="dnsmasq-dns"
Nov 23 16:00:15 crc kubenswrapper[5050]: E1123 16:00:15.984828 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e72bc364-dad9-4403-ba3a-270c07097ba6" containerName="setup-container"
Nov 23 16:00:15 crc kubenswrapper[5050]: I1123 16:00:15.984836 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="e72bc364-dad9-4403-ba3a-270c07097ba6" containerName="setup-container"
Nov 23 16:00:15 crc kubenswrapper[5050]: E1123 16:00:15.984856 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e72bc364-dad9-4403-ba3a-270c07097ba6" containerName="rabbitmq"
Nov 23 16:00:15 crc kubenswrapper[5050]: I1123 16:00:15.984862 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="e72bc364-dad9-4403-ba3a-270c07097ba6" containerName="rabbitmq"
Nov 23 16:00:15 crc kubenswrapper[5050]: I1123 16:00:15.985015 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="db2c1781-f656-442a-87db-97e53578c8aa" containerName="dnsmasq-dns"
Nov 23 16:00:15 crc kubenswrapper[5050]: I1123 16:00:15.985040 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="e72bc364-dad9-4403-ba3a-270c07097ba6" containerName="rabbitmq"
Nov 23 16:00:15 crc kubenswrapper[5050]: I1123 16:00:15.985958 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0"
Nov 23 16:00:15 crc kubenswrapper[5050]: I1123 16:00:15.992224 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user"
Nov 23 16:00:15 crc kubenswrapper[5050]: I1123 16:00:15.992621 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf"
Nov 23 16:00:15 crc kubenswrapper[5050]: I1123 16:00:15.993993 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-x9bzf"
Nov 23 16:00:15 crc kubenswrapper[5050]: I1123 16:00:15.994667 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf"
Nov 23 16:00:15 crc kubenswrapper[5050]: I1123 16:00:15.995060 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie"
Nov 23 16:00:15 crc kubenswrapper[5050]: I1123 16:00:15.995479 5050 scope.go:117] "RemoveContainer" containerID="2fae4240262da61acb11468a7b0e66a1d938004d20822955473b00911e841d91"
Nov 23 16:00:16 crc kubenswrapper[5050]: I1123 16:00:16.003585 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Nov 23 16:00:16 crc kubenswrapper[5050]: I1123 16:00:16.034859 5050 scope.go:117] "RemoveContainer" containerID="e437fa45a347b26055ce885d618baaec215c51f486c12306ab5c13801ae21f85"
Nov 23 16:00:16 crc kubenswrapper[5050]: I1123 16:00:16.070874 5050 scope.go:117] "RemoveContainer" containerID="2fae4240262da61acb11468a7b0e66a1d938004d20822955473b00911e841d91"
Nov 23 16:00:16 crc kubenswrapper[5050]: E1123 16:00:16.071886 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2fae4240262da61acb11468a7b0e66a1d938004d20822955473b00911e841d91\": container with ID starting with 2fae4240262da61acb11468a7b0e66a1d938004d20822955473b00911e841d91 not found: ID does not exist" containerID="2fae4240262da61acb11468a7b0e66a1d938004d20822955473b00911e841d91"
Nov 23 16:00:16 crc kubenswrapper[5050]: I1123 16:00:16.071922 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2fae4240262da61acb11468a7b0e66a1d938004d20822955473b00911e841d91"} err="failed to get container status \"2fae4240262da61acb11468a7b0e66a1d938004d20822955473b00911e841d91\": rpc error: code = NotFound desc = could not find container \"2fae4240262da61acb11468a7b0e66a1d938004d20822955473b00911e841d91\": container with ID starting with 2fae4240262da61acb11468a7b0e66a1d938004d20822955473b00911e841d91 not found: ID does not exist"
Nov 23 16:00:16 crc kubenswrapper[5050]: I1123 16:00:16.071951 5050 scope.go:117] "RemoveContainer" containerID="e437fa45a347b26055ce885d618baaec215c51f486c12306ab5c13801ae21f85"
Nov 23 16:00:16 crc kubenswrapper[5050]: E1123 16:00:16.072485 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e437fa45a347b26055ce885d618baaec215c51f486c12306ab5c13801ae21f85\": container with ID starting with e437fa45a347b26055ce885d618baaec215c51f486c12306ab5c13801ae21f85 not found: ID does not exist" containerID="e437fa45a347b26055ce885d618baaec215c51f486c12306ab5c13801ae21f85"
Nov 23 16:00:16 crc kubenswrapper[5050]: I1123 16:00:16.072632 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e437fa45a347b26055ce885d618baaec215c51f486c12306ab5c13801ae21f85"} err="failed to get container status \"e437fa45a347b26055ce885d618baaec215c51f486c12306ab5c13801ae21f85\": rpc error: code = NotFound desc = could not find container \"e437fa45a347b26055ce885d618baaec215c51f486c12306ab5c13801ae21f85\": container with ID starting with e437fa45a347b26055ce885d618baaec215c51f486c12306ab5c13801ae21f85 not found: ID does not exist"
Nov 23 16:00:16 crc kubenswrapper[5050]: I1123 16:00:16.153569 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/f2c92766-b409-46b5-a029-6f55fb430e89-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"f2c92766-b409-46b5-a029-6f55fb430e89\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 23 16:00:16 crc kubenswrapper[5050]: I1123 16:00:16.153637 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/f2c92766-b409-46b5-a029-6f55fb430e89-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"f2c92766-b409-46b5-a029-6f55fb430e89\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 23 16:00:16 crc kubenswrapper[5050]: I1123 16:00:16.153661 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g9tzm\" (UniqueName: \"kubernetes.io/projected/f2c92766-b409-46b5-a029-6f55fb430e89-kube-api-access-g9tzm\") pod \"rabbitmq-cell1-server-0\" (UID: \"f2c92766-b409-46b5-a029-6f55fb430e89\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 23 16:00:16 crc kubenswrapper[5050]: I1123 16:00:16.153754 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/f2c92766-b409-46b5-a029-6f55fb430e89-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"f2c92766-b409-46b5-a029-6f55fb430e89\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 23 16:00:16 crc kubenswrapper[5050]: I1123 16:00:16.153795 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/f2c92766-b409-46b5-a029-6f55fb430e89-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"f2c92766-b409-46b5-a029-6f55fb430e89\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 23 16:00:16 crc kubenswrapper[5050]: I1123 16:00:16.153823 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName:
\"kubernetes.io/downward-api/f2c92766-b409-46b5-a029-6f55fb430e89-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"f2c92766-b409-46b5-a029-6f55fb430e89\") " pod="openstack/rabbitmq-cell1-server-0" Nov 23 16:00:16 crc kubenswrapper[5050]: I1123 16:00:16.153840 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/f2c92766-b409-46b5-a029-6f55fb430e89-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"f2c92766-b409-46b5-a029-6f55fb430e89\") " pod="openstack/rabbitmq-cell1-server-0" Nov 23 16:00:16 crc kubenswrapper[5050]: I1123 16:00:16.153898 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-dc43326e-5e1d-4a0d-9c93-1a2004118335\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-dc43326e-5e1d-4a0d-9c93-1a2004118335\") pod \"rabbitmq-cell1-server-0\" (UID: \"f2c92766-b409-46b5-a029-6f55fb430e89\") " pod="openstack/rabbitmq-cell1-server-0" Nov 23 16:00:16 crc kubenswrapper[5050]: I1123 16:00:16.153926 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/f2c92766-b409-46b5-a029-6f55fb430e89-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"f2c92766-b409-46b5-a029-6f55fb430e89\") " pod="openstack/rabbitmq-cell1-server-0" Nov 23 16:00:16 crc kubenswrapper[5050]: I1123 16:00:16.255499 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/f2c92766-b409-46b5-a029-6f55fb430e89-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"f2c92766-b409-46b5-a029-6f55fb430e89\") " pod="openstack/rabbitmq-cell1-server-0" Nov 23 16:00:16 crc kubenswrapper[5050]: I1123 16:00:16.255660 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/f2c92766-b409-46b5-a029-6f55fb430e89-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"f2c92766-b409-46b5-a029-6f55fb430e89\") " pod="openstack/rabbitmq-cell1-server-0" Nov 23 16:00:16 crc kubenswrapper[5050]: I1123 16:00:16.255744 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/f2c92766-b409-46b5-a029-6f55fb430e89-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"f2c92766-b409-46b5-a029-6f55fb430e89\") " pod="openstack/rabbitmq-cell1-server-0" Nov 23 16:00:16 crc kubenswrapper[5050]: I1123 16:00:16.255797 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g9tzm\" (UniqueName: \"kubernetes.io/projected/f2c92766-b409-46b5-a029-6f55fb430e89-kube-api-access-g9tzm\") pod \"rabbitmq-cell1-server-0\" (UID: \"f2c92766-b409-46b5-a029-6f55fb430e89\") " pod="openstack/rabbitmq-cell1-server-0" Nov 23 16:00:16 crc kubenswrapper[5050]: I1123 16:00:16.255886 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/f2c92766-b409-46b5-a029-6f55fb430e89-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"f2c92766-b409-46b5-a029-6f55fb430e89\") " pod="openstack/rabbitmq-cell1-server-0" Nov 23 16:00:16 crc kubenswrapper[5050]: I1123 16:00:16.255976 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: 
\"kubernetes.io/projected/f2c92766-b409-46b5-a029-6f55fb430e89-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"f2c92766-b409-46b5-a029-6f55fb430e89\") " pod="openstack/rabbitmq-cell1-server-0" Nov 23 16:00:16 crc kubenswrapper[5050]: I1123 16:00:16.256025 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/f2c92766-b409-46b5-a029-6f55fb430e89-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"f2c92766-b409-46b5-a029-6f55fb430e89\") " pod="openstack/rabbitmq-cell1-server-0" Nov 23 16:00:16 crc kubenswrapper[5050]: I1123 16:00:16.256075 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/f2c92766-b409-46b5-a029-6f55fb430e89-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"f2c92766-b409-46b5-a029-6f55fb430e89\") " pod="openstack/rabbitmq-cell1-server-0" Nov 23 16:00:16 crc kubenswrapper[5050]: I1123 16:00:16.256179 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-dc43326e-5e1d-4a0d-9c93-1a2004118335\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-dc43326e-5e1d-4a0d-9c93-1a2004118335\") pod \"rabbitmq-cell1-server-0\" (UID: \"f2c92766-b409-46b5-a029-6f55fb430e89\") " pod="openstack/rabbitmq-cell1-server-0" Nov 23 16:00:16 crc kubenswrapper[5050]: I1123 16:00:16.259479 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/f2c92766-b409-46b5-a029-6f55fb430e89-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"f2c92766-b409-46b5-a029-6f55fb430e89\") " pod="openstack/rabbitmq-cell1-server-0" Nov 23 16:00:16 crc kubenswrapper[5050]: I1123 16:00:16.260826 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/f2c92766-b409-46b5-a029-6f55fb430e89-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"f2c92766-b409-46b5-a029-6f55fb430e89\") " pod="openstack/rabbitmq-cell1-server-0" Nov 23 16:00:16 crc kubenswrapper[5050]: I1123 16:00:16.261312 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/f2c92766-b409-46b5-a029-6f55fb430e89-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"f2c92766-b409-46b5-a029-6f55fb430e89\") " pod="openstack/rabbitmq-cell1-server-0" Nov 23 16:00:16 crc kubenswrapper[5050]: I1123 16:00:16.262375 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/f2c92766-b409-46b5-a029-6f55fb430e89-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"f2c92766-b409-46b5-a029-6f55fb430e89\") " pod="openstack/rabbitmq-cell1-server-0" Nov 23 16:00:16 crc kubenswrapper[5050]: I1123 16:00:16.265147 5050 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Nov 23 16:00:16 crc kubenswrapper[5050]: I1123 16:00:16.265230 5050 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-dc43326e-5e1d-4a0d-9c93-1a2004118335\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-dc43326e-5e1d-4a0d-9c93-1a2004118335\") pod \"rabbitmq-cell1-server-0\" (UID: \"f2c92766-b409-46b5-a029-6f55fb430e89\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/8690a45c9e8dae91a044e5db2b518f175247cdaecdc65ad047b3d8ea8b074528/globalmount\"" pod="openstack/rabbitmq-cell1-server-0" Nov 23 16:00:16 crc kubenswrapper[5050]: I1123 16:00:16.266574 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/f2c92766-b409-46b5-a029-6f55fb430e89-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"f2c92766-b409-46b5-a029-6f55fb430e89\") " pod="openstack/rabbitmq-cell1-server-0" Nov 23 16:00:16 crc kubenswrapper[5050]: I1123 16:00:16.266794 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/f2c92766-b409-46b5-a029-6f55fb430e89-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"f2c92766-b409-46b5-a029-6f55fb430e89\") " pod="openstack/rabbitmq-cell1-server-0" Nov 23 16:00:16 crc kubenswrapper[5050]: I1123 16:00:16.269102 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/f2c92766-b409-46b5-a029-6f55fb430e89-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"f2c92766-b409-46b5-a029-6f55fb430e89\") " pod="openstack/rabbitmq-cell1-server-0" Nov 23 16:00:16 crc kubenswrapper[5050]: I1123 16:00:16.293795 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g9tzm\" (UniqueName: \"kubernetes.io/projected/f2c92766-b409-46b5-a029-6f55fb430e89-kube-api-access-g9tzm\") pod \"rabbitmq-cell1-server-0\" (UID: \"f2c92766-b409-46b5-a029-6f55fb430e89\") " pod="openstack/rabbitmq-cell1-server-0" Nov 23 16:00:16 crc kubenswrapper[5050]: I1123 16:00:16.296423 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-dc43326e-5e1d-4a0d-9c93-1a2004118335\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-dc43326e-5e1d-4a0d-9c93-1a2004118335\") pod \"rabbitmq-cell1-server-0\" (UID: \"f2c92766-b409-46b5-a029-6f55fb430e89\") " pod="openstack/rabbitmq-cell1-server-0" Nov 23 16:00:16 crc kubenswrapper[5050]: I1123 16:00:16.320241 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 23 16:00:16 crc kubenswrapper[5050]: I1123 16:00:16.853984 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 23 16:00:17 crc kubenswrapper[5050]: W1123 16:00:17.069888 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf2c92766_b409_46b5_a029_6f55fb430e89.slice/crio-e3c95b265bf6530e3f854e4509e805a772e05fcbd19867b9eb6559cafb09f0b0 WatchSource:0}: Error finding container e3c95b265bf6530e3f854e4509e805a772e05fcbd19867b9eb6559cafb09f0b0: Status 404 returned error can't find the container with id e3c95b265bf6530e3f854e4509e805a772e05fcbd19867b9eb6559cafb09f0b0 Nov 23 16:00:17 crc kubenswrapper[5050]: I1123 16:00:17.562913 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="db2c1781-f656-442a-87db-97e53578c8aa" path="/var/lib/kubelet/pods/db2c1781-f656-442a-87db-97e53578c8aa/volumes" Nov 23 16:00:17 crc kubenswrapper[5050]: I1123 16:00:17.567660 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e72bc364-dad9-4403-ba3a-270c07097ba6" path="/var/lib/kubelet/pods/e72bc364-dad9-4403-ba3a-270c07097ba6/volumes" Nov 23 16:00:17 crc kubenswrapper[5050]: I1123 16:00:17.927696 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"f2c92766-b409-46b5-a029-6f55fb430e89","Type":"ContainerStarted","Data":"e3c95b265bf6530e3f854e4509e805a772e05fcbd19867b9eb6559cafb09f0b0"} Nov 23 16:00:18 crc kubenswrapper[5050]: I1123 16:00:18.943665 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"f2c92766-b409-46b5-a029-6f55fb430e89","Type":"ContainerStarted","Data":"8a9aef4c74d7b670467ff9047e6b8c87e8ad19beb632b7d88406755ef3cd19fc"} Nov 23 16:00:18 crc kubenswrapper[5050]: I1123 16:00:18.950173 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"624f99c9-a404-4437-9a6f-835971760d52","Type":"ContainerStarted","Data":"b7b6c9133a92d902c8abe21ac567ea2ad41d8aafe50e216145aed1d40088b5ad"} Nov 23 16:00:29 crc kubenswrapper[5050]: I1123 16:00:29.224734 5050 patch_prober.go:28] interesting pod/machine-config-daemon-hlrlq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 23 16:00:29 crc kubenswrapper[5050]: I1123 16:00:29.225419 5050 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 23 16:00:29 crc kubenswrapper[5050]: I1123 16:00:29.225497 5050 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" Nov 23 16:00:29 crc kubenswrapper[5050]: I1123 16:00:29.226259 5050 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"c18d191ff51dd31497250d2492a5d834c9d4fb3c3d8d531c739172043c30b828"} pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" containerMessage="Container machine-config-daemon failed liveness 
probe, will be restarted" Nov 23 16:00:29 crc kubenswrapper[5050]: I1123 16:00:29.226526 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" containerID="cri-o://c18d191ff51dd31497250d2492a5d834c9d4fb3c3d8d531c739172043c30b828" gracePeriod=600 Nov 23 16:00:30 crc kubenswrapper[5050]: I1123 16:00:30.067955 5050 generic.go:334] "Generic (PLEG): container finished" podID="1d998909-9470-47ef-87e8-b34f0473682f" containerID="c18d191ff51dd31497250d2492a5d834c9d4fb3c3d8d531c739172043c30b828" exitCode=0 Nov 23 16:00:30 crc kubenswrapper[5050]: I1123 16:00:30.068003 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" event={"ID":"1d998909-9470-47ef-87e8-b34f0473682f","Type":"ContainerDied","Data":"c18d191ff51dd31497250d2492a5d834c9d4fb3c3d8d531c739172043c30b828"} Nov 23 16:00:30 crc kubenswrapper[5050]: I1123 16:00:30.069003 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" event={"ID":"1d998909-9470-47ef-87e8-b34f0473682f","Type":"ContainerStarted","Data":"3fc70d33f56eea45a48b3911893d3dc4f60ad925817000cb324b1423c665b64b"} Nov 23 16:00:30 crc kubenswrapper[5050]: I1123 16:00:30.069039 5050 scope.go:117] "RemoveContainer" containerID="084b3acf48b431cddb110c9e00f2544e092eca446a3ace8e3ff269a7e06eedfb" Nov 23 16:00:52 crc kubenswrapper[5050]: I1123 16:00:52.313368 5050 generic.go:334] "Generic (PLEG): container finished" podID="f2c92766-b409-46b5-a029-6f55fb430e89" containerID="8a9aef4c74d7b670467ff9047e6b8c87e8ad19beb632b7d88406755ef3cd19fc" exitCode=0 Nov 23 16:00:52 crc kubenswrapper[5050]: I1123 16:00:52.313596 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"f2c92766-b409-46b5-a029-6f55fb430e89","Type":"ContainerDied","Data":"8a9aef4c74d7b670467ff9047e6b8c87e8ad19beb632b7d88406755ef3cd19fc"} Nov 23 16:00:52 crc kubenswrapper[5050]: I1123 16:00:52.319090 5050 generic.go:334] "Generic (PLEG): container finished" podID="624f99c9-a404-4437-9a6f-835971760d52" containerID="b7b6c9133a92d902c8abe21ac567ea2ad41d8aafe50e216145aed1d40088b5ad" exitCode=0 Nov 23 16:00:52 crc kubenswrapper[5050]: I1123 16:00:52.319160 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"624f99c9-a404-4437-9a6f-835971760d52","Type":"ContainerDied","Data":"b7b6c9133a92d902c8abe21ac567ea2ad41d8aafe50e216145aed1d40088b5ad"} Nov 23 16:00:53 crc kubenswrapper[5050]: I1123 16:00:53.329644 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"f2c92766-b409-46b5-a029-6f55fb430e89","Type":"ContainerStarted","Data":"9f4e6f8fad1c405f2ae07e359376233d261630cc2b284ab4c40e2724407ace87"} Nov 23 16:00:53 crc kubenswrapper[5050]: I1123 16:00:53.330858 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Nov 23 16:00:53 crc kubenswrapper[5050]: I1123 16:00:53.332620 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"624f99c9-a404-4437-9a6f-835971760d52","Type":"ContainerStarted","Data":"9cc2ad370df9d5d762ff68c9446d818df2459432138753fb09c4134967b7ef04"} Nov 23 16:00:53 crc kubenswrapper[5050]: I1123 16:00:53.332861 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" 
status="" pod="openstack/rabbitmq-server-0" Nov 23 16:00:53 crc kubenswrapper[5050]: I1123 16:00:53.364211 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=38.364186626 podStartE2EDuration="38.364186626s" podCreationTimestamp="2025-11-23 16:00:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 16:00:53.356587101 +0000 UTC m=+4748.523583616" watchObservedRunningTime="2025-11-23 16:00:53.364186626 +0000 UTC m=+4748.531183121" Nov 23 16:00:53 crc kubenswrapper[5050]: I1123 16:00:53.385373 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=39.385349343 podStartE2EDuration="39.385349343s" podCreationTimestamp="2025-11-23 16:00:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 16:00:53.383026918 +0000 UTC m=+4748.550023463" watchObservedRunningTime="2025-11-23 16:00:53.385349343 +0000 UTC m=+4748.552345838" Nov 23 16:01:05 crc kubenswrapper[5050]: I1123 16:01:05.357800 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Nov 23 16:01:06 crc kubenswrapper[5050]: I1123 16:01:06.323773 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Nov 23 16:01:13 crc kubenswrapper[5050]: I1123 16:01:13.837158 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mariadb-client-1-default"] Nov 23 16:01:13 crc kubenswrapper[5050]: I1123 16:01:13.840513 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-1-default" Nov 23 16:01:13 crc kubenswrapper[5050]: I1123 16:01:13.846686 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-8nrbj" Nov 23 16:01:13 crc kubenswrapper[5050]: I1123 16:01:13.854236 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-1-default"] Nov 23 16:01:13 crc kubenswrapper[5050]: I1123 16:01:13.959365 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8ffhc\" (UniqueName: \"kubernetes.io/projected/0f964021-c405-4aec-ad69-820337769a75-kube-api-access-8ffhc\") pod \"mariadb-client-1-default\" (UID: \"0f964021-c405-4aec-ad69-820337769a75\") " pod="openstack/mariadb-client-1-default" Nov 23 16:01:14 crc kubenswrapper[5050]: I1123 16:01:14.061757 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8ffhc\" (UniqueName: \"kubernetes.io/projected/0f964021-c405-4aec-ad69-820337769a75-kube-api-access-8ffhc\") pod \"mariadb-client-1-default\" (UID: \"0f964021-c405-4aec-ad69-820337769a75\") " pod="openstack/mariadb-client-1-default" Nov 23 16:01:14 crc kubenswrapper[5050]: I1123 16:01:14.105083 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8ffhc\" (UniqueName: \"kubernetes.io/projected/0f964021-c405-4aec-ad69-820337769a75-kube-api-access-8ffhc\") pod \"mariadb-client-1-default\" (UID: \"0f964021-c405-4aec-ad69-820337769a75\") " pod="openstack/mariadb-client-1-default" Nov 23 16:01:14 crc kubenswrapper[5050]: I1123 16:01:14.178274 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client-1-default" Nov 23 16:01:14 crc kubenswrapper[5050]: I1123 16:01:14.608748 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-1-default"] Nov 23 16:01:14 crc kubenswrapper[5050]: W1123 16:01:14.615469 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0f964021_c405_4aec_ad69_820337769a75.slice/crio-fa8b5494b804cdb2d16b7ae2a3706bf55577699fc25930659d8bc3a3c30d1f7a WatchSource:0}: Error finding container fa8b5494b804cdb2d16b7ae2a3706bf55577699fc25930659d8bc3a3c30d1f7a: Status 404 returned error can't find the container with id fa8b5494b804cdb2d16b7ae2a3706bf55577699fc25930659d8bc3a3c30d1f7a Nov 23 16:01:15 crc kubenswrapper[5050]: I1123 16:01:15.568164 5050 generic.go:334] "Generic (PLEG): container finished" podID="0f964021-c405-4aec-ad69-820337769a75" containerID="47fc8485f89533de87d093318c233da9fa54a79bc6d2de09e7344cd65d687368" exitCode=0 Nov 23 16:01:15 crc kubenswrapper[5050]: I1123 16:01:15.568287 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-1-default" event={"ID":"0f964021-c405-4aec-ad69-820337769a75","Type":"ContainerDied","Data":"47fc8485f89533de87d093318c233da9fa54a79bc6d2de09e7344cd65d687368"} Nov 23 16:01:15 crc kubenswrapper[5050]: I1123 16:01:15.568861 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-1-default" event={"ID":"0f964021-c405-4aec-ad69-820337769a75","Type":"ContainerStarted","Data":"fa8b5494b804cdb2d16b7ae2a3706bf55577699fc25930659d8bc3a3c30d1f7a"} Nov 23 16:01:17 crc kubenswrapper[5050]: I1123 16:01:17.113025 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-1-default" Nov 23 16:01:17 crc kubenswrapper[5050]: I1123 16:01:17.157734 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_mariadb-client-1-default_0f964021-c405-4aec-ad69-820337769a75/mariadb-client-1-default/0.log" Nov 23 16:01:17 crc kubenswrapper[5050]: I1123 16:01:17.196872 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-client-1-default"] Nov 23 16:01:17 crc kubenswrapper[5050]: I1123 16:01:17.203153 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mariadb-client-1-default"] Nov 23 16:01:17 crc kubenswrapper[5050]: I1123 16:01:17.224046 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8ffhc\" (UniqueName: \"kubernetes.io/projected/0f964021-c405-4aec-ad69-820337769a75-kube-api-access-8ffhc\") pod \"0f964021-c405-4aec-ad69-820337769a75\" (UID: \"0f964021-c405-4aec-ad69-820337769a75\") " Nov 23 16:01:17 crc kubenswrapper[5050]: I1123 16:01:17.231156 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0f964021-c405-4aec-ad69-820337769a75-kube-api-access-8ffhc" (OuterVolumeSpecName: "kube-api-access-8ffhc") pod "0f964021-c405-4aec-ad69-820337769a75" (UID: "0f964021-c405-4aec-ad69-820337769a75"). InnerVolumeSpecName "kube-api-access-8ffhc". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 16:01:17 crc kubenswrapper[5050]: I1123 16:01:17.326853 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8ffhc\" (UniqueName: \"kubernetes.io/projected/0f964021-c405-4aec-ad69-820337769a75-kube-api-access-8ffhc\") on node \"crc\" DevicePath \"\"" Nov 23 16:01:17 crc kubenswrapper[5050]: I1123 16:01:17.568981 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0f964021-c405-4aec-ad69-820337769a75" path="/var/lib/kubelet/pods/0f964021-c405-4aec-ad69-820337769a75/volumes" Nov 23 16:01:17 crc kubenswrapper[5050]: I1123 16:01:17.598918 5050 scope.go:117] "RemoveContainer" containerID="47fc8485f89533de87d093318c233da9fa54a79bc6d2de09e7344cd65d687368" Nov 23 16:01:17 crc kubenswrapper[5050]: I1123 16:01:17.598957 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-1-default" Nov 23 16:01:17 crc kubenswrapper[5050]: I1123 16:01:17.793263 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mariadb-client-2-default"] Nov 23 16:01:17 crc kubenswrapper[5050]: E1123 16:01:17.795100 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0f964021-c405-4aec-ad69-820337769a75" containerName="mariadb-client-1-default" Nov 23 16:01:17 crc kubenswrapper[5050]: I1123 16:01:17.795136 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="0f964021-c405-4aec-ad69-820337769a75" containerName="mariadb-client-1-default" Nov 23 16:01:17 crc kubenswrapper[5050]: I1123 16:01:17.795514 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="0f964021-c405-4aec-ad69-820337769a75" containerName="mariadb-client-1-default" Nov 23 16:01:17 crc kubenswrapper[5050]: I1123 16:01:17.796367 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-2-default" Nov 23 16:01:17 crc kubenswrapper[5050]: I1123 16:01:17.800889 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-8nrbj" Nov 23 16:01:17 crc kubenswrapper[5050]: I1123 16:01:17.805250 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-2-default"] Nov 23 16:01:17 crc kubenswrapper[5050]: I1123 16:01:17.944279 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xrqbc\" (UniqueName: \"kubernetes.io/projected/c06cf314-20f1-4b53-8714-3643a142dce2-kube-api-access-xrqbc\") pod \"mariadb-client-2-default\" (UID: \"c06cf314-20f1-4b53-8714-3643a142dce2\") " pod="openstack/mariadb-client-2-default" Nov 23 16:01:18 crc kubenswrapper[5050]: I1123 16:01:18.046025 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xrqbc\" (UniqueName: \"kubernetes.io/projected/c06cf314-20f1-4b53-8714-3643a142dce2-kube-api-access-xrqbc\") pod \"mariadb-client-2-default\" (UID: \"c06cf314-20f1-4b53-8714-3643a142dce2\") " pod="openstack/mariadb-client-2-default" Nov 23 16:01:18 crc kubenswrapper[5050]: I1123 16:01:18.077395 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xrqbc\" (UniqueName: \"kubernetes.io/projected/c06cf314-20f1-4b53-8714-3643a142dce2-kube-api-access-xrqbc\") pod \"mariadb-client-2-default\" (UID: \"c06cf314-20f1-4b53-8714-3643a142dce2\") " pod="openstack/mariadb-client-2-default" Nov 23 16:01:18 crc kubenswrapper[5050]: I1123 16:01:18.135922 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client-2-default" Nov 23 16:01:18 crc kubenswrapper[5050]: I1123 16:01:18.444004 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-2-default"] Nov 23 16:01:18 crc kubenswrapper[5050]: W1123 16:01:18.448258 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc06cf314_20f1_4b53_8714_3643a142dce2.slice/crio-0d81b1995da08dc48116f47cb188b5476a7077e0d587baca0193d753a09298f5 WatchSource:0}: Error finding container 0d81b1995da08dc48116f47cb188b5476a7077e0d587baca0193d753a09298f5: Status 404 returned error can't find the container with id 0d81b1995da08dc48116f47cb188b5476a7077e0d587baca0193d753a09298f5 Nov 23 16:01:18 crc kubenswrapper[5050]: I1123 16:01:18.607575 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-2-default" event={"ID":"c06cf314-20f1-4b53-8714-3643a142dce2","Type":"ContainerStarted","Data":"0d81b1995da08dc48116f47cb188b5476a7077e0d587baca0193d753a09298f5"} Nov 23 16:01:19 crc kubenswrapper[5050]: I1123 16:01:19.624827 5050 generic.go:334] "Generic (PLEG): container finished" podID="c06cf314-20f1-4b53-8714-3643a142dce2" containerID="1569ce2d7244aa705beed6438c20feb0d794ff795f95484b662f13eea1aa6991" exitCode=1 Nov 23 16:01:19 crc kubenswrapper[5050]: I1123 16:01:19.624949 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-2-default" event={"ID":"c06cf314-20f1-4b53-8714-3643a142dce2","Type":"ContainerDied","Data":"1569ce2d7244aa705beed6438c20feb0d794ff795f95484b662f13eea1aa6991"} Nov 23 16:01:21 crc kubenswrapper[5050]: I1123 16:01:21.127785 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-2-default" Nov 23 16:01:21 crc kubenswrapper[5050]: I1123 16:01:21.159534 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_mariadb-client-2-default_c06cf314-20f1-4b53-8714-3643a142dce2/mariadb-client-2-default/0.log" Nov 23 16:01:21 crc kubenswrapper[5050]: I1123 16:01:21.200781 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-client-2-default"] Nov 23 16:01:21 crc kubenswrapper[5050]: I1123 16:01:21.210868 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mariadb-client-2-default"] Nov 23 16:01:21 crc kubenswrapper[5050]: I1123 16:01:21.304971 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xrqbc\" (UniqueName: \"kubernetes.io/projected/c06cf314-20f1-4b53-8714-3643a142dce2-kube-api-access-xrqbc\") pod \"c06cf314-20f1-4b53-8714-3643a142dce2\" (UID: \"c06cf314-20f1-4b53-8714-3643a142dce2\") " Nov 23 16:01:21 crc kubenswrapper[5050]: I1123 16:01:21.314747 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c06cf314-20f1-4b53-8714-3643a142dce2-kube-api-access-xrqbc" (OuterVolumeSpecName: "kube-api-access-xrqbc") pod "c06cf314-20f1-4b53-8714-3643a142dce2" (UID: "c06cf314-20f1-4b53-8714-3643a142dce2"). InnerVolumeSpecName "kube-api-access-xrqbc". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 16:01:21 crc kubenswrapper[5050]: I1123 16:01:21.407659 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xrqbc\" (UniqueName: \"kubernetes.io/projected/c06cf314-20f1-4b53-8714-3643a142dce2-kube-api-access-xrqbc\") on node \"crc\" DevicePath \"\"" Nov 23 16:01:21 crc kubenswrapper[5050]: I1123 16:01:21.568391 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c06cf314-20f1-4b53-8714-3643a142dce2" path="/var/lib/kubelet/pods/c06cf314-20f1-4b53-8714-3643a142dce2/volumes" Nov 23 16:01:21 crc kubenswrapper[5050]: I1123 16:01:21.650633 5050 scope.go:117] "RemoveContainer" containerID="1569ce2d7244aa705beed6438c20feb0d794ff795f95484b662f13eea1aa6991" Nov 23 16:01:21 crc kubenswrapper[5050]: I1123 16:01:21.650697 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-2-default" Nov 23 16:01:21 crc kubenswrapper[5050]: I1123 16:01:21.831728 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mariadb-client-1"] Nov 23 16:01:21 crc kubenswrapper[5050]: E1123 16:01:21.832962 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c06cf314-20f1-4b53-8714-3643a142dce2" containerName="mariadb-client-2-default" Nov 23 16:01:21 crc kubenswrapper[5050]: I1123 16:01:21.832997 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="c06cf314-20f1-4b53-8714-3643a142dce2" containerName="mariadb-client-2-default" Nov 23 16:01:21 crc kubenswrapper[5050]: I1123 16:01:21.833322 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="c06cf314-20f1-4b53-8714-3643a142dce2" containerName="mariadb-client-2-default" Nov 23 16:01:21 crc kubenswrapper[5050]: I1123 16:01:21.834341 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-1" Nov 23 16:01:21 crc kubenswrapper[5050]: I1123 16:01:21.843054 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-8nrbj" Nov 23 16:01:21 crc kubenswrapper[5050]: I1123 16:01:21.847260 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-1"] Nov 23 16:01:21 crc kubenswrapper[5050]: I1123 16:01:21.917036 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nkc5g\" (UniqueName: \"kubernetes.io/projected/6c66e2ee-a8d5-4feb-b4a1-c7bc3e131381-kube-api-access-nkc5g\") pod \"mariadb-client-1\" (UID: \"6c66e2ee-a8d5-4feb-b4a1-c7bc3e131381\") " pod="openstack/mariadb-client-1" Nov 23 16:01:22 crc kubenswrapper[5050]: I1123 16:01:22.022200 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nkc5g\" (UniqueName: \"kubernetes.io/projected/6c66e2ee-a8d5-4feb-b4a1-c7bc3e131381-kube-api-access-nkc5g\") pod \"mariadb-client-1\" (UID: \"6c66e2ee-a8d5-4feb-b4a1-c7bc3e131381\") " pod="openstack/mariadb-client-1" Nov 23 16:01:22 crc kubenswrapper[5050]: I1123 16:01:22.056626 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nkc5g\" (UniqueName: \"kubernetes.io/projected/6c66e2ee-a8d5-4feb-b4a1-c7bc3e131381-kube-api-access-nkc5g\") pod \"mariadb-client-1\" (UID: \"6c66e2ee-a8d5-4feb-b4a1-c7bc3e131381\") " pod="openstack/mariadb-client-1" Nov 23 16:01:22 crc kubenswrapper[5050]: I1123 16:01:22.171897 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client-1" Nov 23 16:01:22 crc kubenswrapper[5050]: I1123 16:01:22.835067 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-1"] Nov 23 16:01:22 crc kubenswrapper[5050]: W1123 16:01:22.843794 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6c66e2ee_a8d5_4feb_b4a1_c7bc3e131381.slice/crio-1337406ed4c0d97a19ba4b4194678bb1b446cf49aeaaf9f1d62573e78922aedf WatchSource:0}: Error finding container 1337406ed4c0d97a19ba4b4194678bb1b446cf49aeaaf9f1d62573e78922aedf: Status 404 returned error can't find the container with id 1337406ed4c0d97a19ba4b4194678bb1b446cf49aeaaf9f1d62573e78922aedf Nov 23 16:01:23 crc kubenswrapper[5050]: I1123 16:01:23.683775 5050 generic.go:334] "Generic (PLEG): container finished" podID="6c66e2ee-a8d5-4feb-b4a1-c7bc3e131381" containerID="82c611d15a7c2f51c1dde5c45f4a455b605f0cc9c6061cfe504dea671df0cbc1" exitCode=0 Nov 23 16:01:23 crc kubenswrapper[5050]: I1123 16:01:23.683898 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-1" event={"ID":"6c66e2ee-a8d5-4feb-b4a1-c7bc3e131381","Type":"ContainerDied","Data":"82c611d15a7c2f51c1dde5c45f4a455b605f0cc9c6061cfe504dea671df0cbc1"} Nov 23 16:01:23 crc kubenswrapper[5050]: I1123 16:01:23.684750 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-1" event={"ID":"6c66e2ee-a8d5-4feb-b4a1-c7bc3e131381","Type":"ContainerStarted","Data":"1337406ed4c0d97a19ba4b4194678bb1b446cf49aeaaf9f1d62573e78922aedf"} Nov 23 16:01:25 crc kubenswrapper[5050]: I1123 16:01:25.211220 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-1" Nov 23 16:01:25 crc kubenswrapper[5050]: I1123 16:01:25.236773 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_mariadb-client-1_6c66e2ee-a8d5-4feb-b4a1-c7bc3e131381/mariadb-client-1/0.log" Nov 23 16:01:25 crc kubenswrapper[5050]: I1123 16:01:25.274243 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-client-1"] Nov 23 16:01:25 crc kubenswrapper[5050]: I1123 16:01:25.280635 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mariadb-client-1"] Nov 23 16:01:25 crc kubenswrapper[5050]: I1123 16:01:25.388399 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nkc5g\" (UniqueName: \"kubernetes.io/projected/6c66e2ee-a8d5-4feb-b4a1-c7bc3e131381-kube-api-access-nkc5g\") pod \"6c66e2ee-a8d5-4feb-b4a1-c7bc3e131381\" (UID: \"6c66e2ee-a8d5-4feb-b4a1-c7bc3e131381\") " Nov 23 16:01:25 crc kubenswrapper[5050]: I1123 16:01:25.398923 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6c66e2ee-a8d5-4feb-b4a1-c7bc3e131381-kube-api-access-nkc5g" (OuterVolumeSpecName: "kube-api-access-nkc5g") pod "6c66e2ee-a8d5-4feb-b4a1-c7bc3e131381" (UID: "6c66e2ee-a8d5-4feb-b4a1-c7bc3e131381"). InnerVolumeSpecName "kube-api-access-nkc5g". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 16:01:25 crc kubenswrapper[5050]: I1123 16:01:25.490242 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nkc5g\" (UniqueName: \"kubernetes.io/projected/6c66e2ee-a8d5-4feb-b4a1-c7bc3e131381-kube-api-access-nkc5g\") on node \"crc\" DevicePath \"\"" Nov 23 16:01:25 crc kubenswrapper[5050]: I1123 16:01:25.561289 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6c66e2ee-a8d5-4feb-b4a1-c7bc3e131381" path="/var/lib/kubelet/pods/6c66e2ee-a8d5-4feb-b4a1-c7bc3e131381/volumes" Nov 23 16:01:25 crc kubenswrapper[5050]: I1123 16:01:25.770493 5050 scope.go:117] "RemoveContainer" containerID="82c611d15a7c2f51c1dde5c45f4a455b605f0cc9c6061cfe504dea671df0cbc1" Nov 23 16:01:25 crc kubenswrapper[5050]: I1123 16:01:25.770794 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-1" Nov 23 16:01:25 crc kubenswrapper[5050]: I1123 16:01:25.842654 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mariadb-client-4-default"] Nov 23 16:01:25 crc kubenswrapper[5050]: E1123 16:01:25.843010 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6c66e2ee-a8d5-4feb-b4a1-c7bc3e131381" containerName="mariadb-client-1" Nov 23 16:01:25 crc kubenswrapper[5050]: I1123 16:01:25.843028 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="6c66e2ee-a8d5-4feb-b4a1-c7bc3e131381" containerName="mariadb-client-1" Nov 23 16:01:25 crc kubenswrapper[5050]: I1123 16:01:25.843186 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="6c66e2ee-a8d5-4feb-b4a1-c7bc3e131381" containerName="mariadb-client-1" Nov 23 16:01:25 crc kubenswrapper[5050]: I1123 16:01:25.843735 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-4-default" Nov 23 16:01:25 crc kubenswrapper[5050]: I1123 16:01:25.847112 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-8nrbj" Nov 23 16:01:25 crc kubenswrapper[5050]: I1123 16:01:25.855334 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-4-default"] Nov 23 16:01:25 crc kubenswrapper[5050]: I1123 16:01:25.897870 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bjrbm\" (UniqueName: \"kubernetes.io/projected/e617bcde-8f0f-4f00-b016-2ddc4a373dd2-kube-api-access-bjrbm\") pod \"mariadb-client-4-default\" (UID: \"e617bcde-8f0f-4f00-b016-2ddc4a373dd2\") " pod="openstack/mariadb-client-4-default" Nov 23 16:01:26 crc kubenswrapper[5050]: I1123 16:01:26.000253 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bjrbm\" (UniqueName: \"kubernetes.io/projected/e617bcde-8f0f-4f00-b016-2ddc4a373dd2-kube-api-access-bjrbm\") pod \"mariadb-client-4-default\" (UID: \"e617bcde-8f0f-4f00-b016-2ddc4a373dd2\") " pod="openstack/mariadb-client-4-default" Nov 23 16:01:26 crc kubenswrapper[5050]: I1123 16:01:26.028182 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bjrbm\" (UniqueName: \"kubernetes.io/projected/e617bcde-8f0f-4f00-b016-2ddc4a373dd2-kube-api-access-bjrbm\") pod \"mariadb-client-4-default\" (UID: \"e617bcde-8f0f-4f00-b016-2ddc4a373dd2\") " pod="openstack/mariadb-client-4-default" Nov 23 16:01:26 crc kubenswrapper[5050]: I1123 16:01:26.160770 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client-4-default" Nov 23 16:01:26 crc kubenswrapper[5050]: I1123 16:01:26.767541 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-4-default"] Nov 23 16:01:26 crc kubenswrapper[5050]: I1123 16:01:26.786596 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-4-default" event={"ID":"e617bcde-8f0f-4f00-b016-2ddc4a373dd2","Type":"ContainerStarted","Data":"8d43ff120f05430119d7e89f71f6f27736650de2ac8e29a0f5ce5ddb85cc9b83"} Nov 23 16:01:27 crc kubenswrapper[5050]: I1123 16:01:27.801390 5050 generic.go:334] "Generic (PLEG): container finished" podID="e617bcde-8f0f-4f00-b016-2ddc4a373dd2" containerID="f2084c425d3a180afdbdf80ea0f62930d29f520055bfe52a6298fe9e36fa5df1" exitCode=0 Nov 23 16:01:27 crc kubenswrapper[5050]: I1123 16:01:27.801560 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-4-default" event={"ID":"e617bcde-8f0f-4f00-b016-2ddc4a373dd2","Type":"ContainerDied","Data":"f2084c425d3a180afdbdf80ea0f62930d29f520055bfe52a6298fe9e36fa5df1"} Nov 23 16:01:29 crc kubenswrapper[5050]: I1123 16:01:29.261076 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-4-default" Nov 23 16:01:29 crc kubenswrapper[5050]: I1123 16:01:29.285981 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_mariadb-client-4-default_e617bcde-8f0f-4f00-b016-2ddc4a373dd2/mariadb-client-4-default/0.log" Nov 23 16:01:29 crc kubenswrapper[5050]: I1123 16:01:29.316682 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-client-4-default"] Nov 23 16:01:29 crc kubenswrapper[5050]: I1123 16:01:29.325280 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mariadb-client-4-default"] Nov 23 16:01:29 crc kubenswrapper[5050]: I1123 16:01:29.366971 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bjrbm\" (UniqueName: \"kubernetes.io/projected/e617bcde-8f0f-4f00-b016-2ddc4a373dd2-kube-api-access-bjrbm\") pod \"e617bcde-8f0f-4f00-b016-2ddc4a373dd2\" (UID: \"e617bcde-8f0f-4f00-b016-2ddc4a373dd2\") " Nov 23 16:01:29 crc kubenswrapper[5050]: I1123 16:01:29.376617 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e617bcde-8f0f-4f00-b016-2ddc4a373dd2-kube-api-access-bjrbm" (OuterVolumeSpecName: "kube-api-access-bjrbm") pod "e617bcde-8f0f-4f00-b016-2ddc4a373dd2" (UID: "e617bcde-8f0f-4f00-b016-2ddc4a373dd2"). InnerVolumeSpecName "kube-api-access-bjrbm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 16:01:29 crc kubenswrapper[5050]: I1123 16:01:29.470338 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bjrbm\" (UniqueName: \"kubernetes.io/projected/e617bcde-8f0f-4f00-b016-2ddc4a373dd2-kube-api-access-bjrbm\") on node \"crc\" DevicePath \"\"" Nov 23 16:01:29 crc kubenswrapper[5050]: I1123 16:01:29.568549 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e617bcde-8f0f-4f00-b016-2ddc4a373dd2" path="/var/lib/kubelet/pods/e617bcde-8f0f-4f00-b016-2ddc4a373dd2/volumes" Nov 23 16:01:29 crc kubenswrapper[5050]: I1123 16:01:29.825362 5050 scope.go:117] "RemoveContainer" containerID="f2084c425d3a180afdbdf80ea0f62930d29f520055bfe52a6298fe9e36fa5df1" Nov 23 16:01:29 crc kubenswrapper[5050]: I1123 16:01:29.825487 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client-4-default" Nov 23 16:01:34 crc kubenswrapper[5050]: I1123 16:01:34.294654 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mariadb-client-5-default"] Nov 23 16:01:34 crc kubenswrapper[5050]: E1123 16:01:34.298110 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e617bcde-8f0f-4f00-b016-2ddc4a373dd2" containerName="mariadb-client-4-default" Nov 23 16:01:34 crc kubenswrapper[5050]: I1123 16:01:34.298128 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="e617bcde-8f0f-4f00-b016-2ddc4a373dd2" containerName="mariadb-client-4-default" Nov 23 16:01:34 crc kubenswrapper[5050]: I1123 16:01:34.298290 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="e617bcde-8f0f-4f00-b016-2ddc4a373dd2" containerName="mariadb-client-4-default" Nov 23 16:01:34 crc kubenswrapper[5050]: I1123 16:01:34.298965 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-5-default" Nov 23 16:01:34 crc kubenswrapper[5050]: I1123 16:01:34.302485 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-8nrbj" Nov 23 16:01:34 crc kubenswrapper[5050]: I1123 16:01:34.313092 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-5-default"] Nov 23 16:01:34 crc kubenswrapper[5050]: I1123 16:01:34.375833 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4cnxp\" (UniqueName: \"kubernetes.io/projected/9d6de7e4-0f2d-4748-981b-d01bc8c6db36-kube-api-access-4cnxp\") pod \"mariadb-client-5-default\" (UID: \"9d6de7e4-0f2d-4748-981b-d01bc8c6db36\") " pod="openstack/mariadb-client-5-default" Nov 23 16:01:34 crc kubenswrapper[5050]: I1123 16:01:34.478781 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4cnxp\" (UniqueName: \"kubernetes.io/projected/9d6de7e4-0f2d-4748-981b-d01bc8c6db36-kube-api-access-4cnxp\") pod \"mariadb-client-5-default\" (UID: \"9d6de7e4-0f2d-4748-981b-d01bc8c6db36\") " pod="openstack/mariadb-client-5-default" Nov 23 16:01:34 crc kubenswrapper[5050]: I1123 16:01:34.512887 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4cnxp\" (UniqueName: \"kubernetes.io/projected/9d6de7e4-0f2d-4748-981b-d01bc8c6db36-kube-api-access-4cnxp\") pod \"mariadb-client-5-default\" (UID: \"9d6de7e4-0f2d-4748-981b-d01bc8c6db36\") " pod="openstack/mariadb-client-5-default" Nov 23 16:01:34 crc kubenswrapper[5050]: I1123 16:01:34.641173 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client-5-default" Nov 23 16:01:35 crc kubenswrapper[5050]: I1123 16:01:35.011557 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-5-default"] Nov 23 16:01:35 crc kubenswrapper[5050]: I1123 16:01:35.909430 5050 generic.go:334] "Generic (PLEG): container finished" podID="9d6de7e4-0f2d-4748-981b-d01bc8c6db36" containerID="2049c9b4f9b263662c1497d87a8bad29fdba1e8a3605a0926f00c8c3dc6d91ea" exitCode=0 Nov 23 16:01:35 crc kubenswrapper[5050]: I1123 16:01:35.909529 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-5-default" event={"ID":"9d6de7e4-0f2d-4748-981b-d01bc8c6db36","Type":"ContainerDied","Data":"2049c9b4f9b263662c1497d87a8bad29fdba1e8a3605a0926f00c8c3dc6d91ea"} Nov 23 16:01:35 crc kubenswrapper[5050]: I1123 16:01:35.909573 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-5-default" event={"ID":"9d6de7e4-0f2d-4748-981b-d01bc8c6db36","Type":"ContainerStarted","Data":"92717a29db4155d981f15860700355dacad17771242adf5a396ca8ba19803e8b"} Nov 23 16:01:37 crc kubenswrapper[5050]: I1123 16:01:37.747950 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-5-default" Nov 23 16:01:37 crc kubenswrapper[5050]: I1123 16:01:37.774686 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_mariadb-client-5-default_9d6de7e4-0f2d-4748-981b-d01bc8c6db36/mariadb-client-5-default/0.log" Nov 23 16:01:37 crc kubenswrapper[5050]: I1123 16:01:37.830779 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-client-5-default"] Nov 23 16:01:37 crc kubenswrapper[5050]: I1123 16:01:37.839116 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mariadb-client-5-default"] Nov 23 16:01:37 crc kubenswrapper[5050]: I1123 16:01:37.856752 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4cnxp\" (UniqueName: \"kubernetes.io/projected/9d6de7e4-0f2d-4748-981b-d01bc8c6db36-kube-api-access-4cnxp\") pod \"9d6de7e4-0f2d-4748-981b-d01bc8c6db36\" (UID: \"9d6de7e4-0f2d-4748-981b-d01bc8c6db36\") " Nov 23 16:01:37 crc kubenswrapper[5050]: I1123 16:01:37.868797 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d6de7e4-0f2d-4748-981b-d01bc8c6db36-kube-api-access-4cnxp" (OuterVolumeSpecName: "kube-api-access-4cnxp") pod "9d6de7e4-0f2d-4748-981b-d01bc8c6db36" (UID: "9d6de7e4-0f2d-4748-981b-d01bc8c6db36"). InnerVolumeSpecName "kube-api-access-4cnxp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 16:01:37 crc kubenswrapper[5050]: I1123 16:01:37.949601 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="92717a29db4155d981f15860700355dacad17771242adf5a396ca8ba19803e8b" Nov 23 16:01:37 crc kubenswrapper[5050]: I1123 16:01:37.949701 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client-5-default" Nov 23 16:01:37 crc kubenswrapper[5050]: I1123 16:01:37.959239 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4cnxp\" (UniqueName: \"kubernetes.io/projected/9d6de7e4-0f2d-4748-981b-d01bc8c6db36-kube-api-access-4cnxp\") on node \"crc\" DevicePath \"\"" Nov 23 16:01:38 crc kubenswrapper[5050]: I1123 16:01:38.011184 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mariadb-client-6-default"] Nov 23 16:01:38 crc kubenswrapper[5050]: E1123 16:01:38.011697 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9d6de7e4-0f2d-4748-981b-d01bc8c6db36" containerName="mariadb-client-5-default" Nov 23 16:01:38 crc kubenswrapper[5050]: I1123 16:01:38.011725 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="9d6de7e4-0f2d-4748-981b-d01bc8c6db36" containerName="mariadb-client-5-default" Nov 23 16:01:38 crc kubenswrapper[5050]: I1123 16:01:38.011963 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="9d6de7e4-0f2d-4748-981b-d01bc8c6db36" containerName="mariadb-client-5-default" Nov 23 16:01:38 crc kubenswrapper[5050]: I1123 16:01:38.012697 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-6-default" Nov 23 16:01:38 crc kubenswrapper[5050]: I1123 16:01:38.015821 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-8nrbj" Nov 23 16:01:38 crc kubenswrapper[5050]: I1123 16:01:38.022086 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-6-default"] Nov 23 16:01:38 crc kubenswrapper[5050]: I1123 16:01:38.060605 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qnwwx\" (UniqueName: \"kubernetes.io/projected/424ad2c2-90c9-402e-bddc-03ff6f8e66a2-kube-api-access-qnwwx\") pod \"mariadb-client-6-default\" (UID: \"424ad2c2-90c9-402e-bddc-03ff6f8e66a2\") " pod="openstack/mariadb-client-6-default" Nov 23 16:01:38 crc kubenswrapper[5050]: I1123 16:01:38.163813 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qnwwx\" (UniqueName: \"kubernetes.io/projected/424ad2c2-90c9-402e-bddc-03ff6f8e66a2-kube-api-access-qnwwx\") pod \"mariadb-client-6-default\" (UID: \"424ad2c2-90c9-402e-bddc-03ff6f8e66a2\") " pod="openstack/mariadb-client-6-default" Nov 23 16:01:38 crc kubenswrapper[5050]: I1123 16:01:38.186644 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qnwwx\" (UniqueName: \"kubernetes.io/projected/424ad2c2-90c9-402e-bddc-03ff6f8e66a2-kube-api-access-qnwwx\") pod \"mariadb-client-6-default\" (UID: \"424ad2c2-90c9-402e-bddc-03ff6f8e66a2\") " pod="openstack/mariadb-client-6-default" Nov 23 16:01:38 crc kubenswrapper[5050]: I1123 16:01:38.347883 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client-6-default" Nov 23 16:01:38 crc kubenswrapper[5050]: I1123 16:01:38.975082 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-6-default"] Nov 23 16:01:38 crc kubenswrapper[5050]: W1123 16:01:38.980095 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod424ad2c2_90c9_402e_bddc_03ff6f8e66a2.slice/crio-2d6a0111b5cb1aafbb690a80fd9a997ac3a401ad3058bfae89e308d264a0c436 WatchSource:0}: Error finding container 2d6a0111b5cb1aafbb690a80fd9a997ac3a401ad3058bfae89e308d264a0c436: Status 404 returned error can't find the container with id 2d6a0111b5cb1aafbb690a80fd9a997ac3a401ad3058bfae89e308d264a0c436 Nov 23 16:01:39 crc kubenswrapper[5050]: I1123 16:01:39.599067 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d6de7e4-0f2d-4748-981b-d01bc8c6db36" path="/var/lib/kubelet/pods/9d6de7e4-0f2d-4748-981b-d01bc8c6db36/volumes" Nov 23 16:01:39 crc kubenswrapper[5050]: I1123 16:01:39.968361 5050 generic.go:334] "Generic (PLEG): container finished" podID="424ad2c2-90c9-402e-bddc-03ff6f8e66a2" containerID="331ebe2d4d1844bb715e588bb0aff534a8ab606c2410e25dd45896a2aa94c9e6" exitCode=1 Nov 23 16:01:39 crc kubenswrapper[5050]: I1123 16:01:39.968434 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-6-default" event={"ID":"424ad2c2-90c9-402e-bddc-03ff6f8e66a2","Type":"ContainerDied","Data":"331ebe2d4d1844bb715e588bb0aff534a8ab606c2410e25dd45896a2aa94c9e6"} Nov 23 16:01:39 crc kubenswrapper[5050]: I1123 16:01:39.968492 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-6-default" event={"ID":"424ad2c2-90c9-402e-bddc-03ff6f8e66a2","Type":"ContainerStarted","Data":"2d6a0111b5cb1aafbb690a80fd9a997ac3a401ad3058bfae89e308d264a0c436"} Nov 23 16:01:41 crc kubenswrapper[5050]: I1123 16:01:41.430914 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-6-default" Nov 23 16:01:41 crc kubenswrapper[5050]: I1123 16:01:41.464501 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_mariadb-client-6-default_424ad2c2-90c9-402e-bddc-03ff6f8e66a2/mariadb-client-6-default/0.log" Nov 23 16:01:41 crc kubenswrapper[5050]: I1123 16:01:41.501136 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-client-6-default"] Nov 23 16:01:41 crc kubenswrapper[5050]: I1123 16:01:41.511275 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mariadb-client-6-default"] Nov 23 16:01:41 crc kubenswrapper[5050]: I1123 16:01:41.571416 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qnwwx\" (UniqueName: \"kubernetes.io/projected/424ad2c2-90c9-402e-bddc-03ff6f8e66a2-kube-api-access-qnwwx\") pod \"424ad2c2-90c9-402e-bddc-03ff6f8e66a2\" (UID: \"424ad2c2-90c9-402e-bddc-03ff6f8e66a2\") " Nov 23 16:01:41 crc kubenswrapper[5050]: I1123 16:01:41.583885 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/424ad2c2-90c9-402e-bddc-03ff6f8e66a2-kube-api-access-qnwwx" (OuterVolumeSpecName: "kube-api-access-qnwwx") pod "424ad2c2-90c9-402e-bddc-03ff6f8e66a2" (UID: "424ad2c2-90c9-402e-bddc-03ff6f8e66a2"). InnerVolumeSpecName "kube-api-access-qnwwx". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 16:01:41 crc kubenswrapper[5050]: I1123 16:01:41.674556 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qnwwx\" (UniqueName: \"kubernetes.io/projected/424ad2c2-90c9-402e-bddc-03ff6f8e66a2-kube-api-access-qnwwx\") on node \"crc\" DevicePath \"\"" Nov 23 16:01:41 crc kubenswrapper[5050]: I1123 16:01:41.697011 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mariadb-client-7-default"] Nov 23 16:01:41 crc kubenswrapper[5050]: E1123 16:01:41.699340 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="424ad2c2-90c9-402e-bddc-03ff6f8e66a2" containerName="mariadb-client-6-default" Nov 23 16:01:41 crc kubenswrapper[5050]: I1123 16:01:41.699372 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="424ad2c2-90c9-402e-bddc-03ff6f8e66a2" containerName="mariadb-client-6-default" Nov 23 16:01:41 crc kubenswrapper[5050]: I1123 16:01:41.700024 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="424ad2c2-90c9-402e-bddc-03ff6f8e66a2" containerName="mariadb-client-6-default" Nov 23 16:01:41 crc kubenswrapper[5050]: I1123 16:01:41.700703 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-7-default" Nov 23 16:01:41 crc kubenswrapper[5050]: I1123 16:01:41.721852 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-7-default"] Nov 23 16:01:41 crc kubenswrapper[5050]: I1123 16:01:41.776830 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rpgzh\" (UniqueName: \"kubernetes.io/projected/eddf7db8-3645-40c0-96ed-ec6004596f9f-kube-api-access-rpgzh\") pod \"mariadb-client-7-default\" (UID: \"eddf7db8-3645-40c0-96ed-ec6004596f9f\") " pod="openstack/mariadb-client-7-default" Nov 23 16:01:41 crc kubenswrapper[5050]: I1123 16:01:41.879025 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rpgzh\" (UniqueName: \"kubernetes.io/projected/eddf7db8-3645-40c0-96ed-ec6004596f9f-kube-api-access-rpgzh\") pod \"mariadb-client-7-default\" (UID: \"eddf7db8-3645-40c0-96ed-ec6004596f9f\") " pod="openstack/mariadb-client-7-default" Nov 23 16:01:41 crc kubenswrapper[5050]: I1123 16:01:41.909383 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rpgzh\" (UniqueName: \"kubernetes.io/projected/eddf7db8-3645-40c0-96ed-ec6004596f9f-kube-api-access-rpgzh\") pod \"mariadb-client-7-default\" (UID: \"eddf7db8-3645-40c0-96ed-ec6004596f9f\") " pod="openstack/mariadb-client-7-default" Nov 23 16:01:41 crc kubenswrapper[5050]: I1123 16:01:41.994243 5050 scope.go:117] "RemoveContainer" containerID="331ebe2d4d1844bb715e588bb0aff534a8ab606c2410e25dd45896a2aa94c9e6" Nov 23 16:01:41 crc kubenswrapper[5050]: I1123 16:01:41.994300 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-6-default" Nov 23 16:01:42 crc kubenswrapper[5050]: I1123 16:01:42.024103 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client-7-default" Nov 23 16:01:42 crc kubenswrapper[5050]: I1123 16:01:42.634413 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-7-default"] Nov 23 16:01:42 crc kubenswrapper[5050]: W1123 16:01:42.641659 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podeddf7db8_3645_40c0_96ed_ec6004596f9f.slice/crio-358412b9ec451a8bb67e8fe2f869cd86e6eb8d5669636ab0958bdf4b0a9f1d48 WatchSource:0}: Error finding container 358412b9ec451a8bb67e8fe2f869cd86e6eb8d5669636ab0958bdf4b0a9f1d48: Status 404 returned error can't find the container with id 358412b9ec451a8bb67e8fe2f869cd86e6eb8d5669636ab0958bdf4b0a9f1d48 Nov 23 16:01:43 crc kubenswrapper[5050]: I1123 16:01:43.007654 5050 generic.go:334] "Generic (PLEG): container finished" podID="eddf7db8-3645-40c0-96ed-ec6004596f9f" containerID="fa4b64dabe63914d29dc2700faef5fa3d4a39e94ec82e779cbf8c75ad42295b9" exitCode=0 Nov 23 16:01:43 crc kubenswrapper[5050]: I1123 16:01:43.007722 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-7-default" event={"ID":"eddf7db8-3645-40c0-96ed-ec6004596f9f","Type":"ContainerDied","Data":"fa4b64dabe63914d29dc2700faef5fa3d4a39e94ec82e779cbf8c75ad42295b9"} Nov 23 16:01:43 crc kubenswrapper[5050]: I1123 16:01:43.007758 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-7-default" event={"ID":"eddf7db8-3645-40c0-96ed-ec6004596f9f","Type":"ContainerStarted","Data":"358412b9ec451a8bb67e8fe2f869cd86e6eb8d5669636ab0958bdf4b0a9f1d48"} Nov 23 16:01:43 crc kubenswrapper[5050]: I1123 16:01:43.566539 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="424ad2c2-90c9-402e-bddc-03ff6f8e66a2" path="/var/lib/kubelet/pods/424ad2c2-90c9-402e-bddc-03ff6f8e66a2/volumes" Nov 23 16:01:44 crc kubenswrapper[5050]: I1123 16:01:44.485771 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-7-default" Nov 23 16:01:44 crc kubenswrapper[5050]: I1123 16:01:44.509216 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_mariadb-client-7-default_eddf7db8-3645-40c0-96ed-ec6004596f9f/mariadb-client-7-default/0.log" Nov 23 16:01:44 crc kubenswrapper[5050]: I1123 16:01:44.539575 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-client-7-default"] Nov 23 16:01:44 crc kubenswrapper[5050]: I1123 16:01:44.545348 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mariadb-client-7-default"] Nov 23 16:01:44 crc kubenswrapper[5050]: I1123 16:01:44.640687 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rpgzh\" (UniqueName: \"kubernetes.io/projected/eddf7db8-3645-40c0-96ed-ec6004596f9f-kube-api-access-rpgzh\") pod \"eddf7db8-3645-40c0-96ed-ec6004596f9f\" (UID: \"eddf7db8-3645-40c0-96ed-ec6004596f9f\") " Nov 23 16:01:44 crc kubenswrapper[5050]: I1123 16:01:44.649119 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/eddf7db8-3645-40c0-96ed-ec6004596f9f-kube-api-access-rpgzh" (OuterVolumeSpecName: "kube-api-access-rpgzh") pod "eddf7db8-3645-40c0-96ed-ec6004596f9f" (UID: "eddf7db8-3645-40c0-96ed-ec6004596f9f"). InnerVolumeSpecName "kube-api-access-rpgzh". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 16:01:44 crc kubenswrapper[5050]: I1123 16:01:44.721305 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mariadb-client-2"] Nov 23 16:01:44 crc kubenswrapper[5050]: E1123 16:01:44.721706 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eddf7db8-3645-40c0-96ed-ec6004596f9f" containerName="mariadb-client-7-default" Nov 23 16:01:44 crc kubenswrapper[5050]: I1123 16:01:44.721722 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="eddf7db8-3645-40c0-96ed-ec6004596f9f" containerName="mariadb-client-7-default" Nov 23 16:01:44 crc kubenswrapper[5050]: I1123 16:01:44.721876 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="eddf7db8-3645-40c0-96ed-ec6004596f9f" containerName="mariadb-client-7-default" Nov 23 16:01:44 crc kubenswrapper[5050]: I1123 16:01:44.722472 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-2" Nov 23 16:01:44 crc kubenswrapper[5050]: I1123 16:01:44.735070 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-2"] Nov 23 16:01:44 crc kubenswrapper[5050]: I1123 16:01:44.744694 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rpgzh\" (UniqueName: \"kubernetes.io/projected/eddf7db8-3645-40c0-96ed-ec6004596f9f-kube-api-access-rpgzh\") on node \"crc\" DevicePath \"\"" Nov 23 16:01:44 crc kubenswrapper[5050]: I1123 16:01:44.846678 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fzbtq\" (UniqueName: \"kubernetes.io/projected/4097174b-5b18-45b4-bbe9-3b197cd8ae62-kube-api-access-fzbtq\") pod \"mariadb-client-2\" (UID: \"4097174b-5b18-45b4-bbe9-3b197cd8ae62\") " pod="openstack/mariadb-client-2" Nov 23 16:01:44 crc kubenswrapper[5050]: I1123 16:01:44.948425 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fzbtq\" (UniqueName: \"kubernetes.io/projected/4097174b-5b18-45b4-bbe9-3b197cd8ae62-kube-api-access-fzbtq\") pod \"mariadb-client-2\" (UID: \"4097174b-5b18-45b4-bbe9-3b197cd8ae62\") " pod="openstack/mariadb-client-2" Nov 23 16:01:44 crc kubenswrapper[5050]: I1123 16:01:44.974203 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fzbtq\" (UniqueName: \"kubernetes.io/projected/4097174b-5b18-45b4-bbe9-3b197cd8ae62-kube-api-access-fzbtq\") pod \"mariadb-client-2\" (UID: \"4097174b-5b18-45b4-bbe9-3b197cd8ae62\") " pod="openstack/mariadb-client-2" Nov 23 16:01:45 crc kubenswrapper[5050]: I1123 16:01:45.032157 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="358412b9ec451a8bb67e8fe2f869cd86e6eb8d5669636ab0958bdf4b0a9f1d48" Nov 23 16:01:45 crc kubenswrapper[5050]: I1123 16:01:45.032242 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-7-default" Nov 23 16:01:45 crc kubenswrapper[5050]: I1123 16:01:45.056357 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client-2" Nov 23 16:01:45 crc kubenswrapper[5050]: I1123 16:01:45.563990 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="eddf7db8-3645-40c0-96ed-ec6004596f9f" path="/var/lib/kubelet/pods/eddf7db8-3645-40c0-96ed-ec6004596f9f/volumes" Nov 23 16:01:46 crc kubenswrapper[5050]: I1123 16:01:46.313149 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-2"] Nov 23 16:01:46 crc kubenswrapper[5050]: W1123 16:01:46.315228 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4097174b_5b18_45b4_bbe9_3b197cd8ae62.slice/crio-3b2b02634c5b6a8ec49b0237d2a03f4c4ef1b30d0a1450d7a0a08d0687cf6f14 WatchSource:0}: Error finding container 3b2b02634c5b6a8ec49b0237d2a03f4c4ef1b30d0a1450d7a0a08d0687cf6f14: Status 404 returned error can't find the container with id 3b2b02634c5b6a8ec49b0237d2a03f4c4ef1b30d0a1450d7a0a08d0687cf6f14 Nov 23 16:01:47 crc kubenswrapper[5050]: I1123 16:01:47.055883 5050 generic.go:334] "Generic (PLEG): container finished" podID="4097174b-5b18-45b4-bbe9-3b197cd8ae62" containerID="791b5dcb41c80301c9185f3bba166ea79d1275eb5f2a12921db851d4b437795a" exitCode=0 Nov 23 16:01:47 crc kubenswrapper[5050]: I1123 16:01:47.056537 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-2" event={"ID":"4097174b-5b18-45b4-bbe9-3b197cd8ae62","Type":"ContainerDied","Data":"791b5dcb41c80301c9185f3bba166ea79d1275eb5f2a12921db851d4b437795a"} Nov 23 16:01:47 crc kubenswrapper[5050]: I1123 16:01:47.056607 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-2" event={"ID":"4097174b-5b18-45b4-bbe9-3b197cd8ae62","Type":"ContainerStarted","Data":"3b2b02634c5b6a8ec49b0237d2a03f4c4ef1b30d0a1450d7a0a08d0687cf6f14"} Nov 23 16:01:48 crc kubenswrapper[5050]: I1123 16:01:48.562522 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-2" Nov 23 16:01:48 crc kubenswrapper[5050]: I1123 16:01:48.584156 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_mariadb-client-2_4097174b-5b18-45b4-bbe9-3b197cd8ae62/mariadb-client-2/0.log" Nov 23 16:01:48 crc kubenswrapper[5050]: I1123 16:01:48.621417 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-client-2"] Nov 23 16:01:48 crc kubenswrapper[5050]: I1123 16:01:48.628435 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mariadb-client-2"] Nov 23 16:01:48 crc kubenswrapper[5050]: I1123 16:01:48.630032 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fzbtq\" (UniqueName: \"kubernetes.io/projected/4097174b-5b18-45b4-bbe9-3b197cd8ae62-kube-api-access-fzbtq\") pod \"4097174b-5b18-45b4-bbe9-3b197cd8ae62\" (UID: \"4097174b-5b18-45b4-bbe9-3b197cd8ae62\") " Nov 23 16:01:48 crc kubenswrapper[5050]: I1123 16:01:48.655112 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4097174b-5b18-45b4-bbe9-3b197cd8ae62-kube-api-access-fzbtq" (OuterVolumeSpecName: "kube-api-access-fzbtq") pod "4097174b-5b18-45b4-bbe9-3b197cd8ae62" (UID: "4097174b-5b18-45b4-bbe9-3b197cd8ae62"). InnerVolumeSpecName "kube-api-access-fzbtq". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 16:01:48 crc kubenswrapper[5050]: I1123 16:01:48.733207 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fzbtq\" (UniqueName: \"kubernetes.io/projected/4097174b-5b18-45b4-bbe9-3b197cd8ae62-kube-api-access-fzbtq\") on node \"crc\" DevicePath \"\"" Nov 23 16:01:49 crc kubenswrapper[5050]: I1123 16:01:49.077488 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3b2b02634c5b6a8ec49b0237d2a03f4c4ef1b30d0a1450d7a0a08d0687cf6f14" Nov 23 16:01:49 crc kubenswrapper[5050]: I1123 16:01:49.077587 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-2" Nov 23 16:01:49 crc kubenswrapper[5050]: I1123 16:01:49.559835 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4097174b-5b18-45b4-bbe9-3b197cd8ae62" path="/var/lib/kubelet/pods/4097174b-5b18-45b4-bbe9-3b197cd8ae62/volumes" Nov 23 16:02:06 crc kubenswrapper[5050]: I1123 16:02:06.246354 5050 scope.go:117] "RemoveContainer" containerID="cf148f11c2448ed887db614fa53c16566f4b8284694e122f0d289638f341b499" Nov 23 16:02:29 crc kubenswrapper[5050]: I1123 16:02:29.224967 5050 patch_prober.go:28] interesting pod/machine-config-daemon-hlrlq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 23 16:02:29 crc kubenswrapper[5050]: I1123 16:02:29.225922 5050 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 23 16:02:59 crc kubenswrapper[5050]: I1123 16:02:59.224500 5050 patch_prober.go:28] interesting pod/machine-config-daemon-hlrlq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 23 16:02:59 crc kubenswrapper[5050]: I1123 16:02:59.227506 5050 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 23 16:03:29 crc kubenswrapper[5050]: I1123 16:03:29.225036 5050 patch_prober.go:28] interesting pod/machine-config-daemon-hlrlq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 23 16:03:29 crc kubenswrapper[5050]: I1123 16:03:29.225862 5050 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 23 16:03:29 crc kubenswrapper[5050]: I1123 16:03:29.225930 5050 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" 
pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" Nov 23 16:03:29 crc kubenswrapper[5050]: I1123 16:03:29.227135 5050 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"3fc70d33f56eea45a48b3911893d3dc4f60ad925817000cb324b1423c665b64b"} pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 23 16:03:29 crc kubenswrapper[5050]: I1123 16:03:29.227238 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" containerID="cri-o://3fc70d33f56eea45a48b3911893d3dc4f60ad925817000cb324b1423c665b64b" gracePeriod=600 Nov 23 16:03:29 crc kubenswrapper[5050]: E1123 16:03:29.361333 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 16:03:29 crc kubenswrapper[5050]: I1123 16:03:29.454976 5050 generic.go:334] "Generic (PLEG): container finished" podID="1d998909-9470-47ef-87e8-b34f0473682f" containerID="3fc70d33f56eea45a48b3911893d3dc4f60ad925817000cb324b1423c665b64b" exitCode=0 Nov 23 16:03:29 crc kubenswrapper[5050]: I1123 16:03:29.455057 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" event={"ID":"1d998909-9470-47ef-87e8-b34f0473682f","Type":"ContainerDied","Data":"3fc70d33f56eea45a48b3911893d3dc4f60ad925817000cb324b1423c665b64b"} Nov 23 16:03:29 crc kubenswrapper[5050]: I1123 16:03:29.455110 5050 scope.go:117] "RemoveContainer" containerID="c18d191ff51dd31497250d2492a5d834c9d4fb3c3d8d531c739172043c30b828" Nov 23 16:03:29 crc kubenswrapper[5050]: I1123 16:03:29.456169 5050 scope.go:117] "RemoveContainer" containerID="3fc70d33f56eea45a48b3911893d3dc4f60ad925817000cb324b1423c665b64b" Nov 23 16:03:29 crc kubenswrapper[5050]: E1123 16:03:29.456627 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 16:03:42 crc kubenswrapper[5050]: I1123 16:03:42.549107 5050 scope.go:117] "RemoveContainer" containerID="3fc70d33f56eea45a48b3911893d3dc4f60ad925817000cb324b1423c665b64b" Nov 23 16:03:42 crc kubenswrapper[5050]: E1123 16:03:42.550187 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 16:03:57 crc kubenswrapper[5050]: I1123 
16:03:57.549563 5050 scope.go:117] "RemoveContainer" containerID="3fc70d33f56eea45a48b3911893d3dc4f60ad925817000cb324b1423c665b64b" Nov 23 16:03:57 crc kubenswrapper[5050]: E1123 16:03:57.550665 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 16:04:12 crc kubenswrapper[5050]: I1123 16:04:12.549010 5050 scope.go:117] "RemoveContainer" containerID="3fc70d33f56eea45a48b3911893d3dc4f60ad925817000cb324b1423c665b64b" Nov 23 16:04:12 crc kubenswrapper[5050]: E1123 16:04:12.549940 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 16:04:25 crc kubenswrapper[5050]: I1123 16:04:25.561391 5050 scope.go:117] "RemoveContainer" containerID="3fc70d33f56eea45a48b3911893d3dc4f60ad925817000cb324b1423c665b64b" Nov 23 16:04:25 crc kubenswrapper[5050]: E1123 16:04:25.562711 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 16:04:36 crc kubenswrapper[5050]: I1123 16:04:36.548762 5050 scope.go:117] "RemoveContainer" containerID="3fc70d33f56eea45a48b3911893d3dc4f60ad925817000cb324b1423c665b64b" Nov 23 16:04:36 crc kubenswrapper[5050]: E1123 16:04:36.549987 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 16:04:45 crc kubenswrapper[5050]: I1123 16:04:45.320110 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-s2vvw"] Nov 23 16:04:45 crc kubenswrapper[5050]: E1123 16:04:45.321890 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4097174b-5b18-45b4-bbe9-3b197cd8ae62" containerName="mariadb-client-2" Nov 23 16:04:45 crc kubenswrapper[5050]: I1123 16:04:45.321911 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="4097174b-5b18-45b4-bbe9-3b197cd8ae62" containerName="mariadb-client-2" Nov 23 16:04:45 crc kubenswrapper[5050]: I1123 16:04:45.323459 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="4097174b-5b18-45b4-bbe9-3b197cd8ae62" containerName="mariadb-client-2" Nov 23 16:04:45 crc kubenswrapper[5050]: I1123 16:04:45.353393 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-s2vvw" Nov 23 16:04:45 crc kubenswrapper[5050]: I1123 16:04:45.355056 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-s2vvw"] Nov 23 16:04:45 crc kubenswrapper[5050]: I1123 16:04:45.522973 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n2hqr\" (UniqueName: \"kubernetes.io/projected/0ce93732-1aeb-4045-ae72-8fdd28a5c455-kube-api-access-n2hqr\") pod \"redhat-marketplace-s2vvw\" (UID: \"0ce93732-1aeb-4045-ae72-8fdd28a5c455\") " pod="openshift-marketplace/redhat-marketplace-s2vvw" Nov 23 16:04:45 crc kubenswrapper[5050]: I1123 16:04:45.523477 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0ce93732-1aeb-4045-ae72-8fdd28a5c455-catalog-content\") pod \"redhat-marketplace-s2vvw\" (UID: \"0ce93732-1aeb-4045-ae72-8fdd28a5c455\") " pod="openshift-marketplace/redhat-marketplace-s2vvw" Nov 23 16:04:45 crc kubenswrapper[5050]: I1123 16:04:45.523732 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0ce93732-1aeb-4045-ae72-8fdd28a5c455-utilities\") pod \"redhat-marketplace-s2vvw\" (UID: \"0ce93732-1aeb-4045-ae72-8fdd28a5c455\") " pod="openshift-marketplace/redhat-marketplace-s2vvw" Nov 23 16:04:45 crc kubenswrapper[5050]: I1123 16:04:45.625429 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0ce93732-1aeb-4045-ae72-8fdd28a5c455-catalog-content\") pod \"redhat-marketplace-s2vvw\" (UID: \"0ce93732-1aeb-4045-ae72-8fdd28a5c455\") " pod="openshift-marketplace/redhat-marketplace-s2vvw" Nov 23 16:04:45 crc kubenswrapper[5050]: I1123 16:04:45.625538 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0ce93732-1aeb-4045-ae72-8fdd28a5c455-utilities\") pod \"redhat-marketplace-s2vvw\" (UID: \"0ce93732-1aeb-4045-ae72-8fdd28a5c455\") " pod="openshift-marketplace/redhat-marketplace-s2vvw" Nov 23 16:04:45 crc kubenswrapper[5050]: I1123 16:04:45.625589 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n2hqr\" (UniqueName: \"kubernetes.io/projected/0ce93732-1aeb-4045-ae72-8fdd28a5c455-kube-api-access-n2hqr\") pod \"redhat-marketplace-s2vvw\" (UID: \"0ce93732-1aeb-4045-ae72-8fdd28a5c455\") " pod="openshift-marketplace/redhat-marketplace-s2vvw" Nov 23 16:04:45 crc kubenswrapper[5050]: I1123 16:04:45.626095 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0ce93732-1aeb-4045-ae72-8fdd28a5c455-catalog-content\") pod \"redhat-marketplace-s2vvw\" (UID: \"0ce93732-1aeb-4045-ae72-8fdd28a5c455\") " pod="openshift-marketplace/redhat-marketplace-s2vvw" Nov 23 16:04:45 crc kubenswrapper[5050]: I1123 16:04:45.626264 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0ce93732-1aeb-4045-ae72-8fdd28a5c455-utilities\") pod \"redhat-marketplace-s2vvw\" (UID: \"0ce93732-1aeb-4045-ae72-8fdd28a5c455\") " pod="openshift-marketplace/redhat-marketplace-s2vvw" Nov 23 16:04:45 crc kubenswrapper[5050]: I1123 16:04:45.645387 5050 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-n2hqr\" (UniqueName: \"kubernetes.io/projected/0ce93732-1aeb-4045-ae72-8fdd28a5c455-kube-api-access-n2hqr\") pod \"redhat-marketplace-s2vvw\" (UID: \"0ce93732-1aeb-4045-ae72-8fdd28a5c455\") " pod="openshift-marketplace/redhat-marketplace-s2vvw" Nov 23 16:04:45 crc kubenswrapper[5050]: I1123 16:04:45.684647 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-s2vvw" Nov 23 16:04:46 crc kubenswrapper[5050]: I1123 16:04:46.216000 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-s2vvw"] Nov 23 16:04:46 crc kubenswrapper[5050]: I1123 16:04:46.324252 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-s2vvw" event={"ID":"0ce93732-1aeb-4045-ae72-8fdd28a5c455","Type":"ContainerStarted","Data":"d3fc315d9bb4daa1bcdb0fbd2927dcbcca4c8005eb5ca33765e7d6537ce52da0"} Nov 23 16:04:47 crc kubenswrapper[5050]: I1123 16:04:47.337639 5050 generic.go:334] "Generic (PLEG): container finished" podID="0ce93732-1aeb-4045-ae72-8fdd28a5c455" containerID="fd90b729b7cb1c1c06518267525af47d227e0ab12fda2d6fdbdc1151baf4bbde" exitCode=0 Nov 23 16:04:47 crc kubenswrapper[5050]: I1123 16:04:47.338094 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-s2vvw" event={"ID":"0ce93732-1aeb-4045-ae72-8fdd28a5c455","Type":"ContainerDied","Data":"fd90b729b7cb1c1c06518267525af47d227e0ab12fda2d6fdbdc1151baf4bbde"} Nov 23 16:04:47 crc kubenswrapper[5050]: I1123 16:04:47.341103 5050 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 23 16:04:47 crc kubenswrapper[5050]: I1123 16:04:47.549704 5050 scope.go:117] "RemoveContainer" containerID="3fc70d33f56eea45a48b3911893d3dc4f60ad925817000cb324b1423c665b64b" Nov 23 16:04:47 crc kubenswrapper[5050]: E1123 16:04:47.550422 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 16:04:48 crc kubenswrapper[5050]: I1123 16:04:48.361398 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-s2vvw" event={"ID":"0ce93732-1aeb-4045-ae72-8fdd28a5c455","Type":"ContainerStarted","Data":"bbc5c72e8959b5a0507f86ce594c08ab0e1b27290b8e26ed862ce459837c663f"} Nov 23 16:04:49 crc kubenswrapper[5050]: I1123 16:04:49.375467 5050 generic.go:334] "Generic (PLEG): container finished" podID="0ce93732-1aeb-4045-ae72-8fdd28a5c455" containerID="bbc5c72e8959b5a0507f86ce594c08ab0e1b27290b8e26ed862ce459837c663f" exitCode=0 Nov 23 16:04:49 crc kubenswrapper[5050]: I1123 16:04:49.375629 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-s2vvw" event={"ID":"0ce93732-1aeb-4045-ae72-8fdd28a5c455","Type":"ContainerDied","Data":"bbc5c72e8959b5a0507f86ce594c08ab0e1b27290b8e26ed862ce459837c663f"} Nov 23 16:04:49 crc kubenswrapper[5050]: I1123 16:04:49.375861 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-s2vvw" 
event={"ID":"0ce93732-1aeb-4045-ae72-8fdd28a5c455","Type":"ContainerStarted","Data":"d99dde6e70b1397170cd89242d9b2221236fdcdef7d989e270e999cf3eaceedd"} Nov 23 16:04:49 crc kubenswrapper[5050]: I1123 16:04:49.408204 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-s2vvw" podStartSLOduration=2.954690544 podStartE2EDuration="4.408165596s" podCreationTimestamp="2025-11-23 16:04:45 +0000 UTC" firstStartedPulling="2025-11-23 16:04:47.340696561 +0000 UTC m=+4982.507693056" lastFinishedPulling="2025-11-23 16:04:48.794171583 +0000 UTC m=+4983.961168108" observedRunningTime="2025-11-23 16:04:49.404614746 +0000 UTC m=+4984.571611311" watchObservedRunningTime="2025-11-23 16:04:49.408165596 +0000 UTC m=+4984.575162131" Nov 23 16:04:55 crc kubenswrapper[5050]: I1123 16:04:55.685876 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-s2vvw" Nov 23 16:04:55 crc kubenswrapper[5050]: I1123 16:04:55.687145 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-s2vvw" Nov 23 16:04:55 crc kubenswrapper[5050]: I1123 16:04:55.769661 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-s2vvw" Nov 23 16:04:56 crc kubenswrapper[5050]: I1123 16:04:56.531121 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-s2vvw" Nov 23 16:04:58 crc kubenswrapper[5050]: I1123 16:04:58.266082 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-s2vvw"] Nov 23 16:04:58 crc kubenswrapper[5050]: I1123 16:04:58.481294 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-s2vvw" podUID="0ce93732-1aeb-4045-ae72-8fdd28a5c455" containerName="registry-server" containerID="cri-o://d99dde6e70b1397170cd89242d9b2221236fdcdef7d989e270e999cf3eaceedd" gracePeriod=2 Nov 23 16:04:59 crc kubenswrapper[5050]: I1123 16:04:59.024068 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-s2vvw" Nov 23 16:04:59 crc kubenswrapper[5050]: I1123 16:04:59.182462 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0ce93732-1aeb-4045-ae72-8fdd28a5c455-catalog-content\") pod \"0ce93732-1aeb-4045-ae72-8fdd28a5c455\" (UID: \"0ce93732-1aeb-4045-ae72-8fdd28a5c455\") " Nov 23 16:04:59 crc kubenswrapper[5050]: I1123 16:04:59.182550 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0ce93732-1aeb-4045-ae72-8fdd28a5c455-utilities\") pod \"0ce93732-1aeb-4045-ae72-8fdd28a5c455\" (UID: \"0ce93732-1aeb-4045-ae72-8fdd28a5c455\") " Nov 23 16:04:59 crc kubenswrapper[5050]: I1123 16:04:59.182586 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n2hqr\" (UniqueName: \"kubernetes.io/projected/0ce93732-1aeb-4045-ae72-8fdd28a5c455-kube-api-access-n2hqr\") pod \"0ce93732-1aeb-4045-ae72-8fdd28a5c455\" (UID: \"0ce93732-1aeb-4045-ae72-8fdd28a5c455\") " Nov 23 16:04:59 crc kubenswrapper[5050]: I1123 16:04:59.183999 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0ce93732-1aeb-4045-ae72-8fdd28a5c455-utilities" (OuterVolumeSpecName: "utilities") pod "0ce93732-1aeb-4045-ae72-8fdd28a5c455" (UID: "0ce93732-1aeb-4045-ae72-8fdd28a5c455"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 16:04:59 crc kubenswrapper[5050]: I1123 16:04:59.197764 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0ce93732-1aeb-4045-ae72-8fdd28a5c455-kube-api-access-n2hqr" (OuterVolumeSpecName: "kube-api-access-n2hqr") pod "0ce93732-1aeb-4045-ae72-8fdd28a5c455" (UID: "0ce93732-1aeb-4045-ae72-8fdd28a5c455"). InnerVolumeSpecName "kube-api-access-n2hqr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 16:04:59 crc kubenswrapper[5050]: I1123 16:04:59.217776 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0ce93732-1aeb-4045-ae72-8fdd28a5c455-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "0ce93732-1aeb-4045-ae72-8fdd28a5c455" (UID: "0ce93732-1aeb-4045-ae72-8fdd28a5c455"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 16:04:59 crc kubenswrapper[5050]: I1123 16:04:59.285699 5050 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0ce93732-1aeb-4045-ae72-8fdd28a5c455-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 23 16:04:59 crc kubenswrapper[5050]: I1123 16:04:59.285924 5050 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0ce93732-1aeb-4045-ae72-8fdd28a5c455-utilities\") on node \"crc\" DevicePath \"\"" Nov 23 16:04:59 crc kubenswrapper[5050]: I1123 16:04:59.285965 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n2hqr\" (UniqueName: \"kubernetes.io/projected/0ce93732-1aeb-4045-ae72-8fdd28a5c455-kube-api-access-n2hqr\") on node \"crc\" DevicePath \"\"" Nov 23 16:04:59 crc kubenswrapper[5050]: I1123 16:04:59.498379 5050 generic.go:334] "Generic (PLEG): container finished" podID="0ce93732-1aeb-4045-ae72-8fdd28a5c455" containerID="d99dde6e70b1397170cd89242d9b2221236fdcdef7d989e270e999cf3eaceedd" exitCode=0 Nov 23 16:04:59 crc kubenswrapper[5050]: I1123 16:04:59.498478 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-s2vvw" Nov 23 16:04:59 crc kubenswrapper[5050]: I1123 16:04:59.498529 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-s2vvw" event={"ID":"0ce93732-1aeb-4045-ae72-8fdd28a5c455","Type":"ContainerDied","Data":"d99dde6e70b1397170cd89242d9b2221236fdcdef7d989e270e999cf3eaceedd"} Nov 23 16:04:59 crc kubenswrapper[5050]: I1123 16:04:59.498965 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-s2vvw" event={"ID":"0ce93732-1aeb-4045-ae72-8fdd28a5c455","Type":"ContainerDied","Data":"d3fc315d9bb4daa1bcdb0fbd2927dcbcca4c8005eb5ca33765e7d6537ce52da0"} Nov 23 16:04:59 crc kubenswrapper[5050]: I1123 16:04:59.499014 5050 scope.go:117] "RemoveContainer" containerID="d99dde6e70b1397170cd89242d9b2221236fdcdef7d989e270e999cf3eaceedd" Nov 23 16:04:59 crc kubenswrapper[5050]: I1123 16:04:59.549651 5050 scope.go:117] "RemoveContainer" containerID="3fc70d33f56eea45a48b3911893d3dc4f60ad925817000cb324b1423c665b64b" Nov 23 16:04:59 crc kubenswrapper[5050]: E1123 16:04:59.550114 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 16:04:59 crc kubenswrapper[5050]: I1123 16:04:59.571963 5050 scope.go:117] "RemoveContainer" containerID="bbc5c72e8959b5a0507f86ce594c08ab0e1b27290b8e26ed862ce459837c663f" Nov 23 16:04:59 crc kubenswrapper[5050]: I1123 16:04:59.584677 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-s2vvw"] Nov 23 16:04:59 crc kubenswrapper[5050]: I1123 16:04:59.603305 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-s2vvw"] Nov 23 16:04:59 crc kubenswrapper[5050]: I1123 16:04:59.623615 5050 scope.go:117] "RemoveContainer" containerID="fd90b729b7cb1c1c06518267525af47d227e0ab12fda2d6fdbdc1151baf4bbde" Nov 23 16:04:59 crc kubenswrapper[5050]: 
I1123 16:04:59.644495 5050 scope.go:117] "RemoveContainer" containerID="d99dde6e70b1397170cd89242d9b2221236fdcdef7d989e270e999cf3eaceedd" Nov 23 16:04:59 crc kubenswrapper[5050]: E1123 16:04:59.645140 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d99dde6e70b1397170cd89242d9b2221236fdcdef7d989e270e999cf3eaceedd\": container with ID starting with d99dde6e70b1397170cd89242d9b2221236fdcdef7d989e270e999cf3eaceedd not found: ID does not exist" containerID="d99dde6e70b1397170cd89242d9b2221236fdcdef7d989e270e999cf3eaceedd" Nov 23 16:04:59 crc kubenswrapper[5050]: I1123 16:04:59.645224 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d99dde6e70b1397170cd89242d9b2221236fdcdef7d989e270e999cf3eaceedd"} err="failed to get container status \"d99dde6e70b1397170cd89242d9b2221236fdcdef7d989e270e999cf3eaceedd\": rpc error: code = NotFound desc = could not find container \"d99dde6e70b1397170cd89242d9b2221236fdcdef7d989e270e999cf3eaceedd\": container with ID starting with d99dde6e70b1397170cd89242d9b2221236fdcdef7d989e270e999cf3eaceedd not found: ID does not exist" Nov 23 16:04:59 crc kubenswrapper[5050]: I1123 16:04:59.645270 5050 scope.go:117] "RemoveContainer" containerID="bbc5c72e8959b5a0507f86ce594c08ab0e1b27290b8e26ed862ce459837c663f" Nov 23 16:04:59 crc kubenswrapper[5050]: E1123 16:04:59.645695 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bbc5c72e8959b5a0507f86ce594c08ab0e1b27290b8e26ed862ce459837c663f\": container with ID starting with bbc5c72e8959b5a0507f86ce594c08ab0e1b27290b8e26ed862ce459837c663f not found: ID does not exist" containerID="bbc5c72e8959b5a0507f86ce594c08ab0e1b27290b8e26ed862ce459837c663f" Nov 23 16:04:59 crc kubenswrapper[5050]: I1123 16:04:59.645805 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bbc5c72e8959b5a0507f86ce594c08ab0e1b27290b8e26ed862ce459837c663f"} err="failed to get container status \"bbc5c72e8959b5a0507f86ce594c08ab0e1b27290b8e26ed862ce459837c663f\": rpc error: code = NotFound desc = could not find container \"bbc5c72e8959b5a0507f86ce594c08ab0e1b27290b8e26ed862ce459837c663f\": container with ID starting with bbc5c72e8959b5a0507f86ce594c08ab0e1b27290b8e26ed862ce459837c663f not found: ID does not exist" Nov 23 16:04:59 crc kubenswrapper[5050]: I1123 16:04:59.645887 5050 scope.go:117] "RemoveContainer" containerID="fd90b729b7cb1c1c06518267525af47d227e0ab12fda2d6fdbdc1151baf4bbde" Nov 23 16:04:59 crc kubenswrapper[5050]: E1123 16:04:59.647418 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fd90b729b7cb1c1c06518267525af47d227e0ab12fda2d6fdbdc1151baf4bbde\": container with ID starting with fd90b729b7cb1c1c06518267525af47d227e0ab12fda2d6fdbdc1151baf4bbde not found: ID does not exist" containerID="fd90b729b7cb1c1c06518267525af47d227e0ab12fda2d6fdbdc1151baf4bbde" Nov 23 16:04:59 crc kubenswrapper[5050]: I1123 16:04:59.647477 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fd90b729b7cb1c1c06518267525af47d227e0ab12fda2d6fdbdc1151baf4bbde"} err="failed to get container status \"fd90b729b7cb1c1c06518267525af47d227e0ab12fda2d6fdbdc1151baf4bbde\": rpc error: code = NotFound desc = could not find container \"fd90b729b7cb1c1c06518267525af47d227e0ab12fda2d6fdbdc1151baf4bbde\": container 
with ID starting with fd90b729b7cb1c1c06518267525af47d227e0ab12fda2d6fdbdc1151baf4bbde not found: ID does not exist" Nov 23 16:05:01 crc kubenswrapper[5050]: I1123 16:05:01.560928 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0ce93732-1aeb-4045-ae72-8fdd28a5c455" path="/var/lib/kubelet/pods/0ce93732-1aeb-4045-ae72-8fdd28a5c455/volumes" Nov 23 16:05:10 crc kubenswrapper[5050]: I1123 16:05:10.549197 5050 scope.go:117] "RemoveContainer" containerID="3fc70d33f56eea45a48b3911893d3dc4f60ad925817000cb324b1423c665b64b" Nov 23 16:05:10 crc kubenswrapper[5050]: E1123 16:05:10.550346 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 16:05:21 crc kubenswrapper[5050]: I1123 16:05:21.550096 5050 scope.go:117] "RemoveContainer" containerID="3fc70d33f56eea45a48b3911893d3dc4f60ad925817000cb324b1423c665b64b" Nov 23 16:05:21 crc kubenswrapper[5050]: E1123 16:05:21.551048 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 16:05:36 crc kubenswrapper[5050]: I1123 16:05:36.549479 5050 scope.go:117] "RemoveContainer" containerID="3fc70d33f56eea45a48b3911893d3dc4f60ad925817000cb324b1423c665b64b" Nov 23 16:05:36 crc kubenswrapper[5050]: E1123 16:05:36.550618 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 16:05:47 crc kubenswrapper[5050]: I1123 16:05:47.549076 5050 scope.go:117] "RemoveContainer" containerID="3fc70d33f56eea45a48b3911893d3dc4f60ad925817000cb324b1423c665b64b" Nov 23 16:05:47 crc kubenswrapper[5050]: E1123 16:05:47.550380 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 16:05:58 crc kubenswrapper[5050]: I1123 16:05:58.548796 5050 scope.go:117] "RemoveContainer" containerID="3fc70d33f56eea45a48b3911893d3dc4f60ad925817000cb324b1423c665b64b" Nov 23 16:05:58 crc kubenswrapper[5050]: E1123 16:05:58.549995 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 16:06:06 crc kubenswrapper[5050]: I1123 16:06:06.495642 5050 scope.go:117] "RemoveContainer" containerID="2383b3c5973bd1bcafe22515306281fbd6b9cc09a1d301862448276ae76a3b4e" Nov 23 16:06:06 crc kubenswrapper[5050]: I1123 16:06:06.537959 5050 scope.go:117] "RemoveContainer" containerID="8dbffce1d84fbf64ab72e001bf96bb4dbaf7216e0beb5ac29fe96cce16e6a767" Nov 23 16:06:10 crc kubenswrapper[5050]: I1123 16:06:10.548680 5050 scope.go:117] "RemoveContainer" containerID="3fc70d33f56eea45a48b3911893d3dc4f60ad925817000cb324b1423c665b64b" Nov 23 16:06:10 crc kubenswrapper[5050]: E1123 16:06:10.549571 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 16:06:24 crc kubenswrapper[5050]: I1123 16:06:24.202719 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mariadb-copy-data"] Nov 23 16:06:24 crc kubenswrapper[5050]: E1123 16:06:24.204099 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0ce93732-1aeb-4045-ae72-8fdd28a5c455" containerName="extract-utilities" Nov 23 16:06:24 crc kubenswrapper[5050]: I1123 16:06:24.204122 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="0ce93732-1aeb-4045-ae72-8fdd28a5c455" containerName="extract-utilities" Nov 23 16:06:24 crc kubenswrapper[5050]: E1123 16:06:24.204153 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0ce93732-1aeb-4045-ae72-8fdd28a5c455" containerName="extract-content" Nov 23 16:06:24 crc kubenswrapper[5050]: I1123 16:06:24.204161 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="0ce93732-1aeb-4045-ae72-8fdd28a5c455" containerName="extract-content" Nov 23 16:06:24 crc kubenswrapper[5050]: E1123 16:06:24.204179 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0ce93732-1aeb-4045-ae72-8fdd28a5c455" containerName="registry-server" Nov 23 16:06:24 crc kubenswrapper[5050]: I1123 16:06:24.204191 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="0ce93732-1aeb-4045-ae72-8fdd28a5c455" containerName="registry-server" Nov 23 16:06:24 crc kubenswrapper[5050]: I1123 16:06:24.204397 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="0ce93732-1aeb-4045-ae72-8fdd28a5c455" containerName="registry-server" Nov 23 16:06:24 crc kubenswrapper[5050]: I1123 16:06:24.205224 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-copy-data" Nov 23 16:06:24 crc kubenswrapper[5050]: I1123 16:06:24.208251 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-8nrbj" Nov 23 16:06:24 crc kubenswrapper[5050]: I1123 16:06:24.212012 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-copy-data"] Nov 23 16:06:24 crc kubenswrapper[5050]: I1123 16:06:24.315889 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-d5090761-6249-4307-a9aa-7128f07d7cf3\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-d5090761-6249-4307-a9aa-7128f07d7cf3\") pod \"mariadb-copy-data\" (UID: \"f8acfa10-0e5b-41f9-9ac1-a35d9108f222\") " pod="openstack/mariadb-copy-data" Nov 23 16:06:24 crc kubenswrapper[5050]: I1123 16:06:24.316095 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bvfmm\" (UniqueName: \"kubernetes.io/projected/f8acfa10-0e5b-41f9-9ac1-a35d9108f222-kube-api-access-bvfmm\") pod \"mariadb-copy-data\" (UID: \"f8acfa10-0e5b-41f9-9ac1-a35d9108f222\") " pod="openstack/mariadb-copy-data" Nov 23 16:06:24 crc kubenswrapper[5050]: I1123 16:06:24.419976 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-d5090761-6249-4307-a9aa-7128f07d7cf3\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-d5090761-6249-4307-a9aa-7128f07d7cf3\") pod \"mariadb-copy-data\" (UID: \"f8acfa10-0e5b-41f9-9ac1-a35d9108f222\") " pod="openstack/mariadb-copy-data" Nov 23 16:06:24 crc kubenswrapper[5050]: I1123 16:06:24.420152 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bvfmm\" (UniqueName: \"kubernetes.io/projected/f8acfa10-0e5b-41f9-9ac1-a35d9108f222-kube-api-access-bvfmm\") pod \"mariadb-copy-data\" (UID: \"f8acfa10-0e5b-41f9-9ac1-a35d9108f222\") " pod="openstack/mariadb-copy-data" Nov 23 16:06:24 crc kubenswrapper[5050]: I1123 16:06:24.426992 5050 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Nov 23 16:06:24 crc kubenswrapper[5050]: I1123 16:06:24.427055 5050 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-d5090761-6249-4307-a9aa-7128f07d7cf3\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-d5090761-6249-4307-a9aa-7128f07d7cf3\") pod \"mariadb-copy-data\" (UID: \"f8acfa10-0e5b-41f9-9ac1-a35d9108f222\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/2b53cc07347b0114712f400e68cdfbf4e01055a9229b9a2c3f4ec78515528aba/globalmount\"" pod="openstack/mariadb-copy-data" Nov 23 16:06:24 crc kubenswrapper[5050]: I1123 16:06:24.453600 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bvfmm\" (UniqueName: \"kubernetes.io/projected/f8acfa10-0e5b-41f9-9ac1-a35d9108f222-kube-api-access-bvfmm\") pod \"mariadb-copy-data\" (UID: \"f8acfa10-0e5b-41f9-9ac1-a35d9108f222\") " pod="openstack/mariadb-copy-data" Nov 23 16:06:24 crc kubenswrapper[5050]: I1123 16:06:24.470862 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-d5090761-6249-4307-a9aa-7128f07d7cf3\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-d5090761-6249-4307-a9aa-7128f07d7cf3\") pod \"mariadb-copy-data\" (UID: \"f8acfa10-0e5b-41f9-9ac1-a35d9108f222\") " pod="openstack/mariadb-copy-data" Nov 23 16:06:24 crc kubenswrapper[5050]: I1123 16:06:24.549070 5050 scope.go:117] "RemoveContainer" containerID="3fc70d33f56eea45a48b3911893d3dc4f60ad925817000cb324b1423c665b64b" Nov 23 16:06:24 crc kubenswrapper[5050]: E1123 16:06:24.549401 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 16:06:24 crc kubenswrapper[5050]: I1123 16:06:24.552140 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-copy-data" Nov 23 16:06:25 crc kubenswrapper[5050]: I1123 16:06:25.193247 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-copy-data"] Nov 23 16:06:25 crc kubenswrapper[5050]: I1123 16:06:25.512245 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-copy-data" event={"ID":"f8acfa10-0e5b-41f9-9ac1-a35d9108f222","Type":"ContainerStarted","Data":"945238d88966376485f3031d7d7e1804c408ad145e30ab490c7222fc78f60db3"} Nov 23 16:06:26 crc kubenswrapper[5050]: I1123 16:06:26.523073 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-copy-data" event={"ID":"f8acfa10-0e5b-41f9-9ac1-a35d9108f222","Type":"ContainerStarted","Data":"f2cb36ba1161d9d48bd36e2cbb329b5c1719d413f16d6bdcb28916ef87e8af01"} Nov 23 16:06:26 crc kubenswrapper[5050]: I1123 16:06:26.552517 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/mariadb-copy-data" podStartSLOduration=3.55248747 podStartE2EDuration="3.55248747s" podCreationTimestamp="2025-11-23 16:06:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 16:06:26.541950363 +0000 UTC m=+5081.708946898" watchObservedRunningTime="2025-11-23 16:06:26.55248747 +0000 UTC m=+5081.719483975" Nov 23 16:06:29 crc kubenswrapper[5050]: I1123 16:06:29.675571 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mariadb-client"] Nov 23 16:06:29 crc kubenswrapper[5050]: I1123 16:06:29.677479 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client" Nov 23 16:06:29 crc kubenswrapper[5050]: I1123 16:06:29.691823 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client"] Nov 23 16:06:29 crc kubenswrapper[5050]: I1123 16:06:29.723883 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7xstk\" (UniqueName: \"kubernetes.io/projected/36d91410-991a-481b-8e51-48552449c184-kube-api-access-7xstk\") pod \"mariadb-client\" (UID: \"36d91410-991a-481b-8e51-48552449c184\") " pod="openstack/mariadb-client" Nov 23 16:06:29 crc kubenswrapper[5050]: I1123 16:06:29.825812 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7xstk\" (UniqueName: \"kubernetes.io/projected/36d91410-991a-481b-8e51-48552449c184-kube-api-access-7xstk\") pod \"mariadb-client\" (UID: \"36d91410-991a-481b-8e51-48552449c184\") " pod="openstack/mariadb-client" Nov 23 16:06:29 crc kubenswrapper[5050]: I1123 16:06:29.847091 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7xstk\" (UniqueName: \"kubernetes.io/projected/36d91410-991a-481b-8e51-48552449c184-kube-api-access-7xstk\") pod \"mariadb-client\" (UID: \"36d91410-991a-481b-8e51-48552449c184\") " pod="openstack/mariadb-client" Nov 23 16:06:30 crc kubenswrapper[5050]: I1123 16:06:30.009105 5050 util.go:30] "No sandbox for pod can be found. 
Nov 23 16:06:30 crc kubenswrapper[5050]: I1123 16:06:30.231638 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client"]
Nov 23 16:06:30 crc kubenswrapper[5050]: I1123 16:06:30.559906 5050 generic.go:334] "Generic (PLEG): container finished" podID="36d91410-991a-481b-8e51-48552449c184" containerID="ea31dc6519aca59a5126623a1db0829c746dad379b16e78288734c0aa8eadf6c" exitCode=0
Nov 23 16:06:30 crc kubenswrapper[5050]: I1123 16:06:30.560019 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client" event={"ID":"36d91410-991a-481b-8e51-48552449c184","Type":"ContainerDied","Data":"ea31dc6519aca59a5126623a1db0829c746dad379b16e78288734c0aa8eadf6c"}
Nov 23 16:06:30 crc kubenswrapper[5050]: I1123 16:06:30.560606 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client" event={"ID":"36d91410-991a-481b-8e51-48552449c184","Type":"ContainerStarted","Data":"12163299ac45bd807aee22fa2bbb28a3d49182e54017d30a9ef5b571f3f5204d"}
Nov 23 16:06:31 crc kubenswrapper[5050]: I1123 16:06:31.948988 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client"
Nov 23 16:06:32 crc kubenswrapper[5050]: I1123 16:06:32.017342 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_mariadb-client_36d91410-991a-481b-8e51-48552449c184/mariadb-client/0.log"
Nov 23 16:06:32 crc kubenswrapper[5050]: I1123 16:06:32.043138 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-client"]
Nov 23 16:06:32 crc kubenswrapper[5050]: I1123 16:06:32.048249 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mariadb-client"]
Nov 23 16:06:32 crc kubenswrapper[5050]: I1123 16:06:32.071242 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7xstk\" (UniqueName: \"kubernetes.io/projected/36d91410-991a-481b-8e51-48552449c184-kube-api-access-7xstk\") pod \"36d91410-991a-481b-8e51-48552449c184\" (UID: \"36d91410-991a-481b-8e51-48552449c184\") "
Nov 23 16:06:32 crc kubenswrapper[5050]: I1123 16:06:32.077520 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/36d91410-991a-481b-8e51-48552449c184-kube-api-access-7xstk" (OuterVolumeSpecName: "kube-api-access-7xstk") pod "36d91410-991a-481b-8e51-48552449c184" (UID: "36d91410-991a-481b-8e51-48552449c184"). InnerVolumeSpecName "kube-api-access-7xstk". PluginName "kubernetes.io/projected", VolumeGidValue ""
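The "Finished parsing log file" path above follows the kubelet/CRI log layout /var/log/pods/<namespace>_<podName>_<podUID>/<containerName>/<restartCount>.log (restart count 0 for a first run). Reconstructed with the names from the surrounding entries:

package main

import "fmt"

func podLogFile(ns, pod, uid, container string, restart int) string {
	return fmt.Sprintf("/var/log/pods/%s_%s_%s/%s/%d.log", ns, pod, uid, container, restart)
}

func main() {
	// Reproduces the path in the "Finished parsing log file" entry above.
	fmt.Println(podLogFile("openstack", "mariadb-client", "36d91410-991a-481b-8e51-48552449c184", "mariadb-client", 0))
}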
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 16:06:32 crc kubenswrapper[5050]: I1123 16:06:32.174242 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7xstk\" (UniqueName: \"kubernetes.io/projected/36d91410-991a-481b-8e51-48552449c184-kube-api-access-7xstk\") on node \"crc\" DevicePath \"\"" Nov 23 16:06:32 crc kubenswrapper[5050]: I1123 16:06:32.184374 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mariadb-client"] Nov 23 16:06:32 crc kubenswrapper[5050]: E1123 16:06:32.185323 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="36d91410-991a-481b-8e51-48552449c184" containerName="mariadb-client" Nov 23 16:06:32 crc kubenswrapper[5050]: I1123 16:06:32.185351 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="36d91410-991a-481b-8e51-48552449c184" containerName="mariadb-client" Nov 23 16:06:32 crc kubenswrapper[5050]: I1123 16:06:32.185616 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="36d91410-991a-481b-8e51-48552449c184" containerName="mariadb-client" Nov 23 16:06:32 crc kubenswrapper[5050]: I1123 16:06:32.186540 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client" Nov 23 16:06:32 crc kubenswrapper[5050]: I1123 16:06:32.200546 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client"] Nov 23 16:06:32 crc kubenswrapper[5050]: I1123 16:06:32.275595 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8pj4k\" (UniqueName: \"kubernetes.io/projected/131c54d7-a256-4268-8592-c935dbee69d0-kube-api-access-8pj4k\") pod \"mariadb-client\" (UID: \"131c54d7-a256-4268-8592-c935dbee69d0\") " pod="openstack/mariadb-client" Nov 23 16:06:32 crc kubenswrapper[5050]: I1123 16:06:32.377963 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8pj4k\" (UniqueName: \"kubernetes.io/projected/131c54d7-a256-4268-8592-c935dbee69d0-kube-api-access-8pj4k\") pod \"mariadb-client\" (UID: \"131c54d7-a256-4268-8592-c935dbee69d0\") " pod="openstack/mariadb-client" Nov 23 16:06:32 crc kubenswrapper[5050]: I1123 16:06:32.404801 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8pj4k\" (UniqueName: \"kubernetes.io/projected/131c54d7-a256-4268-8592-c935dbee69d0-kube-api-access-8pj4k\") pod \"mariadb-client\" (UID: \"131c54d7-a256-4268-8592-c935dbee69d0\") " pod="openstack/mariadb-client" Nov 23 16:06:32 crc kubenswrapper[5050]: I1123 16:06:32.522049 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client" Nov 23 16:06:32 crc kubenswrapper[5050]: I1123 16:06:32.584807 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="12163299ac45bd807aee22fa2bbb28a3d49182e54017d30a9ef5b571f3f5204d" Nov 23 16:06:32 crc kubenswrapper[5050]: I1123 16:06:32.584943 5050 util.go:48] "No ready sandbox for pod can be found. 
Nov 23 16:06:32 crc kubenswrapper[5050]: I1123 16:06:32.615231 5050 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/mariadb-client" oldPodUID="36d91410-991a-481b-8e51-48552449c184" podUID="131c54d7-a256-4268-8592-c935dbee69d0"
Nov 23 16:06:32 crc kubenswrapper[5050]: I1123 16:06:32.801125 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client"]
Nov 23 16:06:32 crc kubenswrapper[5050]: W1123 16:06:32.809996 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod131c54d7_a256_4268_8592_c935dbee69d0.slice/crio-8e9616ab4a5ac0a350d8c130f1d251b3c81d76cead9c21217435db23bf2aeee2 WatchSource:0}: Error finding container 8e9616ab4a5ac0a350d8c130f1d251b3c81d76cead9c21217435db23bf2aeee2: Status 404 returned error can't find the container with id 8e9616ab4a5ac0a350d8c130f1d251b3c81d76cead9c21217435db23bf2aeee2
Nov 23 16:06:33 crc kubenswrapper[5050]: I1123 16:06:33.567109 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="36d91410-991a-481b-8e51-48552449c184" path="/var/lib/kubelet/pods/36d91410-991a-481b-8e51-48552449c184/volumes"
Nov 23 16:06:33 crc kubenswrapper[5050]: I1123 16:06:33.599393 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client" event={"ID":"131c54d7-a256-4268-8592-c935dbee69d0","Type":"ContainerStarted","Data":"d3458d224bbbc0fc13beedd14a1d143aa5979480bbaa400bb95019b33647770a"}
Nov 23 16:06:33 crc kubenswrapper[5050]: I1123 16:06:33.599530 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client" event={"ID":"131c54d7-a256-4268-8592-c935dbee69d0","Type":"ContainerStarted","Data":"8e9616ab4a5ac0a350d8c130f1d251b3c81d76cead9c21217435db23bf2aeee2"}
Nov 23 16:06:33 crc kubenswrapper[5050]: I1123 16:06:33.633615 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/mariadb-client" podStartSLOduration=1.63357116 podStartE2EDuration="1.63357116s" podCreationTimestamp="2025-11-23 16:06:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 16:06:33.62258606 +0000 UTC m=+5088.789582585" watchObservedRunningTime="2025-11-23 16:06:33.63357116 +0000 UTC m=+5088.800567695"
Nov 23 16:06:34 crc kubenswrapper[5050]: I1123 16:06:34.618547 5050 generic.go:334] "Generic (PLEG): container finished" podID="131c54d7-a256-4268-8592-c935dbee69d0" containerID="d3458d224bbbc0fc13beedd14a1d143aa5979480bbaa400bb95019b33647770a" exitCode=0
Nov 23 16:06:34 crc kubenswrapper[5050]: I1123 16:06:34.618653 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client" event={"ID":"131c54d7-a256-4268-8592-c935dbee69d0","Type":"ContainerDied","Data":"d3458d224bbbc0fc13beedd14a1d143aa5979480bbaa400bb95019b33647770a"}
Nov 23 16:06:36 crc kubenswrapper[5050]: I1123 16:06:36.032085 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client"
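The cadvisor watch-event warning above names the pod's systemd cgroup slice; comparing it with the podUID in the neighboring entries shows the mapping: dashes in the UID become underscores inside kubepods-<qos>-pod<uid>.slice, with the crio-<containerID> scope nested beneath. A sketch of that mapping:

package main

import (
	"fmt"
	"strings"
)

// systemdPodSlice rebuilds the cgroup slice name seen in the watch-event
// warning above: pod UID dashes become underscores inside the slice name.
func systemdPodSlice(qosClass, podUID string) string {
	return fmt.Sprintf("kubepods-%s-pod%s.slice", qosClass, strings.ReplaceAll(podUID, "-", "_"))
}

func main() {
	// Matches kubepods-besteffort-pod131c54d7_a256_4268_8592_c935dbee69d0.slice
	fmt.Println(systemdPodSlice("besteffort", "131c54d7-a256-4268-8592-c935dbee69d0"))
}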
Nov 23 16:06:36 crc kubenswrapper[5050]: I1123 16:06:36.090023 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-client"]
Nov 23 16:06:36 crc kubenswrapper[5050]: I1123 16:06:36.100612 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mariadb-client"]
Nov 23 16:06:36 crc kubenswrapper[5050]: I1123 16:06:36.168615 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8pj4k\" (UniqueName: \"kubernetes.io/projected/131c54d7-a256-4268-8592-c935dbee69d0-kube-api-access-8pj4k\") pod \"131c54d7-a256-4268-8592-c935dbee69d0\" (UID: \"131c54d7-a256-4268-8592-c935dbee69d0\") "
Nov 23 16:06:36 crc kubenswrapper[5050]: I1123 16:06:36.175055 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/131c54d7-a256-4268-8592-c935dbee69d0-kube-api-access-8pj4k" (OuterVolumeSpecName: "kube-api-access-8pj4k") pod "131c54d7-a256-4268-8592-c935dbee69d0" (UID: "131c54d7-a256-4268-8592-c935dbee69d0"). InnerVolumeSpecName "kube-api-access-8pj4k". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 23 16:06:36 crc kubenswrapper[5050]: I1123 16:06:36.270267 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8pj4k\" (UniqueName: \"kubernetes.io/projected/131c54d7-a256-4268-8592-c935dbee69d0-kube-api-access-8pj4k\") on node \"crc\" DevicePath \"\""
Nov 23 16:06:36 crc kubenswrapper[5050]: I1123 16:06:36.640760 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8e9616ab4a5ac0a350d8c130f1d251b3c81d76cead9c21217435db23bf2aeee2"
Nov 23 16:06:36 crc kubenswrapper[5050]: I1123 16:06:36.641173 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client"
Nov 23 16:06:37 crc kubenswrapper[5050]: I1123 16:06:37.549355 5050 scope.go:117] "RemoveContainer" containerID="3fc70d33f56eea45a48b3911893d3dc4f60ad925817000cb324b1423c665b64b"
Nov 23 16:06:37 crc kubenswrapper[5050]: E1123 16:06:37.550938 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f"
Nov 23 16:06:37 crc kubenswrapper[5050]: I1123 16:06:37.564733 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="131c54d7-a256-4268-8592-c935dbee69d0" path="/var/lib/kubelet/pods/131c54d7-a256-4268-8592-c935dbee69d0/volumes"
Nov 23 16:06:52 crc kubenswrapper[5050]: I1123 16:06:52.549174 5050 scope.go:117] "RemoveContainer" containerID="3fc70d33f56eea45a48b3911893d3dc4f60ad925817000cb324b1423c665b64b"
Nov 23 16:06:52 crc kubenswrapper[5050]: E1123 16:06:52.550418 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f"
Nov 23 16:07:07 crc kubenswrapper[5050]: I1123 16:07:07.548997 5050 scope.go:117] "RemoveContainer" containerID="3fc70d33f56eea45a48b3911893d3dc4f60ad925817000cb324b1423c665b64b"
Nov 23 16:07:07 crc kubenswrapper[5050]: E1123 16:07:07.550735 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f"
Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.096700 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-nb-0"]
Nov 23 16:07:11 crc kubenswrapper[5050]: E1123 16:07:11.098305 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="131c54d7-a256-4268-8592-c935dbee69d0" containerName="mariadb-client"
Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.098334 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="131c54d7-a256-4268-8592-c935dbee69d0" containerName="mariadb-client"
Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.098735 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="131c54d7-a256-4268-8592-c935dbee69d0" containerName="mariadb-client"
Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.100534 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0"
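The machine-config-daemon entries above recur because the pod worker re-syncs while the container sits in CrashLoopBackOff; each attempt is refused with the same "back-off 5m0s" error, meaning the backoff has already grown to its ceiling. A sketch of the escalation, assuming the kubelet defaults of a 10s initial delay doubling up to a 5m cap:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Assumed kubelet defaults: restart backoff starts at 10s, doubles per
	// crash, and is capped at 5m (the "back-off 5m0s" seen above).
	d := 10 * time.Second
	const maxDelay = 5 * time.Minute
	for i := 1; ; i++ {
		fmt.Printf("crash %d: wait %v\n", i, d)
		if d >= maxDelay {
			break
		}
		if d *= 2; d > maxDelay {
			d = maxDelay
		}
	}
}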
Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.104945 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"]
Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.106565 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-nb-dockercfg-rvd4f"
Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.111114 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-scripts"
Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.113095 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-config"
Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.118174 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-nb-2"]
Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.121094 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-2"
Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.130184 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-nb-1"]
Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.132680 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-1"
Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.148247 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-2"]
Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.178951 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-1"]
Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.288503 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-sb-0"]
Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.290621 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0"
Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.297127 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/bafc2ad6-91da-4cd1-b4e3-4618db95cc11-ovsdb-rundir\") pod \"ovsdbserver-nb-2\" (UID: \"bafc2ad6-91da-4cd1-b4e3-4618db95cc11\") " pod="openstack/ovsdbserver-nb-2"
Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.297215 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-49d3fe87-dfed-4821-9f0c-9d9a76324976\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-49d3fe87-dfed-4821-9f0c-9d9a76324976\") pod \"ovsdbserver-nb-0\" (UID: \"e42bfb74-79c5-4710-8d55-33da1c765d8c\") " pod="openstack/ovsdbserver-nb-0"
Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.297280 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/bafc2ad6-91da-4cd1-b4e3-4618db95cc11-scripts\") pod \"ovsdbserver-nb-2\" (UID: \"bafc2ad6-91da-4cd1-b4e3-4618db95cc11\") " pod="openstack/ovsdbserver-nb-2"
Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.297348 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-4e8aaea0-0f57-47bc-9967-89f394109631\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4e8aaea0-0f57-47bc-9967-89f394109631\") pod \"ovsdbserver-nb-2\" (UID: \"bafc2ad6-91da-4cd1-b4e3-4618db95cc11\") " pod="openstack/ovsdbserver-nb-2"
Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.297382 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/97bef3ec-2ce6-4a13-bbc4-d08d97618e29-combined-ca-bundle\") pod \"ovsdbserver-nb-1\" (UID: \"97bef3ec-2ce6-4a13-bbc4-d08d97618e29\") " pod="openstack/ovsdbserver-nb-1"
Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.297435 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/97bef3ec-2ce6-4a13-bbc4-d08d97618e29-ovsdb-rundir\") pod \"ovsdbserver-nb-1\" (UID: \"97bef3ec-2ce6-4a13-bbc4-d08d97618e29\") " pod="openstack/ovsdbserver-nb-1"
Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.297508 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sbfbv\" (UniqueName: \"kubernetes.io/projected/97bef3ec-2ce6-4a13-bbc4-d08d97618e29-kube-api-access-sbfbv\") pod \"ovsdbserver-nb-1\" (UID: \"97bef3ec-2ce6-4a13-bbc4-d08d97618e29\") " pod="openstack/ovsdbserver-nb-1"
Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.297565 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e42bfb74-79c5-4710-8d55-33da1c765d8c-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"e42bfb74-79c5-4710-8d55-33da1c765d8c\") " pod="openstack/ovsdbserver-nb-0"
Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.297599 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fc5wz\" (UniqueName: \"kubernetes.io/projected/bafc2ad6-91da-4cd1-b4e3-4618db95cc11-kube-api-access-fc5wz\") pod \"ovsdbserver-nb-2\" (UID: \"bafc2ad6-91da-4cd1-b4e3-4618db95cc11\") " pod="openstack/ovsdbserver-nb-2"
\"bafc2ad6-91da-4cd1-b4e3-4618db95cc11\") " pod="openstack/ovsdbserver-nb-2" Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.297648 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/97bef3ec-2ce6-4a13-bbc4-d08d97618e29-config\") pod \"ovsdbserver-nb-1\" (UID: \"97bef3ec-2ce6-4a13-bbc4-d08d97618e29\") " pod="openstack/ovsdbserver-nb-1" Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.297687 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e42bfb74-79c5-4710-8d55-33da1c765d8c-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"e42bfb74-79c5-4710-8d55-33da1c765d8c\") " pod="openstack/ovsdbserver-nb-0" Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.297721 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tlz67\" (UniqueName: \"kubernetes.io/projected/e42bfb74-79c5-4710-8d55-33da1c765d8c-kube-api-access-tlz67\") pod \"ovsdbserver-nb-0\" (UID: \"e42bfb74-79c5-4710-8d55-33da1c765d8c\") " pod="openstack/ovsdbserver-nb-0" Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.297778 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bafc2ad6-91da-4cd1-b4e3-4618db95cc11-combined-ca-bundle\") pod \"ovsdbserver-nb-2\" (UID: \"bafc2ad6-91da-4cd1-b4e3-4618db95cc11\") " pod="openstack/ovsdbserver-nb-2" Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.297810 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/97bef3ec-2ce6-4a13-bbc4-d08d97618e29-scripts\") pod \"ovsdbserver-nb-1\" (UID: \"97bef3ec-2ce6-4a13-bbc4-d08d97618e29\") " pod="openstack/ovsdbserver-nb-1" Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.297856 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e42bfb74-79c5-4710-8d55-33da1c765d8c-config\") pod \"ovsdbserver-nb-0\" (UID: \"e42bfb74-79c5-4710-8d55-33da1c765d8c\") " pod="openstack/ovsdbserver-nb-0" Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.297886 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/e42bfb74-79c5-4710-8d55-33da1c765d8c-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"e42bfb74-79c5-4710-8d55-33da1c765d8c\") " pod="openstack/ovsdbserver-nb-0" Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.297988 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bafc2ad6-91da-4cd1-b4e3-4618db95cc11-config\") pod \"ovsdbserver-nb-2\" (UID: \"bafc2ad6-91da-4cd1-b4e3-4618db95cc11\") " pod="openstack/ovsdbserver-nb-2" Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.298043 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-44d7a7a6-a121-4578-8c4b-f327de39c9b8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-44d7a7a6-a121-4578-8c4b-f327de39c9b8\") pod \"ovsdbserver-nb-1\" (UID: \"97bef3ec-2ce6-4a13-bbc4-d08d97618e29\") " pod="openstack/ovsdbserver-nb-1" Nov 23 16:07:11 crc kubenswrapper[5050]: 
Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.300591 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-scripts"
Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.307651 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-config"
Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.307925 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"]
Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.317525 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-sb-1"]
Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.320039 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-1"
Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.331836 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-sb-2"]
Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.333636 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-2"
Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.342545 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-1"]
Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.365567 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-2"]
Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.400322 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-8597c042-3c74-4a1c-affb-baa50391380f\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-8597c042-3c74-4a1c-affb-baa50391380f\") pod \"ovsdbserver-sb-0\" (UID: \"63882070-14a9-44ab-9298-11a518e0ee25\") " pod="openstack/ovsdbserver-sb-0"
Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.400423 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-487xm\" (UniqueName: \"kubernetes.io/projected/63882070-14a9-44ab-9298-11a518e0ee25-kube-api-access-487xm\") pod \"ovsdbserver-sb-0\" (UID: \"63882070-14a9-44ab-9298-11a518e0ee25\") " pod="openstack/ovsdbserver-sb-0"
Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.400478 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bafc2ad6-91da-4cd1-b4e3-4618db95cc11-config\") pod \"ovsdbserver-nb-2\" (UID: \"bafc2ad6-91da-4cd1-b4e3-4618db95cc11\") " pod="openstack/ovsdbserver-nb-2"
Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.400508 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/24e76718-a290-4dc2-a5eb-5a0c156ddf42-ovsdb-rundir\") pod \"ovsdbserver-sb-1\" (UID: \"24e76718-a290-4dc2-a5eb-5a0c156ddf42\") " pod="openstack/ovsdbserver-sb-1"
Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.400534 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-44d7a7a6-a121-4578-8c4b-f327de39c9b8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-44d7a7a6-a121-4578-8c4b-f327de39c9b8\") pod \"ovsdbserver-nb-1\" (UID: \"97bef3ec-2ce6-4a13-bbc4-d08d97618e29\") " pod="openstack/ovsdbserver-nb-1"
Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.400572 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/bafc2ad6-91da-4cd1-b4e3-4618db95cc11-ovsdb-rundir\") pod \"ovsdbserver-nb-2\" (UID: \"bafc2ad6-91da-4cd1-b4e3-4618db95cc11\") " pod="openstack/ovsdbserver-nb-2"
Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.400601 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-49d3fe87-dfed-4821-9f0c-9d9a76324976\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-49d3fe87-dfed-4821-9f0c-9d9a76324976\") pod \"ovsdbserver-nb-0\" (UID: \"e42bfb74-79c5-4710-8d55-33da1c765d8c\") " pod="openstack/ovsdbserver-nb-0"
Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.400625 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-7f44e105-163c-43c2-8465-6c2b7c10f3d4\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7f44e105-163c-43c2-8465-6c2b7c10f3d4\") pod \"ovsdbserver-sb-1\" (UID: \"24e76718-a290-4dc2-a5eb-5a0c156ddf42\") " pod="openstack/ovsdbserver-sb-1"
Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.400647 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/bafc2ad6-91da-4cd1-b4e3-4618db95cc11-scripts\") pod \"ovsdbserver-nb-2\" (UID: \"bafc2ad6-91da-4cd1-b4e3-4618db95cc11\") " pod="openstack/ovsdbserver-nb-2"
Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.400671 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/24e76718-a290-4dc2-a5eb-5a0c156ddf42-config\") pod \"ovsdbserver-sb-1\" (UID: \"24e76718-a290-4dc2-a5eb-5a0c156ddf42\") " pod="openstack/ovsdbserver-sb-1"
Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.400714 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-4e8aaea0-0f57-47bc-9967-89f394109631\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4e8aaea0-0f57-47bc-9967-89f394109631\") pod \"ovsdbserver-nb-2\" (UID: \"bafc2ad6-91da-4cd1-b4e3-4618db95cc11\") " pod="openstack/ovsdbserver-nb-2"
Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.400738 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/97bef3ec-2ce6-4a13-bbc4-d08d97618e29-combined-ca-bundle\") pod \"ovsdbserver-nb-1\" (UID: \"97bef3ec-2ce6-4a13-bbc4-d08d97618e29\") " pod="openstack/ovsdbserver-nb-1"
Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.400765 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/24e76718-a290-4dc2-a5eb-5a0c156ddf42-scripts\") pod \"ovsdbserver-sb-1\" (UID: \"24e76718-a290-4dc2-a5eb-5a0c156ddf42\") " pod="openstack/ovsdbserver-sb-1"
Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.400792 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/97bef3ec-2ce6-4a13-bbc4-d08d97618e29-ovsdb-rundir\") pod \"ovsdbserver-nb-1\" (UID: \"97bef3ec-2ce6-4a13-bbc4-d08d97618e29\") " pod="openstack/ovsdbserver-nb-1"
Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.400816 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sbfbv\" (UniqueName: \"kubernetes.io/projected/97bef3ec-2ce6-4a13-bbc4-d08d97618e29-kube-api-access-sbfbv\") pod \"ovsdbserver-nb-1\" (UID: \"97bef3ec-2ce6-4a13-bbc4-d08d97618e29\") " pod="openstack/ovsdbserver-nb-1"
volume \"kube-api-access-sbfbv\" (UniqueName: \"kubernetes.io/projected/97bef3ec-2ce6-4a13-bbc4-d08d97618e29-kube-api-access-sbfbv\") pod \"ovsdbserver-nb-1\" (UID: \"97bef3ec-2ce6-4a13-bbc4-d08d97618e29\") " pod="openstack/ovsdbserver-nb-1" Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.400840 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/63882070-14a9-44ab-9298-11a518e0ee25-config\") pod \"ovsdbserver-sb-0\" (UID: \"63882070-14a9-44ab-9298-11a518e0ee25\") " pod="openstack/ovsdbserver-sb-0" Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.400864 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e42bfb74-79c5-4710-8d55-33da1c765d8c-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"e42bfb74-79c5-4710-8d55-33da1c765d8c\") " pod="openstack/ovsdbserver-nb-0" Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.400882 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/63882070-14a9-44ab-9298-11a518e0ee25-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"63882070-14a9-44ab-9298-11a518e0ee25\") " pod="openstack/ovsdbserver-sb-0" Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.400904 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fc5wz\" (UniqueName: \"kubernetes.io/projected/bafc2ad6-91da-4cd1-b4e3-4618db95cc11-kube-api-access-fc5wz\") pod \"ovsdbserver-nb-2\" (UID: \"bafc2ad6-91da-4cd1-b4e3-4618db95cc11\") " pod="openstack/ovsdbserver-nb-2" Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.400929 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/24e76718-a290-4dc2-a5eb-5a0c156ddf42-combined-ca-bundle\") pod \"ovsdbserver-sb-1\" (UID: \"24e76718-a290-4dc2-a5eb-5a0c156ddf42\") " pod="openstack/ovsdbserver-sb-1" Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.400953 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tzzxc\" (UniqueName: \"kubernetes.io/projected/24e76718-a290-4dc2-a5eb-5a0c156ddf42-kube-api-access-tzzxc\") pod \"ovsdbserver-sb-1\" (UID: \"24e76718-a290-4dc2-a5eb-5a0c156ddf42\") " pod="openstack/ovsdbserver-sb-1" Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.400975 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/97bef3ec-2ce6-4a13-bbc4-d08d97618e29-config\") pod \"ovsdbserver-nb-1\" (UID: \"97bef3ec-2ce6-4a13-bbc4-d08d97618e29\") " pod="openstack/ovsdbserver-nb-1" Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.400997 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e42bfb74-79c5-4710-8d55-33da1c765d8c-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"e42bfb74-79c5-4710-8d55-33da1c765d8c\") " pod="openstack/ovsdbserver-nb-0" Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.401014 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/63882070-14a9-44ab-9298-11a518e0ee25-scripts\") pod \"ovsdbserver-sb-0\" (UID: 
\"63882070-14a9-44ab-9298-11a518e0ee25\") " pod="openstack/ovsdbserver-sb-0" Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.401037 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tlz67\" (UniqueName: \"kubernetes.io/projected/e42bfb74-79c5-4710-8d55-33da1c765d8c-kube-api-access-tlz67\") pod \"ovsdbserver-nb-0\" (UID: \"e42bfb74-79c5-4710-8d55-33da1c765d8c\") " pod="openstack/ovsdbserver-nb-0" Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.401066 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/63882070-14a9-44ab-9298-11a518e0ee25-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"63882070-14a9-44ab-9298-11a518e0ee25\") " pod="openstack/ovsdbserver-sb-0" Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.401086 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bafc2ad6-91da-4cd1-b4e3-4618db95cc11-combined-ca-bundle\") pod \"ovsdbserver-nb-2\" (UID: \"bafc2ad6-91da-4cd1-b4e3-4618db95cc11\") " pod="openstack/ovsdbserver-nb-2" Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.401106 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/97bef3ec-2ce6-4a13-bbc4-d08d97618e29-scripts\") pod \"ovsdbserver-nb-1\" (UID: \"97bef3ec-2ce6-4a13-bbc4-d08d97618e29\") " pod="openstack/ovsdbserver-nb-1" Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.401131 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e42bfb74-79c5-4710-8d55-33da1c765d8c-config\") pod \"ovsdbserver-nb-0\" (UID: \"e42bfb74-79c5-4710-8d55-33da1c765d8c\") " pod="openstack/ovsdbserver-nb-0" Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.401149 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/e42bfb74-79c5-4710-8d55-33da1c765d8c-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"e42bfb74-79c5-4710-8d55-33da1c765d8c\") " pod="openstack/ovsdbserver-nb-0" Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.401815 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/e42bfb74-79c5-4710-8d55-33da1c765d8c-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"e42bfb74-79c5-4710-8d55-33da1c765d8c\") " pod="openstack/ovsdbserver-nb-0" Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.401817 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/97bef3ec-2ce6-4a13-bbc4-d08d97618e29-ovsdb-rundir\") pod \"ovsdbserver-nb-1\" (UID: \"97bef3ec-2ce6-4a13-bbc4-d08d97618e29\") " pod="openstack/ovsdbserver-nb-1" Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.402729 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bafc2ad6-91da-4cd1-b4e3-4618db95cc11-config\") pod \"ovsdbserver-nb-2\" (UID: \"bafc2ad6-91da-4cd1-b4e3-4618db95cc11\") " pod="openstack/ovsdbserver-nb-2" Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.402739 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: 
\"kubernetes.io/empty-dir/bafc2ad6-91da-4cd1-b4e3-4618db95cc11-ovsdb-rundir\") pod \"ovsdbserver-nb-2\" (UID: \"bafc2ad6-91da-4cd1-b4e3-4618db95cc11\") " pod="openstack/ovsdbserver-nb-2" Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.402995 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e42bfb74-79c5-4710-8d55-33da1c765d8c-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"e42bfb74-79c5-4710-8d55-33da1c765d8c\") " pod="openstack/ovsdbserver-nb-0" Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.403167 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e42bfb74-79c5-4710-8d55-33da1c765d8c-config\") pod \"ovsdbserver-nb-0\" (UID: \"e42bfb74-79c5-4710-8d55-33da1c765d8c\") " pod="openstack/ovsdbserver-nb-0" Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.403556 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/97bef3ec-2ce6-4a13-bbc4-d08d97618e29-scripts\") pod \"ovsdbserver-nb-1\" (UID: \"97bef3ec-2ce6-4a13-bbc4-d08d97618e29\") " pod="openstack/ovsdbserver-nb-1" Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.404493 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/97bef3ec-2ce6-4a13-bbc4-d08d97618e29-config\") pod \"ovsdbserver-nb-1\" (UID: \"97bef3ec-2ce6-4a13-bbc4-d08d97618e29\") " pod="openstack/ovsdbserver-nb-1" Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.406983 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/bafc2ad6-91da-4cd1-b4e3-4618db95cc11-scripts\") pod \"ovsdbserver-nb-2\" (UID: \"bafc2ad6-91da-4cd1-b4e3-4618db95cc11\") " pod="openstack/ovsdbserver-nb-2" Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.411090 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/97bef3ec-2ce6-4a13-bbc4-d08d97618e29-combined-ca-bundle\") pod \"ovsdbserver-nb-1\" (UID: \"97bef3ec-2ce6-4a13-bbc4-d08d97618e29\") " pod="openstack/ovsdbserver-nb-1" Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.414016 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e42bfb74-79c5-4710-8d55-33da1c765d8c-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"e42bfb74-79c5-4710-8d55-33da1c765d8c\") " pod="openstack/ovsdbserver-nb-0" Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.416718 5050 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.416823 5050 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-49d3fe87-dfed-4821-9f0c-9d9a76324976\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-49d3fe87-dfed-4821-9f0c-9d9a76324976\") pod \"ovsdbserver-nb-0\" (UID: \"e42bfb74-79c5-4710-8d55-33da1c765d8c\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/152ee254e21c09473662dc9358077d9aa9103d4b7ea1cfa3e3d360db221dd352/globalmount\"" pod="openstack/ovsdbserver-nb-0"
Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.416902 5050 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.416943 5050 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-44d7a7a6-a121-4578-8c4b-f327de39c9b8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-44d7a7a6-a121-4578-8c4b-f327de39c9b8\") pod \"ovsdbserver-nb-1\" (UID: \"97bef3ec-2ce6-4a13-bbc4-d08d97618e29\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/3f0627f1d3178baf9ea0971b117cb5a6a1f82daba7e6f080bf0bab5ec673df82/globalmount\"" pod="openstack/ovsdbserver-nb-1"
Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.416953 5050 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.416965 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bafc2ad6-91da-4cd1-b4e3-4618db95cc11-combined-ca-bundle\") pod \"ovsdbserver-nb-2\" (UID: \"bafc2ad6-91da-4cd1-b4e3-4618db95cc11\") " pod="openstack/ovsdbserver-nb-2"
Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.416986 5050 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-4e8aaea0-0f57-47bc-9967-89f394109631\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4e8aaea0-0f57-47bc-9967-89f394109631\") pod \"ovsdbserver-nb-2\" (UID: \"bafc2ad6-91da-4cd1-b4e3-4618db95cc11\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/e5effefdde6add088104dfae54058785029e4b01ad3eadb1c586bd6cb959aa88/globalmount\"" pod="openstack/ovsdbserver-nb-2"
Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.418630 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sbfbv\" (UniqueName: \"kubernetes.io/projected/97bef3ec-2ce6-4a13-bbc4-d08d97618e29-kube-api-access-sbfbv\") pod \"ovsdbserver-nb-1\" (UID: \"97bef3ec-2ce6-4a13-bbc4-d08d97618e29\") " pod="openstack/ovsdbserver-nb-1"
Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.422371 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fc5wz\" (UniqueName: \"kubernetes.io/projected/bafc2ad6-91da-4cd1-b4e3-4618db95cc11-kube-api-access-fc5wz\") pod \"ovsdbserver-nb-2\" (UID: \"bafc2ad6-91da-4cd1-b4e3-4618db95cc11\") " pod="openstack/ovsdbserver-nb-2"
Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.425483 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tlz67\" (UniqueName: \"kubernetes.io/projected/e42bfb74-79c5-4710-8d55-33da1c765d8c-kube-api-access-tlz67\") pod \"ovsdbserver-nb-0\" (UID: \"e42bfb74-79c5-4710-8d55-33da1c765d8c\") " pod="openstack/ovsdbserver-nb-0"
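The per-volume staging directories above (152ee254…, 3f0627f1…, e5effefd…) sit under plugins/kubernetes.io/csi/<driver>/<hash>/globalmount. One plausible reading, stated here as an assumption rather than something this log records, is that the hash is the SHA-256 of the CSI volume handle; if that holds for this kubelet, the sketch below reproduces the ovsdbserver-nb-0 path:

package main

import (
	"crypto/sha256"
	"fmt"
)

func main() {
	// Assumption: the staging dir name is sha256(volumeHandle). If so, this
	// prints the 152ee254... globalmount path logged above for ovsdbserver-nb-0.
	handle := "pvc-49d3fe87-dfed-4821-9f0c-9d9a76324976"
	sum := sha256.Sum256([]byte(handle))
	fmt.Printf("/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/%x/globalmount\n", sum)
}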
\"e42bfb74-79c5-4710-8d55-33da1c765d8c\") " pod="openstack/ovsdbserver-nb-0" Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.444490 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-4e8aaea0-0f57-47bc-9967-89f394109631\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4e8aaea0-0f57-47bc-9967-89f394109631\") pod \"ovsdbserver-nb-2\" (UID: \"bafc2ad6-91da-4cd1-b4e3-4618db95cc11\") " pod="openstack/ovsdbserver-nb-2" Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.445853 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-44d7a7a6-a121-4578-8c4b-f327de39c9b8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-44d7a7a6-a121-4578-8c4b-f327de39c9b8\") pod \"ovsdbserver-nb-1\" (UID: \"97bef3ec-2ce6-4a13-bbc4-d08d97618e29\") " pod="openstack/ovsdbserver-nb-1" Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.452620 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-49d3fe87-dfed-4821-9f0c-9d9a76324976\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-49d3fe87-dfed-4821-9f0c-9d9a76324976\") pod \"ovsdbserver-nb-0\" (UID: \"e42bfb74-79c5-4710-8d55-33da1c765d8c\") " pod="openstack/ovsdbserver-nb-0" Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.469997 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-2" Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.480126 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-1" Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.503274 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-6d14ed83-46c0-4f72-90ad-1cf06a06ce90\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-6d14ed83-46c0-4f72-90ad-1cf06a06ce90\") pod \"ovsdbserver-sb-2\" (UID: \"5c40394e-5b93-4c3e-876f-d9cbb18b1bf2\") " pod="openstack/ovsdbserver-sb-2" Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.503347 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/5c40394e-5b93-4c3e-876f-d9cbb18b1bf2-ovsdb-rundir\") pod \"ovsdbserver-sb-2\" (UID: \"5c40394e-5b93-4c3e-876f-d9cbb18b1bf2\") " pod="openstack/ovsdbserver-sb-2" Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.503404 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/24e76718-a290-4dc2-a5eb-5a0c156ddf42-scripts\") pod \"ovsdbserver-sb-1\" (UID: \"24e76718-a290-4dc2-a5eb-5a0c156ddf42\") " pod="openstack/ovsdbserver-sb-1" Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.503436 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/63882070-14a9-44ab-9298-11a518e0ee25-config\") pod \"ovsdbserver-sb-0\" (UID: \"63882070-14a9-44ab-9298-11a518e0ee25\") " pod="openstack/ovsdbserver-sb-0" Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.503476 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/63882070-14a9-44ab-9298-11a518e0ee25-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"63882070-14a9-44ab-9298-11a518e0ee25\") " pod="openstack/ovsdbserver-sb-0" Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 
Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.503523 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/24e76718-a290-4dc2-a5eb-5a0c156ddf42-combined-ca-bundle\") pod \"ovsdbserver-sb-1\" (UID: \"24e76718-a290-4dc2-a5eb-5a0c156ddf42\") " pod="openstack/ovsdbserver-sb-1"
Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.503540 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tzzxc\" (UniqueName: \"kubernetes.io/projected/24e76718-a290-4dc2-a5eb-5a0c156ddf42-kube-api-access-tzzxc\") pod \"ovsdbserver-sb-1\" (UID: \"24e76718-a290-4dc2-a5eb-5a0c156ddf42\") " pod="openstack/ovsdbserver-sb-1"
Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.503561 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/63882070-14a9-44ab-9298-11a518e0ee25-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"63882070-14a9-44ab-9298-11a518e0ee25\") " pod="openstack/ovsdbserver-sb-0"
Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.503580 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5c40394e-5b93-4c3e-876f-d9cbb18b1bf2-combined-ca-bundle\") pod \"ovsdbserver-sb-2\" (UID: \"5c40394e-5b93-4c3e-876f-d9cbb18b1bf2\") " pod="openstack/ovsdbserver-sb-2"
Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.503599 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/63882070-14a9-44ab-9298-11a518e0ee25-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"63882070-14a9-44ab-9298-11a518e0ee25\") " pod="openstack/ovsdbserver-sb-0"
Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.503625 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5c40394e-5b93-4c3e-876f-d9cbb18b1bf2-scripts\") pod \"ovsdbserver-sb-2\" (UID: \"5c40394e-5b93-4c3e-876f-d9cbb18b1bf2\") " pod="openstack/ovsdbserver-sb-2"
Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.503652 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-8597c042-3c74-4a1c-affb-baa50391380f\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-8597c042-3c74-4a1c-affb-baa50391380f\") pod \"ovsdbserver-sb-0\" (UID: \"63882070-14a9-44ab-9298-11a518e0ee25\") " pod="openstack/ovsdbserver-sb-0"
Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.503677 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5c40394e-5b93-4c3e-876f-d9cbb18b1bf2-config\") pod \"ovsdbserver-sb-2\" (UID: \"5c40394e-5b93-4c3e-876f-d9cbb18b1bf2\") " pod="openstack/ovsdbserver-sb-2"
Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.503708 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-487xm\" (UniqueName: \"kubernetes.io/projected/63882070-14a9-44ab-9298-11a518e0ee25-kube-api-access-487xm\") pod \"ovsdbserver-sb-0\" (UID: \"63882070-14a9-44ab-9298-11a518e0ee25\") " pod="openstack/ovsdbserver-sb-0"
\"kubernetes.io/projected/63882070-14a9-44ab-9298-11a518e0ee25-kube-api-access-487xm\") pod \"ovsdbserver-sb-0\" (UID: \"63882070-14a9-44ab-9298-11a518e0ee25\") " pod="openstack/ovsdbserver-sb-0" Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.503734 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/24e76718-a290-4dc2-a5eb-5a0c156ddf42-ovsdb-rundir\") pod \"ovsdbserver-sb-1\" (UID: \"24e76718-a290-4dc2-a5eb-5a0c156ddf42\") " pod="openstack/ovsdbserver-sb-1" Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.503768 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-7f44e105-163c-43c2-8465-6c2b7c10f3d4\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7f44e105-163c-43c2-8465-6c2b7c10f3d4\") pod \"ovsdbserver-sb-1\" (UID: \"24e76718-a290-4dc2-a5eb-5a0c156ddf42\") " pod="openstack/ovsdbserver-sb-1" Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.503796 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/24e76718-a290-4dc2-a5eb-5a0c156ddf42-config\") pod \"ovsdbserver-sb-1\" (UID: \"24e76718-a290-4dc2-a5eb-5a0c156ddf42\") " pod="openstack/ovsdbserver-sb-1" Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.504685 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/24e76718-a290-4dc2-a5eb-5a0c156ddf42-config\") pod \"ovsdbserver-sb-1\" (UID: \"24e76718-a290-4dc2-a5eb-5a0c156ddf42\") " pod="openstack/ovsdbserver-sb-1" Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.505746 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/24e76718-a290-4dc2-a5eb-5a0c156ddf42-scripts\") pod \"ovsdbserver-sb-1\" (UID: \"24e76718-a290-4dc2-a5eb-5a0c156ddf42\") " pod="openstack/ovsdbserver-sb-1" Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.506573 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/63882070-14a9-44ab-9298-11a518e0ee25-config\") pod \"ovsdbserver-sb-0\" (UID: \"63882070-14a9-44ab-9298-11a518e0ee25\") " pod="openstack/ovsdbserver-sb-0" Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.507268 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/63882070-14a9-44ab-9298-11a518e0ee25-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"63882070-14a9-44ab-9298-11a518e0ee25\") " pod="openstack/ovsdbserver-sb-0" Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.507664 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/24e76718-a290-4dc2-a5eb-5a0c156ddf42-ovsdb-rundir\") pod \"ovsdbserver-sb-1\" (UID: \"24e76718-a290-4dc2-a5eb-5a0c156ddf42\") " pod="openstack/ovsdbserver-sb-1" Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.511789 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/63882070-14a9-44ab-9298-11a518e0ee25-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"63882070-14a9-44ab-9298-11a518e0ee25\") " pod="openstack/ovsdbserver-sb-0" Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.512549 5050 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not 
Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.512657 5050 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-8597c042-3c74-4a1c-affb-baa50391380f\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-8597c042-3c74-4a1c-affb-baa50391380f\") pod \"ovsdbserver-sb-0\" (UID: \"63882070-14a9-44ab-9298-11a518e0ee25\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/de3d9c1c6fae8b79e260087a68339edf701e6d03444b8127b216852e6203bf24/globalmount\"" pod="openstack/ovsdbserver-sb-0"
Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.513517 5050 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.513686 5050 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-7f44e105-163c-43c2-8465-6c2b7c10f3d4\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7f44e105-163c-43c2-8465-6c2b7c10f3d4\") pod \"ovsdbserver-sb-1\" (UID: \"24e76718-a290-4dc2-a5eb-5a0c156ddf42\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/834a8c2f90219a184fe693a78f2a7a137439045c81b5faebb00b41ab3c394feb/globalmount\"" pod="openstack/ovsdbserver-sb-1"
Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.514661 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/63882070-14a9-44ab-9298-11a518e0ee25-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"63882070-14a9-44ab-9298-11a518e0ee25\") " pod="openstack/ovsdbserver-sb-0"
Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.526434 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/24e76718-a290-4dc2-a5eb-5a0c156ddf42-combined-ca-bundle\") pod \"ovsdbserver-sb-1\" (UID: \"24e76718-a290-4dc2-a5eb-5a0c156ddf42\") " pod="openstack/ovsdbserver-sb-1"
Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.528572 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-487xm\" (UniqueName: \"kubernetes.io/projected/63882070-14a9-44ab-9298-11a518e0ee25-kube-api-access-487xm\") pod \"ovsdbserver-sb-0\" (UID: \"63882070-14a9-44ab-9298-11a518e0ee25\") " pod="openstack/ovsdbserver-sb-0"
Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.529905 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tzzxc\" (UniqueName: \"kubernetes.io/projected/24e76718-a290-4dc2-a5eb-5a0c156ddf42-kube-api-access-tzzxc\") pod \"ovsdbserver-sb-1\" (UID: \"24e76718-a290-4dc2-a5eb-5a0c156ddf42\") " pod="openstack/ovsdbserver-sb-1"
Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.548053 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-8597c042-3c74-4a1c-affb-baa50391380f\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-8597c042-3c74-4a1c-affb-baa50391380f\") pod \"ovsdbserver-sb-0\" (UID: \"63882070-14a9-44ab-9298-11a518e0ee25\") " pod="openstack/ovsdbserver-sb-0"
Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.548872 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-7f44e105-163c-43c2-8465-6c2b7c10f3d4\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7f44e105-163c-43c2-8465-6c2b7c10f3d4\") pod \"ovsdbserver-sb-1\" (UID: \"24e76718-a290-4dc2-a5eb-5a0c156ddf42\") " pod="openstack/ovsdbserver-sb-1"
\"ovsdbserver-sb-1\" (UID: \"24e76718-a290-4dc2-a5eb-5a0c156ddf42\") " pod="openstack/ovsdbserver-sb-1" Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.606143 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-6d14ed83-46c0-4f72-90ad-1cf06a06ce90\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-6d14ed83-46c0-4f72-90ad-1cf06a06ce90\") pod \"ovsdbserver-sb-2\" (UID: \"5c40394e-5b93-4c3e-876f-d9cbb18b1bf2\") " pod="openstack/ovsdbserver-sb-2" Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.606219 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/5c40394e-5b93-4c3e-876f-d9cbb18b1bf2-ovsdb-rundir\") pod \"ovsdbserver-sb-2\" (UID: \"5c40394e-5b93-4c3e-876f-d9cbb18b1bf2\") " pod="openstack/ovsdbserver-sb-2" Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.606259 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ntss7\" (UniqueName: \"kubernetes.io/projected/5c40394e-5b93-4c3e-876f-d9cbb18b1bf2-kube-api-access-ntss7\") pod \"ovsdbserver-sb-2\" (UID: \"5c40394e-5b93-4c3e-876f-d9cbb18b1bf2\") " pod="openstack/ovsdbserver-sb-2" Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.606292 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5c40394e-5b93-4c3e-876f-d9cbb18b1bf2-combined-ca-bundle\") pod \"ovsdbserver-sb-2\" (UID: \"5c40394e-5b93-4c3e-876f-d9cbb18b1bf2\") " pod="openstack/ovsdbserver-sb-2" Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.606322 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5c40394e-5b93-4c3e-876f-d9cbb18b1bf2-scripts\") pod \"ovsdbserver-sb-2\" (UID: \"5c40394e-5b93-4c3e-876f-d9cbb18b1bf2\") " pod="openstack/ovsdbserver-sb-2" Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.606347 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5c40394e-5b93-4c3e-876f-d9cbb18b1bf2-config\") pod \"ovsdbserver-sb-2\" (UID: \"5c40394e-5b93-4c3e-876f-d9cbb18b1bf2\") " pod="openstack/ovsdbserver-sb-2" Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.608882 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/5c40394e-5b93-4c3e-876f-d9cbb18b1bf2-ovsdb-rundir\") pod \"ovsdbserver-sb-2\" (UID: \"5c40394e-5b93-4c3e-876f-d9cbb18b1bf2\") " pod="openstack/ovsdbserver-sb-2" Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.610581 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5c40394e-5b93-4c3e-876f-d9cbb18b1bf2-config\") pod \"ovsdbserver-sb-2\" (UID: \"5c40394e-5b93-4c3e-876f-d9cbb18b1bf2\") " pod="openstack/ovsdbserver-sb-2" Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.611413 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5c40394e-5b93-4c3e-876f-d9cbb18b1bf2-scripts\") pod \"ovsdbserver-sb-2\" (UID: \"5c40394e-5b93-4c3e-876f-d9cbb18b1bf2\") " pod="openstack/ovsdbserver-sb-2" Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.614925 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/5c40394e-5b93-4c3e-876f-d9cbb18b1bf2-combined-ca-bundle\") pod \"ovsdbserver-sb-2\" (UID: \"5c40394e-5b93-4c3e-876f-d9cbb18b1bf2\") " pod="openstack/ovsdbserver-sb-2" Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.615144 5050 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.615195 5050 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-6d14ed83-46c0-4f72-90ad-1cf06a06ce90\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-6d14ed83-46c0-4f72-90ad-1cf06a06ce90\") pod \"ovsdbserver-sb-2\" (UID: \"5c40394e-5b93-4c3e-876f-d9cbb18b1bf2\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/9c82fe88c55433d03fafc73d55f1d70be62c0864e7bbfb740156bc570fd6e04c/globalmount\"" pod="openstack/ovsdbserver-sb-2" Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.631184 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ntss7\" (UniqueName: \"kubernetes.io/projected/5c40394e-5b93-4c3e-876f-d9cbb18b1bf2-kube-api-access-ntss7\") pod \"ovsdbserver-sb-2\" (UID: \"5c40394e-5b93-4c3e-876f-d9cbb18b1bf2\") " pod="openstack/ovsdbserver-sb-2" Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.633053 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0" Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.655281 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-1" Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.665456 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-6d14ed83-46c0-4f72-90ad-1cf06a06ce90\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-6d14ed83-46c0-4f72-90ad-1cf06a06ce90\") pod \"ovsdbserver-sb-2\" (UID: \"5c40394e-5b93-4c3e-876f-d9cbb18b1bf2\") " pod="openstack/ovsdbserver-sb-2" Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.755278 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0" Nov 23 16:07:11 crc kubenswrapper[5050]: I1123 16:07:11.963880 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-sb-2" Nov 23 16:07:12 crc kubenswrapper[5050]: I1123 16:07:12.065385 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-2"] Nov 23 16:07:12 crc kubenswrapper[5050]: I1123 16:07:12.192171 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 23 16:07:12 crc kubenswrapper[5050]: W1123 16:07:12.216184 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod63882070_14a9_44ab_9298_11a518e0ee25.slice/crio-95a81aa559cfb31b3c6471a7f2c74b74c4b98b3321c7c966cc143a2e07826941 WatchSource:0}: Error finding container 95a81aa559cfb31b3c6471a7f2c74b74c4b98b3321c7c966cc143a2e07826941: Status 404 returned error can't find the container with id 95a81aa559cfb31b3c6471a7f2c74b74c4b98b3321c7c966cc143a2e07826941 Nov 23 16:07:12 crc kubenswrapper[5050]: I1123 16:07:12.269065 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-1"] Nov 23 16:07:12 crc kubenswrapper[5050]: I1123 16:07:12.364288 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 23 16:07:12 crc kubenswrapper[5050]: W1123 16:07:12.403676 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode42bfb74_79c5_4710_8d55_33da1c765d8c.slice/crio-3494231d063c58e0bc661df676a4eff79fe8361a7b54b8793c95912f803a7950 WatchSource:0}: Error finding container 3494231d063c58e0bc661df676a4eff79fe8361a7b54b8793c95912f803a7950: Status 404 returned error can't find the container with id 3494231d063c58e0bc661df676a4eff79fe8361a7b54b8793c95912f803a7950 Nov 23 16:07:12 crc kubenswrapper[5050]: I1123 16:07:12.640917 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-2"] Nov 23 16:07:13 crc kubenswrapper[5050]: I1123 16:07:13.013300 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-1" event={"ID":"24e76718-a290-4dc2-a5eb-5a0c156ddf42","Type":"ContainerStarted","Data":"9a163fc6b1134158da9fa3e5f6cdf5111afd79d6f74fdc8d21719e60d45d386a"} Nov 23 16:07:13 crc kubenswrapper[5050]: I1123 16:07:13.013722 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-1" event={"ID":"24e76718-a290-4dc2-a5eb-5a0c156ddf42","Type":"ContainerStarted","Data":"49d63737ef55102224e9a9b4255f8aca4e0bf86d41c5619f0e51cef6941176dd"} Nov 23 16:07:13 crc kubenswrapper[5050]: I1123 16:07:13.013735 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-1" event={"ID":"24e76718-a290-4dc2-a5eb-5a0c156ddf42","Type":"ContainerStarted","Data":"c84530aeb2e0b61ed927afdc87c89fc6eca8fdf64289d0f5f7ef1a3871f29397"} Nov 23 16:07:13 crc kubenswrapper[5050]: I1123 16:07:13.017709 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-2" event={"ID":"bafc2ad6-91da-4cd1-b4e3-4618db95cc11","Type":"ContainerStarted","Data":"10b5e2c9a8bc53a27d68bfdf21ec7fb59d1d559f8e012b72f3ea132f9b27526d"} Nov 23 16:07:13 crc kubenswrapper[5050]: I1123 16:07:13.017778 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-2" event={"ID":"bafc2ad6-91da-4cd1-b4e3-4618db95cc11","Type":"ContainerStarted","Data":"1f7e0648cf17f3ab8347b14048b884fb0c9833b1cea761b54b8053a1022ebe68"} Nov 23 16:07:13 crc kubenswrapper[5050]: I1123 16:07:13.017796 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/ovsdbserver-nb-2" event={"ID":"bafc2ad6-91da-4cd1-b4e3-4618db95cc11","Type":"ContainerStarted","Data":"56e327e4b2d8ce912e326fa9344475e1759bc0db4fad6dc151891b897abc3d5c"} Nov 23 16:07:13 crc kubenswrapper[5050]: I1123 16:07:13.020288 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-2" event={"ID":"5c40394e-5b93-4c3e-876f-d9cbb18b1bf2","Type":"ContainerStarted","Data":"28f6a99b854096098a2fbd7bfda412383d4a4af9a89ac5f72fdb2277c8e76569"} Nov 23 16:07:13 crc kubenswrapper[5050]: I1123 16:07:13.020331 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-2" event={"ID":"5c40394e-5b93-4c3e-876f-d9cbb18b1bf2","Type":"ContainerStarted","Data":"5363bcfc7d0c9c1be37d20cfc0fbec0de4ac7e34b6141f75319199b257f8f40d"} Nov 23 16:07:13 crc kubenswrapper[5050]: I1123 16:07:13.022874 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"63882070-14a9-44ab-9298-11a518e0ee25","Type":"ContainerStarted","Data":"4c6175cc6f9c519e0f86e70ea6e5eac14b5bee1ae5cd0a30f9876c53f55a5352"} Nov 23 16:07:13 crc kubenswrapper[5050]: I1123 16:07:13.022914 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"63882070-14a9-44ab-9298-11a518e0ee25","Type":"ContainerStarted","Data":"f9b729572616c187ccf27a40f1053640f5e5312ccffefa7d1c7370987ab8c226"} Nov 23 16:07:13 crc kubenswrapper[5050]: I1123 16:07:13.022928 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"63882070-14a9-44ab-9298-11a518e0ee25","Type":"ContainerStarted","Data":"95a81aa559cfb31b3c6471a7f2c74b74c4b98b3321c7c966cc143a2e07826941"} Nov 23 16:07:13 crc kubenswrapper[5050]: I1123 16:07:13.024403 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"e42bfb74-79c5-4710-8d55-33da1c765d8c","Type":"ContainerStarted","Data":"72121fbb66b74a4b65b6884df592b32379be225d95e2c4a73be3922f7798270e"} Nov 23 16:07:13 crc kubenswrapper[5050]: I1123 16:07:13.024543 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"e42bfb74-79c5-4710-8d55-33da1c765d8c","Type":"ContainerStarted","Data":"3494231d063c58e0bc661df676a4eff79fe8361a7b54b8793c95912f803a7950"} Nov 23 16:07:13 crc kubenswrapper[5050]: I1123 16:07:13.048120 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-sb-1" podStartSLOduration=3.048099122 podStartE2EDuration="3.048099122s" podCreationTimestamp="2025-11-23 16:07:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 16:07:13.041370522 +0000 UTC m=+5128.208367007" watchObservedRunningTime="2025-11-23 16:07:13.048099122 +0000 UTC m=+5128.215095607" Nov 23 16:07:13 crc kubenswrapper[5050]: I1123 16:07:13.068234 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-nb-2" podStartSLOduration=3.0682102 podStartE2EDuration="3.0682102s" podCreationTimestamp="2025-11-23 16:07:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 16:07:13.060009218 +0000 UTC m=+5128.227005713" watchObservedRunningTime="2025-11-23 16:07:13.0682102 +0000 UTC m=+5128.235206685" Nov 23 16:07:13 crc kubenswrapper[5050]: I1123 16:07:13.083661 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack/ovsdbserver-sb-0" podStartSLOduration=3.083636855 podStartE2EDuration="3.083636855s" podCreationTimestamp="2025-11-23 16:07:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 16:07:13.082840893 +0000 UTC m=+5128.249837388" watchObservedRunningTime="2025-11-23 16:07:13.083636855 +0000 UTC m=+5128.250633340" Nov 23 16:07:13 crc kubenswrapper[5050]: I1123 16:07:13.164023 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-1"] Nov 23 16:07:13 crc kubenswrapper[5050]: W1123 16:07:13.167922 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod97bef3ec_2ce6_4a13_bbc4_d08d97618e29.slice/crio-a2dc1d927187a3e9f34133d169ee9d8ccebd89096add9fd580abf92c61e01de2 WatchSource:0}: Error finding container a2dc1d927187a3e9f34133d169ee9d8ccebd89096add9fd580abf92c61e01de2: Status 404 returned error can't find the container with id a2dc1d927187a3e9f34133d169ee9d8ccebd89096add9fd580abf92c61e01de2 Nov 23 16:07:14 crc kubenswrapper[5050]: I1123 16:07:14.039867 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-1" event={"ID":"97bef3ec-2ce6-4a13-bbc4-d08d97618e29","Type":"ContainerStarted","Data":"eb6616eeee6e537c483b41d4ddbfb843d49af808e8f7411740313491e8309925"} Nov 23 16:07:14 crc kubenswrapper[5050]: I1123 16:07:14.040374 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-1" event={"ID":"97bef3ec-2ce6-4a13-bbc4-d08d97618e29","Type":"ContainerStarted","Data":"466b5f8c306be918672e410ef9c3743bde4fb48c6f6831fa6f26f40d91e976f5"} Nov 23 16:07:14 crc kubenswrapper[5050]: I1123 16:07:14.040402 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-1" event={"ID":"97bef3ec-2ce6-4a13-bbc4-d08d97618e29","Type":"ContainerStarted","Data":"a2dc1d927187a3e9f34133d169ee9d8ccebd89096add9fd580abf92c61e01de2"} Nov 23 16:07:14 crc kubenswrapper[5050]: I1123 16:07:14.044988 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"e42bfb74-79c5-4710-8d55-33da1c765d8c","Type":"ContainerStarted","Data":"d582db1e2c1ac8fc7798d9a90672c661dbaa34c3dca82b96840c5981fc7f4f29"} Nov 23 16:07:14 crc kubenswrapper[5050]: I1123 16:07:14.053240 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-2" event={"ID":"5c40394e-5b93-4c3e-876f-d9cbb18b1bf2","Type":"ContainerStarted","Data":"c7e16b59560c3d1a5aedbbc8b67462156de0cbcf868dab277bf9f5000d6a7656"} Nov 23 16:07:14 crc kubenswrapper[5050]: I1123 16:07:14.072792 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-nb-1" podStartSLOduration=4.072772339 podStartE2EDuration="4.072772339s" podCreationTimestamp="2025-11-23 16:07:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 16:07:14.065625437 +0000 UTC m=+5129.232621942" watchObservedRunningTime="2025-11-23 16:07:14.072772339 +0000 UTC m=+5129.239768824" Nov 23 16:07:14 crc kubenswrapper[5050]: I1123 16:07:14.095209 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-nb-0" podStartSLOduration=4.095187962 podStartE2EDuration="4.095187962s" podCreationTimestamp="2025-11-23 16:07:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" 
lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 16:07:14.093182915 +0000 UTC m=+5129.260179440" watchObservedRunningTime="2025-11-23 16:07:14.095187962 +0000 UTC m=+5129.262184447" Nov 23 16:07:14 crc kubenswrapper[5050]: I1123 16:07:14.129149 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-sb-2" podStartSLOduration=4.129113109 podStartE2EDuration="4.129113109s" podCreationTimestamp="2025-11-23 16:07:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 16:07:14.116412501 +0000 UTC m=+5129.283408996" watchObservedRunningTime="2025-11-23 16:07:14.129113109 +0000 UTC m=+5129.296109664" Nov 23 16:07:14 crc kubenswrapper[5050]: I1123 16:07:14.470283 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-nb-2" Nov 23 16:07:14 crc kubenswrapper[5050]: I1123 16:07:14.480702 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-nb-1" Nov 23 16:07:14 crc kubenswrapper[5050]: I1123 16:07:14.634550 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-sb-0" Nov 23 16:07:14 crc kubenswrapper[5050]: I1123 16:07:14.655881 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-sb-1" Nov 23 16:07:14 crc kubenswrapper[5050]: I1123 16:07:14.756543 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-nb-0" Nov 23 16:07:14 crc kubenswrapper[5050]: I1123 16:07:14.964337 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-sb-2" Nov 23 16:07:16 crc kubenswrapper[5050]: I1123 16:07:16.470896 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-nb-2" Nov 23 16:07:16 crc kubenswrapper[5050]: I1123 16:07:16.480680 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-nb-1" Nov 23 16:07:16 crc kubenswrapper[5050]: I1123 16:07:16.633957 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-sb-0" Nov 23 16:07:16 crc kubenswrapper[5050]: I1123 16:07:16.655961 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-sb-1" Nov 23 16:07:16 crc kubenswrapper[5050]: I1123 16:07:16.756851 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-nb-0" Nov 23 16:07:16 crc kubenswrapper[5050]: I1123 16:07:16.964911 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-sb-2" Nov 23 16:07:17 crc kubenswrapper[5050]: I1123 16:07:17.541889 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-nb-1" Nov 23 16:07:17 crc kubenswrapper[5050]: I1123 16:07:17.547696 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-nb-2" Nov 23 16:07:17 crc kubenswrapper[5050]: I1123 16:07:17.617757 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-nb-2" Nov 23 16:07:17 crc kubenswrapper[5050]: I1123 16:07:17.702749 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-sb-0" Nov 23 16:07:17 crc kubenswrapper[5050]: I1123 16:07:17.710874 5050 
kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-sb-1" Nov 23 16:07:17 crc kubenswrapper[5050]: I1123 16:07:17.770343 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-sb-1" Nov 23 16:07:17 crc kubenswrapper[5050]: I1123 16:07:17.788909 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-sb-0" Nov 23 16:07:17 crc kubenswrapper[5050]: I1123 16:07:17.821869 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-nb-0" Nov 23 16:07:17 crc kubenswrapper[5050]: I1123 16:07:17.890486 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-86446c66bf-bdq8n"] Nov 23 16:07:17 crc kubenswrapper[5050]: I1123 16:07:17.893025 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-86446c66bf-bdq8n" Nov 23 16:07:17 crc kubenswrapper[5050]: I1123 16:07:17.900771 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-nb" Nov 23 16:07:17 crc kubenswrapper[5050]: I1123 16:07:17.908999 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-86446c66bf-bdq8n"] Nov 23 16:07:18 crc kubenswrapper[5050]: I1123 16:07:18.034953 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-sb-2" Nov 23 16:07:18 crc kubenswrapper[5050]: I1123 16:07:18.065614 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7zmvl\" (UniqueName: \"kubernetes.io/projected/bad8b331-5365-4fec-ad69-914effd9d3f5-kube-api-access-7zmvl\") pod \"dnsmasq-dns-86446c66bf-bdq8n\" (UID: \"bad8b331-5365-4fec-ad69-914effd9d3f5\") " pod="openstack/dnsmasq-dns-86446c66bf-bdq8n" Nov 23 16:07:18 crc kubenswrapper[5050]: I1123 16:07:18.065675 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bad8b331-5365-4fec-ad69-914effd9d3f5-dns-svc\") pod \"dnsmasq-dns-86446c66bf-bdq8n\" (UID: \"bad8b331-5365-4fec-ad69-914effd9d3f5\") " pod="openstack/dnsmasq-dns-86446c66bf-bdq8n" Nov 23 16:07:18 crc kubenswrapper[5050]: I1123 16:07:18.065706 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bad8b331-5365-4fec-ad69-914effd9d3f5-config\") pod \"dnsmasq-dns-86446c66bf-bdq8n\" (UID: \"bad8b331-5365-4fec-ad69-914effd9d3f5\") " pod="openstack/dnsmasq-dns-86446c66bf-bdq8n" Nov 23 16:07:18 crc kubenswrapper[5050]: I1123 16:07:18.065757 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bad8b331-5365-4fec-ad69-914effd9d3f5-ovsdbserver-nb\") pod \"dnsmasq-dns-86446c66bf-bdq8n\" (UID: \"bad8b331-5365-4fec-ad69-914effd9d3f5\") " pod="openstack/dnsmasq-dns-86446c66bf-bdq8n" Nov 23 16:07:18 crc kubenswrapper[5050]: I1123 16:07:18.085074 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-86446c66bf-bdq8n"] Nov 23 16:07:18 crc kubenswrapper[5050]: E1123 16:07:18.085857 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[config dns-svc kube-api-access-7zmvl ovsdbserver-nb], unattached volumes=[], failed to process volumes=[]: context canceled" pod="openstack/dnsmasq-dns-86446c66bf-bdq8n" 
podUID="bad8b331-5365-4fec-ad69-914effd9d3f5" Nov 23 16:07:18 crc kubenswrapper[5050]: I1123 16:07:18.099853 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-86446c66bf-bdq8n" Nov 23 16:07:18 crc kubenswrapper[5050]: I1123 16:07:18.115578 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-86446c66bf-bdq8n" Nov 23 16:07:18 crc kubenswrapper[5050]: I1123 16:07:18.148886 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7c9c679fc-b59c7"] Nov 23 16:07:18 crc kubenswrapper[5050]: I1123 16:07:18.158754 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-nb-1" Nov 23 16:07:18 crc kubenswrapper[5050]: I1123 16:07:18.161434 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-sb-2" Nov 23 16:07:18 crc kubenswrapper[5050]: I1123 16:07:18.162822 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7c9c679fc-b59c7" Nov 23 16:07:18 crc kubenswrapper[5050]: I1123 16:07:18.164409 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-sb" Nov 23 16:07:18 crc kubenswrapper[5050]: I1123 16:07:18.168490 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bad8b331-5365-4fec-ad69-914effd9d3f5-ovsdbserver-nb\") pod \"dnsmasq-dns-86446c66bf-bdq8n\" (UID: \"bad8b331-5365-4fec-ad69-914effd9d3f5\") " pod="openstack/dnsmasq-dns-86446c66bf-bdq8n" Nov 23 16:07:18 crc kubenswrapper[5050]: I1123 16:07:18.168764 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7zmvl\" (UniqueName: \"kubernetes.io/projected/bad8b331-5365-4fec-ad69-914effd9d3f5-kube-api-access-7zmvl\") pod \"dnsmasq-dns-86446c66bf-bdq8n\" (UID: \"bad8b331-5365-4fec-ad69-914effd9d3f5\") " pod="openstack/dnsmasq-dns-86446c66bf-bdq8n" Nov 23 16:07:18 crc kubenswrapper[5050]: I1123 16:07:18.168839 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bad8b331-5365-4fec-ad69-914effd9d3f5-dns-svc\") pod \"dnsmasq-dns-86446c66bf-bdq8n\" (UID: \"bad8b331-5365-4fec-ad69-914effd9d3f5\") " pod="openstack/dnsmasq-dns-86446c66bf-bdq8n" Nov 23 16:07:18 crc kubenswrapper[5050]: I1123 16:07:18.168894 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bad8b331-5365-4fec-ad69-914effd9d3f5-config\") pod \"dnsmasq-dns-86446c66bf-bdq8n\" (UID: \"bad8b331-5365-4fec-ad69-914effd9d3f5\") " pod="openstack/dnsmasq-dns-86446c66bf-bdq8n" Nov 23 16:07:18 crc kubenswrapper[5050]: I1123 16:07:18.169813 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bad8b331-5365-4fec-ad69-914effd9d3f5-ovsdbserver-nb\") pod \"dnsmasq-dns-86446c66bf-bdq8n\" (UID: \"bad8b331-5365-4fec-ad69-914effd9d3f5\") " pod="openstack/dnsmasq-dns-86446c66bf-bdq8n" Nov 23 16:07:18 crc kubenswrapper[5050]: I1123 16:07:18.169962 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bad8b331-5365-4fec-ad69-914effd9d3f5-config\") pod \"dnsmasq-dns-86446c66bf-bdq8n\" (UID: \"bad8b331-5365-4fec-ad69-914effd9d3f5\") " pod="openstack/dnsmasq-dns-86446c66bf-bdq8n" Nov 23 
16:07:18 crc kubenswrapper[5050]: I1123 16:07:18.170516 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bad8b331-5365-4fec-ad69-914effd9d3f5-dns-svc\") pod \"dnsmasq-dns-86446c66bf-bdq8n\" (UID: \"bad8b331-5365-4fec-ad69-914effd9d3f5\") " pod="openstack/dnsmasq-dns-86446c66bf-bdq8n" Nov 23 16:07:18 crc kubenswrapper[5050]: I1123 16:07:18.171634 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7c9c679fc-b59c7"] Nov 23 16:07:18 crc kubenswrapper[5050]: I1123 16:07:18.181856 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-nb-0" Nov 23 16:07:18 crc kubenswrapper[5050]: I1123 16:07:18.198168 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7zmvl\" (UniqueName: \"kubernetes.io/projected/bad8b331-5365-4fec-ad69-914effd9d3f5-kube-api-access-7zmvl\") pod \"dnsmasq-dns-86446c66bf-bdq8n\" (UID: \"bad8b331-5365-4fec-ad69-914effd9d3f5\") " pod="openstack/dnsmasq-dns-86446c66bf-bdq8n" Nov 23 16:07:18 crc kubenswrapper[5050]: I1123 16:07:18.271472 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6087f6f0-60d6-4e97-bf79-c53bcb68c7cd-ovsdbserver-nb\") pod \"dnsmasq-dns-7c9c679fc-b59c7\" (UID: \"6087f6f0-60d6-4e97-bf79-c53bcb68c7cd\") " pod="openstack/dnsmasq-dns-7c9c679fc-b59c7" Nov 23 16:07:18 crc kubenswrapper[5050]: I1123 16:07:18.271653 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6087f6f0-60d6-4e97-bf79-c53bcb68c7cd-dns-svc\") pod \"dnsmasq-dns-7c9c679fc-b59c7\" (UID: \"6087f6f0-60d6-4e97-bf79-c53bcb68c7cd\") " pod="openstack/dnsmasq-dns-7c9c679fc-b59c7" Nov 23 16:07:18 crc kubenswrapper[5050]: I1123 16:07:18.271767 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6087f6f0-60d6-4e97-bf79-c53bcb68c7cd-ovsdbserver-sb\") pod \"dnsmasq-dns-7c9c679fc-b59c7\" (UID: \"6087f6f0-60d6-4e97-bf79-c53bcb68c7cd\") " pod="openstack/dnsmasq-dns-7c9c679fc-b59c7" Nov 23 16:07:18 crc kubenswrapper[5050]: I1123 16:07:18.271857 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6087f6f0-60d6-4e97-bf79-c53bcb68c7cd-config\") pod \"dnsmasq-dns-7c9c679fc-b59c7\" (UID: \"6087f6f0-60d6-4e97-bf79-c53bcb68c7cd\") " pod="openstack/dnsmasq-dns-7c9c679fc-b59c7" Nov 23 16:07:18 crc kubenswrapper[5050]: I1123 16:07:18.271892 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2zldm\" (UniqueName: \"kubernetes.io/projected/6087f6f0-60d6-4e97-bf79-c53bcb68c7cd-kube-api-access-2zldm\") pod \"dnsmasq-dns-7c9c679fc-b59c7\" (UID: \"6087f6f0-60d6-4e97-bf79-c53bcb68c7cd\") " pod="openstack/dnsmasq-dns-7c9c679fc-b59c7" Nov 23 16:07:18 crc kubenswrapper[5050]: I1123 16:07:18.376253 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bad8b331-5365-4fec-ad69-914effd9d3f5-dns-svc\") pod \"bad8b331-5365-4fec-ad69-914effd9d3f5\" (UID: \"bad8b331-5365-4fec-ad69-914effd9d3f5\") " Nov 23 16:07:18 crc kubenswrapper[5050]: I1123 16:07:18.376384 5050 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bad8b331-5365-4fec-ad69-914effd9d3f5-config\") pod \"bad8b331-5365-4fec-ad69-914effd9d3f5\" (UID: \"bad8b331-5365-4fec-ad69-914effd9d3f5\") " Nov 23 16:07:18 crc kubenswrapper[5050]: I1123 16:07:18.376603 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7zmvl\" (UniqueName: \"kubernetes.io/projected/bad8b331-5365-4fec-ad69-914effd9d3f5-kube-api-access-7zmvl\") pod \"bad8b331-5365-4fec-ad69-914effd9d3f5\" (UID: \"bad8b331-5365-4fec-ad69-914effd9d3f5\") " Nov 23 16:07:18 crc kubenswrapper[5050]: I1123 16:07:18.376789 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bad8b331-5365-4fec-ad69-914effd9d3f5-ovsdbserver-nb\") pod \"bad8b331-5365-4fec-ad69-914effd9d3f5\" (UID: \"bad8b331-5365-4fec-ad69-914effd9d3f5\") " Nov 23 16:07:18 crc kubenswrapper[5050]: I1123 16:07:18.376826 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bad8b331-5365-4fec-ad69-914effd9d3f5-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "bad8b331-5365-4fec-ad69-914effd9d3f5" (UID: "bad8b331-5365-4fec-ad69-914effd9d3f5"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 16:07:18 crc kubenswrapper[5050]: I1123 16:07:18.377217 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bad8b331-5365-4fec-ad69-914effd9d3f5-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "bad8b331-5365-4fec-ad69-914effd9d3f5" (UID: "bad8b331-5365-4fec-ad69-914effd9d3f5"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 16:07:18 crc kubenswrapper[5050]: I1123 16:07:18.377553 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bad8b331-5365-4fec-ad69-914effd9d3f5-config" (OuterVolumeSpecName: "config") pod "bad8b331-5365-4fec-ad69-914effd9d3f5" (UID: "bad8b331-5365-4fec-ad69-914effd9d3f5"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 16:07:18 crc kubenswrapper[5050]: I1123 16:07:18.380957 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6087f6f0-60d6-4e97-bf79-c53bcb68c7cd-dns-svc\") pod \"dnsmasq-dns-7c9c679fc-b59c7\" (UID: \"6087f6f0-60d6-4e97-bf79-c53bcb68c7cd\") " pod="openstack/dnsmasq-dns-7c9c679fc-b59c7" Nov 23 16:07:18 crc kubenswrapper[5050]: I1123 16:07:18.381062 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6087f6f0-60d6-4e97-bf79-c53bcb68c7cd-ovsdbserver-sb\") pod \"dnsmasq-dns-7c9c679fc-b59c7\" (UID: \"6087f6f0-60d6-4e97-bf79-c53bcb68c7cd\") " pod="openstack/dnsmasq-dns-7c9c679fc-b59c7" Nov 23 16:07:18 crc kubenswrapper[5050]: I1123 16:07:18.381135 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6087f6f0-60d6-4e97-bf79-c53bcb68c7cd-config\") pod \"dnsmasq-dns-7c9c679fc-b59c7\" (UID: \"6087f6f0-60d6-4e97-bf79-c53bcb68c7cd\") " pod="openstack/dnsmasq-dns-7c9c679fc-b59c7" Nov 23 16:07:18 crc kubenswrapper[5050]: I1123 16:07:18.381173 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2zldm\" (UniqueName: \"kubernetes.io/projected/6087f6f0-60d6-4e97-bf79-c53bcb68c7cd-kube-api-access-2zldm\") pod \"dnsmasq-dns-7c9c679fc-b59c7\" (UID: \"6087f6f0-60d6-4e97-bf79-c53bcb68c7cd\") " pod="openstack/dnsmasq-dns-7c9c679fc-b59c7" Nov 23 16:07:18 crc kubenswrapper[5050]: I1123 16:07:18.381507 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6087f6f0-60d6-4e97-bf79-c53bcb68c7cd-ovsdbserver-nb\") pod \"dnsmasq-dns-7c9c679fc-b59c7\" (UID: \"6087f6f0-60d6-4e97-bf79-c53bcb68c7cd\") " pod="openstack/dnsmasq-dns-7c9c679fc-b59c7" Nov 23 16:07:18 crc kubenswrapper[5050]: I1123 16:07:18.381600 5050 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bad8b331-5365-4fec-ad69-914effd9d3f5-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 23 16:07:18 crc kubenswrapper[5050]: I1123 16:07:18.381614 5050 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bad8b331-5365-4fec-ad69-914effd9d3f5-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 23 16:07:18 crc kubenswrapper[5050]: I1123 16:07:18.381626 5050 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bad8b331-5365-4fec-ad69-914effd9d3f5-config\") on node \"crc\" DevicePath \"\"" Nov 23 16:07:18 crc kubenswrapper[5050]: I1123 16:07:18.382551 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6087f6f0-60d6-4e97-bf79-c53bcb68c7cd-ovsdbserver-nb\") pod \"dnsmasq-dns-7c9c679fc-b59c7\" (UID: \"6087f6f0-60d6-4e97-bf79-c53bcb68c7cd\") " pod="openstack/dnsmasq-dns-7c9c679fc-b59c7" Nov 23 16:07:18 crc kubenswrapper[5050]: I1123 16:07:18.382429 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6087f6f0-60d6-4e97-bf79-c53bcb68c7cd-dns-svc\") pod \"dnsmasq-dns-7c9c679fc-b59c7\" (UID: \"6087f6f0-60d6-4e97-bf79-c53bcb68c7cd\") " pod="openstack/dnsmasq-dns-7c9c679fc-b59c7" Nov 23 16:07:18 crc kubenswrapper[5050]: I1123 
16:07:18.382868 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6087f6f0-60d6-4e97-bf79-c53bcb68c7cd-config\") pod \"dnsmasq-dns-7c9c679fc-b59c7\" (UID: \"6087f6f0-60d6-4e97-bf79-c53bcb68c7cd\") " pod="openstack/dnsmasq-dns-7c9c679fc-b59c7" Nov 23 16:07:18 crc kubenswrapper[5050]: I1123 16:07:18.382897 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6087f6f0-60d6-4e97-bf79-c53bcb68c7cd-ovsdbserver-sb\") pod \"dnsmasq-dns-7c9c679fc-b59c7\" (UID: \"6087f6f0-60d6-4e97-bf79-c53bcb68c7cd\") " pod="openstack/dnsmasq-dns-7c9c679fc-b59c7" Nov 23 16:07:18 crc kubenswrapper[5050]: I1123 16:07:18.864305 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2zldm\" (UniqueName: \"kubernetes.io/projected/6087f6f0-60d6-4e97-bf79-c53bcb68c7cd-kube-api-access-2zldm\") pod \"dnsmasq-dns-7c9c679fc-b59c7\" (UID: \"6087f6f0-60d6-4e97-bf79-c53bcb68c7cd\") " pod="openstack/dnsmasq-dns-7c9c679fc-b59c7" Nov 23 16:07:18 crc kubenswrapper[5050]: I1123 16:07:18.865917 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bad8b331-5365-4fec-ad69-914effd9d3f5-kube-api-access-7zmvl" (OuterVolumeSpecName: "kube-api-access-7zmvl") pod "bad8b331-5365-4fec-ad69-914effd9d3f5" (UID: "bad8b331-5365-4fec-ad69-914effd9d3f5"). InnerVolumeSpecName "kube-api-access-7zmvl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 16:07:18 crc kubenswrapper[5050]: I1123 16:07:18.890017 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7zmvl\" (UniqueName: \"kubernetes.io/projected/bad8b331-5365-4fec-ad69-914effd9d3f5-kube-api-access-7zmvl\") on node \"crc\" DevicePath \"\"" Nov 23 16:07:19 crc kubenswrapper[5050]: I1123 16:07:19.103489 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-86446c66bf-bdq8n" Nov 23 16:07:19 crc kubenswrapper[5050]: I1123 16:07:19.140229 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7c9c679fc-b59c7" Nov 23 16:07:19 crc kubenswrapper[5050]: I1123 16:07:19.163189 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-86446c66bf-bdq8n"] Nov 23 16:07:19 crc kubenswrapper[5050]: I1123 16:07:19.171263 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-86446c66bf-bdq8n"] Nov 23 16:07:19 crc kubenswrapper[5050]: I1123 16:07:19.441748 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7c9c679fc-b59c7"] Nov 23 16:07:19 crc kubenswrapper[5050]: I1123 16:07:19.564272 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bad8b331-5365-4fec-ad69-914effd9d3f5" path="/var/lib/kubelet/pods/bad8b331-5365-4fec-ad69-914effd9d3f5/volumes" Nov 23 16:07:20 crc kubenswrapper[5050]: I1123 16:07:20.117303 5050 generic.go:334] "Generic (PLEG): container finished" podID="6087f6f0-60d6-4e97-bf79-c53bcb68c7cd" containerID="7a12a36cb5e094a99ccf3f2d057dbaded80340ff9f4bf98a7cd0236583d0a156" exitCode=0 Nov 23 16:07:20 crc kubenswrapper[5050]: I1123 16:07:20.117435 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7c9c679fc-b59c7" event={"ID":"6087f6f0-60d6-4e97-bf79-c53bcb68c7cd","Type":"ContainerDied","Data":"7a12a36cb5e094a99ccf3f2d057dbaded80340ff9f4bf98a7cd0236583d0a156"} Nov 23 16:07:20 crc kubenswrapper[5050]: I1123 16:07:20.118003 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7c9c679fc-b59c7" event={"ID":"6087f6f0-60d6-4e97-bf79-c53bcb68c7cd","Type":"ContainerStarted","Data":"c071eda3396adb4a5b016f0891fd35d576ac896502dc5cc11a9363bca33b8fe4"} Nov 23 16:07:20 crc kubenswrapper[5050]: I1123 16:07:20.549313 5050 scope.go:117] "RemoveContainer" containerID="3fc70d33f56eea45a48b3911893d3dc4f60ad925817000cb324b1423c665b64b" Nov 23 16:07:20 crc kubenswrapper[5050]: E1123 16:07:20.549895 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 16:07:21 crc kubenswrapper[5050]: I1123 16:07:21.134258 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7c9c679fc-b59c7" event={"ID":"6087f6f0-60d6-4e97-bf79-c53bcb68c7cd","Type":"ContainerStarted","Data":"c78974c1b9e1482068cc20864fb7d5529b2859fbbe8ae659ce48b90e90212f4b"} Nov 23 16:07:21 crc kubenswrapper[5050]: I1123 16:07:21.135194 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-7c9c679fc-b59c7" Nov 23 16:07:21 crc kubenswrapper[5050]: I1123 16:07:21.181096 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-7c9c679fc-b59c7" podStartSLOduration=3.181022315 podStartE2EDuration="3.181022315s" podCreationTimestamp="2025-11-23 16:07:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 16:07:21.16383427 +0000 UTC m=+5136.330830765" watchObservedRunningTime="2025-11-23 16:07:21.181022315 +0000 UTC m=+5136.348018830" Nov 23 16:07:21 crc kubenswrapper[5050]: I1123 16:07:21.337651 5050 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openstack/ovn-copy-data"] Nov 23 16:07:21 crc kubenswrapper[5050]: I1123 16:07:21.339405 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-copy-data" Nov 23 16:07:21 crc kubenswrapper[5050]: I1123 16:07:21.342226 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovn-data-cert" Nov 23 16:07:21 crc kubenswrapper[5050]: I1123 16:07:21.355519 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-copy-data"] Nov 23 16:07:21 crc kubenswrapper[5050]: I1123 16:07:21.444121 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-data-cert\" (UniqueName: \"kubernetes.io/secret/2035442b-ccf1-4fb8-8918-4d459a27d992-ovn-data-cert\") pod \"ovn-copy-data\" (UID: \"2035442b-ccf1-4fb8-8918-4d459a27d992\") " pod="openstack/ovn-copy-data" Nov 23 16:07:21 crc kubenswrapper[5050]: I1123 16:07:21.444552 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xvqnk\" (UniqueName: \"kubernetes.io/projected/2035442b-ccf1-4fb8-8918-4d459a27d992-kube-api-access-xvqnk\") pod \"ovn-copy-data\" (UID: \"2035442b-ccf1-4fb8-8918-4d459a27d992\") " pod="openstack/ovn-copy-data" Nov 23 16:07:21 crc kubenswrapper[5050]: I1123 16:07:21.444893 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-268ad718-4a78-4fc7-af74-caaddde51983\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-268ad718-4a78-4fc7-af74-caaddde51983\") pod \"ovn-copy-data\" (UID: \"2035442b-ccf1-4fb8-8918-4d459a27d992\") " pod="openstack/ovn-copy-data" Nov 23 16:07:21 crc kubenswrapper[5050]: I1123 16:07:21.546529 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-268ad718-4a78-4fc7-af74-caaddde51983\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-268ad718-4a78-4fc7-af74-caaddde51983\") pod \"ovn-copy-data\" (UID: \"2035442b-ccf1-4fb8-8918-4d459a27d992\") " pod="openstack/ovn-copy-data" Nov 23 16:07:21 crc kubenswrapper[5050]: I1123 16:07:21.546718 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-data-cert\" (UniqueName: \"kubernetes.io/secret/2035442b-ccf1-4fb8-8918-4d459a27d992-ovn-data-cert\") pod \"ovn-copy-data\" (UID: \"2035442b-ccf1-4fb8-8918-4d459a27d992\") " pod="openstack/ovn-copy-data" Nov 23 16:07:21 crc kubenswrapper[5050]: I1123 16:07:21.546849 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xvqnk\" (UniqueName: \"kubernetes.io/projected/2035442b-ccf1-4fb8-8918-4d459a27d992-kube-api-access-xvqnk\") pod \"ovn-copy-data\" (UID: \"2035442b-ccf1-4fb8-8918-4d459a27d992\") " pod="openstack/ovn-copy-data" Nov 23 16:07:21 crc kubenswrapper[5050]: I1123 16:07:21.553826 5050 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Nov 23 16:07:21 crc kubenswrapper[5050]: I1123 16:07:21.553880 5050 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-268ad718-4a78-4fc7-af74-caaddde51983\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-268ad718-4a78-4fc7-af74-caaddde51983\") pod \"ovn-copy-data\" (UID: \"2035442b-ccf1-4fb8-8918-4d459a27d992\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/ee45c55c74da453aa52924075dcbabcff35fa71fd14a830d00af562a41e46643/globalmount\"" pod="openstack/ovn-copy-data"
Nov 23 16:07:21 crc kubenswrapper[5050]: I1123 16:07:21.961482 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-data-cert\" (UniqueName: \"kubernetes.io/secret/2035442b-ccf1-4fb8-8918-4d459a27d992-ovn-data-cert\") pod \"ovn-copy-data\" (UID: \"2035442b-ccf1-4fb8-8918-4d459a27d992\") " pod="openstack/ovn-copy-data"
Nov 23 16:07:21 crc kubenswrapper[5050]: I1123 16:07:21.962099 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xvqnk\" (UniqueName: \"kubernetes.io/projected/2035442b-ccf1-4fb8-8918-4d459a27d992-kube-api-access-xvqnk\") pod \"ovn-copy-data\" (UID: \"2035442b-ccf1-4fb8-8918-4d459a27d992\") " pod="openstack/ovn-copy-data"
Nov 23 16:07:22 crc kubenswrapper[5050]: I1123 16:07:22.211254 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-268ad718-4a78-4fc7-af74-caaddde51983\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-268ad718-4a78-4fc7-af74-caaddde51983\") pod \"ovn-copy-data\" (UID: \"2035442b-ccf1-4fb8-8918-4d459a27d992\") " pod="openstack/ovn-copy-data"
Nov 23 16:07:22 crc kubenswrapper[5050]: I1123 16:07:22.283161 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-copy-data"
Nov 23 16:07:22 crc kubenswrapper[5050]: I1123 16:07:22.876492 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-copy-data"]
Nov 23 16:07:22 crc kubenswrapper[5050]: W1123 16:07:22.879920 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2035442b_ccf1_4fb8_8918_4d459a27d992.slice/crio-872753a0058ca30995d87778ef632ca5b4b2d9f3c98e4e82f1d7b8810573d8ab WatchSource:0}: Error finding container 872753a0058ca30995d87778ef632ca5b4b2d9f3c98e4e82f1d7b8810573d8ab: Status 404 returned error can't find the container with id 872753a0058ca30995d87778ef632ca5b4b2d9f3c98e4e82f1d7b8810573d8ab
Nov 23 16:07:23 crc kubenswrapper[5050]: I1123 16:07:23.187600 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-copy-data" event={"ID":"2035442b-ccf1-4fb8-8918-4d459a27d992","Type":"ContainerStarted","Data":"aae2fc8f91b76af9d3e474837e2e9ef4c90c0c1452a1a95351ce279e160546a7"}
Nov 23 16:07:23 crc kubenswrapper[5050]: I1123 16:07:23.187667 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-copy-data" event={"ID":"2035442b-ccf1-4fb8-8918-4d459a27d992","Type":"ContainerStarted","Data":"872753a0058ca30995d87778ef632ca5b4b2d9f3c98e4e82f1d7b8810573d8ab"}
Nov 23 16:07:23 crc kubenswrapper[5050]: I1123 16:07:23.224258 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-copy-data" podStartSLOduration=3.224184764 podStartE2EDuration="3.224184764s" podCreationTimestamp="2025-11-23 16:07:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 16:07:23.209921362 +0000 UTC m=+5138.376917877" watchObservedRunningTime="2025-11-23 16:07:23.224184764 +0000 UTC m=+5138.391181279"
Nov 23 16:07:29 crc kubenswrapper[5050]: I1123 16:07:29.141845 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-7c9c679fc-b59c7"
Nov 23 16:07:29 crc kubenswrapper[5050]: I1123 16:07:29.295153 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5b7946d7b9-7gb49"]
Nov 23 16:07:29 crc kubenswrapper[5050]: I1123 16:07:29.295665 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5b7946d7b9-7gb49" podUID="c7e0522c-5cea-4c2a-b6eb-9e59a873c18f" containerName="dnsmasq-dns" containerID="cri-o://81455a62b04c7d108b8480a60c8c52d6f28c14ce24d91e92f93e2b25091a52fc" gracePeriod=10
Nov 23 16:07:29 crc kubenswrapper[5050]: I1123 16:07:29.699955 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-northd-0"]
Nov 23 16:07:29 crc kubenswrapper[5050]: I1123 16:07:29.702772 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0"
Nov 23 16:07:29 crc kubenswrapper[5050]: I1123 16:07:29.706999 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-config"
Nov 23 16:07:29 crc kubenswrapper[5050]: I1123 16:07:29.707284 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-scripts"
Nov 23 16:07:29 crc kubenswrapper[5050]: I1123 16:07:29.707662 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovnnorthd-ovnnorthd-dockercfg-mwmdc"
Nov 23 16:07:29 crc kubenswrapper[5050]: I1123 16:07:29.721379 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"]
Nov 23 16:07:29 crc kubenswrapper[5050]: I1123 16:07:29.824172 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ab512517-dd97-4fc7-a923-4e2fc21a9af8-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"ab512517-dd97-4fc7-a923-4e2fc21a9af8\") " pod="openstack/ovn-northd-0"
Nov 23 16:07:29 crc kubenswrapper[5050]: I1123 16:07:29.824314 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ab512517-dd97-4fc7-a923-4e2fc21a9af8-config\") pod \"ovn-northd-0\" (UID: \"ab512517-dd97-4fc7-a923-4e2fc21a9af8\") " pod="openstack/ovn-northd-0"
Nov 23 16:07:29 crc kubenswrapper[5050]: I1123 16:07:29.826023 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/ab512517-dd97-4fc7-a923-4e2fc21a9af8-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"ab512517-dd97-4fc7-a923-4e2fc21a9af8\") " pod="openstack/ovn-northd-0"
Nov 23 16:07:29 crc kubenswrapper[5050]: I1123 16:07:29.826081 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x7rp8\" (UniqueName: \"kubernetes.io/projected/ab512517-dd97-4fc7-a923-4e2fc21a9af8-kube-api-access-x7rp8\") pod \"ovn-northd-0\" (UID: \"ab512517-dd97-4fc7-a923-4e2fc21a9af8\") " pod="openstack/ovn-northd-0"
Nov 23 16:07:29 crc kubenswrapper[5050]: I1123 16:07:29.826101 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ab512517-dd97-4fc7-a923-4e2fc21a9af8-scripts\") pod \"ovn-northd-0\" (UID: \"ab512517-dd97-4fc7-a923-4e2fc21a9af8\") " pod="openstack/ovn-northd-0"
Nov 23 16:07:29 crc kubenswrapper[5050]: I1123 16:07:29.826934 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5b7946d7b9-7gb49"
Nov 23 16:07:29 crc kubenswrapper[5050]: I1123 16:07:29.927885 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lcp2p\" (UniqueName: \"kubernetes.io/projected/c7e0522c-5cea-4c2a-b6eb-9e59a873c18f-kube-api-access-lcp2p\") pod \"c7e0522c-5cea-4c2a-b6eb-9e59a873c18f\" (UID: \"c7e0522c-5cea-4c2a-b6eb-9e59a873c18f\") "
Nov 23 16:07:29 crc kubenswrapper[5050]: I1123 16:07:29.928057 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c7e0522c-5cea-4c2a-b6eb-9e59a873c18f-dns-svc\") pod \"c7e0522c-5cea-4c2a-b6eb-9e59a873c18f\" (UID: \"c7e0522c-5cea-4c2a-b6eb-9e59a873c18f\") "
Nov 23 16:07:29 crc kubenswrapper[5050]: I1123 16:07:29.928157 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c7e0522c-5cea-4c2a-b6eb-9e59a873c18f-config\") pod \"c7e0522c-5cea-4c2a-b6eb-9e59a873c18f\" (UID: \"c7e0522c-5cea-4c2a-b6eb-9e59a873c18f\") "
Nov 23 16:07:29 crc kubenswrapper[5050]: I1123 16:07:29.928558 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ab512517-dd97-4fc7-a923-4e2fc21a9af8-config\") pod \"ovn-northd-0\" (UID: \"ab512517-dd97-4fc7-a923-4e2fc21a9af8\") " pod="openstack/ovn-northd-0"
Nov 23 16:07:29 crc kubenswrapper[5050]: I1123 16:07:29.928854 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/ab512517-dd97-4fc7-a923-4e2fc21a9af8-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"ab512517-dd97-4fc7-a923-4e2fc21a9af8\") " pod="openstack/ovn-northd-0"
Nov 23 16:07:29 crc kubenswrapper[5050]: I1123 16:07:29.928902 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x7rp8\" (UniqueName: \"kubernetes.io/projected/ab512517-dd97-4fc7-a923-4e2fc21a9af8-kube-api-access-x7rp8\") pod \"ovn-northd-0\" (UID: \"ab512517-dd97-4fc7-a923-4e2fc21a9af8\") " pod="openstack/ovn-northd-0"
Nov 23 16:07:29 crc kubenswrapper[5050]: I1123 16:07:29.928942 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ab512517-dd97-4fc7-a923-4e2fc21a9af8-scripts\") pod \"ovn-northd-0\" (UID: \"ab512517-dd97-4fc7-a923-4e2fc21a9af8\") " pod="openstack/ovn-northd-0"
Nov 23 16:07:29 crc kubenswrapper[5050]: I1123 16:07:29.929092 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ab512517-dd97-4fc7-a923-4e2fc21a9af8-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"ab512517-dd97-4fc7-a923-4e2fc21a9af8\") " pod="openstack/ovn-northd-0"
Nov 23 16:07:29 crc kubenswrapper[5050]: I1123 16:07:29.929741 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/ab512517-dd97-4fc7-a923-4e2fc21a9af8-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"ab512517-dd97-4fc7-a923-4e2fc21a9af8\") " pod="openstack/ovn-northd-0"
Nov 23 16:07:29 crc kubenswrapper[5050]: I1123 16:07:29.930313 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ab512517-dd97-4fc7-a923-4e2fc21a9af8-scripts\") pod \"ovn-northd-0\" (UID: \"ab512517-dd97-4fc7-a923-4e2fc21a9af8\") " pod="openstack/ovn-northd-0"
pod="openstack/ovn-northd-0" Nov 23 16:07:29 crc kubenswrapper[5050]: I1123 16:07:29.931060 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ab512517-dd97-4fc7-a923-4e2fc21a9af8-config\") pod \"ovn-northd-0\" (UID: \"ab512517-dd97-4fc7-a923-4e2fc21a9af8\") " pod="openstack/ovn-northd-0" Nov 23 16:07:29 crc kubenswrapper[5050]: I1123 16:07:29.937847 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c7e0522c-5cea-4c2a-b6eb-9e59a873c18f-kube-api-access-lcp2p" (OuterVolumeSpecName: "kube-api-access-lcp2p") pod "c7e0522c-5cea-4c2a-b6eb-9e59a873c18f" (UID: "c7e0522c-5cea-4c2a-b6eb-9e59a873c18f"). InnerVolumeSpecName "kube-api-access-lcp2p". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 16:07:29 crc kubenswrapper[5050]: I1123 16:07:29.950668 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x7rp8\" (UniqueName: \"kubernetes.io/projected/ab512517-dd97-4fc7-a923-4e2fc21a9af8-kube-api-access-x7rp8\") pod \"ovn-northd-0\" (UID: \"ab512517-dd97-4fc7-a923-4e2fc21a9af8\") " pod="openstack/ovn-northd-0" Nov 23 16:07:29 crc kubenswrapper[5050]: I1123 16:07:29.950988 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ab512517-dd97-4fc7-a923-4e2fc21a9af8-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"ab512517-dd97-4fc7-a923-4e2fc21a9af8\") " pod="openstack/ovn-northd-0" Nov 23 16:07:29 crc kubenswrapper[5050]: I1123 16:07:29.973796 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c7e0522c-5cea-4c2a-b6eb-9e59a873c18f-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "c7e0522c-5cea-4c2a-b6eb-9e59a873c18f" (UID: "c7e0522c-5cea-4c2a-b6eb-9e59a873c18f"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 16:07:29 crc kubenswrapper[5050]: I1123 16:07:29.985041 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c7e0522c-5cea-4c2a-b6eb-9e59a873c18f-config" (OuterVolumeSpecName: "config") pod "c7e0522c-5cea-4c2a-b6eb-9e59a873c18f" (UID: "c7e0522c-5cea-4c2a-b6eb-9e59a873c18f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 16:07:30 crc kubenswrapper[5050]: I1123 16:07:30.031145 5050 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c7e0522c-5cea-4c2a-b6eb-9e59a873c18f-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 23 16:07:30 crc kubenswrapper[5050]: I1123 16:07:30.031186 5050 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c7e0522c-5cea-4c2a-b6eb-9e59a873c18f-config\") on node \"crc\" DevicePath \"\"" Nov 23 16:07:30 crc kubenswrapper[5050]: I1123 16:07:30.031199 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lcp2p\" (UniqueName: \"kubernetes.io/projected/c7e0522c-5cea-4c2a-b6eb-9e59a873c18f-kube-api-access-lcp2p\") on node \"crc\" DevicePath \"\"" Nov 23 16:07:30 crc kubenswrapper[5050]: I1123 16:07:30.034259 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-northd-0" Nov 23 16:07:30 crc kubenswrapper[5050]: I1123 16:07:30.286105 5050 generic.go:334] "Generic (PLEG): container finished" podID="c7e0522c-5cea-4c2a-b6eb-9e59a873c18f" containerID="81455a62b04c7d108b8480a60c8c52d6f28c14ce24d91e92f93e2b25091a52fc" exitCode=0 Nov 23 16:07:30 crc kubenswrapper[5050]: I1123 16:07:30.286191 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5b7946d7b9-7gb49" Nov 23 16:07:30 crc kubenswrapper[5050]: I1123 16:07:30.286217 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b7946d7b9-7gb49" event={"ID":"c7e0522c-5cea-4c2a-b6eb-9e59a873c18f","Type":"ContainerDied","Data":"81455a62b04c7d108b8480a60c8c52d6f28c14ce24d91e92f93e2b25091a52fc"} Nov 23 16:07:30 crc kubenswrapper[5050]: I1123 16:07:30.286890 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b7946d7b9-7gb49" event={"ID":"c7e0522c-5cea-4c2a-b6eb-9e59a873c18f","Type":"ContainerDied","Data":"63b3723bfe87d99bd72cc676804c067369f8f4d5db2757d88b5a02978f72e803"} Nov 23 16:07:30 crc kubenswrapper[5050]: I1123 16:07:30.286939 5050 scope.go:117] "RemoveContainer" containerID="81455a62b04c7d108b8480a60c8c52d6f28c14ce24d91e92f93e2b25091a52fc" Nov 23 16:07:30 crc kubenswrapper[5050]: I1123 16:07:30.337652 5050 scope.go:117] "RemoveContainer" containerID="68b6acdc53f59c581882f49b239119e0ba7ea72f0083b0ad3cc9808876190547" Nov 23 16:07:30 crc kubenswrapper[5050]: I1123 16:07:30.338085 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5b7946d7b9-7gb49"] Nov 23 16:07:30 crc kubenswrapper[5050]: I1123 16:07:30.344304 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5b7946d7b9-7gb49"] Nov 23 16:07:30 crc kubenswrapper[5050]: I1123 16:07:30.367983 5050 scope.go:117] "RemoveContainer" containerID="81455a62b04c7d108b8480a60c8c52d6f28c14ce24d91e92f93e2b25091a52fc" Nov 23 16:07:30 crc kubenswrapper[5050]: E1123 16:07:30.368847 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"81455a62b04c7d108b8480a60c8c52d6f28c14ce24d91e92f93e2b25091a52fc\": container with ID starting with 81455a62b04c7d108b8480a60c8c52d6f28c14ce24d91e92f93e2b25091a52fc not found: ID does not exist" containerID="81455a62b04c7d108b8480a60c8c52d6f28c14ce24d91e92f93e2b25091a52fc" Nov 23 16:07:30 crc kubenswrapper[5050]: I1123 16:07:30.368910 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"81455a62b04c7d108b8480a60c8c52d6f28c14ce24d91e92f93e2b25091a52fc"} err="failed to get container status \"81455a62b04c7d108b8480a60c8c52d6f28c14ce24d91e92f93e2b25091a52fc\": rpc error: code = NotFound desc = could not find container \"81455a62b04c7d108b8480a60c8c52d6f28c14ce24d91e92f93e2b25091a52fc\": container with ID starting with 81455a62b04c7d108b8480a60c8c52d6f28c14ce24d91e92f93e2b25091a52fc not found: ID does not exist" Nov 23 16:07:30 crc kubenswrapper[5050]: I1123 16:07:30.368943 5050 scope.go:117] "RemoveContainer" containerID="68b6acdc53f59c581882f49b239119e0ba7ea72f0083b0ad3cc9808876190547" Nov 23 16:07:30 crc kubenswrapper[5050]: E1123 16:07:30.369513 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"68b6acdc53f59c581882f49b239119e0ba7ea72f0083b0ad3cc9808876190547\": container with ID starting with 
Nov 23 16:07:30 crc kubenswrapper[5050]: I1123 16:07:30.369558 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"68b6acdc53f59c581882f49b239119e0ba7ea72f0083b0ad3cc9808876190547"} err="failed to get container status \"68b6acdc53f59c581882f49b239119e0ba7ea72f0083b0ad3cc9808876190547\": rpc error: code = NotFound desc = could not find container \"68b6acdc53f59c581882f49b239119e0ba7ea72f0083b0ad3cc9808876190547\": container with ID starting with 68b6acdc53f59c581882f49b239119e0ba7ea72f0083b0ad3cc9808876190547 not found: ID does not exist"
Nov 23 16:07:30 crc kubenswrapper[5050]: I1123 16:07:30.503500 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"]
Nov 23 16:07:31 crc kubenswrapper[5050]: I1123 16:07:31.301245 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"ab512517-dd97-4fc7-a923-4e2fc21a9af8","Type":"ContainerStarted","Data":"18e7781ac355be7c7db4dd59a1d56c0dcbda7eb79a6223c3f94bd257eb446514"}
Nov 23 16:07:31 crc kubenswrapper[5050]: I1123 16:07:31.303673 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-northd-0"
Nov 23 16:07:31 crc kubenswrapper[5050]: I1123 16:07:31.303820 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"ab512517-dd97-4fc7-a923-4e2fc21a9af8","Type":"ContainerStarted","Data":"58834e2157d9a9ad1b60d2ba396bf3b68915d9541d9d99f659d4667fb6321edf"}
Nov 23 16:07:31 crc kubenswrapper[5050]: I1123 16:07:31.304091 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"ab512517-dd97-4fc7-a923-4e2fc21a9af8","Type":"ContainerStarted","Data":"af707bd34598b34cbb7c2d2f1392ee07fe6a031c048fc59221173c0c371d0f74"}
Nov 23 16:07:31 crc kubenswrapper[5050]: I1123 16:07:31.334992 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-northd-0" podStartSLOduration=2.33495875 podStartE2EDuration="2.33495875s" podCreationTimestamp="2025-11-23 16:07:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 16:07:31.333993863 +0000 UTC m=+5146.500990398" watchObservedRunningTime="2025-11-23 16:07:31.33495875 +0000 UTC m=+5146.501955275"
Nov 23 16:07:31 crc kubenswrapper[5050]: I1123 16:07:31.564656 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c7e0522c-5cea-4c2a-b6eb-9e59a873c18f" path="/var/lib/kubelet/pods/c7e0522c-5cea-4c2a-b6eb-9e59a873c18f/volumes"
Nov 23 16:07:33 crc kubenswrapper[5050]: I1123 16:07:33.550129 5050 scope.go:117] "RemoveContainer" containerID="3fc70d33f56eea45a48b3911893d3dc4f60ad925817000cb324b1423c665b64b"
Nov 23 16:07:33 crc kubenswrapper[5050]: E1123 16:07:33.551439 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f"
Nov 23 16:07:34 crc kubenswrapper[5050]: I1123 16:07:34.760724 5050 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-5b7946d7b9-7gb49" podUID="c7e0522c-5cea-4c2a-b6eb-9e59a873c18f" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.245:5353: i/o timeout"
Nov 23 16:07:35 crc kubenswrapper[5050]: I1123 16:07:35.754194 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-create-vgsft"]
Nov 23 16:07:35 crc kubenswrapper[5050]: E1123 16:07:35.755018 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c7e0522c-5cea-4c2a-b6eb-9e59a873c18f" containerName="dnsmasq-dns"
Nov 23 16:07:35 crc kubenswrapper[5050]: I1123 16:07:35.755037 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="c7e0522c-5cea-4c2a-b6eb-9e59a873c18f" containerName="dnsmasq-dns"
Nov 23 16:07:35 crc kubenswrapper[5050]: E1123 16:07:35.755067 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c7e0522c-5cea-4c2a-b6eb-9e59a873c18f" containerName="init"
Nov 23 16:07:35 crc kubenswrapper[5050]: I1123 16:07:35.755074 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="c7e0522c-5cea-4c2a-b6eb-9e59a873c18f" containerName="init"
Nov 23 16:07:35 crc kubenswrapper[5050]: I1123 16:07:35.755236 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="c7e0522c-5cea-4c2a-b6eb-9e59a873c18f" containerName="dnsmasq-dns"
Nov 23 16:07:35 crc kubenswrapper[5050]: I1123 16:07:35.755857 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-vgsft"
Nov 23 16:07:35 crc kubenswrapper[5050]: I1123 16:07:35.769534 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-vgsft"]
Nov 23 16:07:35 crc kubenswrapper[5050]: I1123 16:07:35.857582 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5b9xn\" (UniqueName: \"kubernetes.io/projected/8a88f6a6-0d39-4175-aa16-62d64b8ca2d7-kube-api-access-5b9xn\") pod \"keystone-db-create-vgsft\" (UID: \"8a88f6a6-0d39-4175-aa16-62d64b8ca2d7\") " pod="openstack/keystone-db-create-vgsft"
Nov 23 16:07:35 crc kubenswrapper[5050]: I1123 16:07:35.857733 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8a88f6a6-0d39-4175-aa16-62d64b8ca2d7-operator-scripts\") pod \"keystone-db-create-vgsft\" (UID: \"8a88f6a6-0d39-4175-aa16-62d64b8ca2d7\") " pod="openstack/keystone-db-create-vgsft"
Nov 23 16:07:35 crc kubenswrapper[5050]: I1123 16:07:35.870589 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-8bb4-account-create-hgrkr"]
Nov 23 16:07:35 crc kubenswrapper[5050]: I1123 16:07:35.876831 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-8bb4-account-create-hgrkr"
Nov 23 16:07:35 crc kubenswrapper[5050]: I1123 16:07:35.885105 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-db-secret"
Nov 23 16:07:35 crc kubenswrapper[5050]: I1123 16:07:35.895883 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-8bb4-account-create-hgrkr"]
Nov 23 16:07:35 crc kubenswrapper[5050]: I1123 16:07:35.961632 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8a88f6a6-0d39-4175-aa16-62d64b8ca2d7-operator-scripts\") pod \"keystone-db-create-vgsft\" (UID: \"8a88f6a6-0d39-4175-aa16-62d64b8ca2d7\") " pod="openstack/keystone-db-create-vgsft"
Nov 23 16:07:35 crc kubenswrapper[5050]: I1123 16:07:35.961707 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f261955d-669f-47e5-9a2d-9c7971293dc9-operator-scripts\") pod \"keystone-8bb4-account-create-hgrkr\" (UID: \"f261955d-669f-47e5-9a2d-9c7971293dc9\") " pod="openstack/keystone-8bb4-account-create-hgrkr"
Nov 23 16:07:35 crc kubenswrapper[5050]: I1123 16:07:35.961749 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5b9xn\" (UniqueName: \"kubernetes.io/projected/8a88f6a6-0d39-4175-aa16-62d64b8ca2d7-kube-api-access-5b9xn\") pod \"keystone-db-create-vgsft\" (UID: \"8a88f6a6-0d39-4175-aa16-62d64b8ca2d7\") " pod="openstack/keystone-db-create-vgsft"
Nov 23 16:07:35 crc kubenswrapper[5050]: I1123 16:07:35.961980 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n72p8\" (UniqueName: \"kubernetes.io/projected/f261955d-669f-47e5-9a2d-9c7971293dc9-kube-api-access-n72p8\") pod \"keystone-8bb4-account-create-hgrkr\" (UID: \"f261955d-669f-47e5-9a2d-9c7971293dc9\") " pod="openstack/keystone-8bb4-account-create-hgrkr"
Nov 23 16:07:35 crc kubenswrapper[5050]: I1123 16:07:35.962792 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8a88f6a6-0d39-4175-aa16-62d64b8ca2d7-operator-scripts\") pod \"keystone-db-create-vgsft\" (UID: \"8a88f6a6-0d39-4175-aa16-62d64b8ca2d7\") " pod="openstack/keystone-db-create-vgsft"
Nov 23 16:07:36 crc kubenswrapper[5050]: I1123 16:07:36.005682 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5b9xn\" (UniqueName: \"kubernetes.io/projected/8a88f6a6-0d39-4175-aa16-62d64b8ca2d7-kube-api-access-5b9xn\") pod \"keystone-db-create-vgsft\" (UID: \"8a88f6a6-0d39-4175-aa16-62d64b8ca2d7\") " pod="openstack/keystone-db-create-vgsft"
Nov 23 16:07:36 crc kubenswrapper[5050]: I1123 16:07:36.063921 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f261955d-669f-47e5-9a2d-9c7971293dc9-operator-scripts\") pod \"keystone-8bb4-account-create-hgrkr\" (UID: \"f261955d-669f-47e5-9a2d-9c7971293dc9\") " pod="openstack/keystone-8bb4-account-create-hgrkr"
Nov 23 16:07:36 crc kubenswrapper[5050]: I1123 16:07:36.064564 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n72p8\" (UniqueName: \"kubernetes.io/projected/f261955d-669f-47e5-9a2d-9c7971293dc9-kube-api-access-n72p8\") pod \"keystone-8bb4-account-create-hgrkr\" (UID: \"f261955d-669f-47e5-9a2d-9c7971293dc9\") " pod="openstack/keystone-8bb4-account-create-hgrkr"
\"f261955d-669f-47e5-9a2d-9c7971293dc9\") " pod="openstack/keystone-8bb4-account-create-hgrkr" Nov 23 16:07:36 crc kubenswrapper[5050]: I1123 16:07:36.064945 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f261955d-669f-47e5-9a2d-9c7971293dc9-operator-scripts\") pod \"keystone-8bb4-account-create-hgrkr\" (UID: \"f261955d-669f-47e5-9a2d-9c7971293dc9\") " pod="openstack/keystone-8bb4-account-create-hgrkr" Nov 23 16:07:36 crc kubenswrapper[5050]: I1123 16:07:36.083824 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n72p8\" (UniqueName: \"kubernetes.io/projected/f261955d-669f-47e5-9a2d-9c7971293dc9-kube-api-access-n72p8\") pod \"keystone-8bb4-account-create-hgrkr\" (UID: \"f261955d-669f-47e5-9a2d-9c7971293dc9\") " pod="openstack/keystone-8bb4-account-create-hgrkr" Nov 23 16:07:36 crc kubenswrapper[5050]: I1123 16:07:36.096513 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-vgsft" Nov 23 16:07:36 crc kubenswrapper[5050]: I1123 16:07:36.200194 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-8bb4-account-create-hgrkr" Nov 23 16:07:36 crc kubenswrapper[5050]: I1123 16:07:36.581490 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-vgsft"] Nov 23 16:07:36 crc kubenswrapper[5050]: I1123 16:07:36.703369 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-8bb4-account-create-hgrkr"] Nov 23 16:07:37 crc kubenswrapper[5050]: W1123 16:07:37.071547 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8a88f6a6_0d39_4175_aa16_62d64b8ca2d7.slice/crio-210ee66e9ea10cfae6de9ca885e333bd622d5ffecc5e5dd8710bc14be0dc0117 WatchSource:0}: Error finding container 210ee66e9ea10cfae6de9ca885e333bd622d5ffecc5e5dd8710bc14be0dc0117: Status 404 returned error can't find the container with id 210ee66e9ea10cfae6de9ca885e333bd622d5ffecc5e5dd8710bc14be0dc0117 Nov 23 16:07:37 crc kubenswrapper[5050]: W1123 16:07:37.074851 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf261955d_669f_47e5_9a2d_9c7971293dc9.slice/crio-67c204dfc5ef1e506a153759b446b332abaa70a6e86148e75ca925d6a9c86116 WatchSource:0}: Error finding container 67c204dfc5ef1e506a153759b446b332abaa70a6e86148e75ca925d6a9c86116: Status 404 returned error can't find the container with id 67c204dfc5ef1e506a153759b446b332abaa70a6e86148e75ca925d6a9c86116 Nov 23 16:07:37 crc kubenswrapper[5050]: I1123 16:07:37.390702 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-vgsft" event={"ID":"8a88f6a6-0d39-4175-aa16-62d64b8ca2d7","Type":"ContainerStarted","Data":"210ee66e9ea10cfae6de9ca885e333bd622d5ffecc5e5dd8710bc14be0dc0117"} Nov 23 16:07:37 crc kubenswrapper[5050]: I1123 16:07:37.393437 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-8bb4-account-create-hgrkr" event={"ID":"f261955d-669f-47e5-9a2d-9c7971293dc9","Type":"ContainerStarted","Data":"ca200ab6a5fe18c0f0660b43a31ccea3c0d72af2ab3902ebee55b9215300203b"} Nov 23 16:07:37 crc kubenswrapper[5050]: I1123 16:07:37.393528 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-8bb4-account-create-hgrkr" 
event={"ID":"f261955d-669f-47e5-9a2d-9c7971293dc9","Type":"ContainerStarted","Data":"67c204dfc5ef1e506a153759b446b332abaa70a6e86148e75ca925d6a9c86116"} Nov 23 16:07:37 crc kubenswrapper[5050]: I1123 16:07:37.424990 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-8bb4-account-create-hgrkr" podStartSLOduration=2.424953832 podStartE2EDuration="2.424953832s" podCreationTimestamp="2025-11-23 16:07:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 16:07:37.418627133 +0000 UTC m=+5152.585623668" watchObservedRunningTime="2025-11-23 16:07:37.424953832 +0000 UTC m=+5152.591950347" Nov 23 16:07:37 crc kubenswrapper[5050]: E1123 16:07:37.743070 5050 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf261955d_669f_47e5_9a2d_9c7971293dc9.slice/crio-conmon-ca200ab6a5fe18c0f0660b43a31ccea3c0d72af2ab3902ebee55b9215300203b.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf261955d_669f_47e5_9a2d_9c7971293dc9.slice/crio-ca200ab6a5fe18c0f0660b43a31ccea3c0d72af2ab3902ebee55b9215300203b.scope\": RecentStats: unable to find data in memory cache]" Nov 23 16:07:38 crc kubenswrapper[5050]: I1123 16:07:38.411292 5050 generic.go:334] "Generic (PLEG): container finished" podID="f261955d-669f-47e5-9a2d-9c7971293dc9" containerID="ca200ab6a5fe18c0f0660b43a31ccea3c0d72af2ab3902ebee55b9215300203b" exitCode=0 Nov 23 16:07:38 crc kubenswrapper[5050]: I1123 16:07:38.411937 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-8bb4-account-create-hgrkr" event={"ID":"f261955d-669f-47e5-9a2d-9c7971293dc9","Type":"ContainerDied","Data":"ca200ab6a5fe18c0f0660b43a31ccea3c0d72af2ab3902ebee55b9215300203b"} Nov 23 16:07:38 crc kubenswrapper[5050]: I1123 16:07:38.418813 5050 generic.go:334] "Generic (PLEG): container finished" podID="8a88f6a6-0d39-4175-aa16-62d64b8ca2d7" containerID="82813006167f2355ac50e44e9399807f591cc7e00b9e6cb62a096324a162d1a4" exitCode=0 Nov 23 16:07:38 crc kubenswrapper[5050]: I1123 16:07:38.418902 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-vgsft" event={"ID":"8a88f6a6-0d39-4175-aa16-62d64b8ca2d7","Type":"ContainerDied","Data":"82813006167f2355ac50e44e9399807f591cc7e00b9e6cb62a096324a162d1a4"} Nov 23 16:07:40 crc kubenswrapper[5050]: I1123 16:07:40.003075 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-8bb4-account-create-hgrkr" Nov 23 16:07:40 crc kubenswrapper[5050]: I1123 16:07:40.014676 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-create-vgsft" Nov 23 16:07:40 crc kubenswrapper[5050]: I1123 16:07:40.058916 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5b9xn\" (UniqueName: \"kubernetes.io/projected/8a88f6a6-0d39-4175-aa16-62d64b8ca2d7-kube-api-access-5b9xn\") pod \"8a88f6a6-0d39-4175-aa16-62d64b8ca2d7\" (UID: \"8a88f6a6-0d39-4175-aa16-62d64b8ca2d7\") " Nov 23 16:07:40 crc kubenswrapper[5050]: I1123 16:07:40.059383 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f261955d-669f-47e5-9a2d-9c7971293dc9-operator-scripts\") pod \"f261955d-669f-47e5-9a2d-9c7971293dc9\" (UID: \"f261955d-669f-47e5-9a2d-9c7971293dc9\") " Nov 23 16:07:40 crc kubenswrapper[5050]: I1123 16:07:40.059687 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n72p8\" (UniqueName: \"kubernetes.io/projected/f261955d-669f-47e5-9a2d-9c7971293dc9-kube-api-access-n72p8\") pod \"f261955d-669f-47e5-9a2d-9c7971293dc9\" (UID: \"f261955d-669f-47e5-9a2d-9c7971293dc9\") " Nov 23 16:07:40 crc kubenswrapper[5050]: I1123 16:07:40.059783 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8a88f6a6-0d39-4175-aa16-62d64b8ca2d7-operator-scripts\") pod \"8a88f6a6-0d39-4175-aa16-62d64b8ca2d7\" (UID: \"8a88f6a6-0d39-4175-aa16-62d64b8ca2d7\") " Nov 23 16:07:40 crc kubenswrapper[5050]: I1123 16:07:40.060088 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f261955d-669f-47e5-9a2d-9c7971293dc9-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "f261955d-669f-47e5-9a2d-9c7971293dc9" (UID: "f261955d-669f-47e5-9a2d-9c7971293dc9"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 16:07:40 crc kubenswrapper[5050]: I1123 16:07:40.060327 5050 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f261955d-669f-47e5-9a2d-9c7971293dc9-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 23 16:07:40 crc kubenswrapper[5050]: I1123 16:07:40.060792 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8a88f6a6-0d39-4175-aa16-62d64b8ca2d7-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "8a88f6a6-0d39-4175-aa16-62d64b8ca2d7" (UID: "8a88f6a6-0d39-4175-aa16-62d64b8ca2d7"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 16:07:40 crc kubenswrapper[5050]: I1123 16:07:40.069287 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f261955d-669f-47e5-9a2d-9c7971293dc9-kube-api-access-n72p8" (OuterVolumeSpecName: "kube-api-access-n72p8") pod "f261955d-669f-47e5-9a2d-9c7971293dc9" (UID: "f261955d-669f-47e5-9a2d-9c7971293dc9"). InnerVolumeSpecName "kube-api-access-n72p8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 16:07:40 crc kubenswrapper[5050]: I1123 16:07:40.069494 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8a88f6a6-0d39-4175-aa16-62d64b8ca2d7-kube-api-access-5b9xn" (OuterVolumeSpecName: "kube-api-access-5b9xn") pod "8a88f6a6-0d39-4175-aa16-62d64b8ca2d7" (UID: "8a88f6a6-0d39-4175-aa16-62d64b8ca2d7"). 
InnerVolumeSpecName "kube-api-access-5b9xn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 16:07:40 crc kubenswrapper[5050]: I1123 16:07:40.118060 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-northd-0" Nov 23 16:07:40 crc kubenswrapper[5050]: I1123 16:07:40.163282 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n72p8\" (UniqueName: \"kubernetes.io/projected/f261955d-669f-47e5-9a2d-9c7971293dc9-kube-api-access-n72p8\") on node \"crc\" DevicePath \"\"" Nov 23 16:07:40 crc kubenswrapper[5050]: I1123 16:07:40.164210 5050 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8a88f6a6-0d39-4175-aa16-62d64b8ca2d7-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 23 16:07:40 crc kubenswrapper[5050]: I1123 16:07:40.164338 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5b9xn\" (UniqueName: \"kubernetes.io/projected/8a88f6a6-0d39-4175-aa16-62d64b8ca2d7-kube-api-access-5b9xn\") on node \"crc\" DevicePath \"\"" Nov 23 16:07:40 crc kubenswrapper[5050]: I1123 16:07:40.443129 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-vgsft" event={"ID":"8a88f6a6-0d39-4175-aa16-62d64b8ca2d7","Type":"ContainerDied","Data":"210ee66e9ea10cfae6de9ca885e333bd622d5ffecc5e5dd8710bc14be0dc0117"} Nov 23 16:07:40 crc kubenswrapper[5050]: I1123 16:07:40.443180 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="210ee66e9ea10cfae6de9ca885e333bd622d5ffecc5e5dd8710bc14be0dc0117" Nov 23 16:07:40 crc kubenswrapper[5050]: I1123 16:07:40.443213 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-vgsft" Nov 23 16:07:40 crc kubenswrapper[5050]: I1123 16:07:40.444918 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-8bb4-account-create-hgrkr" event={"ID":"f261955d-669f-47e5-9a2d-9c7971293dc9","Type":"ContainerDied","Data":"67c204dfc5ef1e506a153759b446b332abaa70a6e86148e75ca925d6a9c86116"} Nov 23 16:07:40 crc kubenswrapper[5050]: I1123 16:07:40.445107 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="67c204dfc5ef1e506a153759b446b332abaa70a6e86148e75ca925d6a9c86116" Nov 23 16:07:40 crc kubenswrapper[5050]: I1123 16:07:40.444997 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-8bb4-account-create-hgrkr" Nov 23 16:07:46 crc kubenswrapper[5050]: I1123 16:07:46.411185 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-sync-wsz5j"] Nov 23 16:07:46 crc kubenswrapper[5050]: E1123 16:07:46.412435 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f261955d-669f-47e5-9a2d-9c7971293dc9" containerName="mariadb-account-create" Nov 23 16:07:46 crc kubenswrapper[5050]: I1123 16:07:46.412478 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="f261955d-669f-47e5-9a2d-9c7971293dc9" containerName="mariadb-account-create" Nov 23 16:07:46 crc kubenswrapper[5050]: E1123 16:07:46.412516 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8a88f6a6-0d39-4175-aa16-62d64b8ca2d7" containerName="mariadb-database-create" Nov 23 16:07:46 crc kubenswrapper[5050]: I1123 16:07:46.412526 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="8a88f6a6-0d39-4175-aa16-62d64b8ca2d7" containerName="mariadb-database-create" Nov 23 16:07:46 crc kubenswrapper[5050]: I1123 16:07:46.412724 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="8a88f6a6-0d39-4175-aa16-62d64b8ca2d7" containerName="mariadb-database-create" Nov 23 16:07:46 crc kubenswrapper[5050]: I1123 16:07:46.412744 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="f261955d-669f-47e5-9a2d-9c7971293dc9" containerName="mariadb-account-create" Nov 23 16:07:46 crc kubenswrapper[5050]: I1123 16:07:46.413662 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-wsz5j" Nov 23 16:07:46 crc kubenswrapper[5050]: I1123 16:07:46.417171 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 23 16:07:46 crc kubenswrapper[5050]: I1123 16:07:46.417493 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-dtsf9" Nov 23 16:07:46 crc kubenswrapper[5050]: I1123 16:07:46.417572 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 23 16:07:46 crc kubenswrapper[5050]: I1123 16:07:46.417636 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 23 16:07:46 crc kubenswrapper[5050]: I1123 16:07:46.420427 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e6624adb-caff-44b3-89dc-0cdd87aa56e7-combined-ca-bundle\") pod \"keystone-db-sync-wsz5j\" (UID: \"e6624adb-caff-44b3-89dc-0cdd87aa56e7\") " pod="openstack/keystone-db-sync-wsz5j" Nov 23 16:07:46 crc kubenswrapper[5050]: I1123 16:07:46.420737 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cmnf2\" (UniqueName: \"kubernetes.io/projected/e6624adb-caff-44b3-89dc-0cdd87aa56e7-kube-api-access-cmnf2\") pod \"keystone-db-sync-wsz5j\" (UID: \"e6624adb-caff-44b3-89dc-0cdd87aa56e7\") " pod="openstack/keystone-db-sync-wsz5j" Nov 23 16:07:46 crc kubenswrapper[5050]: I1123 16:07:46.420904 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e6624adb-caff-44b3-89dc-0cdd87aa56e7-config-data\") pod \"keystone-db-sync-wsz5j\" (UID: \"e6624adb-caff-44b3-89dc-0cdd87aa56e7\") " pod="openstack/keystone-db-sync-wsz5j" Nov 23 16:07:46 crc kubenswrapper[5050]: 
I1123 16:07:46.431524 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-wsz5j"] Nov 23 16:07:46 crc kubenswrapper[5050]: I1123 16:07:46.522435 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cmnf2\" (UniqueName: \"kubernetes.io/projected/e6624adb-caff-44b3-89dc-0cdd87aa56e7-kube-api-access-cmnf2\") pod \"keystone-db-sync-wsz5j\" (UID: \"e6624adb-caff-44b3-89dc-0cdd87aa56e7\") " pod="openstack/keystone-db-sync-wsz5j" Nov 23 16:07:46 crc kubenswrapper[5050]: I1123 16:07:46.523773 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e6624adb-caff-44b3-89dc-0cdd87aa56e7-config-data\") pod \"keystone-db-sync-wsz5j\" (UID: \"e6624adb-caff-44b3-89dc-0cdd87aa56e7\") " pod="openstack/keystone-db-sync-wsz5j" Nov 23 16:07:46 crc kubenswrapper[5050]: I1123 16:07:46.523957 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e6624adb-caff-44b3-89dc-0cdd87aa56e7-combined-ca-bundle\") pod \"keystone-db-sync-wsz5j\" (UID: \"e6624adb-caff-44b3-89dc-0cdd87aa56e7\") " pod="openstack/keystone-db-sync-wsz5j" Nov 23 16:07:46 crc kubenswrapper[5050]: I1123 16:07:46.532502 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e6624adb-caff-44b3-89dc-0cdd87aa56e7-combined-ca-bundle\") pod \"keystone-db-sync-wsz5j\" (UID: \"e6624adb-caff-44b3-89dc-0cdd87aa56e7\") " pod="openstack/keystone-db-sync-wsz5j" Nov 23 16:07:46 crc kubenswrapper[5050]: I1123 16:07:46.533211 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e6624adb-caff-44b3-89dc-0cdd87aa56e7-config-data\") pod \"keystone-db-sync-wsz5j\" (UID: \"e6624adb-caff-44b3-89dc-0cdd87aa56e7\") " pod="openstack/keystone-db-sync-wsz5j" Nov 23 16:07:46 crc kubenswrapper[5050]: I1123 16:07:46.544558 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cmnf2\" (UniqueName: \"kubernetes.io/projected/e6624adb-caff-44b3-89dc-0cdd87aa56e7-kube-api-access-cmnf2\") pod \"keystone-db-sync-wsz5j\" (UID: \"e6624adb-caff-44b3-89dc-0cdd87aa56e7\") " pod="openstack/keystone-db-sync-wsz5j" Nov 23 16:07:46 crc kubenswrapper[5050]: I1123 16:07:46.549128 5050 scope.go:117] "RemoveContainer" containerID="3fc70d33f56eea45a48b3911893d3dc4f60ad925817000cb324b1423c665b64b" Nov 23 16:07:46 crc kubenswrapper[5050]: E1123 16:07:46.549587 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 16:07:46 crc kubenswrapper[5050]: I1123 16:07:46.747152 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-wsz5j" Nov 23 16:07:47 crc kubenswrapper[5050]: I1123 16:07:47.246274 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-wsz5j"] Nov 23 16:07:47 crc kubenswrapper[5050]: I1123 16:07:47.533168 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-wsz5j" event={"ID":"e6624adb-caff-44b3-89dc-0cdd87aa56e7","Type":"ContainerStarted","Data":"58a7a4c46aa9f634166bd2e00d75e57043f0f439be060bf2f0359dae331c5488"} Nov 23 16:07:47 crc kubenswrapper[5050]: I1123 16:07:47.533234 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-wsz5j" event={"ID":"e6624adb-caff-44b3-89dc-0cdd87aa56e7","Type":"ContainerStarted","Data":"ba87ef1d9b12548a098dfab29d0bd87e19f9f80523320d839323e659b362c98e"} Nov 23 16:07:47 crc kubenswrapper[5050]: I1123 16:07:47.574403 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-sync-wsz5j" podStartSLOduration=1.57437869 podStartE2EDuration="1.57437869s" podCreationTimestamp="2025-11-23 16:07:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 16:07:47.567739233 +0000 UTC m=+5162.734735758" watchObservedRunningTime="2025-11-23 16:07:47.57437869 +0000 UTC m=+5162.741375175" Nov 23 16:07:49 crc kubenswrapper[5050]: I1123 16:07:49.583725 5050 generic.go:334] "Generic (PLEG): container finished" podID="e6624adb-caff-44b3-89dc-0cdd87aa56e7" containerID="58a7a4c46aa9f634166bd2e00d75e57043f0f439be060bf2f0359dae331c5488" exitCode=0 Nov 23 16:07:49 crc kubenswrapper[5050]: I1123 16:07:49.599157 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-wsz5j" event={"ID":"e6624adb-caff-44b3-89dc-0cdd87aa56e7","Type":"ContainerDied","Data":"58a7a4c46aa9f634166bd2e00d75e57043f0f439be060bf2f0359dae331c5488"} Nov 23 16:07:51 crc kubenswrapper[5050]: I1123 16:07:51.152165 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-wsz5j" Nov 23 16:07:51 crc kubenswrapper[5050]: I1123 16:07:51.244471 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cmnf2\" (UniqueName: \"kubernetes.io/projected/e6624adb-caff-44b3-89dc-0cdd87aa56e7-kube-api-access-cmnf2\") pod \"e6624adb-caff-44b3-89dc-0cdd87aa56e7\" (UID: \"e6624adb-caff-44b3-89dc-0cdd87aa56e7\") " Nov 23 16:07:51 crc kubenswrapper[5050]: I1123 16:07:51.244575 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e6624adb-caff-44b3-89dc-0cdd87aa56e7-combined-ca-bundle\") pod \"e6624adb-caff-44b3-89dc-0cdd87aa56e7\" (UID: \"e6624adb-caff-44b3-89dc-0cdd87aa56e7\") " Nov 23 16:07:51 crc kubenswrapper[5050]: I1123 16:07:51.244681 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e6624adb-caff-44b3-89dc-0cdd87aa56e7-config-data\") pod \"e6624adb-caff-44b3-89dc-0cdd87aa56e7\" (UID: \"e6624adb-caff-44b3-89dc-0cdd87aa56e7\") " Nov 23 16:07:51 crc kubenswrapper[5050]: I1123 16:07:51.253304 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e6624adb-caff-44b3-89dc-0cdd87aa56e7-kube-api-access-cmnf2" (OuterVolumeSpecName: "kube-api-access-cmnf2") pod "e6624adb-caff-44b3-89dc-0cdd87aa56e7" (UID: "e6624adb-caff-44b3-89dc-0cdd87aa56e7"). InnerVolumeSpecName "kube-api-access-cmnf2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 16:07:51 crc kubenswrapper[5050]: I1123 16:07:51.269820 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e6624adb-caff-44b3-89dc-0cdd87aa56e7-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e6624adb-caff-44b3-89dc-0cdd87aa56e7" (UID: "e6624adb-caff-44b3-89dc-0cdd87aa56e7"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:07:51 crc kubenswrapper[5050]: I1123 16:07:51.308060 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e6624adb-caff-44b3-89dc-0cdd87aa56e7-config-data" (OuterVolumeSpecName: "config-data") pod "e6624adb-caff-44b3-89dc-0cdd87aa56e7" (UID: "e6624adb-caff-44b3-89dc-0cdd87aa56e7"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:07:51 crc kubenswrapper[5050]: I1123 16:07:51.347556 5050 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e6624adb-caff-44b3-89dc-0cdd87aa56e7-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 16:07:51 crc kubenswrapper[5050]: I1123 16:07:51.347604 5050 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e6624adb-caff-44b3-89dc-0cdd87aa56e7-config-data\") on node \"crc\" DevicePath \"\"" Nov 23 16:07:51 crc kubenswrapper[5050]: I1123 16:07:51.347618 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cmnf2\" (UniqueName: \"kubernetes.io/projected/e6624adb-caff-44b3-89dc-0cdd87aa56e7-kube-api-access-cmnf2\") on node \"crc\" DevicePath \"\"" Nov 23 16:07:51 crc kubenswrapper[5050]: I1123 16:07:51.612290 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-wsz5j" event={"ID":"e6624adb-caff-44b3-89dc-0cdd87aa56e7","Type":"ContainerDied","Data":"ba87ef1d9b12548a098dfab29d0bd87e19f9f80523320d839323e659b362c98e"} Nov 23 16:07:51 crc kubenswrapper[5050]: I1123 16:07:51.612359 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ba87ef1d9b12548a098dfab29d0bd87e19f9f80523320d839323e659b362c98e" Nov 23 16:07:51 crc kubenswrapper[5050]: I1123 16:07:51.612509 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-wsz5j" Nov 23 16:07:51 crc kubenswrapper[5050]: I1123 16:07:51.900614 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5db55b45-t7h8n"] Nov 23 16:07:51 crc kubenswrapper[5050]: E1123 16:07:51.901374 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e6624adb-caff-44b3-89dc-0cdd87aa56e7" containerName="keystone-db-sync" Nov 23 16:07:51 crc kubenswrapper[5050]: I1123 16:07:51.901393 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="e6624adb-caff-44b3-89dc-0cdd87aa56e7" containerName="keystone-db-sync" Nov 23 16:07:51 crc kubenswrapper[5050]: I1123 16:07:51.901591 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="e6624adb-caff-44b3-89dc-0cdd87aa56e7" containerName="keystone-db-sync" Nov 23 16:07:51 crc kubenswrapper[5050]: I1123 16:07:51.902631 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5db55b45-t7h8n" Nov 23 16:07:51 crc kubenswrapper[5050]: I1123 16:07:51.918403 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-2nr74"] Nov 23 16:07:51 crc kubenswrapper[5050]: I1123 16:07:51.919742 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-2nr74" Nov 23 16:07:51 crc kubenswrapper[5050]: I1123 16:07:51.925845 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 23 16:07:51 crc kubenswrapper[5050]: I1123 16:07:51.926012 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-dtsf9" Nov 23 16:07:51 crc kubenswrapper[5050]: I1123 16:07:51.926184 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 23 16:07:51 crc kubenswrapper[5050]: I1123 16:07:51.926305 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 23 16:07:51 crc kubenswrapper[5050]: I1123 16:07:51.926577 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Nov 23 16:07:51 crc kubenswrapper[5050]: I1123 16:07:51.944708 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5db55b45-t7h8n"] Nov 23 16:07:51 crc kubenswrapper[5050]: I1123 16:07:51.955688 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-2nr74"] Nov 23 16:07:52 crc kubenswrapper[5050]: I1123 16:07:52.063077 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/f169bde5-3dc7-445e-8145-0f70b58e28b9-credential-keys\") pod \"keystone-bootstrap-2nr74\" (UID: \"f169bde5-3dc7-445e-8145-0f70b58e28b9\") " pod="openstack/keystone-bootstrap-2nr74" Nov 23 16:07:52 crc kubenswrapper[5050]: I1123 16:07:52.063430 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/f169bde5-3dc7-445e-8145-0f70b58e28b9-fernet-keys\") pod \"keystone-bootstrap-2nr74\" (UID: \"f169bde5-3dc7-445e-8145-0f70b58e28b9\") " pod="openstack/keystone-bootstrap-2nr74" Nov 23 16:07:52 crc kubenswrapper[5050]: I1123 16:07:52.063502 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f169bde5-3dc7-445e-8145-0f70b58e28b9-scripts\") pod \"keystone-bootstrap-2nr74\" (UID: \"f169bde5-3dc7-445e-8145-0f70b58e28b9\") " pod="openstack/keystone-bootstrap-2nr74" Nov 23 16:07:52 crc kubenswrapper[5050]: I1123 16:07:52.063538 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z7sx4\" (UniqueName: \"kubernetes.io/projected/84ccdf47-4526-4bbe-b9a6-ae44490bd586-kube-api-access-z7sx4\") pod \"dnsmasq-dns-5db55b45-t7h8n\" (UID: \"84ccdf47-4526-4bbe-b9a6-ae44490bd586\") " pod="openstack/dnsmasq-dns-5db55b45-t7h8n" Nov 23 16:07:52 crc kubenswrapper[5050]: I1123 16:07:52.063637 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f169bde5-3dc7-445e-8145-0f70b58e28b9-combined-ca-bundle\") pod \"keystone-bootstrap-2nr74\" (UID: \"f169bde5-3dc7-445e-8145-0f70b58e28b9\") " pod="openstack/keystone-bootstrap-2nr74" Nov 23 16:07:52 crc kubenswrapper[5050]: I1123 16:07:52.063701 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/84ccdf47-4526-4bbe-b9a6-ae44490bd586-dns-svc\") pod \"dnsmasq-dns-5db55b45-t7h8n\" (UID: \"84ccdf47-4526-4bbe-b9a6-ae44490bd586\") " 
pod="openstack/dnsmasq-dns-5db55b45-t7h8n" Nov 23 16:07:52 crc kubenswrapper[5050]: I1123 16:07:52.063725 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f169bde5-3dc7-445e-8145-0f70b58e28b9-config-data\") pod \"keystone-bootstrap-2nr74\" (UID: \"f169bde5-3dc7-445e-8145-0f70b58e28b9\") " pod="openstack/keystone-bootstrap-2nr74" Nov 23 16:07:52 crc kubenswrapper[5050]: I1123 16:07:52.063754 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/84ccdf47-4526-4bbe-b9a6-ae44490bd586-ovsdbserver-nb\") pod \"dnsmasq-dns-5db55b45-t7h8n\" (UID: \"84ccdf47-4526-4bbe-b9a6-ae44490bd586\") " pod="openstack/dnsmasq-dns-5db55b45-t7h8n" Nov 23 16:07:52 crc kubenswrapper[5050]: I1123 16:07:52.063773 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-llgkm\" (UniqueName: \"kubernetes.io/projected/f169bde5-3dc7-445e-8145-0f70b58e28b9-kube-api-access-llgkm\") pod \"keystone-bootstrap-2nr74\" (UID: \"f169bde5-3dc7-445e-8145-0f70b58e28b9\") " pod="openstack/keystone-bootstrap-2nr74" Nov 23 16:07:52 crc kubenswrapper[5050]: I1123 16:07:52.063796 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/84ccdf47-4526-4bbe-b9a6-ae44490bd586-ovsdbserver-sb\") pod \"dnsmasq-dns-5db55b45-t7h8n\" (UID: \"84ccdf47-4526-4bbe-b9a6-ae44490bd586\") " pod="openstack/dnsmasq-dns-5db55b45-t7h8n" Nov 23 16:07:52 crc kubenswrapper[5050]: I1123 16:07:52.063848 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/84ccdf47-4526-4bbe-b9a6-ae44490bd586-config\") pod \"dnsmasq-dns-5db55b45-t7h8n\" (UID: \"84ccdf47-4526-4bbe-b9a6-ae44490bd586\") " pod="openstack/dnsmasq-dns-5db55b45-t7h8n" Nov 23 16:07:52 crc kubenswrapper[5050]: I1123 16:07:52.165588 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/84ccdf47-4526-4bbe-b9a6-ae44490bd586-config\") pod \"dnsmasq-dns-5db55b45-t7h8n\" (UID: \"84ccdf47-4526-4bbe-b9a6-ae44490bd586\") " pod="openstack/dnsmasq-dns-5db55b45-t7h8n" Nov 23 16:07:52 crc kubenswrapper[5050]: I1123 16:07:52.165701 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/f169bde5-3dc7-445e-8145-0f70b58e28b9-credential-keys\") pod \"keystone-bootstrap-2nr74\" (UID: \"f169bde5-3dc7-445e-8145-0f70b58e28b9\") " pod="openstack/keystone-bootstrap-2nr74" Nov 23 16:07:52 crc kubenswrapper[5050]: I1123 16:07:52.165742 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/f169bde5-3dc7-445e-8145-0f70b58e28b9-fernet-keys\") pod \"keystone-bootstrap-2nr74\" (UID: \"f169bde5-3dc7-445e-8145-0f70b58e28b9\") " pod="openstack/keystone-bootstrap-2nr74" Nov 23 16:07:52 crc kubenswrapper[5050]: I1123 16:07:52.165778 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f169bde5-3dc7-445e-8145-0f70b58e28b9-scripts\") pod \"keystone-bootstrap-2nr74\" (UID: \"f169bde5-3dc7-445e-8145-0f70b58e28b9\") " pod="openstack/keystone-bootstrap-2nr74" Nov 23 16:07:52 crc 
kubenswrapper[5050]: I1123 16:07:52.165816 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z7sx4\" (UniqueName: \"kubernetes.io/projected/84ccdf47-4526-4bbe-b9a6-ae44490bd586-kube-api-access-z7sx4\") pod \"dnsmasq-dns-5db55b45-t7h8n\" (UID: \"84ccdf47-4526-4bbe-b9a6-ae44490bd586\") " pod="openstack/dnsmasq-dns-5db55b45-t7h8n" Nov 23 16:07:52 crc kubenswrapper[5050]: I1123 16:07:52.165854 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f169bde5-3dc7-445e-8145-0f70b58e28b9-combined-ca-bundle\") pod \"keystone-bootstrap-2nr74\" (UID: \"f169bde5-3dc7-445e-8145-0f70b58e28b9\") " pod="openstack/keystone-bootstrap-2nr74" Nov 23 16:07:52 crc kubenswrapper[5050]: I1123 16:07:52.165886 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/84ccdf47-4526-4bbe-b9a6-ae44490bd586-dns-svc\") pod \"dnsmasq-dns-5db55b45-t7h8n\" (UID: \"84ccdf47-4526-4bbe-b9a6-ae44490bd586\") " pod="openstack/dnsmasq-dns-5db55b45-t7h8n" Nov 23 16:07:52 crc kubenswrapper[5050]: I1123 16:07:52.165915 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f169bde5-3dc7-445e-8145-0f70b58e28b9-config-data\") pod \"keystone-bootstrap-2nr74\" (UID: \"f169bde5-3dc7-445e-8145-0f70b58e28b9\") " pod="openstack/keystone-bootstrap-2nr74" Nov 23 16:07:52 crc kubenswrapper[5050]: I1123 16:07:52.165958 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/84ccdf47-4526-4bbe-b9a6-ae44490bd586-ovsdbserver-nb\") pod \"dnsmasq-dns-5db55b45-t7h8n\" (UID: \"84ccdf47-4526-4bbe-b9a6-ae44490bd586\") " pod="openstack/dnsmasq-dns-5db55b45-t7h8n" Nov 23 16:07:52 crc kubenswrapper[5050]: I1123 16:07:52.165987 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-llgkm\" (UniqueName: \"kubernetes.io/projected/f169bde5-3dc7-445e-8145-0f70b58e28b9-kube-api-access-llgkm\") pod \"keystone-bootstrap-2nr74\" (UID: \"f169bde5-3dc7-445e-8145-0f70b58e28b9\") " pod="openstack/keystone-bootstrap-2nr74" Nov 23 16:07:52 crc kubenswrapper[5050]: I1123 16:07:52.166023 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/84ccdf47-4526-4bbe-b9a6-ae44490bd586-ovsdbserver-sb\") pod \"dnsmasq-dns-5db55b45-t7h8n\" (UID: \"84ccdf47-4526-4bbe-b9a6-ae44490bd586\") " pod="openstack/dnsmasq-dns-5db55b45-t7h8n" Nov 23 16:07:52 crc kubenswrapper[5050]: I1123 16:07:52.167087 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/84ccdf47-4526-4bbe-b9a6-ae44490bd586-config\") pod \"dnsmasq-dns-5db55b45-t7h8n\" (UID: \"84ccdf47-4526-4bbe-b9a6-ae44490bd586\") " pod="openstack/dnsmasq-dns-5db55b45-t7h8n" Nov 23 16:07:52 crc kubenswrapper[5050]: I1123 16:07:52.167099 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/84ccdf47-4526-4bbe-b9a6-ae44490bd586-ovsdbserver-sb\") pod \"dnsmasq-dns-5db55b45-t7h8n\" (UID: \"84ccdf47-4526-4bbe-b9a6-ae44490bd586\") " pod="openstack/dnsmasq-dns-5db55b45-t7h8n" Nov 23 16:07:52 crc kubenswrapper[5050]: I1123 16:07:52.168087 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"dns-svc\" (UniqueName: \"kubernetes.io/configmap/84ccdf47-4526-4bbe-b9a6-ae44490bd586-dns-svc\") pod \"dnsmasq-dns-5db55b45-t7h8n\" (UID: \"84ccdf47-4526-4bbe-b9a6-ae44490bd586\") " pod="openstack/dnsmasq-dns-5db55b45-t7h8n" Nov 23 16:07:52 crc kubenswrapper[5050]: I1123 16:07:52.168527 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/84ccdf47-4526-4bbe-b9a6-ae44490bd586-ovsdbserver-nb\") pod \"dnsmasq-dns-5db55b45-t7h8n\" (UID: \"84ccdf47-4526-4bbe-b9a6-ae44490bd586\") " pod="openstack/dnsmasq-dns-5db55b45-t7h8n" Nov 23 16:07:52 crc kubenswrapper[5050]: I1123 16:07:52.172059 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f169bde5-3dc7-445e-8145-0f70b58e28b9-combined-ca-bundle\") pod \"keystone-bootstrap-2nr74\" (UID: \"f169bde5-3dc7-445e-8145-0f70b58e28b9\") " pod="openstack/keystone-bootstrap-2nr74" Nov 23 16:07:52 crc kubenswrapper[5050]: I1123 16:07:52.173063 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f169bde5-3dc7-445e-8145-0f70b58e28b9-config-data\") pod \"keystone-bootstrap-2nr74\" (UID: \"f169bde5-3dc7-445e-8145-0f70b58e28b9\") " pod="openstack/keystone-bootstrap-2nr74" Nov 23 16:07:52 crc kubenswrapper[5050]: I1123 16:07:52.175746 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f169bde5-3dc7-445e-8145-0f70b58e28b9-scripts\") pod \"keystone-bootstrap-2nr74\" (UID: \"f169bde5-3dc7-445e-8145-0f70b58e28b9\") " pod="openstack/keystone-bootstrap-2nr74" Nov 23 16:07:52 crc kubenswrapper[5050]: I1123 16:07:52.176607 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/f169bde5-3dc7-445e-8145-0f70b58e28b9-credential-keys\") pod \"keystone-bootstrap-2nr74\" (UID: \"f169bde5-3dc7-445e-8145-0f70b58e28b9\") " pod="openstack/keystone-bootstrap-2nr74" Nov 23 16:07:52 crc kubenswrapper[5050]: I1123 16:07:52.178072 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/f169bde5-3dc7-445e-8145-0f70b58e28b9-fernet-keys\") pod \"keystone-bootstrap-2nr74\" (UID: \"f169bde5-3dc7-445e-8145-0f70b58e28b9\") " pod="openstack/keystone-bootstrap-2nr74" Nov 23 16:07:52 crc kubenswrapper[5050]: I1123 16:07:52.186892 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-llgkm\" (UniqueName: \"kubernetes.io/projected/f169bde5-3dc7-445e-8145-0f70b58e28b9-kube-api-access-llgkm\") pod \"keystone-bootstrap-2nr74\" (UID: \"f169bde5-3dc7-445e-8145-0f70b58e28b9\") " pod="openstack/keystone-bootstrap-2nr74" Nov 23 16:07:52 crc kubenswrapper[5050]: I1123 16:07:52.189151 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z7sx4\" (UniqueName: \"kubernetes.io/projected/84ccdf47-4526-4bbe-b9a6-ae44490bd586-kube-api-access-z7sx4\") pod \"dnsmasq-dns-5db55b45-t7h8n\" (UID: \"84ccdf47-4526-4bbe-b9a6-ae44490bd586\") " pod="openstack/dnsmasq-dns-5db55b45-t7h8n" Nov 23 16:07:52 crc kubenswrapper[5050]: I1123 16:07:52.225863 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5db55b45-t7h8n" Nov 23 16:07:52 crc kubenswrapper[5050]: I1123 16:07:52.246053 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-2nr74" Nov 23 16:07:52 crc kubenswrapper[5050]: I1123 16:07:52.744902 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-2nr74"] Nov 23 16:07:52 crc kubenswrapper[5050]: I1123 16:07:52.766185 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5db55b45-t7h8n"] Nov 23 16:07:52 crc kubenswrapper[5050]: W1123 16:07:52.872716 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod84ccdf47_4526_4bbe_b9a6_ae44490bd586.slice/crio-c5adc79ed7d1e537dbc9f54fda4d77f947914f0bdb29ca51ca363084afa19182 WatchSource:0}: Error finding container c5adc79ed7d1e537dbc9f54fda4d77f947914f0bdb29ca51ca363084afa19182: Status 404 returned error can't find the container with id c5adc79ed7d1e537dbc9f54fda4d77f947914f0bdb29ca51ca363084afa19182 Nov 23 16:07:53 crc kubenswrapper[5050]: I1123 16:07:53.646622 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-2nr74" event={"ID":"f169bde5-3dc7-445e-8145-0f70b58e28b9","Type":"ContainerStarted","Data":"c60f0a4489aca68db2f287a17d908c0fd6c671e48582e0301d53ce00eb4ac8d3"} Nov 23 16:07:53 crc kubenswrapper[5050]: I1123 16:07:53.647100 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-2nr74" event={"ID":"f169bde5-3dc7-445e-8145-0f70b58e28b9","Type":"ContainerStarted","Data":"7b761b819ec0100ffdfcf0f8685a03537e6b0c5b0b2a9eada006229904361487"} Nov 23 16:07:53 crc kubenswrapper[5050]: I1123 16:07:53.651361 5050 generic.go:334] "Generic (PLEG): container finished" podID="84ccdf47-4526-4bbe-b9a6-ae44490bd586" containerID="9900fe592641ae76c655fa4413c5caa9d0a21a676e3683fda980933feef158af" exitCode=0 Nov 23 16:07:53 crc kubenswrapper[5050]: I1123 16:07:53.651399 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5db55b45-t7h8n" event={"ID":"84ccdf47-4526-4bbe-b9a6-ae44490bd586","Type":"ContainerDied","Data":"9900fe592641ae76c655fa4413c5caa9d0a21a676e3683fda980933feef158af"} Nov 23 16:07:53 crc kubenswrapper[5050]: I1123 16:07:53.651418 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5db55b45-t7h8n" event={"ID":"84ccdf47-4526-4bbe-b9a6-ae44490bd586","Type":"ContainerStarted","Data":"c5adc79ed7d1e537dbc9f54fda4d77f947914f0bdb29ca51ca363084afa19182"} Nov 23 16:07:53 crc kubenswrapper[5050]: I1123 16:07:53.689971 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-2nr74" podStartSLOduration=2.6899481119999997 podStartE2EDuration="2.689948112s" podCreationTimestamp="2025-11-23 16:07:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 16:07:53.675962467 +0000 UTC m=+5168.842958952" watchObservedRunningTime="2025-11-23 16:07:53.689948112 +0000 UTC m=+5168.856944597" Nov 23 16:07:54 crc kubenswrapper[5050]: I1123 16:07:54.670762 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5db55b45-t7h8n" event={"ID":"84ccdf47-4526-4bbe-b9a6-ae44490bd586","Type":"ContainerStarted","Data":"6335fec393250d969a48eb8dfed45b2b17e53f7fc2c8e8ac2b1d1084abdaab37"} Nov 23 16:07:54 crc kubenswrapper[5050]: I1123 16:07:54.724531 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5db55b45-t7h8n" podStartSLOduration=3.724493178 
podStartE2EDuration="3.724493178s" podCreationTimestamp="2025-11-23 16:07:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 16:07:54.705653576 +0000 UTC m=+5169.872650121" watchObservedRunningTime="2025-11-23 16:07:54.724493178 +0000 UTC m=+5169.891489723" Nov 23 16:07:55 crc kubenswrapper[5050]: I1123 16:07:55.681150 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5db55b45-t7h8n" Nov 23 16:07:56 crc kubenswrapper[5050]: I1123 16:07:56.695140 5050 generic.go:334] "Generic (PLEG): container finished" podID="f169bde5-3dc7-445e-8145-0f70b58e28b9" containerID="c60f0a4489aca68db2f287a17d908c0fd6c671e48582e0301d53ce00eb4ac8d3" exitCode=0 Nov 23 16:07:56 crc kubenswrapper[5050]: I1123 16:07:56.696514 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-2nr74" event={"ID":"f169bde5-3dc7-445e-8145-0f70b58e28b9","Type":"ContainerDied","Data":"c60f0a4489aca68db2f287a17d908c0fd6c671e48582e0301d53ce00eb4ac8d3"} Nov 23 16:07:58 crc kubenswrapper[5050]: I1123 16:07:58.175019 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-2nr74" Nov 23 16:07:58 crc kubenswrapper[5050]: I1123 16:07:58.302490 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/f169bde5-3dc7-445e-8145-0f70b58e28b9-fernet-keys\") pod \"f169bde5-3dc7-445e-8145-0f70b58e28b9\" (UID: \"f169bde5-3dc7-445e-8145-0f70b58e28b9\") " Nov 23 16:07:58 crc kubenswrapper[5050]: I1123 16:07:58.302580 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f169bde5-3dc7-445e-8145-0f70b58e28b9-scripts\") pod \"f169bde5-3dc7-445e-8145-0f70b58e28b9\" (UID: \"f169bde5-3dc7-445e-8145-0f70b58e28b9\") " Nov 23 16:07:58 crc kubenswrapper[5050]: I1123 16:07:58.302660 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f169bde5-3dc7-445e-8145-0f70b58e28b9-config-data\") pod \"f169bde5-3dc7-445e-8145-0f70b58e28b9\" (UID: \"f169bde5-3dc7-445e-8145-0f70b58e28b9\") " Nov 23 16:07:58 crc kubenswrapper[5050]: I1123 16:07:58.302733 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-llgkm\" (UniqueName: \"kubernetes.io/projected/f169bde5-3dc7-445e-8145-0f70b58e28b9-kube-api-access-llgkm\") pod \"f169bde5-3dc7-445e-8145-0f70b58e28b9\" (UID: \"f169bde5-3dc7-445e-8145-0f70b58e28b9\") " Nov 23 16:07:58 crc kubenswrapper[5050]: I1123 16:07:58.302766 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/f169bde5-3dc7-445e-8145-0f70b58e28b9-credential-keys\") pod \"f169bde5-3dc7-445e-8145-0f70b58e28b9\" (UID: \"f169bde5-3dc7-445e-8145-0f70b58e28b9\") " Nov 23 16:07:58 crc kubenswrapper[5050]: I1123 16:07:58.302787 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f169bde5-3dc7-445e-8145-0f70b58e28b9-combined-ca-bundle\") pod \"f169bde5-3dc7-445e-8145-0f70b58e28b9\" (UID: \"f169bde5-3dc7-445e-8145-0f70b58e28b9\") " Nov 23 16:07:58 crc kubenswrapper[5050]: I1123 16:07:58.310692 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/projected/f169bde5-3dc7-445e-8145-0f70b58e28b9-kube-api-access-llgkm" (OuterVolumeSpecName: "kube-api-access-llgkm") pod "f169bde5-3dc7-445e-8145-0f70b58e28b9" (UID: "f169bde5-3dc7-445e-8145-0f70b58e28b9"). InnerVolumeSpecName "kube-api-access-llgkm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 16:07:58 crc kubenswrapper[5050]: I1123 16:07:58.311131 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f169bde5-3dc7-445e-8145-0f70b58e28b9-scripts" (OuterVolumeSpecName: "scripts") pod "f169bde5-3dc7-445e-8145-0f70b58e28b9" (UID: "f169bde5-3dc7-445e-8145-0f70b58e28b9"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:07:58 crc kubenswrapper[5050]: I1123 16:07:58.311240 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f169bde5-3dc7-445e-8145-0f70b58e28b9-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "f169bde5-3dc7-445e-8145-0f70b58e28b9" (UID: "f169bde5-3dc7-445e-8145-0f70b58e28b9"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:07:58 crc kubenswrapper[5050]: I1123 16:07:58.311559 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f169bde5-3dc7-445e-8145-0f70b58e28b9-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "f169bde5-3dc7-445e-8145-0f70b58e28b9" (UID: "f169bde5-3dc7-445e-8145-0f70b58e28b9"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:07:58 crc kubenswrapper[5050]: I1123 16:07:58.344039 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f169bde5-3dc7-445e-8145-0f70b58e28b9-config-data" (OuterVolumeSpecName: "config-data") pod "f169bde5-3dc7-445e-8145-0f70b58e28b9" (UID: "f169bde5-3dc7-445e-8145-0f70b58e28b9"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:07:58 crc kubenswrapper[5050]: I1123 16:07:58.344704 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f169bde5-3dc7-445e-8145-0f70b58e28b9-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f169bde5-3dc7-445e-8145-0f70b58e28b9" (UID: "f169bde5-3dc7-445e-8145-0f70b58e28b9"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:07:58 crc kubenswrapper[5050]: I1123 16:07:58.405307 5050 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/f169bde5-3dc7-445e-8145-0f70b58e28b9-credential-keys\") on node \"crc\" DevicePath \"\"" Nov 23 16:07:58 crc kubenswrapper[5050]: I1123 16:07:58.405350 5050 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f169bde5-3dc7-445e-8145-0f70b58e28b9-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 16:07:58 crc kubenswrapper[5050]: I1123 16:07:58.405363 5050 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/f169bde5-3dc7-445e-8145-0f70b58e28b9-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 23 16:07:58 crc kubenswrapper[5050]: I1123 16:07:58.405373 5050 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f169bde5-3dc7-445e-8145-0f70b58e28b9-scripts\") on node \"crc\" DevicePath \"\"" Nov 23 16:07:58 crc kubenswrapper[5050]: I1123 16:07:58.405386 5050 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f169bde5-3dc7-445e-8145-0f70b58e28b9-config-data\") on node \"crc\" DevicePath \"\"" Nov 23 16:07:58 crc kubenswrapper[5050]: I1123 16:07:58.405397 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-llgkm\" (UniqueName: \"kubernetes.io/projected/f169bde5-3dc7-445e-8145-0f70b58e28b9-kube-api-access-llgkm\") on node \"crc\" DevicePath \"\"" Nov 23 16:07:58 crc kubenswrapper[5050]: I1123 16:07:58.721109 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-2nr74" event={"ID":"f169bde5-3dc7-445e-8145-0f70b58e28b9","Type":"ContainerDied","Data":"7b761b819ec0100ffdfcf0f8685a03537e6b0c5b0b2a9eada006229904361487"} Nov 23 16:07:58 crc kubenswrapper[5050]: I1123 16:07:58.721640 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7b761b819ec0100ffdfcf0f8685a03537e6b0c5b0b2a9eada006229904361487" Nov 23 16:07:58 crc kubenswrapper[5050]: I1123 16:07:58.721182 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-2nr74" Nov 23 16:07:58 crc kubenswrapper[5050]: I1123 16:07:58.805565 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-2nr74"] Nov 23 16:07:58 crc kubenswrapper[5050]: I1123 16:07:58.813931 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-2nr74"] Nov 23 16:07:58 crc kubenswrapper[5050]: I1123 16:07:58.893162 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-bskzx"] Nov 23 16:07:58 crc kubenswrapper[5050]: E1123 16:07:58.893559 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f169bde5-3dc7-445e-8145-0f70b58e28b9" containerName="keystone-bootstrap" Nov 23 16:07:58 crc kubenswrapper[5050]: I1123 16:07:58.893578 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="f169bde5-3dc7-445e-8145-0f70b58e28b9" containerName="keystone-bootstrap" Nov 23 16:07:58 crc kubenswrapper[5050]: I1123 16:07:58.893772 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="f169bde5-3dc7-445e-8145-0f70b58e28b9" containerName="keystone-bootstrap" Nov 23 16:07:58 crc kubenswrapper[5050]: I1123 16:07:58.894461 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-bskzx" Nov 23 16:07:58 crc kubenswrapper[5050]: I1123 16:07:58.897749 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Nov 23 16:07:58 crc kubenswrapper[5050]: I1123 16:07:58.897837 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 23 16:07:58 crc kubenswrapper[5050]: I1123 16:07:58.897894 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 23 16:07:58 crc kubenswrapper[5050]: I1123 16:07:58.899966 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 23 16:07:58 crc kubenswrapper[5050]: I1123 16:07:58.907123 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-dtsf9" Nov 23 16:07:58 crc kubenswrapper[5050]: I1123 16:07:58.907416 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-bskzx"] Nov 23 16:07:59 crc kubenswrapper[5050]: I1123 16:07:59.015552 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d0643b76-5863-43b7-b8a9-cdc50659bc95-combined-ca-bundle\") pod \"keystone-bootstrap-bskzx\" (UID: \"d0643b76-5863-43b7-b8a9-cdc50659bc95\") " pod="openstack/keystone-bootstrap-bskzx" Nov 23 16:07:59 crc kubenswrapper[5050]: I1123 16:07:59.015653 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/d0643b76-5863-43b7-b8a9-cdc50659bc95-credential-keys\") pod \"keystone-bootstrap-bskzx\" (UID: \"d0643b76-5863-43b7-b8a9-cdc50659bc95\") " pod="openstack/keystone-bootstrap-bskzx" Nov 23 16:07:59 crc kubenswrapper[5050]: I1123 16:07:59.015714 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d0643b76-5863-43b7-b8a9-cdc50659bc95-config-data\") pod \"keystone-bootstrap-bskzx\" (UID: \"d0643b76-5863-43b7-b8a9-cdc50659bc95\") " pod="openstack/keystone-bootstrap-bskzx" Nov 23 16:07:59 crc kubenswrapper[5050]: I1123 
16:07:59.015750 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/d0643b76-5863-43b7-b8a9-cdc50659bc95-fernet-keys\") pod \"keystone-bootstrap-bskzx\" (UID: \"d0643b76-5863-43b7-b8a9-cdc50659bc95\") " pod="openstack/keystone-bootstrap-bskzx" Nov 23 16:07:59 crc kubenswrapper[5050]: I1123 16:07:59.015791 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8ngr4\" (UniqueName: \"kubernetes.io/projected/d0643b76-5863-43b7-b8a9-cdc50659bc95-kube-api-access-8ngr4\") pod \"keystone-bootstrap-bskzx\" (UID: \"d0643b76-5863-43b7-b8a9-cdc50659bc95\") " pod="openstack/keystone-bootstrap-bskzx" Nov 23 16:07:59 crc kubenswrapper[5050]: I1123 16:07:59.015817 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d0643b76-5863-43b7-b8a9-cdc50659bc95-scripts\") pod \"keystone-bootstrap-bskzx\" (UID: \"d0643b76-5863-43b7-b8a9-cdc50659bc95\") " pod="openstack/keystone-bootstrap-bskzx" Nov 23 16:07:59 crc kubenswrapper[5050]: I1123 16:07:59.117430 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d0643b76-5863-43b7-b8a9-cdc50659bc95-combined-ca-bundle\") pod \"keystone-bootstrap-bskzx\" (UID: \"d0643b76-5863-43b7-b8a9-cdc50659bc95\") " pod="openstack/keystone-bootstrap-bskzx" Nov 23 16:07:59 crc kubenswrapper[5050]: I1123 16:07:59.117599 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/d0643b76-5863-43b7-b8a9-cdc50659bc95-credential-keys\") pod \"keystone-bootstrap-bskzx\" (UID: \"d0643b76-5863-43b7-b8a9-cdc50659bc95\") " pod="openstack/keystone-bootstrap-bskzx" Nov 23 16:07:59 crc kubenswrapper[5050]: I1123 16:07:59.117688 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d0643b76-5863-43b7-b8a9-cdc50659bc95-config-data\") pod \"keystone-bootstrap-bskzx\" (UID: \"d0643b76-5863-43b7-b8a9-cdc50659bc95\") " pod="openstack/keystone-bootstrap-bskzx" Nov 23 16:07:59 crc kubenswrapper[5050]: I1123 16:07:59.117744 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/d0643b76-5863-43b7-b8a9-cdc50659bc95-fernet-keys\") pod \"keystone-bootstrap-bskzx\" (UID: \"d0643b76-5863-43b7-b8a9-cdc50659bc95\") " pod="openstack/keystone-bootstrap-bskzx" Nov 23 16:07:59 crc kubenswrapper[5050]: I1123 16:07:59.117798 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8ngr4\" (UniqueName: \"kubernetes.io/projected/d0643b76-5863-43b7-b8a9-cdc50659bc95-kube-api-access-8ngr4\") pod \"keystone-bootstrap-bskzx\" (UID: \"d0643b76-5863-43b7-b8a9-cdc50659bc95\") " pod="openstack/keystone-bootstrap-bskzx" Nov 23 16:07:59 crc kubenswrapper[5050]: I1123 16:07:59.117838 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d0643b76-5863-43b7-b8a9-cdc50659bc95-scripts\") pod \"keystone-bootstrap-bskzx\" (UID: \"d0643b76-5863-43b7-b8a9-cdc50659bc95\") " pod="openstack/keystone-bootstrap-bskzx" Nov 23 16:07:59 crc kubenswrapper[5050]: I1123 16:07:59.124104 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d0643b76-5863-43b7-b8a9-cdc50659bc95-combined-ca-bundle\") pod \"keystone-bootstrap-bskzx\" (UID: \"d0643b76-5863-43b7-b8a9-cdc50659bc95\") " pod="openstack/keystone-bootstrap-bskzx" Nov 23 16:07:59 crc kubenswrapper[5050]: I1123 16:07:59.124380 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d0643b76-5863-43b7-b8a9-cdc50659bc95-scripts\") pod \"keystone-bootstrap-bskzx\" (UID: \"d0643b76-5863-43b7-b8a9-cdc50659bc95\") " pod="openstack/keystone-bootstrap-bskzx" Nov 23 16:07:59 crc kubenswrapper[5050]: I1123 16:07:59.125366 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d0643b76-5863-43b7-b8a9-cdc50659bc95-config-data\") pod \"keystone-bootstrap-bskzx\" (UID: \"d0643b76-5863-43b7-b8a9-cdc50659bc95\") " pod="openstack/keystone-bootstrap-bskzx" Nov 23 16:07:59 crc kubenswrapper[5050]: I1123 16:07:59.125397 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/d0643b76-5863-43b7-b8a9-cdc50659bc95-fernet-keys\") pod \"keystone-bootstrap-bskzx\" (UID: \"d0643b76-5863-43b7-b8a9-cdc50659bc95\") " pod="openstack/keystone-bootstrap-bskzx" Nov 23 16:07:59 crc kubenswrapper[5050]: I1123 16:07:59.127143 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/d0643b76-5863-43b7-b8a9-cdc50659bc95-credential-keys\") pod \"keystone-bootstrap-bskzx\" (UID: \"d0643b76-5863-43b7-b8a9-cdc50659bc95\") " pod="openstack/keystone-bootstrap-bskzx" Nov 23 16:07:59 crc kubenswrapper[5050]: I1123 16:07:59.136896 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8ngr4\" (UniqueName: \"kubernetes.io/projected/d0643b76-5863-43b7-b8a9-cdc50659bc95-kube-api-access-8ngr4\") pod \"keystone-bootstrap-bskzx\" (UID: \"d0643b76-5863-43b7-b8a9-cdc50659bc95\") " pod="openstack/keystone-bootstrap-bskzx" Nov 23 16:07:59 crc kubenswrapper[5050]: I1123 16:07:59.212035 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-bskzx" Nov 23 16:07:59 crc kubenswrapper[5050]: I1123 16:07:59.572001 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f169bde5-3dc7-445e-8145-0f70b58e28b9" path="/var/lib/kubelet/pods/f169bde5-3dc7-445e-8145-0f70b58e28b9/volumes" Nov 23 16:07:59 crc kubenswrapper[5050]: I1123 16:07:59.780349 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-bskzx"] Nov 23 16:08:00 crc kubenswrapper[5050]: I1123 16:08:00.549271 5050 scope.go:117] "RemoveContainer" containerID="3fc70d33f56eea45a48b3911893d3dc4f60ad925817000cb324b1423c665b64b" Nov 23 16:08:00 crc kubenswrapper[5050]: E1123 16:08:00.550410 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 16:08:00 crc kubenswrapper[5050]: I1123 16:08:00.749200 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-bskzx" event={"ID":"d0643b76-5863-43b7-b8a9-cdc50659bc95","Type":"ContainerStarted","Data":"4067dac9832390b9f4abb04f8b909d5a6b02f1da6f8ddb6c096247ef85e3ad20"} Nov 23 16:08:00 crc kubenswrapper[5050]: I1123 16:08:00.749263 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-bskzx" event={"ID":"d0643b76-5863-43b7-b8a9-cdc50659bc95","Type":"ContainerStarted","Data":"5459eb05dc798f6ef5ce70af24320b3078cd588c7628958ff2d2b0deb4031944"} Nov 23 16:08:00 crc kubenswrapper[5050]: I1123 16:08:00.782411 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-bskzx" podStartSLOduration=2.782378221 podStartE2EDuration="2.782378221s" podCreationTimestamp="2025-11-23 16:07:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 16:08:00.778693007 +0000 UTC m=+5175.945689572" watchObservedRunningTime="2025-11-23 16:08:00.782378221 +0000 UTC m=+5175.949374746" Nov 23 16:08:02 crc kubenswrapper[5050]: I1123 16:08:02.227821 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5db55b45-t7h8n" Nov 23 16:08:02 crc kubenswrapper[5050]: I1123 16:08:02.323399 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7c9c679fc-b59c7"] Nov 23 16:08:02 crc kubenswrapper[5050]: I1123 16:08:02.323796 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-7c9c679fc-b59c7" podUID="6087f6f0-60d6-4e97-bf79-c53bcb68c7cd" containerName="dnsmasq-dns" containerID="cri-o://c78974c1b9e1482068cc20864fb7d5529b2859fbbe8ae659ce48b90e90212f4b" gracePeriod=10 Nov 23 16:08:02 crc kubenswrapper[5050]: I1123 16:08:02.803281 5050 generic.go:334] "Generic (PLEG): container finished" podID="6087f6f0-60d6-4e97-bf79-c53bcb68c7cd" containerID="c78974c1b9e1482068cc20864fb7d5529b2859fbbe8ae659ce48b90e90212f4b" exitCode=0 Nov 23 16:08:02 crc kubenswrapper[5050]: I1123 16:08:02.803344 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7c9c679fc-b59c7" 
event={"ID":"6087f6f0-60d6-4e97-bf79-c53bcb68c7cd","Type":"ContainerDied","Data":"c78974c1b9e1482068cc20864fb7d5529b2859fbbe8ae659ce48b90e90212f4b"} Nov 23 16:08:02 crc kubenswrapper[5050]: I1123 16:08:02.803397 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7c9c679fc-b59c7" event={"ID":"6087f6f0-60d6-4e97-bf79-c53bcb68c7cd","Type":"ContainerDied","Data":"c071eda3396adb4a5b016f0891fd35d576ac896502dc5cc11a9363bca33b8fe4"} Nov 23 16:08:02 crc kubenswrapper[5050]: I1123 16:08:02.803412 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c071eda3396adb4a5b016f0891fd35d576ac896502dc5cc11a9363bca33b8fe4" Nov 23 16:08:02 crc kubenswrapper[5050]: I1123 16:08:02.932581 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7c9c679fc-b59c7" Nov 23 16:08:03 crc kubenswrapper[5050]: I1123 16:08:03.034892 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6087f6f0-60d6-4e97-bf79-c53bcb68c7cd-ovsdbserver-sb\") pod \"6087f6f0-60d6-4e97-bf79-c53bcb68c7cd\" (UID: \"6087f6f0-60d6-4e97-bf79-c53bcb68c7cd\") " Nov 23 16:08:03 crc kubenswrapper[5050]: I1123 16:08:03.035066 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6087f6f0-60d6-4e97-bf79-c53bcb68c7cd-dns-svc\") pod \"6087f6f0-60d6-4e97-bf79-c53bcb68c7cd\" (UID: \"6087f6f0-60d6-4e97-bf79-c53bcb68c7cd\") " Nov 23 16:08:03 crc kubenswrapper[5050]: I1123 16:08:03.035173 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6087f6f0-60d6-4e97-bf79-c53bcb68c7cd-config\") pod \"6087f6f0-60d6-4e97-bf79-c53bcb68c7cd\" (UID: \"6087f6f0-60d6-4e97-bf79-c53bcb68c7cd\") " Nov 23 16:08:03 crc kubenswrapper[5050]: I1123 16:08:03.035216 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6087f6f0-60d6-4e97-bf79-c53bcb68c7cd-ovsdbserver-nb\") pod \"6087f6f0-60d6-4e97-bf79-c53bcb68c7cd\" (UID: \"6087f6f0-60d6-4e97-bf79-c53bcb68c7cd\") " Nov 23 16:08:03 crc kubenswrapper[5050]: I1123 16:08:03.035301 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2zldm\" (UniqueName: \"kubernetes.io/projected/6087f6f0-60d6-4e97-bf79-c53bcb68c7cd-kube-api-access-2zldm\") pod \"6087f6f0-60d6-4e97-bf79-c53bcb68c7cd\" (UID: \"6087f6f0-60d6-4e97-bf79-c53bcb68c7cd\") " Nov 23 16:08:03 crc kubenswrapper[5050]: I1123 16:08:03.048690 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6087f6f0-60d6-4e97-bf79-c53bcb68c7cd-kube-api-access-2zldm" (OuterVolumeSpecName: "kube-api-access-2zldm") pod "6087f6f0-60d6-4e97-bf79-c53bcb68c7cd" (UID: "6087f6f0-60d6-4e97-bf79-c53bcb68c7cd"). InnerVolumeSpecName "kube-api-access-2zldm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 16:08:03 crc kubenswrapper[5050]: I1123 16:08:03.082918 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6087f6f0-60d6-4e97-bf79-c53bcb68c7cd-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "6087f6f0-60d6-4e97-bf79-c53bcb68c7cd" (UID: "6087f6f0-60d6-4e97-bf79-c53bcb68c7cd"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 16:08:03 crc kubenswrapper[5050]: I1123 16:08:03.084763 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6087f6f0-60d6-4e97-bf79-c53bcb68c7cd-config" (OuterVolumeSpecName: "config") pod "6087f6f0-60d6-4e97-bf79-c53bcb68c7cd" (UID: "6087f6f0-60d6-4e97-bf79-c53bcb68c7cd"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 16:08:03 crc kubenswrapper[5050]: I1123 16:08:03.091255 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6087f6f0-60d6-4e97-bf79-c53bcb68c7cd-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "6087f6f0-60d6-4e97-bf79-c53bcb68c7cd" (UID: "6087f6f0-60d6-4e97-bf79-c53bcb68c7cd"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 16:08:03 crc kubenswrapper[5050]: I1123 16:08:03.104519 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6087f6f0-60d6-4e97-bf79-c53bcb68c7cd-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "6087f6f0-60d6-4e97-bf79-c53bcb68c7cd" (UID: "6087f6f0-60d6-4e97-bf79-c53bcb68c7cd"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 16:08:03 crc kubenswrapper[5050]: I1123 16:08:03.137382 5050 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6087f6f0-60d6-4e97-bf79-c53bcb68c7cd-config\") on node \"crc\" DevicePath \"\"" Nov 23 16:08:03 crc kubenswrapper[5050]: I1123 16:08:03.137433 5050 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6087f6f0-60d6-4e97-bf79-c53bcb68c7cd-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 23 16:08:03 crc kubenswrapper[5050]: I1123 16:08:03.137462 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2zldm\" (UniqueName: \"kubernetes.io/projected/6087f6f0-60d6-4e97-bf79-c53bcb68c7cd-kube-api-access-2zldm\") on node \"crc\" DevicePath \"\"" Nov 23 16:08:03 crc kubenswrapper[5050]: I1123 16:08:03.137478 5050 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6087f6f0-60d6-4e97-bf79-c53bcb68c7cd-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 23 16:08:03 crc kubenswrapper[5050]: I1123 16:08:03.137488 5050 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6087f6f0-60d6-4e97-bf79-c53bcb68c7cd-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 23 16:08:03 crc kubenswrapper[5050]: I1123 16:08:03.815789 5050 generic.go:334] "Generic (PLEG): container finished" podID="d0643b76-5863-43b7-b8a9-cdc50659bc95" containerID="4067dac9832390b9f4abb04f8b909d5a6b02f1da6f8ddb6c096247ef85e3ad20" exitCode=0 Nov 23 16:08:03 crc kubenswrapper[5050]: I1123 16:08:03.815872 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-bskzx" event={"ID":"d0643b76-5863-43b7-b8a9-cdc50659bc95","Type":"ContainerDied","Data":"4067dac9832390b9f4abb04f8b909d5a6b02f1da6f8ddb6c096247ef85e3ad20"} Nov 23 16:08:03 crc kubenswrapper[5050]: I1123 16:08:03.815913 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7c9c679fc-b59c7" Nov 23 16:08:03 crc kubenswrapper[5050]: I1123 16:08:03.868950 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7c9c679fc-b59c7"] Nov 23 16:08:03 crc kubenswrapper[5050]: I1123 16:08:03.876414 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7c9c679fc-b59c7"] Nov 23 16:08:05 crc kubenswrapper[5050]: I1123 16:08:05.255381 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-bskzx" Nov 23 16:08:05 crc kubenswrapper[5050]: I1123 16:08:05.295614 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d0643b76-5863-43b7-b8a9-cdc50659bc95-combined-ca-bundle\") pod \"d0643b76-5863-43b7-b8a9-cdc50659bc95\" (UID: \"d0643b76-5863-43b7-b8a9-cdc50659bc95\") " Nov 23 16:08:05 crc kubenswrapper[5050]: I1123 16:08:05.295802 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8ngr4\" (UniqueName: \"kubernetes.io/projected/d0643b76-5863-43b7-b8a9-cdc50659bc95-kube-api-access-8ngr4\") pod \"d0643b76-5863-43b7-b8a9-cdc50659bc95\" (UID: \"d0643b76-5863-43b7-b8a9-cdc50659bc95\") " Nov 23 16:08:05 crc kubenswrapper[5050]: I1123 16:08:05.295977 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/d0643b76-5863-43b7-b8a9-cdc50659bc95-credential-keys\") pod \"d0643b76-5863-43b7-b8a9-cdc50659bc95\" (UID: \"d0643b76-5863-43b7-b8a9-cdc50659bc95\") " Nov 23 16:08:05 crc kubenswrapper[5050]: I1123 16:08:05.296016 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/d0643b76-5863-43b7-b8a9-cdc50659bc95-fernet-keys\") pod \"d0643b76-5863-43b7-b8a9-cdc50659bc95\" (UID: \"d0643b76-5863-43b7-b8a9-cdc50659bc95\") " Nov 23 16:08:05 crc kubenswrapper[5050]: I1123 16:08:05.296064 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d0643b76-5863-43b7-b8a9-cdc50659bc95-scripts\") pod \"d0643b76-5863-43b7-b8a9-cdc50659bc95\" (UID: \"d0643b76-5863-43b7-b8a9-cdc50659bc95\") " Nov 23 16:08:05 crc kubenswrapper[5050]: I1123 16:08:05.296145 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d0643b76-5863-43b7-b8a9-cdc50659bc95-config-data\") pod \"d0643b76-5863-43b7-b8a9-cdc50659bc95\" (UID: \"d0643b76-5863-43b7-b8a9-cdc50659bc95\") " Nov 23 16:08:05 crc kubenswrapper[5050]: I1123 16:08:05.313706 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d0643b76-5863-43b7-b8a9-cdc50659bc95-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "d0643b76-5863-43b7-b8a9-cdc50659bc95" (UID: "d0643b76-5863-43b7-b8a9-cdc50659bc95"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:08:05 crc kubenswrapper[5050]: I1123 16:08:05.316145 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d0643b76-5863-43b7-b8a9-cdc50659bc95-scripts" (OuterVolumeSpecName: "scripts") pod "d0643b76-5863-43b7-b8a9-cdc50659bc95" (UID: "d0643b76-5863-43b7-b8a9-cdc50659bc95"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:08:05 crc kubenswrapper[5050]: I1123 16:08:05.319550 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d0643b76-5863-43b7-b8a9-cdc50659bc95-kube-api-access-8ngr4" (OuterVolumeSpecName: "kube-api-access-8ngr4") pod "d0643b76-5863-43b7-b8a9-cdc50659bc95" (UID: "d0643b76-5863-43b7-b8a9-cdc50659bc95"). InnerVolumeSpecName "kube-api-access-8ngr4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 16:08:05 crc kubenswrapper[5050]: I1123 16:08:05.332517 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d0643b76-5863-43b7-b8a9-cdc50659bc95-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "d0643b76-5863-43b7-b8a9-cdc50659bc95" (UID: "d0643b76-5863-43b7-b8a9-cdc50659bc95"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:08:05 crc kubenswrapper[5050]: I1123 16:08:05.344416 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d0643b76-5863-43b7-b8a9-cdc50659bc95-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d0643b76-5863-43b7-b8a9-cdc50659bc95" (UID: "d0643b76-5863-43b7-b8a9-cdc50659bc95"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:08:05 crc kubenswrapper[5050]: I1123 16:08:05.359053 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d0643b76-5863-43b7-b8a9-cdc50659bc95-config-data" (OuterVolumeSpecName: "config-data") pod "d0643b76-5863-43b7-b8a9-cdc50659bc95" (UID: "d0643b76-5863-43b7-b8a9-cdc50659bc95"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:08:05 crc kubenswrapper[5050]: I1123 16:08:05.398521 5050 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d0643b76-5863-43b7-b8a9-cdc50659bc95-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 16:08:05 crc kubenswrapper[5050]: I1123 16:08:05.399037 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8ngr4\" (UniqueName: \"kubernetes.io/projected/d0643b76-5863-43b7-b8a9-cdc50659bc95-kube-api-access-8ngr4\") on node \"crc\" DevicePath \"\"" Nov 23 16:08:05 crc kubenswrapper[5050]: I1123 16:08:05.399053 5050 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/d0643b76-5863-43b7-b8a9-cdc50659bc95-credential-keys\") on node \"crc\" DevicePath \"\"" Nov 23 16:08:05 crc kubenswrapper[5050]: I1123 16:08:05.399066 5050 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/d0643b76-5863-43b7-b8a9-cdc50659bc95-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 23 16:08:05 crc kubenswrapper[5050]: I1123 16:08:05.399077 5050 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d0643b76-5863-43b7-b8a9-cdc50659bc95-scripts\") on node \"crc\" DevicePath \"\"" Nov 23 16:08:05 crc kubenswrapper[5050]: I1123 16:08:05.399769 5050 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d0643b76-5863-43b7-b8a9-cdc50659bc95-config-data\") on node \"crc\" DevicePath \"\"" Nov 23 16:08:05 crc kubenswrapper[5050]: I1123 16:08:05.569799 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="6087f6f0-60d6-4e97-bf79-c53bcb68c7cd" path="/var/lib/kubelet/pods/6087f6f0-60d6-4e97-bf79-c53bcb68c7cd/volumes" Nov 23 16:08:05 crc kubenswrapper[5050]: I1123 16:08:05.843671 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-bskzx" event={"ID":"d0643b76-5863-43b7-b8a9-cdc50659bc95","Type":"ContainerDied","Data":"5459eb05dc798f6ef5ce70af24320b3078cd588c7628958ff2d2b0deb4031944"} Nov 23 16:08:05 crc kubenswrapper[5050]: I1123 16:08:05.843759 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-bskzx" Nov 23 16:08:05 crc kubenswrapper[5050]: I1123 16:08:05.843764 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5459eb05dc798f6ef5ce70af24320b3078cd588c7628958ff2d2b0deb4031944" Nov 23 16:08:05 crc kubenswrapper[5050]: I1123 16:08:05.986667 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-79794bd4b-bfchw"] Nov 23 16:08:05 crc kubenswrapper[5050]: E1123 16:08:05.987128 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d0643b76-5863-43b7-b8a9-cdc50659bc95" containerName="keystone-bootstrap" Nov 23 16:08:05 crc kubenswrapper[5050]: I1123 16:08:05.987152 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="d0643b76-5863-43b7-b8a9-cdc50659bc95" containerName="keystone-bootstrap" Nov 23 16:08:05 crc kubenswrapper[5050]: E1123 16:08:05.987172 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6087f6f0-60d6-4e97-bf79-c53bcb68c7cd" containerName="init" Nov 23 16:08:05 crc kubenswrapper[5050]: I1123 16:08:05.987180 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="6087f6f0-60d6-4e97-bf79-c53bcb68c7cd" containerName="init" Nov 23 16:08:05 crc kubenswrapper[5050]: E1123 16:08:05.987203 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6087f6f0-60d6-4e97-bf79-c53bcb68c7cd" containerName="dnsmasq-dns" Nov 23 16:08:05 crc kubenswrapper[5050]: I1123 16:08:05.987213 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="6087f6f0-60d6-4e97-bf79-c53bcb68c7cd" containerName="dnsmasq-dns" Nov 23 16:08:05 crc kubenswrapper[5050]: I1123 16:08:05.987404 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="6087f6f0-60d6-4e97-bf79-c53bcb68c7cd" containerName="dnsmasq-dns" Nov 23 16:08:05 crc kubenswrapper[5050]: I1123 16:08:05.987429 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="d0643b76-5863-43b7-b8a9-cdc50659bc95" containerName="keystone-bootstrap" Nov 23 16:08:05 crc kubenswrapper[5050]: I1123 16:08:05.988151 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-79794bd4b-bfchw" Nov 23 16:08:05 crc kubenswrapper[5050]: I1123 16:08:05.994802 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 23 16:08:05 crc kubenswrapper[5050]: I1123 16:08:05.995089 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 23 16:08:05 crc kubenswrapper[5050]: I1123 16:08:05.995412 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 23 16:08:05 crc kubenswrapper[5050]: I1123 16:08:05.995657 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-dtsf9" Nov 23 16:08:06 crc kubenswrapper[5050]: I1123 16:08:06.016984 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/6c8bf1f5-950d-4fa2-b42e-d43554319c11-credential-keys\") pod \"keystone-79794bd4b-bfchw\" (UID: \"6c8bf1f5-950d-4fa2-b42e-d43554319c11\") " pod="openstack/keystone-79794bd4b-bfchw" Nov 23 16:08:06 crc kubenswrapper[5050]: I1123 16:08:06.017163 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6c8bf1f5-950d-4fa2-b42e-d43554319c11-scripts\") pod \"keystone-79794bd4b-bfchw\" (UID: \"6c8bf1f5-950d-4fa2-b42e-d43554319c11\") " pod="openstack/keystone-79794bd4b-bfchw" Nov 23 16:08:06 crc kubenswrapper[5050]: I1123 16:08:06.017199 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4fh8x\" (UniqueName: \"kubernetes.io/projected/6c8bf1f5-950d-4fa2-b42e-d43554319c11-kube-api-access-4fh8x\") pod \"keystone-79794bd4b-bfchw\" (UID: \"6c8bf1f5-950d-4fa2-b42e-d43554319c11\") " pod="openstack/keystone-79794bd4b-bfchw" Nov 23 16:08:06 crc kubenswrapper[5050]: I1123 16:08:06.017280 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c8bf1f5-950d-4fa2-b42e-d43554319c11-combined-ca-bundle\") pod \"keystone-79794bd4b-bfchw\" (UID: \"6c8bf1f5-950d-4fa2-b42e-d43554319c11\") " pod="openstack/keystone-79794bd4b-bfchw" Nov 23 16:08:06 crc kubenswrapper[5050]: I1123 16:08:06.017332 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/6c8bf1f5-950d-4fa2-b42e-d43554319c11-fernet-keys\") pod \"keystone-79794bd4b-bfchw\" (UID: \"6c8bf1f5-950d-4fa2-b42e-d43554319c11\") " pod="openstack/keystone-79794bd4b-bfchw" Nov 23 16:08:06 crc kubenswrapper[5050]: I1123 16:08:06.017367 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c8bf1f5-950d-4fa2-b42e-d43554319c11-config-data\") pod \"keystone-79794bd4b-bfchw\" (UID: \"6c8bf1f5-950d-4fa2-b42e-d43554319c11\") " pod="openstack/keystone-79794bd4b-bfchw" Nov 23 16:08:06 crc kubenswrapper[5050]: I1123 16:08:06.030401 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-79794bd4b-bfchw"] Nov 23 16:08:06 crc kubenswrapper[5050]: I1123 16:08:06.122401 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/6c8bf1f5-950d-4fa2-b42e-d43554319c11-credential-keys\") pod \"keystone-79794bd4b-bfchw\" 
(UID: \"6c8bf1f5-950d-4fa2-b42e-d43554319c11\") " pod="openstack/keystone-79794bd4b-bfchw" Nov 23 16:08:06 crc kubenswrapper[5050]: I1123 16:08:06.122528 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6c8bf1f5-950d-4fa2-b42e-d43554319c11-scripts\") pod \"keystone-79794bd4b-bfchw\" (UID: \"6c8bf1f5-950d-4fa2-b42e-d43554319c11\") " pod="openstack/keystone-79794bd4b-bfchw" Nov 23 16:08:06 crc kubenswrapper[5050]: I1123 16:08:06.122555 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4fh8x\" (UniqueName: \"kubernetes.io/projected/6c8bf1f5-950d-4fa2-b42e-d43554319c11-kube-api-access-4fh8x\") pod \"keystone-79794bd4b-bfchw\" (UID: \"6c8bf1f5-950d-4fa2-b42e-d43554319c11\") " pod="openstack/keystone-79794bd4b-bfchw" Nov 23 16:08:06 crc kubenswrapper[5050]: I1123 16:08:06.122600 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c8bf1f5-950d-4fa2-b42e-d43554319c11-combined-ca-bundle\") pod \"keystone-79794bd4b-bfchw\" (UID: \"6c8bf1f5-950d-4fa2-b42e-d43554319c11\") " pod="openstack/keystone-79794bd4b-bfchw" Nov 23 16:08:06 crc kubenswrapper[5050]: I1123 16:08:06.122627 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/6c8bf1f5-950d-4fa2-b42e-d43554319c11-fernet-keys\") pod \"keystone-79794bd4b-bfchw\" (UID: \"6c8bf1f5-950d-4fa2-b42e-d43554319c11\") " pod="openstack/keystone-79794bd4b-bfchw" Nov 23 16:08:06 crc kubenswrapper[5050]: I1123 16:08:06.122646 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c8bf1f5-950d-4fa2-b42e-d43554319c11-config-data\") pod \"keystone-79794bd4b-bfchw\" (UID: \"6c8bf1f5-950d-4fa2-b42e-d43554319c11\") " pod="openstack/keystone-79794bd4b-bfchw" Nov 23 16:08:06 crc kubenswrapper[5050]: I1123 16:08:06.132160 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6c8bf1f5-950d-4fa2-b42e-d43554319c11-scripts\") pod \"keystone-79794bd4b-bfchw\" (UID: \"6c8bf1f5-950d-4fa2-b42e-d43554319c11\") " pod="openstack/keystone-79794bd4b-bfchw" Nov 23 16:08:06 crc kubenswrapper[5050]: I1123 16:08:06.135087 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/6c8bf1f5-950d-4fa2-b42e-d43554319c11-credential-keys\") pod \"keystone-79794bd4b-bfchw\" (UID: \"6c8bf1f5-950d-4fa2-b42e-d43554319c11\") " pod="openstack/keystone-79794bd4b-bfchw" Nov 23 16:08:06 crc kubenswrapper[5050]: I1123 16:08:06.140135 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c8bf1f5-950d-4fa2-b42e-d43554319c11-combined-ca-bundle\") pod \"keystone-79794bd4b-bfchw\" (UID: \"6c8bf1f5-950d-4fa2-b42e-d43554319c11\") " pod="openstack/keystone-79794bd4b-bfchw" Nov 23 16:08:06 crc kubenswrapper[5050]: I1123 16:08:06.148169 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/6c8bf1f5-950d-4fa2-b42e-d43554319c11-fernet-keys\") pod \"keystone-79794bd4b-bfchw\" (UID: \"6c8bf1f5-950d-4fa2-b42e-d43554319c11\") " pod="openstack/keystone-79794bd4b-bfchw" Nov 23 16:08:06 crc kubenswrapper[5050]: I1123 16:08:06.148754 5050 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c8bf1f5-950d-4fa2-b42e-d43554319c11-config-data\") pod \"keystone-79794bd4b-bfchw\" (UID: \"6c8bf1f5-950d-4fa2-b42e-d43554319c11\") " pod="openstack/keystone-79794bd4b-bfchw" Nov 23 16:08:06 crc kubenswrapper[5050]: I1123 16:08:06.152917 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4fh8x\" (UniqueName: \"kubernetes.io/projected/6c8bf1f5-950d-4fa2-b42e-d43554319c11-kube-api-access-4fh8x\") pod \"keystone-79794bd4b-bfchw\" (UID: \"6c8bf1f5-950d-4fa2-b42e-d43554319c11\") " pod="openstack/keystone-79794bd4b-bfchw" Nov 23 16:08:06 crc kubenswrapper[5050]: I1123 16:08:06.334712 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-79794bd4b-bfchw" Nov 23 16:08:06 crc kubenswrapper[5050]: I1123 16:08:06.652072 5050 scope.go:117] "RemoveContainer" containerID="fa4b64dabe63914d29dc2700faef5fa3d4a39e94ec82e779cbf8c75ad42295b9" Nov 23 16:08:06 crc kubenswrapper[5050]: I1123 16:08:06.767867 5050 scope.go:117] "RemoveContainer" containerID="791b5dcb41c80301c9185f3bba166ea79d1275eb5f2a12921db851d4b437795a" Nov 23 16:08:06 crc kubenswrapper[5050]: I1123 16:08:06.788605 5050 scope.go:117] "RemoveContainer" containerID="2049c9b4f9b263662c1497d87a8bad29fdba1e8a3605a0926f00c8c3dc6d91ea" Nov 23 16:08:06 crc kubenswrapper[5050]: I1123 16:08:06.950533 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-79794bd4b-bfchw"] Nov 23 16:08:07 crc kubenswrapper[5050]: I1123 16:08:07.871872 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-79794bd4b-bfchw" event={"ID":"6c8bf1f5-950d-4fa2-b42e-d43554319c11","Type":"ContainerStarted","Data":"8b0dadfdb1413330bb672fbc84f26de335ecf314b9f65236fe9c59f1ec3a4004"} Nov 23 16:08:07 crc kubenswrapper[5050]: I1123 16:08:07.872596 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-79794bd4b-bfchw" event={"ID":"6c8bf1f5-950d-4fa2-b42e-d43554319c11","Type":"ContainerStarted","Data":"09ee0b7245a3b13a335c1e4164f38cc74afdacf3863fd188373971a01893cfea"} Nov 23 16:08:07 crc kubenswrapper[5050]: I1123 16:08:07.872637 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/keystone-79794bd4b-bfchw" Nov 23 16:08:07 crc kubenswrapper[5050]: I1123 16:08:07.903023 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-79794bd4b-bfchw" podStartSLOduration=2.902985666 podStartE2EDuration="2.902985666s" podCreationTimestamp="2025-11-23 16:08:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 16:08:07.890782072 +0000 UTC m=+5183.057778597" watchObservedRunningTime="2025-11-23 16:08:07.902985666 +0000 UTC m=+5183.069982191" Nov 23 16:08:13 crc kubenswrapper[5050]: I1123 16:08:13.554124 5050 scope.go:117] "RemoveContainer" containerID="3fc70d33f56eea45a48b3911893d3dc4f60ad925817000cb324b1423c665b64b" Nov 23 16:08:13 crc kubenswrapper[5050]: E1123 16:08:13.555290 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" 
podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 16:08:25 crc kubenswrapper[5050]: I1123 16:08:25.561021 5050 scope.go:117] "RemoveContainer" containerID="3fc70d33f56eea45a48b3911893d3dc4f60ad925817000cb324b1423c665b64b" Nov 23 16:08:25 crc kubenswrapper[5050]: E1123 16:08:25.562586 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 16:08:28 crc kubenswrapper[5050]: I1123 16:08:28.830507 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-cpcmz"] Nov 23 16:08:28 crc kubenswrapper[5050]: I1123 16:08:28.840663 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-cpcmz" Nov 23 16:08:28 crc kubenswrapper[5050]: I1123 16:08:28.848395 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-cpcmz"] Nov 23 16:08:28 crc kubenswrapper[5050]: I1123 16:08:28.936080 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d8bb5658-7303-4a68-b3d8-2b3cd25ad4de-catalog-content\") pod \"certified-operators-cpcmz\" (UID: \"d8bb5658-7303-4a68-b3d8-2b3cd25ad4de\") " pod="openshift-marketplace/certified-operators-cpcmz" Nov 23 16:08:28 crc kubenswrapper[5050]: I1123 16:08:28.936222 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-krcwg\" (UniqueName: \"kubernetes.io/projected/d8bb5658-7303-4a68-b3d8-2b3cd25ad4de-kube-api-access-krcwg\") pod \"certified-operators-cpcmz\" (UID: \"d8bb5658-7303-4a68-b3d8-2b3cd25ad4de\") " pod="openshift-marketplace/certified-operators-cpcmz" Nov 23 16:08:28 crc kubenswrapper[5050]: I1123 16:08:28.936503 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d8bb5658-7303-4a68-b3d8-2b3cd25ad4de-utilities\") pod \"certified-operators-cpcmz\" (UID: \"d8bb5658-7303-4a68-b3d8-2b3cd25ad4de\") " pod="openshift-marketplace/certified-operators-cpcmz" Nov 23 16:08:29 crc kubenswrapper[5050]: I1123 16:08:29.039027 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d8bb5658-7303-4a68-b3d8-2b3cd25ad4de-catalog-content\") pod \"certified-operators-cpcmz\" (UID: \"d8bb5658-7303-4a68-b3d8-2b3cd25ad4de\") " pod="openshift-marketplace/certified-operators-cpcmz" Nov 23 16:08:29 crc kubenswrapper[5050]: I1123 16:08:29.039506 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-krcwg\" (UniqueName: \"kubernetes.io/projected/d8bb5658-7303-4a68-b3d8-2b3cd25ad4de-kube-api-access-krcwg\") pod \"certified-operators-cpcmz\" (UID: \"d8bb5658-7303-4a68-b3d8-2b3cd25ad4de\") " pod="openshift-marketplace/certified-operators-cpcmz" Nov 23 16:08:29 crc kubenswrapper[5050]: I1123 16:08:29.039562 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d8bb5658-7303-4a68-b3d8-2b3cd25ad4de-utilities\") 
pod \"certified-operators-cpcmz\" (UID: \"d8bb5658-7303-4a68-b3d8-2b3cd25ad4de\") " pod="openshift-marketplace/certified-operators-cpcmz" Nov 23 16:08:29 crc kubenswrapper[5050]: I1123 16:08:29.039693 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d8bb5658-7303-4a68-b3d8-2b3cd25ad4de-catalog-content\") pod \"certified-operators-cpcmz\" (UID: \"d8bb5658-7303-4a68-b3d8-2b3cd25ad4de\") " pod="openshift-marketplace/certified-operators-cpcmz" Nov 23 16:08:29 crc kubenswrapper[5050]: I1123 16:08:29.039966 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d8bb5658-7303-4a68-b3d8-2b3cd25ad4de-utilities\") pod \"certified-operators-cpcmz\" (UID: \"d8bb5658-7303-4a68-b3d8-2b3cd25ad4de\") " pod="openshift-marketplace/certified-operators-cpcmz" Nov 23 16:08:29 crc kubenswrapper[5050]: I1123 16:08:29.070036 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-krcwg\" (UniqueName: \"kubernetes.io/projected/d8bb5658-7303-4a68-b3d8-2b3cd25ad4de-kube-api-access-krcwg\") pod \"certified-operators-cpcmz\" (UID: \"d8bb5658-7303-4a68-b3d8-2b3cd25ad4de\") " pod="openshift-marketplace/certified-operators-cpcmz" Nov 23 16:08:29 crc kubenswrapper[5050]: I1123 16:08:29.177390 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-cpcmz" Nov 23 16:08:29 crc kubenswrapper[5050]: I1123 16:08:29.748229 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-cpcmz"] Nov 23 16:08:30 crc kubenswrapper[5050]: I1123 16:08:30.156682 5050 generic.go:334] "Generic (PLEG): container finished" podID="d8bb5658-7303-4a68-b3d8-2b3cd25ad4de" containerID="293da16e90a0a5c7f80d8cbc820ec68ff2222df54aa06c9e5366c7f71dc4df48" exitCode=0 Nov 23 16:08:30 crc kubenswrapper[5050]: I1123 16:08:30.156989 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cpcmz" event={"ID":"d8bb5658-7303-4a68-b3d8-2b3cd25ad4de","Type":"ContainerDied","Data":"293da16e90a0a5c7f80d8cbc820ec68ff2222df54aa06c9e5366c7f71dc4df48"} Nov 23 16:08:30 crc kubenswrapper[5050]: I1123 16:08:30.157325 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cpcmz" event={"ID":"d8bb5658-7303-4a68-b3d8-2b3cd25ad4de","Type":"ContainerStarted","Data":"05e6cfae475119e8e2b470aad385522a351a3c473d2be853491f1cb96113eae3"} Nov 23 16:08:31 crc kubenswrapper[5050]: I1123 16:08:31.169028 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cpcmz" event={"ID":"d8bb5658-7303-4a68-b3d8-2b3cd25ad4de","Type":"ContainerStarted","Data":"760bbf16e9e24185c01b8a6eed36d6f472c18c83de9a3b1860a261b34d12ec29"} Nov 23 16:08:32 crc kubenswrapper[5050]: I1123 16:08:32.188397 5050 generic.go:334] "Generic (PLEG): container finished" podID="d8bb5658-7303-4a68-b3d8-2b3cd25ad4de" containerID="760bbf16e9e24185c01b8a6eed36d6f472c18c83de9a3b1860a261b34d12ec29" exitCode=0 Nov 23 16:08:32 crc kubenswrapper[5050]: I1123 16:08:32.188577 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cpcmz" event={"ID":"d8bb5658-7303-4a68-b3d8-2b3cd25ad4de","Type":"ContainerDied","Data":"760bbf16e9e24185c01b8a6eed36d6f472c18c83de9a3b1860a261b34d12ec29"} Nov 23 16:08:33 crc kubenswrapper[5050]: I1123 16:08:33.203702 5050 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cpcmz" event={"ID":"d8bb5658-7303-4a68-b3d8-2b3cd25ad4de","Type":"ContainerStarted","Data":"58e06c6127443d11c5216fd4dbc490a5c131583871551626922a04d6d9fa703c"} Nov 23 16:08:33 crc kubenswrapper[5050]: I1123 16:08:33.232025 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-cpcmz" podStartSLOduration=2.779709606 podStartE2EDuration="5.231945792s" podCreationTimestamp="2025-11-23 16:08:28 +0000 UTC" firstStartedPulling="2025-11-23 16:08:30.160012332 +0000 UTC m=+5205.327008817" lastFinishedPulling="2025-11-23 16:08:32.612248478 +0000 UTC m=+5207.779245003" observedRunningTime="2025-11-23 16:08:33.223292048 +0000 UTC m=+5208.390288533" watchObservedRunningTime="2025-11-23 16:08:33.231945792 +0000 UTC m=+5208.398942317" Nov 23 16:08:37 crc kubenswrapper[5050]: I1123 16:08:37.892859 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/keystone-79794bd4b-bfchw" Nov 23 16:08:38 crc kubenswrapper[5050]: I1123 16:08:38.551263 5050 scope.go:117] "RemoveContainer" containerID="3fc70d33f56eea45a48b3911893d3dc4f60ad925817000cb324b1423c665b64b" Nov 23 16:08:39 crc kubenswrapper[5050]: I1123 16:08:39.177851 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-cpcmz" Nov 23 16:08:39 crc kubenswrapper[5050]: I1123 16:08:39.178056 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-cpcmz" Nov 23 16:08:39 crc kubenswrapper[5050]: I1123 16:08:39.260150 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-cpcmz" Nov 23 16:08:39 crc kubenswrapper[5050]: I1123 16:08:39.326313 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-cpcmz" Nov 23 16:08:39 crc kubenswrapper[5050]: I1123 16:08:39.528886 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-cpcmz"] Nov 23 16:08:40 crc kubenswrapper[5050]: I1123 16:08:40.280294 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" event={"ID":"1d998909-9470-47ef-87e8-b34f0473682f","Type":"ContainerStarted","Data":"b87f9772414b4630105844191caea96d4b6e03191eb9f84073781caf3da21f1e"} Nov 23 16:08:40 crc kubenswrapper[5050]: I1123 16:08:40.926437 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Nov 23 16:08:40 crc kubenswrapper[5050]: I1123 16:08:40.929089 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Nov 23 16:08:40 crc kubenswrapper[5050]: I1123 16:08:40.931767 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstackclient-openstackclient-dockercfg-4qzz2" Nov 23 16:08:40 crc kubenswrapper[5050]: I1123 16:08:40.932357 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config" Nov 23 16:08:40 crc kubenswrapper[5050]: I1123 16:08:40.932612 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-config-secret" Nov 23 16:08:40 crc kubenswrapper[5050]: I1123 16:08:40.948669 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Nov 23 16:08:41 crc kubenswrapper[5050]: I1123 16:08:41.103801 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/43f64101-e0e6-46aa-befc-e06a3d420fb0-openstack-config-secret\") pod \"openstackclient\" (UID: \"43f64101-e0e6-46aa-befc-e06a3d420fb0\") " pod="openstack/openstackclient" Nov 23 16:08:41 crc kubenswrapper[5050]: I1123 16:08:41.103991 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v2c5l\" (UniqueName: \"kubernetes.io/projected/43f64101-e0e6-46aa-befc-e06a3d420fb0-kube-api-access-v2c5l\") pod \"openstackclient\" (UID: \"43f64101-e0e6-46aa-befc-e06a3d420fb0\") " pod="openstack/openstackclient" Nov 23 16:08:41 crc kubenswrapper[5050]: I1123 16:08:41.104299 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/43f64101-e0e6-46aa-befc-e06a3d420fb0-openstack-config\") pod \"openstackclient\" (UID: \"43f64101-e0e6-46aa-befc-e06a3d420fb0\") " pod="openstack/openstackclient" Nov 23 16:08:41 crc kubenswrapper[5050]: I1123 16:08:41.206215 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/43f64101-e0e6-46aa-befc-e06a3d420fb0-openstack-config\") pod \"openstackclient\" (UID: \"43f64101-e0e6-46aa-befc-e06a3d420fb0\") " pod="openstack/openstackclient" Nov 23 16:08:41 crc kubenswrapper[5050]: I1123 16:08:41.206364 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/43f64101-e0e6-46aa-befc-e06a3d420fb0-openstack-config-secret\") pod \"openstackclient\" (UID: \"43f64101-e0e6-46aa-befc-e06a3d420fb0\") " pod="openstack/openstackclient" Nov 23 16:08:41 crc kubenswrapper[5050]: I1123 16:08:41.206470 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v2c5l\" (UniqueName: \"kubernetes.io/projected/43f64101-e0e6-46aa-befc-e06a3d420fb0-kube-api-access-v2c5l\") pod \"openstackclient\" (UID: \"43f64101-e0e6-46aa-befc-e06a3d420fb0\") " pod="openstack/openstackclient" Nov 23 16:08:41 crc kubenswrapper[5050]: I1123 16:08:41.208832 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/43f64101-e0e6-46aa-befc-e06a3d420fb0-openstack-config\") pod \"openstackclient\" (UID: \"43f64101-e0e6-46aa-befc-e06a3d420fb0\") " pod="openstack/openstackclient" Nov 23 16:08:41 crc kubenswrapper[5050]: I1123 16:08:41.266804 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/43f64101-e0e6-46aa-befc-e06a3d420fb0-openstack-config-secret\") pod \"openstackclient\" (UID: \"43f64101-e0e6-46aa-befc-e06a3d420fb0\") " pod="openstack/openstackclient" Nov 23 16:08:41 crc kubenswrapper[5050]: I1123 16:08:41.266881 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v2c5l\" (UniqueName: \"kubernetes.io/projected/43f64101-e0e6-46aa-befc-e06a3d420fb0-kube-api-access-v2c5l\") pod \"openstackclient\" (UID: \"43f64101-e0e6-46aa-befc-e06a3d420fb0\") " pod="openstack/openstackclient" Nov 23 16:08:41 crc kubenswrapper[5050]: I1123 16:08:41.294368 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-cpcmz" podUID="d8bb5658-7303-4a68-b3d8-2b3cd25ad4de" containerName="registry-server" containerID="cri-o://58e06c6127443d11c5216fd4dbc490a5c131583871551626922a04d6d9fa703c" gracePeriod=2 Nov 23 16:08:41 crc kubenswrapper[5050]: I1123 16:08:41.560626 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Nov 23 16:08:41 crc kubenswrapper[5050]: I1123 16:08:41.704601 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-cpcmz" Nov 23 16:08:41 crc kubenswrapper[5050]: I1123 16:08:41.819063 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-krcwg\" (UniqueName: \"kubernetes.io/projected/d8bb5658-7303-4a68-b3d8-2b3cd25ad4de-kube-api-access-krcwg\") pod \"d8bb5658-7303-4a68-b3d8-2b3cd25ad4de\" (UID: \"d8bb5658-7303-4a68-b3d8-2b3cd25ad4de\") " Nov 23 16:08:41 crc kubenswrapper[5050]: I1123 16:08:41.819245 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d8bb5658-7303-4a68-b3d8-2b3cd25ad4de-catalog-content\") pod \"d8bb5658-7303-4a68-b3d8-2b3cd25ad4de\" (UID: \"d8bb5658-7303-4a68-b3d8-2b3cd25ad4de\") " Nov 23 16:08:41 crc kubenswrapper[5050]: I1123 16:08:41.819573 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d8bb5658-7303-4a68-b3d8-2b3cd25ad4de-utilities\") pod \"d8bb5658-7303-4a68-b3d8-2b3cd25ad4de\" (UID: \"d8bb5658-7303-4a68-b3d8-2b3cd25ad4de\") " Nov 23 16:08:41 crc kubenswrapper[5050]: I1123 16:08:41.820885 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d8bb5658-7303-4a68-b3d8-2b3cd25ad4de-utilities" (OuterVolumeSpecName: "utilities") pod "d8bb5658-7303-4a68-b3d8-2b3cd25ad4de" (UID: "d8bb5658-7303-4a68-b3d8-2b3cd25ad4de"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 16:08:41 crc kubenswrapper[5050]: I1123 16:08:41.827190 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d8bb5658-7303-4a68-b3d8-2b3cd25ad4de-kube-api-access-krcwg" (OuterVolumeSpecName: "kube-api-access-krcwg") pod "d8bb5658-7303-4a68-b3d8-2b3cd25ad4de" (UID: "d8bb5658-7303-4a68-b3d8-2b3cd25ad4de"). InnerVolumeSpecName "kube-api-access-krcwg". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 16:08:41 crc kubenswrapper[5050]: I1123 16:08:41.875078 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d8bb5658-7303-4a68-b3d8-2b3cd25ad4de-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d8bb5658-7303-4a68-b3d8-2b3cd25ad4de" (UID: "d8bb5658-7303-4a68-b3d8-2b3cd25ad4de"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 16:08:41 crc kubenswrapper[5050]: I1123 16:08:41.922670 5050 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d8bb5658-7303-4a68-b3d8-2b3cd25ad4de-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 23 16:08:41 crc kubenswrapper[5050]: I1123 16:08:41.922774 5050 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d8bb5658-7303-4a68-b3d8-2b3cd25ad4de-utilities\") on node \"crc\" DevicePath \"\"" Nov 23 16:08:41 crc kubenswrapper[5050]: I1123 16:08:41.922839 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-krcwg\" (UniqueName: \"kubernetes.io/projected/d8bb5658-7303-4a68-b3d8-2b3cd25ad4de-kube-api-access-krcwg\") on node \"crc\" DevicePath \"\"" Nov 23 16:08:42 crc kubenswrapper[5050]: I1123 16:08:42.055955 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Nov 23 16:08:42 crc kubenswrapper[5050]: W1123 16:08:42.065565 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod43f64101_e0e6_46aa_befc_e06a3d420fb0.slice/crio-e3e60acda8b43d1902dd32c7febb38e2d5836ff10a21b178355223d1ffabf48a WatchSource:0}: Error finding container e3e60acda8b43d1902dd32c7febb38e2d5836ff10a21b178355223d1ffabf48a: Status 404 returned error can't find the container with id e3e60acda8b43d1902dd32c7febb38e2d5836ff10a21b178355223d1ffabf48a Nov 23 16:08:42 crc kubenswrapper[5050]: I1123 16:08:42.308779 5050 generic.go:334] "Generic (PLEG): container finished" podID="d8bb5658-7303-4a68-b3d8-2b3cd25ad4de" containerID="58e06c6127443d11c5216fd4dbc490a5c131583871551626922a04d6d9fa703c" exitCode=0 Nov 23 16:08:42 crc kubenswrapper[5050]: I1123 16:08:42.308929 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-cpcmz" Nov 23 16:08:42 crc kubenswrapper[5050]: I1123 16:08:42.308851 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cpcmz" event={"ID":"d8bb5658-7303-4a68-b3d8-2b3cd25ad4de","Type":"ContainerDied","Data":"58e06c6127443d11c5216fd4dbc490a5c131583871551626922a04d6d9fa703c"} Nov 23 16:08:42 crc kubenswrapper[5050]: I1123 16:08:42.309251 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cpcmz" event={"ID":"d8bb5658-7303-4a68-b3d8-2b3cd25ad4de","Type":"ContainerDied","Data":"05e6cfae475119e8e2b470aad385522a351a3c473d2be853491f1cb96113eae3"} Nov 23 16:08:42 crc kubenswrapper[5050]: I1123 16:08:42.309307 5050 scope.go:117] "RemoveContainer" containerID="58e06c6127443d11c5216fd4dbc490a5c131583871551626922a04d6d9fa703c" Nov 23 16:08:42 crc kubenswrapper[5050]: I1123 16:08:42.311682 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"43f64101-e0e6-46aa-befc-e06a3d420fb0","Type":"ContainerStarted","Data":"eaa940447e814c1c972a68b877469c7877979b581be817e1b9484fcd434c987a"} Nov 23 16:08:42 crc kubenswrapper[5050]: I1123 16:08:42.311713 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"43f64101-e0e6-46aa-befc-e06a3d420fb0","Type":"ContainerStarted","Data":"e3e60acda8b43d1902dd32c7febb38e2d5836ff10a21b178355223d1ffabf48a"} Nov 23 16:08:42 crc kubenswrapper[5050]: I1123 16:08:42.344484 5050 scope.go:117] "RemoveContainer" containerID="760bbf16e9e24185c01b8a6eed36d6f472c18c83de9a3b1860a261b34d12ec29" Nov 23 16:08:42 crc kubenswrapper[5050]: I1123 16:08:42.349807 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstackclient" podStartSLOduration=2.349781128 podStartE2EDuration="2.349781128s" podCreationTimestamp="2025-11-23 16:08:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 16:08:42.343666325 +0000 UTC m=+5217.510662810" watchObservedRunningTime="2025-11-23 16:08:42.349781128 +0000 UTC m=+5217.516777633" Nov 23 16:08:42 crc kubenswrapper[5050]: I1123 16:08:42.369199 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-cpcmz"] Nov 23 16:08:42 crc kubenswrapper[5050]: I1123 16:08:42.371217 5050 scope.go:117] "RemoveContainer" containerID="293da16e90a0a5c7f80d8cbc820ec68ff2222df54aa06c9e5366c7f71dc4df48" Nov 23 16:08:42 crc kubenswrapper[5050]: I1123 16:08:42.387953 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-cpcmz"] Nov 23 16:08:42 crc kubenswrapper[5050]: I1123 16:08:42.414951 5050 scope.go:117] "RemoveContainer" containerID="58e06c6127443d11c5216fd4dbc490a5c131583871551626922a04d6d9fa703c" Nov 23 16:08:42 crc kubenswrapper[5050]: E1123 16:08:42.415707 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"58e06c6127443d11c5216fd4dbc490a5c131583871551626922a04d6d9fa703c\": container with ID starting with 58e06c6127443d11c5216fd4dbc490a5c131583871551626922a04d6d9fa703c not found: ID does not exist" containerID="58e06c6127443d11c5216fd4dbc490a5c131583871551626922a04d6d9fa703c" Nov 23 16:08:42 crc kubenswrapper[5050]: I1123 16:08:42.415778 5050 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"58e06c6127443d11c5216fd4dbc490a5c131583871551626922a04d6d9fa703c"} err="failed to get container status \"58e06c6127443d11c5216fd4dbc490a5c131583871551626922a04d6d9fa703c\": rpc error: code = NotFound desc = could not find container \"58e06c6127443d11c5216fd4dbc490a5c131583871551626922a04d6d9fa703c\": container with ID starting with 58e06c6127443d11c5216fd4dbc490a5c131583871551626922a04d6d9fa703c not found: ID does not exist" Nov 23 16:08:42 crc kubenswrapper[5050]: I1123 16:08:42.415829 5050 scope.go:117] "RemoveContainer" containerID="760bbf16e9e24185c01b8a6eed36d6f472c18c83de9a3b1860a261b34d12ec29" Nov 23 16:08:42 crc kubenswrapper[5050]: E1123 16:08:42.416376 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"760bbf16e9e24185c01b8a6eed36d6f472c18c83de9a3b1860a261b34d12ec29\": container with ID starting with 760bbf16e9e24185c01b8a6eed36d6f472c18c83de9a3b1860a261b34d12ec29 not found: ID does not exist" containerID="760bbf16e9e24185c01b8a6eed36d6f472c18c83de9a3b1860a261b34d12ec29" Nov 23 16:08:42 crc kubenswrapper[5050]: I1123 16:08:42.416425 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"760bbf16e9e24185c01b8a6eed36d6f472c18c83de9a3b1860a261b34d12ec29"} err="failed to get container status \"760bbf16e9e24185c01b8a6eed36d6f472c18c83de9a3b1860a261b34d12ec29\": rpc error: code = NotFound desc = could not find container \"760bbf16e9e24185c01b8a6eed36d6f472c18c83de9a3b1860a261b34d12ec29\": container with ID starting with 760bbf16e9e24185c01b8a6eed36d6f472c18c83de9a3b1860a261b34d12ec29 not found: ID does not exist" Nov 23 16:08:42 crc kubenswrapper[5050]: I1123 16:08:42.416526 5050 scope.go:117] "RemoveContainer" containerID="293da16e90a0a5c7f80d8cbc820ec68ff2222df54aa06c9e5366c7f71dc4df48" Nov 23 16:08:42 crc kubenswrapper[5050]: E1123 16:08:42.416984 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"293da16e90a0a5c7f80d8cbc820ec68ff2222df54aa06c9e5366c7f71dc4df48\": container with ID starting with 293da16e90a0a5c7f80d8cbc820ec68ff2222df54aa06c9e5366c7f71dc4df48 not found: ID does not exist" containerID="293da16e90a0a5c7f80d8cbc820ec68ff2222df54aa06c9e5366c7f71dc4df48" Nov 23 16:08:42 crc kubenswrapper[5050]: I1123 16:08:42.417037 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"293da16e90a0a5c7f80d8cbc820ec68ff2222df54aa06c9e5366c7f71dc4df48"} err="failed to get container status \"293da16e90a0a5c7f80d8cbc820ec68ff2222df54aa06c9e5366c7f71dc4df48\": rpc error: code = NotFound desc = could not find container \"293da16e90a0a5c7f80d8cbc820ec68ff2222df54aa06c9e5366c7f71dc4df48\": container with ID starting with 293da16e90a0a5c7f80d8cbc820ec68ff2222df54aa06c9e5366c7f71dc4df48 not found: ID does not exist" Nov 23 16:08:43 crc kubenswrapper[5050]: I1123 16:08:43.565800 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d8bb5658-7303-4a68-b3d8-2b3cd25ad4de" path="/var/lib/kubelet/pods/d8bb5658-7303-4a68-b3d8-2b3cd25ad4de/volumes" Nov 23 16:09:07 crc kubenswrapper[5050]: I1123 16:09:07.936529 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-q4z2r"] Nov 23 16:09:07 crc kubenswrapper[5050]: E1123 16:09:07.937761 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d8bb5658-7303-4a68-b3d8-2b3cd25ad4de" 
containerName="registry-server" Nov 23 16:09:07 crc kubenswrapper[5050]: I1123 16:09:07.937779 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="d8bb5658-7303-4a68-b3d8-2b3cd25ad4de" containerName="registry-server" Nov 23 16:09:07 crc kubenswrapper[5050]: E1123 16:09:07.937803 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d8bb5658-7303-4a68-b3d8-2b3cd25ad4de" containerName="extract-utilities" Nov 23 16:09:07 crc kubenswrapper[5050]: I1123 16:09:07.937809 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="d8bb5658-7303-4a68-b3d8-2b3cd25ad4de" containerName="extract-utilities" Nov 23 16:09:07 crc kubenswrapper[5050]: E1123 16:09:07.937819 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d8bb5658-7303-4a68-b3d8-2b3cd25ad4de" containerName="extract-content" Nov 23 16:09:07 crc kubenswrapper[5050]: I1123 16:09:07.937825 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="d8bb5658-7303-4a68-b3d8-2b3cd25ad4de" containerName="extract-content" Nov 23 16:09:07 crc kubenswrapper[5050]: I1123 16:09:07.938001 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="d8bb5658-7303-4a68-b3d8-2b3cd25ad4de" containerName="registry-server" Nov 23 16:09:07 crc kubenswrapper[5050]: I1123 16:09:07.939365 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-q4z2r" Nov 23 16:09:07 crc kubenswrapper[5050]: I1123 16:09:07.948829 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-q4z2r"] Nov 23 16:09:08 crc kubenswrapper[5050]: I1123 16:09:08.089959 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-scccq\" (UniqueName: \"kubernetes.io/projected/83e06022-fb23-4d95-8a79-9feacb515ec8-kube-api-access-scccq\") pod \"redhat-operators-q4z2r\" (UID: \"83e06022-fb23-4d95-8a79-9feacb515ec8\") " pod="openshift-marketplace/redhat-operators-q4z2r" Nov 23 16:09:08 crc kubenswrapper[5050]: I1123 16:09:08.090088 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/83e06022-fb23-4d95-8a79-9feacb515ec8-utilities\") pod \"redhat-operators-q4z2r\" (UID: \"83e06022-fb23-4d95-8a79-9feacb515ec8\") " pod="openshift-marketplace/redhat-operators-q4z2r" Nov 23 16:09:08 crc kubenswrapper[5050]: I1123 16:09:08.090123 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/83e06022-fb23-4d95-8a79-9feacb515ec8-catalog-content\") pod \"redhat-operators-q4z2r\" (UID: \"83e06022-fb23-4d95-8a79-9feacb515ec8\") " pod="openshift-marketplace/redhat-operators-q4z2r" Nov 23 16:09:08 crc kubenswrapper[5050]: I1123 16:09:08.192152 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-scccq\" (UniqueName: \"kubernetes.io/projected/83e06022-fb23-4d95-8a79-9feacb515ec8-kube-api-access-scccq\") pod \"redhat-operators-q4z2r\" (UID: \"83e06022-fb23-4d95-8a79-9feacb515ec8\") " pod="openshift-marketplace/redhat-operators-q4z2r" Nov 23 16:09:08 crc kubenswrapper[5050]: I1123 16:09:08.192709 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/83e06022-fb23-4d95-8a79-9feacb515ec8-utilities\") pod \"redhat-operators-q4z2r\" (UID: \"83e06022-fb23-4d95-8a79-9feacb515ec8\") " 
pod="openshift-marketplace/redhat-operators-q4z2r" Nov 23 16:09:08 crc kubenswrapper[5050]: I1123 16:09:08.192738 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/83e06022-fb23-4d95-8a79-9feacb515ec8-catalog-content\") pod \"redhat-operators-q4z2r\" (UID: \"83e06022-fb23-4d95-8a79-9feacb515ec8\") " pod="openshift-marketplace/redhat-operators-q4z2r" Nov 23 16:09:08 crc kubenswrapper[5050]: I1123 16:09:08.193281 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/83e06022-fb23-4d95-8a79-9feacb515ec8-catalog-content\") pod \"redhat-operators-q4z2r\" (UID: \"83e06022-fb23-4d95-8a79-9feacb515ec8\") " pod="openshift-marketplace/redhat-operators-q4z2r" Nov 23 16:09:08 crc kubenswrapper[5050]: I1123 16:09:08.193365 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/83e06022-fb23-4d95-8a79-9feacb515ec8-utilities\") pod \"redhat-operators-q4z2r\" (UID: \"83e06022-fb23-4d95-8a79-9feacb515ec8\") " pod="openshift-marketplace/redhat-operators-q4z2r" Nov 23 16:09:08 crc kubenswrapper[5050]: I1123 16:09:08.215593 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-scccq\" (UniqueName: \"kubernetes.io/projected/83e06022-fb23-4d95-8a79-9feacb515ec8-kube-api-access-scccq\") pod \"redhat-operators-q4z2r\" (UID: \"83e06022-fb23-4d95-8a79-9feacb515ec8\") " pod="openshift-marketplace/redhat-operators-q4z2r" Nov 23 16:09:08 crc kubenswrapper[5050]: I1123 16:09:08.274487 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-q4z2r" Nov 23 16:09:08 crc kubenswrapper[5050]: I1123 16:09:08.815972 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-q4z2r"] Nov 23 16:09:09 crc kubenswrapper[5050]: I1123 16:09:09.640422 5050 generic.go:334] "Generic (PLEG): container finished" podID="83e06022-fb23-4d95-8a79-9feacb515ec8" containerID="99a55b1738a4944688fc196781ec733d4aee8edfbbc4cd0d9110e8f3d0f1e0be" exitCode=0 Nov 23 16:09:09 crc kubenswrapper[5050]: I1123 16:09:09.640498 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-q4z2r" event={"ID":"83e06022-fb23-4d95-8a79-9feacb515ec8","Type":"ContainerDied","Data":"99a55b1738a4944688fc196781ec733d4aee8edfbbc4cd0d9110e8f3d0f1e0be"} Nov 23 16:09:09 crc kubenswrapper[5050]: I1123 16:09:09.644539 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-q4z2r" event={"ID":"83e06022-fb23-4d95-8a79-9feacb515ec8","Type":"ContainerStarted","Data":"ba445182208603e800c9dca1f0fe2c5d6f352d518dd55f472566fd2ee7ddeef9"} Nov 23 16:09:10 crc kubenswrapper[5050]: I1123 16:09:10.132529 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-kvx4h"] Nov 23 16:09:10 crc kubenswrapper[5050]: I1123 16:09:10.134311 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-kvx4h" Nov 23 16:09:10 crc kubenswrapper[5050]: I1123 16:09:10.142135 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-kvx4h"] Nov 23 16:09:10 crc kubenswrapper[5050]: I1123 16:09:10.241094 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fv4b6\" (UniqueName: \"kubernetes.io/projected/e2d2c8fe-8777-4f9c-8e42-32be7614f595-kube-api-access-fv4b6\") pod \"community-operators-kvx4h\" (UID: \"e2d2c8fe-8777-4f9c-8e42-32be7614f595\") " pod="openshift-marketplace/community-operators-kvx4h" Nov 23 16:09:10 crc kubenswrapper[5050]: I1123 16:09:10.241585 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e2d2c8fe-8777-4f9c-8e42-32be7614f595-utilities\") pod \"community-operators-kvx4h\" (UID: \"e2d2c8fe-8777-4f9c-8e42-32be7614f595\") " pod="openshift-marketplace/community-operators-kvx4h" Nov 23 16:09:10 crc kubenswrapper[5050]: I1123 16:09:10.241664 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e2d2c8fe-8777-4f9c-8e42-32be7614f595-catalog-content\") pod \"community-operators-kvx4h\" (UID: \"e2d2c8fe-8777-4f9c-8e42-32be7614f595\") " pod="openshift-marketplace/community-operators-kvx4h" Nov 23 16:09:10 crc kubenswrapper[5050]: I1123 16:09:10.343681 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fv4b6\" (UniqueName: \"kubernetes.io/projected/e2d2c8fe-8777-4f9c-8e42-32be7614f595-kube-api-access-fv4b6\") pod \"community-operators-kvx4h\" (UID: \"e2d2c8fe-8777-4f9c-8e42-32be7614f595\") " pod="openshift-marketplace/community-operators-kvx4h" Nov 23 16:09:10 crc kubenswrapper[5050]: I1123 16:09:10.343821 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e2d2c8fe-8777-4f9c-8e42-32be7614f595-utilities\") pod \"community-operators-kvx4h\" (UID: \"e2d2c8fe-8777-4f9c-8e42-32be7614f595\") " pod="openshift-marketplace/community-operators-kvx4h" Nov 23 16:09:10 crc kubenswrapper[5050]: I1123 16:09:10.343867 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e2d2c8fe-8777-4f9c-8e42-32be7614f595-catalog-content\") pod \"community-operators-kvx4h\" (UID: \"e2d2c8fe-8777-4f9c-8e42-32be7614f595\") " pod="openshift-marketplace/community-operators-kvx4h" Nov 23 16:09:10 crc kubenswrapper[5050]: I1123 16:09:10.344769 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e2d2c8fe-8777-4f9c-8e42-32be7614f595-catalog-content\") pod \"community-operators-kvx4h\" (UID: \"e2d2c8fe-8777-4f9c-8e42-32be7614f595\") " pod="openshift-marketplace/community-operators-kvx4h" Nov 23 16:09:10 crc kubenswrapper[5050]: I1123 16:09:10.347297 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e2d2c8fe-8777-4f9c-8e42-32be7614f595-utilities\") pod \"community-operators-kvx4h\" (UID: \"e2d2c8fe-8777-4f9c-8e42-32be7614f595\") " pod="openshift-marketplace/community-operators-kvx4h" Nov 23 16:09:10 crc kubenswrapper[5050]: I1123 16:09:10.371991 5050 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-fv4b6\" (UniqueName: \"kubernetes.io/projected/e2d2c8fe-8777-4f9c-8e42-32be7614f595-kube-api-access-fv4b6\") pod \"community-operators-kvx4h\" (UID: \"e2d2c8fe-8777-4f9c-8e42-32be7614f595\") " pod="openshift-marketplace/community-operators-kvx4h" Nov 23 16:09:10 crc kubenswrapper[5050]: I1123 16:09:10.472178 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-kvx4h" Nov 23 16:09:10 crc kubenswrapper[5050]: I1123 16:09:10.671060 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-q4z2r" event={"ID":"83e06022-fb23-4d95-8a79-9feacb515ec8","Type":"ContainerStarted","Data":"30ca41d2c59c1415741e89ddbb16f0b535d1be80272136ca19488a0bb8adb846"} Nov 23 16:09:11 crc kubenswrapper[5050]: I1123 16:09:11.075592 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-kvx4h"] Nov 23 16:09:11 crc kubenswrapper[5050]: I1123 16:09:11.696522 5050 generic.go:334] "Generic (PLEG): container finished" podID="83e06022-fb23-4d95-8a79-9feacb515ec8" containerID="30ca41d2c59c1415741e89ddbb16f0b535d1be80272136ca19488a0bb8adb846" exitCode=0 Nov 23 16:09:11 crc kubenswrapper[5050]: I1123 16:09:11.696630 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-q4z2r" event={"ID":"83e06022-fb23-4d95-8a79-9feacb515ec8","Type":"ContainerDied","Data":"30ca41d2c59c1415741e89ddbb16f0b535d1be80272136ca19488a0bb8adb846"} Nov 23 16:09:11 crc kubenswrapper[5050]: I1123 16:09:11.700368 5050 generic.go:334] "Generic (PLEG): container finished" podID="e2d2c8fe-8777-4f9c-8e42-32be7614f595" containerID="5a60a1ec7afd185e23ae9692b30e5ca33b154e87cb9519568a201f6d399b917e" exitCode=0 Nov 23 16:09:11 crc kubenswrapper[5050]: I1123 16:09:11.700623 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-kvx4h" event={"ID":"e2d2c8fe-8777-4f9c-8e42-32be7614f595","Type":"ContainerDied","Data":"5a60a1ec7afd185e23ae9692b30e5ca33b154e87cb9519568a201f6d399b917e"} Nov 23 16:09:11 crc kubenswrapper[5050]: I1123 16:09:11.700903 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-kvx4h" event={"ID":"e2d2c8fe-8777-4f9c-8e42-32be7614f595","Type":"ContainerStarted","Data":"55bc69cb1fc4ea6526ae36b9d3f5ccc6c1b04939653c8d6f735231a353ecc458"} Nov 23 16:09:13 crc kubenswrapper[5050]: I1123 16:09:13.723944 5050 generic.go:334] "Generic (PLEG): container finished" podID="e2d2c8fe-8777-4f9c-8e42-32be7614f595" containerID="73bc63ceb9fc8f8e714dfdec3f92cac43b237fc21b48af024b52e2c287989b58" exitCode=0 Nov 23 16:09:13 crc kubenswrapper[5050]: I1123 16:09:13.724086 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-kvx4h" event={"ID":"e2d2c8fe-8777-4f9c-8e42-32be7614f595","Type":"ContainerDied","Data":"73bc63ceb9fc8f8e714dfdec3f92cac43b237fc21b48af024b52e2c287989b58"} Nov 23 16:09:13 crc kubenswrapper[5050]: I1123 16:09:13.727498 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-q4z2r" event={"ID":"83e06022-fb23-4d95-8a79-9feacb515ec8","Type":"ContainerStarted","Data":"b469e8bdf387e05fd24728912fa3defcbb741fb14756bcf8ff9040f5833a19fe"} Nov 23 16:09:13 crc kubenswrapper[5050]: I1123 16:09:13.801290 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-q4z2r" 
podStartSLOduration=4.245337197 podStartE2EDuration="6.801270051s" podCreationTimestamp="2025-11-23 16:09:07 +0000 UTC" firstStartedPulling="2025-11-23 16:09:09.646540503 +0000 UTC m=+5244.813536988" lastFinishedPulling="2025-11-23 16:09:12.202473367 +0000 UTC m=+5247.369469842" observedRunningTime="2025-11-23 16:09:13.796295251 +0000 UTC m=+5248.963291746" watchObservedRunningTime="2025-11-23 16:09:13.801270051 +0000 UTC m=+5248.968266536" Nov 23 16:09:14 crc kubenswrapper[5050]: I1123 16:09:14.739819 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-kvx4h" event={"ID":"e2d2c8fe-8777-4f9c-8e42-32be7614f595","Type":"ContainerStarted","Data":"8fcfb38a51a934390fa271cf92ca33ff754fc1cce9c82eada066776b1a233546"} Nov 23 16:09:14 crc kubenswrapper[5050]: I1123 16:09:14.774306 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-kvx4h" podStartSLOduration=2.230658252 podStartE2EDuration="4.774272579s" podCreationTimestamp="2025-11-23 16:09:10 +0000 UTC" firstStartedPulling="2025-11-23 16:09:11.703257134 +0000 UTC m=+5246.870253659" lastFinishedPulling="2025-11-23 16:09:14.246871501 +0000 UTC m=+5249.413867986" observedRunningTime="2025-11-23 16:09:14.765311676 +0000 UTC m=+5249.932308161" watchObservedRunningTime="2025-11-23 16:09:14.774272579 +0000 UTC m=+5249.941269064" Nov 23 16:09:18 crc kubenswrapper[5050]: I1123 16:09:18.275501 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-q4z2r" Nov 23 16:09:18 crc kubenswrapper[5050]: I1123 16:09:18.277274 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-q4z2r" Nov 23 16:09:19 crc kubenswrapper[5050]: I1123 16:09:19.322981 5050 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-q4z2r" podUID="83e06022-fb23-4d95-8a79-9feacb515ec8" containerName="registry-server" probeResult="failure" output=< Nov 23 16:09:19 crc kubenswrapper[5050]: timeout: failed to connect service ":50051" within 1s Nov 23 16:09:19 crc kubenswrapper[5050]: > Nov 23 16:09:20 crc kubenswrapper[5050]: I1123 16:09:20.472495 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-kvx4h" Nov 23 16:09:20 crc kubenswrapper[5050]: I1123 16:09:20.472645 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-kvx4h" Nov 23 16:09:20 crc kubenswrapper[5050]: I1123 16:09:20.540012 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-kvx4h" Nov 23 16:09:20 crc kubenswrapper[5050]: I1123 16:09:20.873105 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-kvx4h" Nov 23 16:09:20 crc kubenswrapper[5050]: I1123 16:09:20.935180 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-kvx4h"] Nov 23 16:09:22 crc kubenswrapper[5050]: I1123 16:09:22.837524 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-kvx4h" podUID="e2d2c8fe-8777-4f9c-8e42-32be7614f595" containerName="registry-server" containerID="cri-o://8fcfb38a51a934390fa271cf92ca33ff754fc1cce9c82eada066776b1a233546" gracePeriod=2 Nov 23 16:09:23 crc kubenswrapper[5050]: I1123 
16:09:23.848592 5050 generic.go:334] "Generic (PLEG): container finished" podID="e2d2c8fe-8777-4f9c-8e42-32be7614f595" containerID="8fcfb38a51a934390fa271cf92ca33ff754fc1cce9c82eada066776b1a233546" exitCode=0 Nov 23 16:09:23 crc kubenswrapper[5050]: I1123 16:09:23.848678 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-kvx4h" event={"ID":"e2d2c8fe-8777-4f9c-8e42-32be7614f595","Type":"ContainerDied","Data":"8fcfb38a51a934390fa271cf92ca33ff754fc1cce9c82eada066776b1a233546"} Nov 23 16:09:23 crc kubenswrapper[5050]: I1123 16:09:23.849003 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-kvx4h" event={"ID":"e2d2c8fe-8777-4f9c-8e42-32be7614f595","Type":"ContainerDied","Data":"55bc69cb1fc4ea6526ae36b9d3f5ccc6c1b04939653c8d6f735231a353ecc458"} Nov 23 16:09:23 crc kubenswrapper[5050]: I1123 16:09:23.849021 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="55bc69cb1fc4ea6526ae36b9d3f5ccc6c1b04939653c8d6f735231a353ecc458" Nov 23 16:09:23 crc kubenswrapper[5050]: I1123 16:09:23.859214 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-kvx4h" Nov 23 16:09:24 crc kubenswrapper[5050]: I1123 16:09:24.025199 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fv4b6\" (UniqueName: \"kubernetes.io/projected/e2d2c8fe-8777-4f9c-8e42-32be7614f595-kube-api-access-fv4b6\") pod \"e2d2c8fe-8777-4f9c-8e42-32be7614f595\" (UID: \"e2d2c8fe-8777-4f9c-8e42-32be7614f595\") " Nov 23 16:09:24 crc kubenswrapper[5050]: I1123 16:09:24.025317 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e2d2c8fe-8777-4f9c-8e42-32be7614f595-utilities\") pod \"e2d2c8fe-8777-4f9c-8e42-32be7614f595\" (UID: \"e2d2c8fe-8777-4f9c-8e42-32be7614f595\") " Nov 23 16:09:24 crc kubenswrapper[5050]: I1123 16:09:24.025492 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e2d2c8fe-8777-4f9c-8e42-32be7614f595-catalog-content\") pod \"e2d2c8fe-8777-4f9c-8e42-32be7614f595\" (UID: \"e2d2c8fe-8777-4f9c-8e42-32be7614f595\") " Nov 23 16:09:24 crc kubenswrapper[5050]: I1123 16:09:24.026512 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e2d2c8fe-8777-4f9c-8e42-32be7614f595-utilities" (OuterVolumeSpecName: "utilities") pod "e2d2c8fe-8777-4f9c-8e42-32be7614f595" (UID: "e2d2c8fe-8777-4f9c-8e42-32be7614f595"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 16:09:24 crc kubenswrapper[5050]: I1123 16:09:24.037789 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e2d2c8fe-8777-4f9c-8e42-32be7614f595-kube-api-access-fv4b6" (OuterVolumeSpecName: "kube-api-access-fv4b6") pod "e2d2c8fe-8777-4f9c-8e42-32be7614f595" (UID: "e2d2c8fe-8777-4f9c-8e42-32be7614f595"). InnerVolumeSpecName "kube-api-access-fv4b6". 
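
The startup-probe failure at 16:09:19.322981 above ("timeout: failed to connect service \":50051\" within 1s") is the marketplace registry-server's gRPC health check timing out while the catalog index is still loading; the same pod turns ready once the probe succeeds at 16:09:28. A sketch of an equivalent standalone check with the same one-second budget, using the standard gRPC health protocol; the address and the use of grpc_health_v1 are assumptions consistent with the probe output, not taken from the log:

package main

import (
	"context"
	"fmt"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	healthpb "google.golang.org/grpc/health/grpc_health_v1"
)

func main() {
	// Same budget as the probe: connect and check within 1s.
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	conn, err := grpc.DialContext(ctx, "localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithBlock())
	if err != nil {
		fmt.Println(`timeout: failed to connect service ":50051" within 1s`)
		return
	}
	defer conn.Close()

	resp, err := healthpb.NewHealthClient(conn).Check(ctx,
		&healthpb.HealthCheckRequest{Service: ""})
	if err != nil {
		fmt.Println("health RPC failed:", err)
		return
	}
	fmt.Println("status:", resp.GetStatus()) // SERVING once the index is loaded
}
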
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 16:09:24 crc kubenswrapper[5050]: I1123 16:09:24.085137 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e2d2c8fe-8777-4f9c-8e42-32be7614f595-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e2d2c8fe-8777-4f9c-8e42-32be7614f595" (UID: "e2d2c8fe-8777-4f9c-8e42-32be7614f595"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 16:09:24 crc kubenswrapper[5050]: I1123 16:09:24.127506 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fv4b6\" (UniqueName: \"kubernetes.io/projected/e2d2c8fe-8777-4f9c-8e42-32be7614f595-kube-api-access-fv4b6\") on node \"crc\" DevicePath \"\"" Nov 23 16:09:24 crc kubenswrapper[5050]: I1123 16:09:24.127547 5050 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e2d2c8fe-8777-4f9c-8e42-32be7614f595-utilities\") on node \"crc\" DevicePath \"\"" Nov 23 16:09:24 crc kubenswrapper[5050]: I1123 16:09:24.127557 5050 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e2d2c8fe-8777-4f9c-8e42-32be7614f595-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 23 16:09:24 crc kubenswrapper[5050]: I1123 16:09:24.859189 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-kvx4h" Nov 23 16:09:24 crc kubenswrapper[5050]: I1123 16:09:24.914248 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-kvx4h"] Nov 23 16:09:24 crc kubenswrapper[5050]: I1123 16:09:24.929100 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-kvx4h"] Nov 23 16:09:25 crc kubenswrapper[5050]: I1123 16:09:25.576585 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e2d2c8fe-8777-4f9c-8e42-32be7614f595" path="/var/lib/kubelet/pods/e2d2c8fe-8777-4f9c-8e42-32be7614f595/volumes" Nov 23 16:09:28 crc kubenswrapper[5050]: I1123 16:09:28.336347 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-q4z2r" Nov 23 16:09:28 crc kubenswrapper[5050]: I1123 16:09:28.396367 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-q4z2r" Nov 23 16:09:28 crc kubenswrapper[5050]: I1123 16:09:28.580490 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-q4z2r"] Nov 23 16:09:29 crc kubenswrapper[5050]: I1123 16:09:29.924924 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-q4z2r" podUID="83e06022-fb23-4d95-8a79-9feacb515ec8" containerName="registry-server" containerID="cri-o://b469e8bdf387e05fd24728912fa3defcbb741fb14756bcf8ff9040f5833a19fe" gracePeriod=2 Nov 23 16:09:30 crc kubenswrapper[5050]: I1123 16:09:30.395134 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-q4z2r" Nov 23 16:09:30 crc kubenswrapper[5050]: I1123 16:09:30.485844 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/83e06022-fb23-4d95-8a79-9feacb515ec8-utilities\") pod \"83e06022-fb23-4d95-8a79-9feacb515ec8\" (UID: \"83e06022-fb23-4d95-8a79-9feacb515ec8\") " Nov 23 16:09:30 crc kubenswrapper[5050]: I1123 16:09:30.486073 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-scccq\" (UniqueName: \"kubernetes.io/projected/83e06022-fb23-4d95-8a79-9feacb515ec8-kube-api-access-scccq\") pod \"83e06022-fb23-4d95-8a79-9feacb515ec8\" (UID: \"83e06022-fb23-4d95-8a79-9feacb515ec8\") " Nov 23 16:09:30 crc kubenswrapper[5050]: I1123 16:09:30.486213 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/83e06022-fb23-4d95-8a79-9feacb515ec8-catalog-content\") pod \"83e06022-fb23-4d95-8a79-9feacb515ec8\" (UID: \"83e06022-fb23-4d95-8a79-9feacb515ec8\") " Nov 23 16:09:30 crc kubenswrapper[5050]: I1123 16:09:30.487636 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/83e06022-fb23-4d95-8a79-9feacb515ec8-utilities" (OuterVolumeSpecName: "utilities") pod "83e06022-fb23-4d95-8a79-9feacb515ec8" (UID: "83e06022-fb23-4d95-8a79-9feacb515ec8"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 16:09:30 crc kubenswrapper[5050]: I1123 16:09:30.494301 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/83e06022-fb23-4d95-8a79-9feacb515ec8-kube-api-access-scccq" (OuterVolumeSpecName: "kube-api-access-scccq") pod "83e06022-fb23-4d95-8a79-9feacb515ec8" (UID: "83e06022-fb23-4d95-8a79-9feacb515ec8"). InnerVolumeSpecName "kube-api-access-scccq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 16:09:30 crc kubenswrapper[5050]: I1123 16:09:30.583052 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/83e06022-fb23-4d95-8a79-9feacb515ec8-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "83e06022-fb23-4d95-8a79-9feacb515ec8" (UID: "83e06022-fb23-4d95-8a79-9feacb515ec8"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 16:09:30 crc kubenswrapper[5050]: I1123 16:09:30.587978 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-scccq\" (UniqueName: \"kubernetes.io/projected/83e06022-fb23-4d95-8a79-9feacb515ec8-kube-api-access-scccq\") on node \"crc\" DevicePath \"\"" Nov 23 16:09:30 crc kubenswrapper[5050]: I1123 16:09:30.588001 5050 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/83e06022-fb23-4d95-8a79-9feacb515ec8-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 23 16:09:30 crc kubenswrapper[5050]: I1123 16:09:30.588012 5050 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/83e06022-fb23-4d95-8a79-9feacb515ec8-utilities\") on node \"crc\" DevicePath \"\"" Nov 23 16:09:30 crc kubenswrapper[5050]: I1123 16:09:30.937302 5050 generic.go:334] "Generic (PLEG): container finished" podID="83e06022-fb23-4d95-8a79-9feacb515ec8" containerID="b469e8bdf387e05fd24728912fa3defcbb741fb14756bcf8ff9040f5833a19fe" exitCode=0 Nov 23 16:09:30 crc kubenswrapper[5050]: I1123 16:09:30.937354 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-q4z2r" event={"ID":"83e06022-fb23-4d95-8a79-9feacb515ec8","Type":"ContainerDied","Data":"b469e8bdf387e05fd24728912fa3defcbb741fb14756bcf8ff9040f5833a19fe"} Nov 23 16:09:30 crc kubenswrapper[5050]: I1123 16:09:30.937383 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-q4z2r" event={"ID":"83e06022-fb23-4d95-8a79-9feacb515ec8","Type":"ContainerDied","Data":"ba445182208603e800c9dca1f0fe2c5d6f352d518dd55f472566fd2ee7ddeef9"} Nov 23 16:09:30 crc kubenswrapper[5050]: I1123 16:09:30.937400 5050 scope.go:117] "RemoveContainer" containerID="b469e8bdf387e05fd24728912fa3defcbb741fb14756bcf8ff9040f5833a19fe" Nov 23 16:09:30 crc kubenswrapper[5050]: I1123 16:09:30.937480 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-q4z2r" Nov 23 16:09:30 crc kubenswrapper[5050]: I1123 16:09:30.970095 5050 scope.go:117] "RemoveContainer" containerID="30ca41d2c59c1415741e89ddbb16f0b535d1be80272136ca19488a0bb8adb846" Nov 23 16:09:30 crc kubenswrapper[5050]: I1123 16:09:30.992849 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-q4z2r"] Nov 23 16:09:30 crc kubenswrapper[5050]: I1123 16:09:30.999458 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-q4z2r"] Nov 23 16:09:31 crc kubenswrapper[5050]: I1123 16:09:31.013552 5050 scope.go:117] "RemoveContainer" containerID="99a55b1738a4944688fc196781ec733d4aee8edfbbc4cd0d9110e8f3d0f1e0be" Nov 23 16:09:31 crc kubenswrapper[5050]: I1123 16:09:31.061259 5050 scope.go:117] "RemoveContainer" containerID="b469e8bdf387e05fd24728912fa3defcbb741fb14756bcf8ff9040f5833a19fe" Nov 23 16:09:31 crc kubenswrapper[5050]: E1123 16:09:31.062625 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b469e8bdf387e05fd24728912fa3defcbb741fb14756bcf8ff9040f5833a19fe\": container with ID starting with b469e8bdf387e05fd24728912fa3defcbb741fb14756bcf8ff9040f5833a19fe not found: ID does not exist" containerID="b469e8bdf387e05fd24728912fa3defcbb741fb14756bcf8ff9040f5833a19fe" Nov 23 16:09:31 crc kubenswrapper[5050]: I1123 16:09:31.062705 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b469e8bdf387e05fd24728912fa3defcbb741fb14756bcf8ff9040f5833a19fe"} err="failed to get container status \"b469e8bdf387e05fd24728912fa3defcbb741fb14756bcf8ff9040f5833a19fe\": rpc error: code = NotFound desc = could not find container \"b469e8bdf387e05fd24728912fa3defcbb741fb14756bcf8ff9040f5833a19fe\": container with ID starting with b469e8bdf387e05fd24728912fa3defcbb741fb14756bcf8ff9040f5833a19fe not found: ID does not exist" Nov 23 16:09:31 crc kubenswrapper[5050]: I1123 16:09:31.062756 5050 scope.go:117] "RemoveContainer" containerID="30ca41d2c59c1415741e89ddbb16f0b535d1be80272136ca19488a0bb8adb846" Nov 23 16:09:31 crc kubenswrapper[5050]: E1123 16:09:31.063335 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"30ca41d2c59c1415741e89ddbb16f0b535d1be80272136ca19488a0bb8adb846\": container with ID starting with 30ca41d2c59c1415741e89ddbb16f0b535d1be80272136ca19488a0bb8adb846 not found: ID does not exist" containerID="30ca41d2c59c1415741e89ddbb16f0b535d1be80272136ca19488a0bb8adb846" Nov 23 16:09:31 crc kubenswrapper[5050]: I1123 16:09:31.063407 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"30ca41d2c59c1415741e89ddbb16f0b535d1be80272136ca19488a0bb8adb846"} err="failed to get container status \"30ca41d2c59c1415741e89ddbb16f0b535d1be80272136ca19488a0bb8adb846\": rpc error: code = NotFound desc = could not find container \"30ca41d2c59c1415741e89ddbb16f0b535d1be80272136ca19488a0bb8adb846\": container with ID starting with 30ca41d2c59c1415741e89ddbb16f0b535d1be80272136ca19488a0bb8adb846 not found: ID does not exist" Nov 23 16:09:31 crc kubenswrapper[5050]: I1123 16:09:31.063437 5050 scope.go:117] "RemoveContainer" containerID="99a55b1738a4944688fc196781ec733d4aee8edfbbc4cd0d9110e8f3d0f1e0be" Nov 23 16:09:31 crc kubenswrapper[5050]: E1123 16:09:31.063858 5050 log.go:32] "ContainerStatus from runtime service failed" 
err="rpc error: code = NotFound desc = could not find container \"99a55b1738a4944688fc196781ec733d4aee8edfbbc4cd0d9110e8f3d0f1e0be\": container with ID starting with 99a55b1738a4944688fc196781ec733d4aee8edfbbc4cd0d9110e8f3d0f1e0be not found: ID does not exist" containerID="99a55b1738a4944688fc196781ec733d4aee8edfbbc4cd0d9110e8f3d0f1e0be" Nov 23 16:09:31 crc kubenswrapper[5050]: I1123 16:09:31.063931 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"99a55b1738a4944688fc196781ec733d4aee8edfbbc4cd0d9110e8f3d0f1e0be"} err="failed to get container status \"99a55b1738a4944688fc196781ec733d4aee8edfbbc4cd0d9110e8f3d0f1e0be\": rpc error: code = NotFound desc = could not find container \"99a55b1738a4944688fc196781ec733d4aee8edfbbc4cd0d9110e8f3d0f1e0be\": container with ID starting with 99a55b1738a4944688fc196781ec733d4aee8edfbbc4cd0d9110e8f3d0f1e0be not found: ID does not exist" Nov 23 16:09:31 crc kubenswrapper[5050]: I1123 16:09:31.563677 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="83e06022-fb23-4d95-8a79-9feacb515ec8" path="/var/lib/kubelet/pods/83e06022-fb23-4d95-8a79-9feacb515ec8/volumes" Nov 23 16:10:27 crc kubenswrapper[5050]: I1123 16:10:27.563515 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-e940-account-create-q8fld"] Nov 23 16:10:27 crc kubenswrapper[5050]: E1123 16:10:27.564598 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="83e06022-fb23-4d95-8a79-9feacb515ec8" containerName="registry-server" Nov 23 16:10:27 crc kubenswrapper[5050]: I1123 16:10:27.564616 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="83e06022-fb23-4d95-8a79-9feacb515ec8" containerName="registry-server" Nov 23 16:10:27 crc kubenswrapper[5050]: E1123 16:10:27.564633 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e2d2c8fe-8777-4f9c-8e42-32be7614f595" containerName="extract-utilities" Nov 23 16:10:27 crc kubenswrapper[5050]: I1123 16:10:27.564643 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="e2d2c8fe-8777-4f9c-8e42-32be7614f595" containerName="extract-utilities" Nov 23 16:10:27 crc kubenswrapper[5050]: E1123 16:10:27.564668 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e2d2c8fe-8777-4f9c-8e42-32be7614f595" containerName="extract-content" Nov 23 16:10:27 crc kubenswrapper[5050]: I1123 16:10:27.564676 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="e2d2c8fe-8777-4f9c-8e42-32be7614f595" containerName="extract-content" Nov 23 16:10:27 crc kubenswrapper[5050]: E1123 16:10:27.564687 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="83e06022-fb23-4d95-8a79-9feacb515ec8" containerName="extract-utilities" Nov 23 16:10:27 crc kubenswrapper[5050]: I1123 16:10:27.564693 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="83e06022-fb23-4d95-8a79-9feacb515ec8" containerName="extract-utilities" Nov 23 16:10:27 crc kubenswrapper[5050]: E1123 16:10:27.564716 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e2d2c8fe-8777-4f9c-8e42-32be7614f595" containerName="registry-server" Nov 23 16:10:27 crc kubenswrapper[5050]: I1123 16:10:27.564722 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="e2d2c8fe-8777-4f9c-8e42-32be7614f595" containerName="registry-server" Nov 23 16:10:27 crc kubenswrapper[5050]: E1123 16:10:27.564731 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="83e06022-fb23-4d95-8a79-9feacb515ec8" containerName="extract-content" Nov 23 
16:10:27 crc kubenswrapper[5050]: I1123 16:10:27.564736 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="83e06022-fb23-4d95-8a79-9feacb515ec8" containerName="extract-content" Nov 23 16:10:27 crc kubenswrapper[5050]: I1123 16:10:27.564897 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="e2d2c8fe-8777-4f9c-8e42-32be7614f595" containerName="registry-server" Nov 23 16:10:27 crc kubenswrapper[5050]: I1123 16:10:27.564916 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="83e06022-fb23-4d95-8a79-9feacb515ec8" containerName="registry-server" Nov 23 16:10:27 crc kubenswrapper[5050]: I1123 16:10:27.565584 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-e940-account-create-q8fld" Nov 23 16:10:27 crc kubenswrapper[5050]: I1123 16:10:27.569070 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-create-79q9n"] Nov 23 16:10:27 crc kubenswrapper[5050]: I1123 16:10:27.570531 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-79q9n" Nov 23 16:10:27 crc kubenswrapper[5050]: I1123 16:10:27.570948 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-db-secret" Nov 23 16:10:27 crc kubenswrapper[5050]: I1123 16:10:27.575948 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-e940-account-create-q8fld"] Nov 23 16:10:27 crc kubenswrapper[5050]: I1123 16:10:27.580599 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-79q9n"] Nov 23 16:10:27 crc kubenswrapper[5050]: I1123 16:10:27.725561 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kp4hc\" (UniqueName: \"kubernetes.io/projected/a69131bd-581a-4dbc-b6b2-a58ca7d5ef5c-kube-api-access-kp4hc\") pod \"barbican-db-create-79q9n\" (UID: \"a69131bd-581a-4dbc-b6b2-a58ca7d5ef5c\") " pod="openstack/barbican-db-create-79q9n" Nov 23 16:10:27 crc kubenswrapper[5050]: I1123 16:10:27.725623 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a69131bd-581a-4dbc-b6b2-a58ca7d5ef5c-operator-scripts\") pod \"barbican-db-create-79q9n\" (UID: \"a69131bd-581a-4dbc-b6b2-a58ca7d5ef5c\") " pod="openstack/barbican-db-create-79q9n" Nov 23 16:10:27 crc kubenswrapper[5050]: I1123 16:10:27.725662 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/59aa3498-5147-46f0-9783-f0dbc9845bab-operator-scripts\") pod \"barbican-e940-account-create-q8fld\" (UID: \"59aa3498-5147-46f0-9783-f0dbc9845bab\") " pod="openstack/barbican-e940-account-create-q8fld" Nov 23 16:10:27 crc kubenswrapper[5050]: I1123 16:10:27.725914 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8dsd9\" (UniqueName: \"kubernetes.io/projected/59aa3498-5147-46f0-9783-f0dbc9845bab-kube-api-access-8dsd9\") pod \"barbican-e940-account-create-q8fld\" (UID: \"59aa3498-5147-46f0-9783-f0dbc9845bab\") " pod="openstack/barbican-e940-account-create-q8fld" Nov 23 16:10:27 crc kubenswrapper[5050]: I1123 16:10:27.828342 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kp4hc\" (UniqueName: 
\"kubernetes.io/projected/a69131bd-581a-4dbc-b6b2-a58ca7d5ef5c-kube-api-access-kp4hc\") pod \"barbican-db-create-79q9n\" (UID: \"a69131bd-581a-4dbc-b6b2-a58ca7d5ef5c\") " pod="openstack/barbican-db-create-79q9n" Nov 23 16:10:27 crc kubenswrapper[5050]: I1123 16:10:27.828540 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a69131bd-581a-4dbc-b6b2-a58ca7d5ef5c-operator-scripts\") pod \"barbican-db-create-79q9n\" (UID: \"a69131bd-581a-4dbc-b6b2-a58ca7d5ef5c\") " pod="openstack/barbican-db-create-79q9n" Nov 23 16:10:27 crc kubenswrapper[5050]: I1123 16:10:27.828604 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/59aa3498-5147-46f0-9783-f0dbc9845bab-operator-scripts\") pod \"barbican-e940-account-create-q8fld\" (UID: \"59aa3498-5147-46f0-9783-f0dbc9845bab\") " pod="openstack/barbican-e940-account-create-q8fld" Nov 23 16:10:27 crc kubenswrapper[5050]: I1123 16:10:27.828786 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8dsd9\" (UniqueName: \"kubernetes.io/projected/59aa3498-5147-46f0-9783-f0dbc9845bab-kube-api-access-8dsd9\") pod \"barbican-e940-account-create-q8fld\" (UID: \"59aa3498-5147-46f0-9783-f0dbc9845bab\") " pod="openstack/barbican-e940-account-create-q8fld" Nov 23 16:10:27 crc kubenswrapper[5050]: I1123 16:10:27.830156 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/59aa3498-5147-46f0-9783-f0dbc9845bab-operator-scripts\") pod \"barbican-e940-account-create-q8fld\" (UID: \"59aa3498-5147-46f0-9783-f0dbc9845bab\") " pod="openstack/barbican-e940-account-create-q8fld" Nov 23 16:10:27 crc kubenswrapper[5050]: I1123 16:10:27.830249 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a69131bd-581a-4dbc-b6b2-a58ca7d5ef5c-operator-scripts\") pod \"barbican-db-create-79q9n\" (UID: \"a69131bd-581a-4dbc-b6b2-a58ca7d5ef5c\") " pod="openstack/barbican-db-create-79q9n" Nov 23 16:10:27 crc kubenswrapper[5050]: I1123 16:10:27.864087 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8dsd9\" (UniqueName: \"kubernetes.io/projected/59aa3498-5147-46f0-9783-f0dbc9845bab-kube-api-access-8dsd9\") pod \"barbican-e940-account-create-q8fld\" (UID: \"59aa3498-5147-46f0-9783-f0dbc9845bab\") " pod="openstack/barbican-e940-account-create-q8fld" Nov 23 16:10:27 crc kubenswrapper[5050]: I1123 16:10:27.865889 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kp4hc\" (UniqueName: \"kubernetes.io/projected/a69131bd-581a-4dbc-b6b2-a58ca7d5ef5c-kube-api-access-kp4hc\") pod \"barbican-db-create-79q9n\" (UID: \"a69131bd-581a-4dbc-b6b2-a58ca7d5ef5c\") " pod="openstack/barbican-db-create-79q9n" Nov 23 16:10:27 crc kubenswrapper[5050]: I1123 16:10:27.886830 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-e940-account-create-q8fld" Nov 23 16:10:27 crc kubenswrapper[5050]: I1123 16:10:27.906097 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-create-79q9n" Nov 23 16:10:28 crc kubenswrapper[5050]: I1123 16:10:28.377332 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-e940-account-create-q8fld"] Nov 23 16:10:28 crc kubenswrapper[5050]: I1123 16:10:28.449948 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-79q9n"] Nov 23 16:10:28 crc kubenswrapper[5050]: W1123 16:10:28.450610 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda69131bd_581a_4dbc_b6b2_a58ca7d5ef5c.slice/crio-f9884edebae03b4752283764b3e95c721a215ec031e36f770976e466db9f9e7b WatchSource:0}: Error finding container f9884edebae03b4752283764b3e95c721a215ec031e36f770976e466db9f9e7b: Status 404 returned error can't find the container with id f9884edebae03b4752283764b3e95c721a215ec031e36f770976e466db9f9e7b Nov 23 16:10:29 crc kubenswrapper[5050]: I1123 16:10:29.275945 5050 generic.go:334] "Generic (PLEG): container finished" podID="59aa3498-5147-46f0-9783-f0dbc9845bab" containerID="1fa29c3e03df4598e827f87c1230956f3a237e7f5c51dc82b2df8ece9c184337" exitCode=0 Nov 23 16:10:29 crc kubenswrapper[5050]: I1123 16:10:29.276044 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-e940-account-create-q8fld" event={"ID":"59aa3498-5147-46f0-9783-f0dbc9845bab","Type":"ContainerDied","Data":"1fa29c3e03df4598e827f87c1230956f3a237e7f5c51dc82b2df8ece9c184337"} Nov 23 16:10:29 crc kubenswrapper[5050]: I1123 16:10:29.276533 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-e940-account-create-q8fld" event={"ID":"59aa3498-5147-46f0-9783-f0dbc9845bab","Type":"ContainerStarted","Data":"3303c4d5f49ce9faa9e52bbea85fc5fc3bb5d73a7a68f88a4e064d5c82eadd4e"} Nov 23 16:10:29 crc kubenswrapper[5050]: I1123 16:10:29.280209 5050 generic.go:334] "Generic (PLEG): container finished" podID="a69131bd-581a-4dbc-b6b2-a58ca7d5ef5c" containerID="86b0c0be72b5eaea73831a0e2efec4a0ab1c1f8be5e2cc93a1c23fa3a6e79f40" exitCode=0 Nov 23 16:10:29 crc kubenswrapper[5050]: I1123 16:10:29.280269 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-79q9n" event={"ID":"a69131bd-581a-4dbc-b6b2-a58ca7d5ef5c","Type":"ContainerDied","Data":"86b0c0be72b5eaea73831a0e2efec4a0ab1c1f8be5e2cc93a1c23fa3a6e79f40"} Nov 23 16:10:29 crc kubenswrapper[5050]: I1123 16:10:29.280306 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-79q9n" event={"ID":"a69131bd-581a-4dbc-b6b2-a58ca7d5ef5c","Type":"ContainerStarted","Data":"f9884edebae03b4752283764b3e95c721a215ec031e36f770976e466db9f9e7b"} Nov 23 16:10:30 crc kubenswrapper[5050]: I1123 16:10:30.745150 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-e940-account-create-q8fld" Nov 23 16:10:30 crc kubenswrapper[5050]: I1123 16:10:30.754810 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-create-79q9n" Nov 23 16:10:30 crc kubenswrapper[5050]: I1123 16:10:30.917160 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/59aa3498-5147-46f0-9783-f0dbc9845bab-operator-scripts\") pod \"59aa3498-5147-46f0-9783-f0dbc9845bab\" (UID: \"59aa3498-5147-46f0-9783-f0dbc9845bab\") " Nov 23 16:10:30 crc kubenswrapper[5050]: I1123 16:10:30.917326 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a69131bd-581a-4dbc-b6b2-a58ca7d5ef5c-operator-scripts\") pod \"a69131bd-581a-4dbc-b6b2-a58ca7d5ef5c\" (UID: \"a69131bd-581a-4dbc-b6b2-a58ca7d5ef5c\") " Nov 23 16:10:30 crc kubenswrapper[5050]: I1123 16:10:30.917361 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kp4hc\" (UniqueName: \"kubernetes.io/projected/a69131bd-581a-4dbc-b6b2-a58ca7d5ef5c-kube-api-access-kp4hc\") pod \"a69131bd-581a-4dbc-b6b2-a58ca7d5ef5c\" (UID: \"a69131bd-581a-4dbc-b6b2-a58ca7d5ef5c\") " Nov 23 16:10:30 crc kubenswrapper[5050]: I1123 16:10:30.917493 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8dsd9\" (UniqueName: \"kubernetes.io/projected/59aa3498-5147-46f0-9783-f0dbc9845bab-kube-api-access-8dsd9\") pod \"59aa3498-5147-46f0-9783-f0dbc9845bab\" (UID: \"59aa3498-5147-46f0-9783-f0dbc9845bab\") " Nov 23 16:10:30 crc kubenswrapper[5050]: I1123 16:10:30.918319 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a69131bd-581a-4dbc-b6b2-a58ca7d5ef5c-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "a69131bd-581a-4dbc-b6b2-a58ca7d5ef5c" (UID: "a69131bd-581a-4dbc-b6b2-a58ca7d5ef5c"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 16:10:30 crc kubenswrapper[5050]: I1123 16:10:30.918367 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/59aa3498-5147-46f0-9783-f0dbc9845bab-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "59aa3498-5147-46f0-9783-f0dbc9845bab" (UID: "59aa3498-5147-46f0-9783-f0dbc9845bab"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 16:10:30 crc kubenswrapper[5050]: I1123 16:10:30.919794 5050 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/59aa3498-5147-46f0-9783-f0dbc9845bab-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 23 16:10:30 crc kubenswrapper[5050]: I1123 16:10:30.919842 5050 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a69131bd-581a-4dbc-b6b2-a58ca7d5ef5c-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 23 16:10:30 crc kubenswrapper[5050]: I1123 16:10:30.939734 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/59aa3498-5147-46f0-9783-f0dbc9845bab-kube-api-access-8dsd9" (OuterVolumeSpecName: "kube-api-access-8dsd9") pod "59aa3498-5147-46f0-9783-f0dbc9845bab" (UID: "59aa3498-5147-46f0-9783-f0dbc9845bab"). InnerVolumeSpecName "kube-api-access-8dsd9". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 16:10:30 crc kubenswrapper[5050]: I1123 16:10:30.940012 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a69131bd-581a-4dbc-b6b2-a58ca7d5ef5c-kube-api-access-kp4hc" (OuterVolumeSpecName: "kube-api-access-kp4hc") pod "a69131bd-581a-4dbc-b6b2-a58ca7d5ef5c" (UID: "a69131bd-581a-4dbc-b6b2-a58ca7d5ef5c"). InnerVolumeSpecName "kube-api-access-kp4hc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 16:10:31 crc kubenswrapper[5050]: I1123 16:10:31.022209 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kp4hc\" (UniqueName: \"kubernetes.io/projected/a69131bd-581a-4dbc-b6b2-a58ca7d5ef5c-kube-api-access-kp4hc\") on node \"crc\" DevicePath \"\"" Nov 23 16:10:31 crc kubenswrapper[5050]: I1123 16:10:31.022288 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8dsd9\" (UniqueName: \"kubernetes.io/projected/59aa3498-5147-46f0-9783-f0dbc9845bab-kube-api-access-8dsd9\") on node \"crc\" DevicePath \"\"" Nov 23 16:10:31 crc kubenswrapper[5050]: I1123 16:10:31.305343 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-e940-account-create-q8fld" event={"ID":"59aa3498-5147-46f0-9783-f0dbc9845bab","Type":"ContainerDied","Data":"3303c4d5f49ce9faa9e52bbea85fc5fc3bb5d73a7a68f88a4e064d5c82eadd4e"} Nov 23 16:10:31 crc kubenswrapper[5050]: I1123 16:10:31.305414 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3303c4d5f49ce9faa9e52bbea85fc5fc3bb5d73a7a68f88a4e064d5c82eadd4e" Nov 23 16:10:31 crc kubenswrapper[5050]: I1123 16:10:31.305373 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-e940-account-create-q8fld" Nov 23 16:10:31 crc kubenswrapper[5050]: I1123 16:10:31.308060 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-79q9n" event={"ID":"a69131bd-581a-4dbc-b6b2-a58ca7d5ef5c","Type":"ContainerDied","Data":"f9884edebae03b4752283764b3e95c721a215ec031e36f770976e466db9f9e7b"} Nov 23 16:10:31 crc kubenswrapper[5050]: I1123 16:10:31.308144 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f9884edebae03b4752283764b3e95c721a215ec031e36f770976e466db9f9e7b" Nov 23 16:10:31 crc kubenswrapper[5050]: I1123 16:10:31.308096 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-create-79q9n" Nov 23 16:10:32 crc kubenswrapper[5050]: I1123 16:10:32.900890 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-sync-xtqpj"] Nov 23 16:10:32 crc kubenswrapper[5050]: E1123 16:10:32.901878 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a69131bd-581a-4dbc-b6b2-a58ca7d5ef5c" containerName="mariadb-database-create" Nov 23 16:10:32 crc kubenswrapper[5050]: I1123 16:10:32.901908 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="a69131bd-581a-4dbc-b6b2-a58ca7d5ef5c" containerName="mariadb-database-create" Nov 23 16:10:32 crc kubenswrapper[5050]: E1123 16:10:32.901937 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="59aa3498-5147-46f0-9783-f0dbc9845bab" containerName="mariadb-account-create" Nov 23 16:10:32 crc kubenswrapper[5050]: I1123 16:10:32.901950 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="59aa3498-5147-46f0-9783-f0dbc9845bab" containerName="mariadb-account-create" Nov 23 16:10:32 crc kubenswrapper[5050]: I1123 16:10:32.902260 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="a69131bd-581a-4dbc-b6b2-a58ca7d5ef5c" containerName="mariadb-database-create" Nov 23 16:10:32 crc kubenswrapper[5050]: I1123 16:10:32.902316 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="59aa3498-5147-46f0-9783-f0dbc9845bab" containerName="mariadb-account-create" Nov 23 16:10:32 crc kubenswrapper[5050]: I1123 16:10:32.903298 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-xtqpj" Nov 23 16:10:32 crc kubenswrapper[5050]: I1123 16:10:32.905971 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Nov 23 16:10:32 crc kubenswrapper[5050]: I1123 16:10:32.906728 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-g4lhj" Nov 23 16:10:32 crc kubenswrapper[5050]: I1123 16:10:32.913145 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-xtqpj"] Nov 23 16:10:33 crc kubenswrapper[5050]: I1123 16:10:33.065399 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/2730954f-204b-453e-b3a5-e7e50e378343-db-sync-config-data\") pod \"barbican-db-sync-xtqpj\" (UID: \"2730954f-204b-453e-b3a5-e7e50e378343\") " pod="openstack/barbican-db-sync-xtqpj" Nov 23 16:10:33 crc kubenswrapper[5050]: I1123 16:10:33.065601 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2730954f-204b-453e-b3a5-e7e50e378343-combined-ca-bundle\") pod \"barbican-db-sync-xtqpj\" (UID: \"2730954f-204b-453e-b3a5-e7e50e378343\") " pod="openstack/barbican-db-sync-xtqpj" Nov 23 16:10:33 crc kubenswrapper[5050]: I1123 16:10:33.065681 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vgb9z\" (UniqueName: \"kubernetes.io/projected/2730954f-204b-453e-b3a5-e7e50e378343-kube-api-access-vgb9z\") pod \"barbican-db-sync-xtqpj\" (UID: \"2730954f-204b-453e-b3a5-e7e50e378343\") " pod="openstack/barbican-db-sync-xtqpj" Nov 23 16:10:33 crc kubenswrapper[5050]: I1123 16:10:33.167135 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: 
\"kubernetes.io/secret/2730954f-204b-453e-b3a5-e7e50e378343-db-sync-config-data\") pod \"barbican-db-sync-xtqpj\" (UID: \"2730954f-204b-453e-b3a5-e7e50e378343\") " pod="openstack/barbican-db-sync-xtqpj" Nov 23 16:10:33 crc kubenswrapper[5050]: I1123 16:10:33.167253 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2730954f-204b-453e-b3a5-e7e50e378343-combined-ca-bundle\") pod \"barbican-db-sync-xtqpj\" (UID: \"2730954f-204b-453e-b3a5-e7e50e378343\") " pod="openstack/barbican-db-sync-xtqpj" Nov 23 16:10:33 crc kubenswrapper[5050]: I1123 16:10:33.167301 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vgb9z\" (UniqueName: \"kubernetes.io/projected/2730954f-204b-453e-b3a5-e7e50e378343-kube-api-access-vgb9z\") pod \"barbican-db-sync-xtqpj\" (UID: \"2730954f-204b-453e-b3a5-e7e50e378343\") " pod="openstack/barbican-db-sync-xtqpj" Nov 23 16:10:33 crc kubenswrapper[5050]: I1123 16:10:33.181084 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2730954f-204b-453e-b3a5-e7e50e378343-combined-ca-bundle\") pod \"barbican-db-sync-xtqpj\" (UID: \"2730954f-204b-453e-b3a5-e7e50e378343\") " pod="openstack/barbican-db-sync-xtqpj" Nov 23 16:10:33 crc kubenswrapper[5050]: I1123 16:10:33.184373 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/2730954f-204b-453e-b3a5-e7e50e378343-db-sync-config-data\") pod \"barbican-db-sync-xtqpj\" (UID: \"2730954f-204b-453e-b3a5-e7e50e378343\") " pod="openstack/barbican-db-sync-xtqpj" Nov 23 16:10:33 crc kubenswrapper[5050]: I1123 16:10:33.196229 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vgb9z\" (UniqueName: \"kubernetes.io/projected/2730954f-204b-453e-b3a5-e7e50e378343-kube-api-access-vgb9z\") pod \"barbican-db-sync-xtqpj\" (UID: \"2730954f-204b-453e-b3a5-e7e50e378343\") " pod="openstack/barbican-db-sync-xtqpj" Nov 23 16:10:33 crc kubenswrapper[5050]: I1123 16:10:33.230844 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-xtqpj" Nov 23 16:10:33 crc kubenswrapper[5050]: I1123 16:10:33.790036 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-xtqpj"] Nov 23 16:10:34 crc kubenswrapper[5050]: I1123 16:10:34.343989 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-xtqpj" event={"ID":"2730954f-204b-453e-b3a5-e7e50e378343","Type":"ContainerStarted","Data":"59878a85985966908c44e4a90b5d44cb40a08524c405feaac487f8a639db9856"} Nov 23 16:10:34 crc kubenswrapper[5050]: I1123 16:10:34.344575 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-xtqpj" event={"ID":"2730954f-204b-453e-b3a5-e7e50e378343","Type":"ContainerStarted","Data":"ca183bbf3af487e4c21d85643d0d22c7fe7f6893800668d46a4f75e7d4aac59a"} Nov 23 16:10:34 crc kubenswrapper[5050]: I1123 16:10:34.372175 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-db-sync-xtqpj" podStartSLOduration=2.372146107 podStartE2EDuration="2.372146107s" podCreationTimestamp="2025-11-23 16:10:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 16:10:34.369437671 +0000 UTC m=+5329.536434196" watchObservedRunningTime="2025-11-23 16:10:34.372146107 +0000 UTC m=+5329.539142632" Nov 23 16:10:35 crc kubenswrapper[5050]: I1123 16:10:35.358476 5050 generic.go:334] "Generic (PLEG): container finished" podID="2730954f-204b-453e-b3a5-e7e50e378343" containerID="59878a85985966908c44e4a90b5d44cb40a08524c405feaac487f8a639db9856" exitCode=0 Nov 23 16:10:35 crc kubenswrapper[5050]: I1123 16:10:35.358554 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-xtqpj" event={"ID":"2730954f-204b-453e-b3a5-e7e50e378343","Type":"ContainerDied","Data":"59878a85985966908c44e4a90b5d44cb40a08524c405feaac487f8a639db9856"} Nov 23 16:10:36 crc kubenswrapper[5050]: I1123 16:10:36.805553 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-xtqpj" Nov 23 16:10:36 crc kubenswrapper[5050]: I1123 16:10:36.953857 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/2730954f-204b-453e-b3a5-e7e50e378343-db-sync-config-data\") pod \"2730954f-204b-453e-b3a5-e7e50e378343\" (UID: \"2730954f-204b-453e-b3a5-e7e50e378343\") " Nov 23 16:10:36 crc kubenswrapper[5050]: I1123 16:10:36.954064 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vgb9z\" (UniqueName: \"kubernetes.io/projected/2730954f-204b-453e-b3a5-e7e50e378343-kube-api-access-vgb9z\") pod \"2730954f-204b-453e-b3a5-e7e50e378343\" (UID: \"2730954f-204b-453e-b3a5-e7e50e378343\") " Nov 23 16:10:36 crc kubenswrapper[5050]: I1123 16:10:36.954148 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2730954f-204b-453e-b3a5-e7e50e378343-combined-ca-bundle\") pod \"2730954f-204b-453e-b3a5-e7e50e378343\" (UID: \"2730954f-204b-453e-b3a5-e7e50e378343\") " Nov 23 16:10:36 crc kubenswrapper[5050]: I1123 16:10:36.962227 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2730954f-204b-453e-b3a5-e7e50e378343-kube-api-access-vgb9z" (OuterVolumeSpecName: "kube-api-access-vgb9z") pod "2730954f-204b-453e-b3a5-e7e50e378343" (UID: "2730954f-204b-453e-b3a5-e7e50e378343"). InnerVolumeSpecName "kube-api-access-vgb9z". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 16:10:36 crc kubenswrapper[5050]: I1123 16:10:36.962899 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2730954f-204b-453e-b3a5-e7e50e378343-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "2730954f-204b-453e-b3a5-e7e50e378343" (UID: "2730954f-204b-453e-b3a5-e7e50e378343"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:10:36 crc kubenswrapper[5050]: I1123 16:10:36.991210 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2730954f-204b-453e-b3a5-e7e50e378343-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2730954f-204b-453e-b3a5-e7e50e378343" (UID: "2730954f-204b-453e-b3a5-e7e50e378343"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:10:37 crc kubenswrapper[5050]: I1123 16:10:37.056855 5050 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/2730954f-204b-453e-b3a5-e7e50e378343-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 23 16:10:37 crc kubenswrapper[5050]: I1123 16:10:37.056931 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vgb9z\" (UniqueName: \"kubernetes.io/projected/2730954f-204b-453e-b3a5-e7e50e378343-kube-api-access-vgb9z\") on node \"crc\" DevicePath \"\"" Nov 23 16:10:37 crc kubenswrapper[5050]: I1123 16:10:37.056948 5050 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2730954f-204b-453e-b3a5-e7e50e378343-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 16:10:37 crc kubenswrapper[5050]: I1123 16:10:37.390064 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-xtqpj" event={"ID":"2730954f-204b-453e-b3a5-e7e50e378343","Type":"ContainerDied","Data":"ca183bbf3af487e4c21d85643d0d22c7fe7f6893800668d46a4f75e7d4aac59a"} Nov 23 16:10:37 crc kubenswrapper[5050]: I1123 16:10:37.390191 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ca183bbf3af487e4c21d85643d0d22c7fe7f6893800668d46a4f75e7d4aac59a" Nov 23 16:10:37 crc kubenswrapper[5050]: I1123 16:10:37.390130 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-xtqpj" Nov 23 16:10:37 crc kubenswrapper[5050]: I1123 16:10:37.649785 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-worker-c95654dc7-5rjvf"] Nov 23 16:10:37 crc kubenswrapper[5050]: E1123 16:10:37.650266 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2730954f-204b-453e-b3a5-e7e50e378343" containerName="barbican-db-sync" Nov 23 16:10:37 crc kubenswrapper[5050]: I1123 16:10:37.650283 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="2730954f-204b-453e-b3a5-e7e50e378343" containerName="barbican-db-sync" Nov 23 16:10:37 crc kubenswrapper[5050]: I1123 16:10:37.650575 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="2730954f-204b-453e-b3a5-e7e50e378343" containerName="barbican-db-sync" Nov 23 16:10:37 crc kubenswrapper[5050]: I1123 16:10:37.658702 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-c95654dc7-5rjvf" Nov 23 16:10:37 crc kubenswrapper[5050]: I1123 16:10:37.665879 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-g4lhj" Nov 23 16:10:37 crc kubenswrapper[5050]: I1123 16:10:37.666093 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-worker-config-data" Nov 23 16:10:37 crc kubenswrapper[5050]: I1123 16:10:37.682098 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Nov 23 16:10:37 crc kubenswrapper[5050]: I1123 16:10:37.693688 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-c95654dc7-5rjvf"] Nov 23 16:10:37 crc kubenswrapper[5050]: I1123 16:10:37.709039 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-keystone-listener-6bd5894c98-7q8qw"] Nov 23 16:10:37 crc kubenswrapper[5050]: I1123 16:10:37.710656 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-keystone-listener-6bd5894c98-7q8qw" Nov 23 16:10:37 crc kubenswrapper[5050]: I1123 16:10:37.720027 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-keystone-listener-config-data" Nov 23 16:10:37 crc kubenswrapper[5050]: I1123 16:10:37.767090 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-6bd5894c98-7q8qw"] Nov 23 16:10:37 crc kubenswrapper[5050]: I1123 16:10:37.774478 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bd10d96b-8497-4c0b-afe6-5e2027955ec7-logs\") pod \"barbican-worker-c95654dc7-5rjvf\" (UID: \"bd10d96b-8497-4c0b-afe6-5e2027955ec7\") " pod="openstack/barbican-worker-c95654dc7-5rjvf" Nov 23 16:10:37 crc kubenswrapper[5050]: I1123 16:10:37.774530 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/bd10d96b-8497-4c0b-afe6-5e2027955ec7-config-data-custom\") pod \"barbican-worker-c95654dc7-5rjvf\" (UID: \"bd10d96b-8497-4c0b-afe6-5e2027955ec7\") " pod="openstack/barbican-worker-c95654dc7-5rjvf" Nov 23 16:10:37 crc kubenswrapper[5050]: I1123 16:10:37.774600 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8b5qz\" (UniqueName: \"kubernetes.io/projected/bd10d96b-8497-4c0b-afe6-5e2027955ec7-kube-api-access-8b5qz\") pod \"barbican-worker-c95654dc7-5rjvf\" (UID: \"bd10d96b-8497-4c0b-afe6-5e2027955ec7\") " pod="openstack/barbican-worker-c95654dc7-5rjvf" Nov 23 16:10:37 crc kubenswrapper[5050]: I1123 16:10:37.774617 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bd10d96b-8497-4c0b-afe6-5e2027955ec7-combined-ca-bundle\") pod \"barbican-worker-c95654dc7-5rjvf\" (UID: \"bd10d96b-8497-4c0b-afe6-5e2027955ec7\") " pod="openstack/barbican-worker-c95654dc7-5rjvf" Nov 23 16:10:37 crc kubenswrapper[5050]: I1123 16:10:37.774662 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bd10d96b-8497-4c0b-afe6-5e2027955ec7-config-data\") pod \"barbican-worker-c95654dc7-5rjvf\" (UID: \"bd10d96b-8497-4c0b-afe6-5e2027955ec7\") " pod="openstack/barbican-worker-c95654dc7-5rjvf" Nov 23 16:10:37 crc kubenswrapper[5050]: I1123 16:10:37.788056 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-55995c5bd5-m6qlj"] Nov 23 16:10:37 crc kubenswrapper[5050]: I1123 16:10:37.789743 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-55995c5bd5-m6qlj" Nov 23 16:10:37 crc kubenswrapper[5050]: I1123 16:10:37.825267 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-55995c5bd5-m6qlj"] Nov 23 16:10:37 crc kubenswrapper[5050]: I1123 16:10:37.876710 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b0a3cfa8-999b-4cb5-853b-9877d94b5da4-config-data-custom\") pod \"barbican-keystone-listener-6bd5894c98-7q8qw\" (UID: \"b0a3cfa8-999b-4cb5-853b-9877d94b5da4\") " pod="openstack/barbican-keystone-listener-6bd5894c98-7q8qw" Nov 23 16:10:37 crc kubenswrapper[5050]: I1123 16:10:37.876776 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f5a32fd3-ee3a-4733-ab6d-92560b2dbd71-config\") pod \"dnsmasq-dns-55995c5bd5-m6qlj\" (UID: \"f5a32fd3-ee3a-4733-ab6d-92560b2dbd71\") " pod="openstack/dnsmasq-dns-55995c5bd5-m6qlj" Nov 23 16:10:37 crc kubenswrapper[5050]: I1123 16:10:37.876812 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f5a32fd3-ee3a-4733-ab6d-92560b2dbd71-ovsdbserver-sb\") pod \"dnsmasq-dns-55995c5bd5-m6qlj\" (UID: \"f5a32fd3-ee3a-4733-ab6d-92560b2dbd71\") " pod="openstack/dnsmasq-dns-55995c5bd5-m6qlj" Nov 23 16:10:37 crc kubenswrapper[5050]: I1123 16:10:37.876849 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8b5qz\" (UniqueName: \"kubernetes.io/projected/bd10d96b-8497-4c0b-afe6-5e2027955ec7-kube-api-access-8b5qz\") pod \"barbican-worker-c95654dc7-5rjvf\" (UID: \"bd10d96b-8497-4c0b-afe6-5e2027955ec7\") " pod="openstack/barbican-worker-c95654dc7-5rjvf" Nov 23 16:10:37 crc kubenswrapper[5050]: I1123 16:10:37.876871 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bd10d96b-8497-4c0b-afe6-5e2027955ec7-combined-ca-bundle\") pod \"barbican-worker-c95654dc7-5rjvf\" (UID: \"bd10d96b-8497-4c0b-afe6-5e2027955ec7\") " pod="openstack/barbican-worker-c95654dc7-5rjvf" Nov 23 16:10:37 crc kubenswrapper[5050]: I1123 16:10:37.876893 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f5a32fd3-ee3a-4733-ab6d-92560b2dbd71-dns-svc\") pod \"dnsmasq-dns-55995c5bd5-m6qlj\" (UID: \"f5a32fd3-ee3a-4733-ab6d-92560b2dbd71\") " pod="openstack/dnsmasq-dns-55995c5bd5-m6qlj" Nov 23 16:10:37 crc kubenswrapper[5050]: I1123 16:10:37.877288 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b0a3cfa8-999b-4cb5-853b-9877d94b5da4-config-data\") pod \"barbican-keystone-listener-6bd5894c98-7q8qw\" (UID: \"b0a3cfa8-999b-4cb5-853b-9877d94b5da4\") " pod="openstack/barbican-keystone-listener-6bd5894c98-7q8qw" Nov 23 16:10:37 crc kubenswrapper[5050]: I1123 16:10:37.877360 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b0a3cfa8-999b-4cb5-853b-9877d94b5da4-logs\") pod \"barbican-keystone-listener-6bd5894c98-7q8qw\" (UID: \"b0a3cfa8-999b-4cb5-853b-9877d94b5da4\") " pod="openstack/barbican-keystone-listener-6bd5894c98-7q8qw" Nov 23 16:10:37 crc 
kubenswrapper[5050]: I1123 16:10:37.877390 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bd10d96b-8497-4c0b-afe6-5e2027955ec7-config-data\") pod \"barbican-worker-c95654dc7-5rjvf\" (UID: \"bd10d96b-8497-4c0b-afe6-5e2027955ec7\") " pod="openstack/barbican-worker-c95654dc7-5rjvf" Nov 23 16:10:37 crc kubenswrapper[5050]: I1123 16:10:37.877434 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f5a32fd3-ee3a-4733-ab6d-92560b2dbd71-ovsdbserver-nb\") pod \"dnsmasq-dns-55995c5bd5-m6qlj\" (UID: \"f5a32fd3-ee3a-4733-ab6d-92560b2dbd71\") " pod="openstack/dnsmasq-dns-55995c5bd5-m6qlj" Nov 23 16:10:37 crc kubenswrapper[5050]: I1123 16:10:37.877490 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wff9z\" (UniqueName: \"kubernetes.io/projected/b0a3cfa8-999b-4cb5-853b-9877d94b5da4-kube-api-access-wff9z\") pod \"barbican-keystone-listener-6bd5894c98-7q8qw\" (UID: \"b0a3cfa8-999b-4cb5-853b-9877d94b5da4\") " pod="openstack/barbican-keystone-listener-6bd5894c98-7q8qw" Nov 23 16:10:37 crc kubenswrapper[5050]: I1123 16:10:37.877547 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b0a3cfa8-999b-4cb5-853b-9877d94b5da4-combined-ca-bundle\") pod \"barbican-keystone-listener-6bd5894c98-7q8qw\" (UID: \"b0a3cfa8-999b-4cb5-853b-9877d94b5da4\") " pod="openstack/barbican-keystone-listener-6bd5894c98-7q8qw" Nov 23 16:10:37 crc kubenswrapper[5050]: I1123 16:10:37.877569 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bd10d96b-8497-4c0b-afe6-5e2027955ec7-logs\") pod \"barbican-worker-c95654dc7-5rjvf\" (UID: \"bd10d96b-8497-4c0b-afe6-5e2027955ec7\") " pod="openstack/barbican-worker-c95654dc7-5rjvf" Nov 23 16:10:37 crc kubenswrapper[5050]: I1123 16:10:37.877593 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/bd10d96b-8497-4c0b-afe6-5e2027955ec7-config-data-custom\") pod \"barbican-worker-c95654dc7-5rjvf\" (UID: \"bd10d96b-8497-4c0b-afe6-5e2027955ec7\") " pod="openstack/barbican-worker-c95654dc7-5rjvf" Nov 23 16:10:37 crc kubenswrapper[5050]: I1123 16:10:37.877619 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vv7nh\" (UniqueName: \"kubernetes.io/projected/f5a32fd3-ee3a-4733-ab6d-92560b2dbd71-kube-api-access-vv7nh\") pod \"dnsmasq-dns-55995c5bd5-m6qlj\" (UID: \"f5a32fd3-ee3a-4733-ab6d-92560b2dbd71\") " pod="openstack/dnsmasq-dns-55995c5bd5-m6qlj" Nov 23 16:10:37 crc kubenswrapper[5050]: I1123 16:10:37.882299 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bd10d96b-8497-4c0b-afe6-5e2027955ec7-logs\") pod \"barbican-worker-c95654dc7-5rjvf\" (UID: \"bd10d96b-8497-4c0b-afe6-5e2027955ec7\") " pod="openstack/barbican-worker-c95654dc7-5rjvf" Nov 23 16:10:37 crc kubenswrapper[5050]: I1123 16:10:37.883260 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/bd10d96b-8497-4c0b-afe6-5e2027955ec7-config-data-custom\") pod \"barbican-worker-c95654dc7-5rjvf\" (UID: 
\"bd10d96b-8497-4c0b-afe6-5e2027955ec7\") " pod="openstack/barbican-worker-c95654dc7-5rjvf" Nov 23 16:10:37 crc kubenswrapper[5050]: I1123 16:10:37.884617 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bd10d96b-8497-4c0b-afe6-5e2027955ec7-config-data\") pod \"barbican-worker-c95654dc7-5rjvf\" (UID: \"bd10d96b-8497-4c0b-afe6-5e2027955ec7\") " pod="openstack/barbican-worker-c95654dc7-5rjvf" Nov 23 16:10:37 crc kubenswrapper[5050]: I1123 16:10:37.885114 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bd10d96b-8497-4c0b-afe6-5e2027955ec7-combined-ca-bundle\") pod \"barbican-worker-c95654dc7-5rjvf\" (UID: \"bd10d96b-8497-4c0b-afe6-5e2027955ec7\") " pod="openstack/barbican-worker-c95654dc7-5rjvf" Nov 23 16:10:37 crc kubenswrapper[5050]: I1123 16:10:37.905001 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8b5qz\" (UniqueName: \"kubernetes.io/projected/bd10d96b-8497-4c0b-afe6-5e2027955ec7-kube-api-access-8b5qz\") pod \"barbican-worker-c95654dc7-5rjvf\" (UID: \"bd10d96b-8497-4c0b-afe6-5e2027955ec7\") " pod="openstack/barbican-worker-c95654dc7-5rjvf" Nov 23 16:10:37 crc kubenswrapper[5050]: I1123 16:10:37.945002 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-5b7fc4d9bd-wd2cv"] Nov 23 16:10:37 crc kubenswrapper[5050]: I1123 16:10:37.946671 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-5b7fc4d9bd-wd2cv" Nov 23 16:10:37 crc kubenswrapper[5050]: I1123 16:10:37.949833 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-api-config-data" Nov 23 16:10:37 crc kubenswrapper[5050]: I1123 16:10:37.958871 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-5b7fc4d9bd-wd2cv"] Nov 23 16:10:37 crc kubenswrapper[5050]: I1123 16:10:37.979961 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f5a32fd3-ee3a-4733-ab6d-92560b2dbd71-ovsdbserver-sb\") pod \"dnsmasq-dns-55995c5bd5-m6qlj\" (UID: \"f5a32fd3-ee3a-4733-ab6d-92560b2dbd71\") " pod="openstack/dnsmasq-dns-55995c5bd5-m6qlj" Nov 23 16:10:37 crc kubenswrapper[5050]: I1123 16:10:37.981412 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f5a32fd3-ee3a-4733-ab6d-92560b2dbd71-dns-svc\") pod \"dnsmasq-dns-55995c5bd5-m6qlj\" (UID: \"f5a32fd3-ee3a-4733-ab6d-92560b2dbd71\") " pod="openstack/dnsmasq-dns-55995c5bd5-m6qlj" Nov 23 16:10:37 crc kubenswrapper[5050]: I1123 16:10:37.981571 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b0a3cfa8-999b-4cb5-853b-9877d94b5da4-config-data\") pod \"barbican-keystone-listener-6bd5894c98-7q8qw\" (UID: \"b0a3cfa8-999b-4cb5-853b-9877d94b5da4\") " pod="openstack/barbican-keystone-listener-6bd5894c98-7q8qw" Nov 23 16:10:37 crc kubenswrapper[5050]: I1123 16:10:37.982311 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f5a32fd3-ee3a-4733-ab6d-92560b2dbd71-dns-svc\") pod \"dnsmasq-dns-55995c5bd5-m6qlj\" (UID: \"f5a32fd3-ee3a-4733-ab6d-92560b2dbd71\") " pod="openstack/dnsmasq-dns-55995c5bd5-m6qlj" Nov 23 16:10:37 crc kubenswrapper[5050]: I1123 16:10:37.981343 5050 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f5a32fd3-ee3a-4733-ab6d-92560b2dbd71-ovsdbserver-sb\") pod \"dnsmasq-dns-55995c5bd5-m6qlj\" (UID: \"f5a32fd3-ee3a-4733-ab6d-92560b2dbd71\") " pod="openstack/dnsmasq-dns-55995c5bd5-m6qlj" Nov 23 16:10:37 crc kubenswrapper[5050]: I1123 16:10:37.982477 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b0a3cfa8-999b-4cb5-853b-9877d94b5da4-logs\") pod \"barbican-keystone-listener-6bd5894c98-7q8qw\" (UID: \"b0a3cfa8-999b-4cb5-853b-9877d94b5da4\") " pod="openstack/barbican-keystone-listener-6bd5894c98-7q8qw" Nov 23 16:10:37 crc kubenswrapper[5050]: I1123 16:10:37.982571 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f5a32fd3-ee3a-4733-ab6d-92560b2dbd71-ovsdbserver-nb\") pod \"dnsmasq-dns-55995c5bd5-m6qlj\" (UID: \"f5a32fd3-ee3a-4733-ab6d-92560b2dbd71\") " pod="openstack/dnsmasq-dns-55995c5bd5-m6qlj" Nov 23 16:10:37 crc kubenswrapper[5050]: I1123 16:10:37.982642 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wff9z\" (UniqueName: \"kubernetes.io/projected/b0a3cfa8-999b-4cb5-853b-9877d94b5da4-kube-api-access-wff9z\") pod \"barbican-keystone-listener-6bd5894c98-7q8qw\" (UID: \"b0a3cfa8-999b-4cb5-853b-9877d94b5da4\") " pod="openstack/barbican-keystone-listener-6bd5894c98-7q8qw" Nov 23 16:10:37 crc kubenswrapper[5050]: I1123 16:10:37.982741 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b0a3cfa8-999b-4cb5-853b-9877d94b5da4-combined-ca-bundle\") pod \"barbican-keystone-listener-6bd5894c98-7q8qw\" (UID: \"b0a3cfa8-999b-4cb5-853b-9877d94b5da4\") " pod="openstack/barbican-keystone-listener-6bd5894c98-7q8qw" Nov 23 16:10:37 crc kubenswrapper[5050]: I1123 16:10:37.983209 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vv7nh\" (UniqueName: \"kubernetes.io/projected/f5a32fd3-ee3a-4733-ab6d-92560b2dbd71-kube-api-access-vv7nh\") pod \"dnsmasq-dns-55995c5bd5-m6qlj\" (UID: \"f5a32fd3-ee3a-4733-ab6d-92560b2dbd71\") " pod="openstack/dnsmasq-dns-55995c5bd5-m6qlj" Nov 23 16:10:37 crc kubenswrapper[5050]: I1123 16:10:37.983298 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b0a3cfa8-999b-4cb5-853b-9877d94b5da4-config-data-custom\") pod \"barbican-keystone-listener-6bd5894c98-7q8qw\" (UID: \"b0a3cfa8-999b-4cb5-853b-9877d94b5da4\") " pod="openstack/barbican-keystone-listener-6bd5894c98-7q8qw" Nov 23 16:10:37 crc kubenswrapper[5050]: I1123 16:10:37.983361 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f5a32fd3-ee3a-4733-ab6d-92560b2dbd71-ovsdbserver-nb\") pod \"dnsmasq-dns-55995c5bd5-m6qlj\" (UID: \"f5a32fd3-ee3a-4733-ab6d-92560b2dbd71\") " pod="openstack/dnsmasq-dns-55995c5bd5-m6qlj" Nov 23 16:10:37 crc kubenswrapper[5050]: I1123 16:10:37.983135 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b0a3cfa8-999b-4cb5-853b-9877d94b5da4-logs\") pod \"barbican-keystone-listener-6bd5894c98-7q8qw\" (UID: \"b0a3cfa8-999b-4cb5-853b-9877d94b5da4\") " 
pod="openstack/barbican-keystone-listener-6bd5894c98-7q8qw" Nov 23 16:10:37 crc kubenswrapper[5050]: I1123 16:10:37.983502 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f5a32fd3-ee3a-4733-ab6d-92560b2dbd71-config\") pod \"dnsmasq-dns-55995c5bd5-m6qlj\" (UID: \"f5a32fd3-ee3a-4733-ab6d-92560b2dbd71\") " pod="openstack/dnsmasq-dns-55995c5bd5-m6qlj" Nov 23 16:10:37 crc kubenswrapper[5050]: I1123 16:10:37.985262 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f5a32fd3-ee3a-4733-ab6d-92560b2dbd71-config\") pod \"dnsmasq-dns-55995c5bd5-m6qlj\" (UID: \"f5a32fd3-ee3a-4733-ab6d-92560b2dbd71\") " pod="openstack/dnsmasq-dns-55995c5bd5-m6qlj" Nov 23 16:10:37 crc kubenswrapper[5050]: I1123 16:10:37.988049 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b0a3cfa8-999b-4cb5-853b-9877d94b5da4-config-data\") pod \"barbican-keystone-listener-6bd5894c98-7q8qw\" (UID: \"b0a3cfa8-999b-4cb5-853b-9877d94b5da4\") " pod="openstack/barbican-keystone-listener-6bd5894c98-7q8qw" Nov 23 16:10:37 crc kubenswrapper[5050]: I1123 16:10:37.998707 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b0a3cfa8-999b-4cb5-853b-9877d94b5da4-combined-ca-bundle\") pod \"barbican-keystone-listener-6bd5894c98-7q8qw\" (UID: \"b0a3cfa8-999b-4cb5-853b-9877d94b5da4\") " pod="openstack/barbican-keystone-listener-6bd5894c98-7q8qw" Nov 23 16:10:37 crc kubenswrapper[5050]: I1123 16:10:37.999302 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-c95654dc7-5rjvf" Nov 23 16:10:37 crc kubenswrapper[5050]: I1123 16:10:37.999398 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b0a3cfa8-999b-4cb5-853b-9877d94b5da4-config-data-custom\") pod \"barbican-keystone-listener-6bd5894c98-7q8qw\" (UID: \"b0a3cfa8-999b-4cb5-853b-9877d94b5da4\") " pod="openstack/barbican-keystone-listener-6bd5894c98-7q8qw" Nov 23 16:10:38 crc kubenswrapper[5050]: I1123 16:10:38.003328 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wff9z\" (UniqueName: \"kubernetes.io/projected/b0a3cfa8-999b-4cb5-853b-9877d94b5da4-kube-api-access-wff9z\") pod \"barbican-keystone-listener-6bd5894c98-7q8qw\" (UID: \"b0a3cfa8-999b-4cb5-853b-9877d94b5da4\") " pod="openstack/barbican-keystone-listener-6bd5894c98-7q8qw" Nov 23 16:10:38 crc kubenswrapper[5050]: I1123 16:10:38.005270 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vv7nh\" (UniqueName: \"kubernetes.io/projected/f5a32fd3-ee3a-4733-ab6d-92560b2dbd71-kube-api-access-vv7nh\") pod \"dnsmasq-dns-55995c5bd5-m6qlj\" (UID: \"f5a32fd3-ee3a-4733-ab6d-92560b2dbd71\") " pod="openstack/dnsmasq-dns-55995c5bd5-m6qlj" Nov 23 16:10:38 crc kubenswrapper[5050]: I1123 16:10:38.042274 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-keystone-listener-6bd5894c98-7q8qw" Nov 23 16:10:38 crc kubenswrapper[5050]: I1123 16:10:38.086777 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fh5rf\" (UniqueName: \"kubernetes.io/projected/4a008c53-3e3a-46ba-8195-46ffae3aaec5-kube-api-access-fh5rf\") pod \"barbican-api-5b7fc4d9bd-wd2cv\" (UID: \"4a008c53-3e3a-46ba-8195-46ffae3aaec5\") " pod="openstack/barbican-api-5b7fc4d9bd-wd2cv" Nov 23 16:10:38 crc kubenswrapper[5050]: I1123 16:10:38.087234 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4a008c53-3e3a-46ba-8195-46ffae3aaec5-logs\") pod \"barbican-api-5b7fc4d9bd-wd2cv\" (UID: \"4a008c53-3e3a-46ba-8195-46ffae3aaec5\") " pod="openstack/barbican-api-5b7fc4d9bd-wd2cv" Nov 23 16:10:38 crc kubenswrapper[5050]: I1123 16:10:38.087271 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4a008c53-3e3a-46ba-8195-46ffae3aaec5-combined-ca-bundle\") pod \"barbican-api-5b7fc4d9bd-wd2cv\" (UID: \"4a008c53-3e3a-46ba-8195-46ffae3aaec5\") " pod="openstack/barbican-api-5b7fc4d9bd-wd2cv" Nov 23 16:10:38 crc kubenswrapper[5050]: I1123 16:10:38.087353 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/4a008c53-3e3a-46ba-8195-46ffae3aaec5-config-data-custom\") pod \"barbican-api-5b7fc4d9bd-wd2cv\" (UID: \"4a008c53-3e3a-46ba-8195-46ffae3aaec5\") " pod="openstack/barbican-api-5b7fc4d9bd-wd2cv" Nov 23 16:10:38 crc kubenswrapper[5050]: I1123 16:10:38.087410 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4a008c53-3e3a-46ba-8195-46ffae3aaec5-config-data\") pod \"barbican-api-5b7fc4d9bd-wd2cv\" (UID: \"4a008c53-3e3a-46ba-8195-46ffae3aaec5\") " pod="openstack/barbican-api-5b7fc4d9bd-wd2cv" Nov 23 16:10:38 crc kubenswrapper[5050]: I1123 16:10:38.111778 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-55995c5bd5-m6qlj" Nov 23 16:10:38 crc kubenswrapper[5050]: I1123 16:10:38.192883 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/4a008c53-3e3a-46ba-8195-46ffae3aaec5-config-data-custom\") pod \"barbican-api-5b7fc4d9bd-wd2cv\" (UID: \"4a008c53-3e3a-46ba-8195-46ffae3aaec5\") " pod="openstack/barbican-api-5b7fc4d9bd-wd2cv" Nov 23 16:10:38 crc kubenswrapper[5050]: I1123 16:10:38.192977 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4a008c53-3e3a-46ba-8195-46ffae3aaec5-config-data\") pod \"barbican-api-5b7fc4d9bd-wd2cv\" (UID: \"4a008c53-3e3a-46ba-8195-46ffae3aaec5\") " pod="openstack/barbican-api-5b7fc4d9bd-wd2cv" Nov 23 16:10:38 crc kubenswrapper[5050]: I1123 16:10:38.193022 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fh5rf\" (UniqueName: \"kubernetes.io/projected/4a008c53-3e3a-46ba-8195-46ffae3aaec5-kube-api-access-fh5rf\") pod \"barbican-api-5b7fc4d9bd-wd2cv\" (UID: \"4a008c53-3e3a-46ba-8195-46ffae3aaec5\") " pod="openstack/barbican-api-5b7fc4d9bd-wd2cv" Nov 23 16:10:38 crc kubenswrapper[5050]: I1123 16:10:38.193053 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4a008c53-3e3a-46ba-8195-46ffae3aaec5-logs\") pod \"barbican-api-5b7fc4d9bd-wd2cv\" (UID: \"4a008c53-3e3a-46ba-8195-46ffae3aaec5\") " pod="openstack/barbican-api-5b7fc4d9bd-wd2cv" Nov 23 16:10:38 crc kubenswrapper[5050]: I1123 16:10:38.193082 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4a008c53-3e3a-46ba-8195-46ffae3aaec5-combined-ca-bundle\") pod \"barbican-api-5b7fc4d9bd-wd2cv\" (UID: \"4a008c53-3e3a-46ba-8195-46ffae3aaec5\") " pod="openstack/barbican-api-5b7fc4d9bd-wd2cv" Nov 23 16:10:38 crc kubenswrapper[5050]: I1123 16:10:38.195746 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4a008c53-3e3a-46ba-8195-46ffae3aaec5-logs\") pod \"barbican-api-5b7fc4d9bd-wd2cv\" (UID: \"4a008c53-3e3a-46ba-8195-46ffae3aaec5\") " pod="openstack/barbican-api-5b7fc4d9bd-wd2cv" Nov 23 16:10:38 crc kubenswrapper[5050]: I1123 16:10:38.202243 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/4a008c53-3e3a-46ba-8195-46ffae3aaec5-config-data-custom\") pod \"barbican-api-5b7fc4d9bd-wd2cv\" (UID: \"4a008c53-3e3a-46ba-8195-46ffae3aaec5\") " pod="openstack/barbican-api-5b7fc4d9bd-wd2cv" Nov 23 16:10:38 crc kubenswrapper[5050]: I1123 16:10:38.215086 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fh5rf\" (UniqueName: \"kubernetes.io/projected/4a008c53-3e3a-46ba-8195-46ffae3aaec5-kube-api-access-fh5rf\") pod \"barbican-api-5b7fc4d9bd-wd2cv\" (UID: \"4a008c53-3e3a-46ba-8195-46ffae3aaec5\") " pod="openstack/barbican-api-5b7fc4d9bd-wd2cv" Nov 23 16:10:38 crc kubenswrapper[5050]: I1123 16:10:38.215193 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4a008c53-3e3a-46ba-8195-46ffae3aaec5-config-data\") pod \"barbican-api-5b7fc4d9bd-wd2cv\" (UID: \"4a008c53-3e3a-46ba-8195-46ffae3aaec5\") " pod="openstack/barbican-api-5b7fc4d9bd-wd2cv" Nov 23 
16:10:38 crc kubenswrapper[5050]: I1123 16:10:38.216308 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4a008c53-3e3a-46ba-8195-46ffae3aaec5-combined-ca-bundle\") pod \"barbican-api-5b7fc4d9bd-wd2cv\" (UID: \"4a008c53-3e3a-46ba-8195-46ffae3aaec5\") " pod="openstack/barbican-api-5b7fc4d9bd-wd2cv" Nov 23 16:10:38 crc kubenswrapper[5050]: I1123 16:10:38.397940 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-5b7fc4d9bd-wd2cv" Nov 23 16:10:38 crc kubenswrapper[5050]: I1123 16:10:38.537985 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-c95654dc7-5rjvf"] Nov 23 16:10:38 crc kubenswrapper[5050]: I1123 16:10:38.671284 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-6bd5894c98-7q8qw"] Nov 23 16:10:38 crc kubenswrapper[5050]: I1123 16:10:38.785830 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-55995c5bd5-m6qlj"] Nov 23 16:10:38 crc kubenswrapper[5050]: W1123 16:10:38.810211 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf5a32fd3_ee3a_4733_ab6d_92560b2dbd71.slice/crio-8009aca9ffc244c4b0c0ba01ea987e4b043804ee247bcf9a9367aa3efc30998a WatchSource:0}: Error finding container 8009aca9ffc244c4b0c0ba01ea987e4b043804ee247bcf9a9367aa3efc30998a: Status 404 returned error can't find the container with id 8009aca9ffc244c4b0c0ba01ea987e4b043804ee247bcf9a9367aa3efc30998a Nov 23 16:10:39 crc kubenswrapper[5050]: I1123 16:10:39.073158 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-5b7fc4d9bd-wd2cv"] Nov 23 16:10:39 crc kubenswrapper[5050]: W1123 16:10:39.079571 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4a008c53_3e3a_46ba_8195_46ffae3aaec5.slice/crio-ed73ae0c966c738af2ebc91d4d7cc5d8efc012ac7b02425d433f2ddd29d3417e WatchSource:0}: Error finding container ed73ae0c966c738af2ebc91d4d7cc5d8efc012ac7b02425d433f2ddd29d3417e: Status 404 returned error can't find the container with id ed73ae0c966c738af2ebc91d4d7cc5d8efc012ac7b02425d433f2ddd29d3417e Nov 23 16:10:39 crc kubenswrapper[5050]: I1123 16:10:39.413944 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5b7fc4d9bd-wd2cv" event={"ID":"4a008c53-3e3a-46ba-8195-46ffae3aaec5","Type":"ContainerStarted","Data":"b90a72d89f30e09ec7c2f680067366a92ceb02b0c54fe51a770bcf2d772535c4"} Nov 23 16:10:39 crc kubenswrapper[5050]: I1123 16:10:39.414003 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5b7fc4d9bd-wd2cv" event={"ID":"4a008c53-3e3a-46ba-8195-46ffae3aaec5","Type":"ContainerStarted","Data":"ed73ae0c966c738af2ebc91d4d7cc5d8efc012ac7b02425d433f2ddd29d3417e"} Nov 23 16:10:39 crc kubenswrapper[5050]: I1123 16:10:39.416254 5050 generic.go:334] "Generic (PLEG): container finished" podID="f5a32fd3-ee3a-4733-ab6d-92560b2dbd71" containerID="916e1300c6d72dce22b47536fe1f32a2705ed7a380c524d954cf7ce9d888f283" exitCode=0 Nov 23 16:10:39 crc kubenswrapper[5050]: I1123 16:10:39.416637 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55995c5bd5-m6qlj" event={"ID":"f5a32fd3-ee3a-4733-ab6d-92560b2dbd71","Type":"ContainerDied","Data":"916e1300c6d72dce22b47536fe1f32a2705ed7a380c524d954cf7ce9d888f283"} Nov 23 16:10:39 crc 
kubenswrapper[5050]: I1123 16:10:39.416673 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55995c5bd5-m6qlj" event={"ID":"f5a32fd3-ee3a-4733-ab6d-92560b2dbd71","Type":"ContainerStarted","Data":"8009aca9ffc244c4b0c0ba01ea987e4b043804ee247bcf9a9367aa3efc30998a"}
Nov 23 16:10:39 crc kubenswrapper[5050]: I1123 16:10:39.423535 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-c95654dc7-5rjvf" event={"ID":"bd10d96b-8497-4c0b-afe6-5e2027955ec7","Type":"ContainerStarted","Data":"f6402018349822d195a0ccbe878e50d2ba9c0c30e3ff4610234cdb4a7517e69d"}
Nov 23 16:10:39 crc kubenswrapper[5050]: I1123 16:10:39.423589 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-c95654dc7-5rjvf" event={"ID":"bd10d96b-8497-4c0b-afe6-5e2027955ec7","Type":"ContainerStarted","Data":"3735baa13438240ba3cb99b1cfac7dadcf6f43ca7b859608eb176932e56b99bd"}
Nov 23 16:10:39 crc kubenswrapper[5050]: I1123 16:10:39.423605 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-c95654dc7-5rjvf" event={"ID":"bd10d96b-8497-4c0b-afe6-5e2027955ec7","Type":"ContainerStarted","Data":"2bf89d5e813aa0940bcc6a9544c00832b7d5ce8b5abc8bcc0ec4c71429217dbc"}
Nov 23 16:10:39 crc kubenswrapper[5050]: I1123 16:10:39.427289 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-6bd5894c98-7q8qw" event={"ID":"b0a3cfa8-999b-4cb5-853b-9877d94b5da4","Type":"ContainerStarted","Data":"f3be2948783f56e8536bfb8ffd0eba3ab30c0bf69ebf7e9e53a102ffe2065392"}
Nov 23 16:10:39 crc kubenswrapper[5050]: I1123 16:10:39.427341 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-6bd5894c98-7q8qw" event={"ID":"b0a3cfa8-999b-4cb5-853b-9877d94b5da4","Type":"ContainerStarted","Data":"13083a200b0083ee17a5cdca2e4eeb430097661268a58eef95e0faf235c5feb3"}
Nov 23 16:10:39 crc kubenswrapper[5050]: I1123 16:10:39.427359 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-6bd5894c98-7q8qw" event={"ID":"b0a3cfa8-999b-4cb5-853b-9877d94b5da4","Type":"ContainerStarted","Data":"af513d3842c67be69bb0000d1349c53fb2776dae218fc4c05a0a57ce9fdd9b6e"}
Nov 23 16:10:39 crc kubenswrapper[5050]: I1123 16:10:39.477410 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-worker-c95654dc7-5rjvf" podStartSLOduration=2.477385529 podStartE2EDuration="2.477385529s" podCreationTimestamp="2025-11-23 16:10:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 16:10:39.469191848 +0000 UTC m=+5334.636188343" watchObservedRunningTime="2025-11-23 16:10:39.477385529 +0000 UTC m=+5334.644382014"
Nov 23 16:10:39 crc kubenswrapper[5050]: I1123 16:10:39.501050 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-keystone-listener-6bd5894c98-7q8qw" podStartSLOduration=2.501019386 podStartE2EDuration="2.501019386s" podCreationTimestamp="2025-11-23 16:10:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 16:10:39.49475873 +0000 UTC m=+5334.661755235" watchObservedRunningTime="2025-11-23 16:10:39.501019386 +0000 UTC m=+5334.668015871"
Nov 23 16:10:40 crc kubenswrapper[5050]: I1123 16:10:40.441854 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55995c5bd5-m6qlj" event={"ID":"f5a32fd3-ee3a-4733-ab6d-92560b2dbd71","Type":"ContainerStarted","Data":"d7663179605ac4be880286abb3618037a11f5166eec1e523180355db80284be2"}
Nov 23 16:10:40 crc kubenswrapper[5050]: I1123 16:10:40.443897 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-55995c5bd5-m6qlj"
Nov 23 16:10:40 crc kubenswrapper[5050]: I1123 16:10:40.448526 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5b7fc4d9bd-wd2cv" event={"ID":"4a008c53-3e3a-46ba-8195-46ffae3aaec5","Type":"ContainerStarted","Data":"6dcbea2fead8caed559a2ba6dcb639d6f49eeb6247f4b2597c1f1f742b8b049b"}
Nov 23 16:10:40 crc kubenswrapper[5050]: I1123 16:10:40.448575 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-5b7fc4d9bd-wd2cv"
Nov 23 16:10:40 crc kubenswrapper[5050]: I1123 16:10:40.449316 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-5b7fc4d9bd-wd2cv"
Nov 23 16:10:40 crc kubenswrapper[5050]: I1123 16:10:40.489642 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-55995c5bd5-m6qlj" podStartSLOduration=3.489613794 podStartE2EDuration="3.489613794s" podCreationTimestamp="2025-11-23 16:10:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 16:10:40.486945678 +0000 UTC m=+5335.653942203" watchObservedRunningTime="2025-11-23 16:10:40.489613794 +0000 UTC m=+5335.656610319"
Nov 23 16:10:40 crc kubenswrapper[5050]: I1123 16:10:40.515465 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-5b7fc4d9bd-wd2cv" podStartSLOduration=3.515418372 podStartE2EDuration="3.515418372s" podCreationTimestamp="2025-11-23 16:10:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 16:10:40.512966043 +0000 UTC m=+5335.679962538" watchObservedRunningTime="2025-11-23 16:10:40.515418372 +0000 UTC m=+5335.682414867"
Nov 23 16:10:48 crc kubenswrapper[5050]: I1123 16:10:48.115230 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-55995c5bd5-m6qlj"
Nov 23 16:10:48 crc kubenswrapper[5050]: I1123 16:10:48.190241 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5db55b45-t7h8n"]
Nov 23 16:10:48 crc kubenswrapper[5050]: I1123 16:10:48.190516 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5db55b45-t7h8n" podUID="84ccdf47-4526-4bbe-b9a6-ae44490bd586" containerName="dnsmasq-dns" containerID="cri-o://6335fec393250d969a48eb8dfed45b2b17e53f7fc2c8e8ac2b1d1084abdaab37" gracePeriod=10
Nov 23 16:10:48 crc kubenswrapper[5050]: I1123 16:10:48.587798 5050 generic.go:334] "Generic (PLEG): container finished" podID="84ccdf47-4526-4bbe-b9a6-ae44490bd586" containerID="6335fec393250d969a48eb8dfed45b2b17e53f7fc2c8e8ac2b1d1084abdaab37" exitCode=0
Nov 23 16:10:48 crc kubenswrapper[5050]: I1123 16:10:48.587937 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5db55b45-t7h8n" event={"ID":"84ccdf47-4526-4bbe-b9a6-ae44490bd586","Type":"ContainerDied","Data":"6335fec393250d969a48eb8dfed45b2b17e53f7fc2c8e8ac2b1d1084abdaab37"}
Nov 23 16:10:48 crc kubenswrapper[5050]: I1123 16:10:48.717949 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5db55b45-t7h8n"
Nov 23 16:10:48 crc kubenswrapper[5050]: I1123 16:10:48.755674 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z7sx4\" (UniqueName: \"kubernetes.io/projected/84ccdf47-4526-4bbe-b9a6-ae44490bd586-kube-api-access-z7sx4\") pod \"84ccdf47-4526-4bbe-b9a6-ae44490bd586\" (UID: \"84ccdf47-4526-4bbe-b9a6-ae44490bd586\") "
Nov 23 16:10:48 crc kubenswrapper[5050]: I1123 16:10:48.755800 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/84ccdf47-4526-4bbe-b9a6-ae44490bd586-config\") pod \"84ccdf47-4526-4bbe-b9a6-ae44490bd586\" (UID: \"84ccdf47-4526-4bbe-b9a6-ae44490bd586\") "
Nov 23 16:10:48 crc kubenswrapper[5050]: I1123 16:10:48.755822 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/84ccdf47-4526-4bbe-b9a6-ae44490bd586-ovsdbserver-nb\") pod \"84ccdf47-4526-4bbe-b9a6-ae44490bd586\" (UID: \"84ccdf47-4526-4bbe-b9a6-ae44490bd586\") "
Nov 23 16:10:48 crc kubenswrapper[5050]: I1123 16:10:48.755933 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/84ccdf47-4526-4bbe-b9a6-ae44490bd586-ovsdbserver-sb\") pod \"84ccdf47-4526-4bbe-b9a6-ae44490bd586\" (UID: \"84ccdf47-4526-4bbe-b9a6-ae44490bd586\") "
Nov 23 16:10:48 crc kubenswrapper[5050]: I1123 16:10:48.755996 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/84ccdf47-4526-4bbe-b9a6-ae44490bd586-dns-svc\") pod \"84ccdf47-4526-4bbe-b9a6-ae44490bd586\" (UID: \"84ccdf47-4526-4bbe-b9a6-ae44490bd586\") "
Nov 23 16:10:48 crc kubenswrapper[5050]: I1123 16:10:48.774895 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/84ccdf47-4526-4bbe-b9a6-ae44490bd586-kube-api-access-z7sx4" (OuterVolumeSpecName: "kube-api-access-z7sx4") pod "84ccdf47-4526-4bbe-b9a6-ae44490bd586" (UID: "84ccdf47-4526-4bbe-b9a6-ae44490bd586"). InnerVolumeSpecName "kube-api-access-z7sx4". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 23 16:10:48 crc kubenswrapper[5050]: I1123 16:10:48.803468 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/84ccdf47-4526-4bbe-b9a6-ae44490bd586-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "84ccdf47-4526-4bbe-b9a6-ae44490bd586" (UID: "84ccdf47-4526-4bbe-b9a6-ae44490bd586"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 23 16:10:48 crc kubenswrapper[5050]: I1123 16:10:48.803747 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/84ccdf47-4526-4bbe-b9a6-ae44490bd586-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "84ccdf47-4526-4bbe-b9a6-ae44490bd586" (UID: "84ccdf47-4526-4bbe-b9a6-ae44490bd586"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 23 16:10:48 crc kubenswrapper[5050]: I1123 16:10:48.823557 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/84ccdf47-4526-4bbe-b9a6-ae44490bd586-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "84ccdf47-4526-4bbe-b9a6-ae44490bd586" (UID: "84ccdf47-4526-4bbe-b9a6-ae44490bd586"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 23 16:10:48 crc kubenswrapper[5050]: I1123 16:10:48.837434 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/84ccdf47-4526-4bbe-b9a6-ae44490bd586-config" (OuterVolumeSpecName: "config") pod "84ccdf47-4526-4bbe-b9a6-ae44490bd586" (UID: "84ccdf47-4526-4bbe-b9a6-ae44490bd586"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 23 16:10:48 crc kubenswrapper[5050]: I1123 16:10:48.858802 5050 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/84ccdf47-4526-4bbe-b9a6-ae44490bd586-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Nov 23 16:10:48 crc kubenswrapper[5050]: I1123 16:10:48.858844 5050 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/84ccdf47-4526-4bbe-b9a6-ae44490bd586-dns-svc\") on node \"crc\" DevicePath \"\""
Nov 23 16:10:48 crc kubenswrapper[5050]: I1123 16:10:48.858874 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z7sx4\" (UniqueName: \"kubernetes.io/projected/84ccdf47-4526-4bbe-b9a6-ae44490bd586-kube-api-access-z7sx4\") on node \"crc\" DevicePath \"\""
Nov 23 16:10:48 crc kubenswrapper[5050]: I1123 16:10:48.858884 5050 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/84ccdf47-4526-4bbe-b9a6-ae44490bd586-config\") on node \"crc\" DevicePath \"\""
Nov 23 16:10:48 crc kubenswrapper[5050]: I1123 16:10:48.858894 5050 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/84ccdf47-4526-4bbe-b9a6-ae44490bd586-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Nov 23 16:10:49 crc kubenswrapper[5050]: I1123 16:10:49.604993 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5db55b45-t7h8n" event={"ID":"84ccdf47-4526-4bbe-b9a6-ae44490bd586","Type":"ContainerDied","Data":"c5adc79ed7d1e537dbc9f54fda4d77f947914f0bdb29ca51ca363084afa19182"}
Nov 23 16:10:49 crc kubenswrapper[5050]: I1123 16:10:49.605573 5050 scope.go:117] "RemoveContainer" containerID="6335fec393250d969a48eb8dfed45b2b17e53f7fc2c8e8ac2b1d1084abdaab37"
Nov 23 16:10:49 crc kubenswrapper[5050]: I1123 16:10:49.605858 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5db55b45-t7h8n"
Nov 23 16:10:49 crc kubenswrapper[5050]: I1123 16:10:49.653976 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5db55b45-t7h8n"]
Nov 23 16:10:49 crc kubenswrapper[5050]: I1123 16:10:49.655587 5050 scope.go:117] "RemoveContainer" containerID="9900fe592641ae76c655fa4413c5caa9d0a21a676e3683fda980933feef158af"
Nov 23 16:10:49 crc kubenswrapper[5050]: I1123 16:10:49.670834 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5db55b45-t7h8n"]
Nov 23 16:10:49 crc kubenswrapper[5050]: I1123 16:10:49.824707 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-5b7fc4d9bd-wd2cv"
Nov 23 16:10:49 crc kubenswrapper[5050]: I1123 16:10:49.974128 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-5b7fc4d9bd-wd2cv"
Nov 23 16:10:51 crc kubenswrapper[5050]: I1123 16:10:51.572796 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="84ccdf47-4526-4bbe-b9a6-ae44490bd586" path="/var/lib/kubelet/pods/84ccdf47-4526-4bbe-b9a6-ae44490bd586/volumes"
Nov 23 16:10:59 crc kubenswrapper[5050]: I1123 16:10:59.224234 5050 patch_prober.go:28] interesting pod/machine-config-daemon-hlrlq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 23 16:10:59 crc kubenswrapper[5050]: I1123 16:10:59.225158 5050 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 23 16:11:03 crc kubenswrapper[5050]: I1123 16:11:03.193368 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-create-6w8pk"]
Nov 23 16:11:03 crc kubenswrapper[5050]: E1123 16:11:03.194263 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="84ccdf47-4526-4bbe-b9a6-ae44490bd586" containerName="init"
Nov 23 16:11:03 crc kubenswrapper[5050]: I1123 16:11:03.194279 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="84ccdf47-4526-4bbe-b9a6-ae44490bd586" containerName="init"
Nov 23 16:11:03 crc kubenswrapper[5050]: E1123 16:11:03.194324 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="84ccdf47-4526-4bbe-b9a6-ae44490bd586" containerName="dnsmasq-dns"
Nov 23 16:11:03 crc kubenswrapper[5050]: I1123 16:11:03.194331 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="84ccdf47-4526-4bbe-b9a6-ae44490bd586" containerName="dnsmasq-dns"
Nov 23 16:11:03 crc kubenswrapper[5050]: I1123 16:11:03.194514 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="84ccdf47-4526-4bbe-b9a6-ae44490bd586" containerName="dnsmasq-dns"
Nov 23 16:11:03 crc kubenswrapper[5050]: I1123 16:11:03.195164 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-6w8pk"
Nov 23 16:11:03 crc kubenswrapper[5050]: I1123 16:11:03.201539 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-6w8pk"]
Nov 23 16:11:03 crc kubenswrapper[5050]: I1123 16:11:03.271216 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/469f11cd-455f-4c87-92fd-88a05570426a-operator-scripts\") pod \"neutron-db-create-6w8pk\" (UID: \"469f11cd-455f-4c87-92fd-88a05570426a\") " pod="openstack/neutron-db-create-6w8pk"
Nov 23 16:11:03 crc kubenswrapper[5050]: I1123 16:11:03.271540 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2rnjn\" (UniqueName: \"kubernetes.io/projected/469f11cd-455f-4c87-92fd-88a05570426a-kube-api-access-2rnjn\") pod \"neutron-db-create-6w8pk\" (UID: \"469f11cd-455f-4c87-92fd-88a05570426a\") " pod="openstack/neutron-db-create-6w8pk"
Nov 23 16:11:03 crc kubenswrapper[5050]: I1123 16:11:03.289403 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-8743-account-create-jsxsw"]
Nov 23 16:11:03 crc kubenswrapper[5050]: I1123 16:11:03.290590 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-8743-account-create-jsxsw"
Nov 23 16:11:03 crc kubenswrapper[5050]: I1123 16:11:03.293685 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-db-secret"
Nov 23 16:11:03 crc kubenswrapper[5050]: I1123 16:11:03.302640 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-8743-account-create-jsxsw"]
Nov 23 16:11:03 crc kubenswrapper[5050]: I1123 16:11:03.373700 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9vrbd\" (UniqueName: \"kubernetes.io/projected/e84edb55-d2c4-4f7d-bff8-e47b020fb8ae-kube-api-access-9vrbd\") pod \"neutron-8743-account-create-jsxsw\" (UID: \"e84edb55-d2c4-4f7d-bff8-e47b020fb8ae\") " pod="openstack/neutron-8743-account-create-jsxsw"
Nov 23 16:11:03 crc kubenswrapper[5050]: I1123 16:11:03.373792 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2rnjn\" (UniqueName: \"kubernetes.io/projected/469f11cd-455f-4c87-92fd-88a05570426a-kube-api-access-2rnjn\") pod \"neutron-db-create-6w8pk\" (UID: \"469f11cd-455f-4c87-92fd-88a05570426a\") " pod="openstack/neutron-db-create-6w8pk"
Nov 23 16:11:03 crc kubenswrapper[5050]: I1123 16:11:03.373857 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e84edb55-d2c4-4f7d-bff8-e47b020fb8ae-operator-scripts\") pod \"neutron-8743-account-create-jsxsw\" (UID: \"e84edb55-d2c4-4f7d-bff8-e47b020fb8ae\") " pod="openstack/neutron-8743-account-create-jsxsw"
Nov 23 16:11:03 crc kubenswrapper[5050]: I1123 16:11:03.374204 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/469f11cd-455f-4c87-92fd-88a05570426a-operator-scripts\") pod \"neutron-db-create-6w8pk\" (UID: \"469f11cd-455f-4c87-92fd-88a05570426a\") " pod="openstack/neutron-db-create-6w8pk"
Nov 23 16:11:03 crc kubenswrapper[5050]: I1123 16:11:03.375218 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/469f11cd-455f-4c87-92fd-88a05570426a-operator-scripts\") pod \"neutron-db-create-6w8pk\" (UID: \"469f11cd-455f-4c87-92fd-88a05570426a\") " pod="openstack/neutron-db-create-6w8pk"
Nov 23 16:11:03 crc kubenswrapper[5050]: I1123 16:11:03.416367 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2rnjn\" (UniqueName: \"kubernetes.io/projected/469f11cd-455f-4c87-92fd-88a05570426a-kube-api-access-2rnjn\") pod \"neutron-db-create-6w8pk\" (UID: \"469f11cd-455f-4c87-92fd-88a05570426a\") " pod="openstack/neutron-db-create-6w8pk"
Nov 23 16:11:03 crc kubenswrapper[5050]: I1123 16:11:03.476875 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9vrbd\" (UniqueName: \"kubernetes.io/projected/e84edb55-d2c4-4f7d-bff8-e47b020fb8ae-kube-api-access-9vrbd\") pod \"neutron-8743-account-create-jsxsw\" (UID: \"e84edb55-d2c4-4f7d-bff8-e47b020fb8ae\") " pod="openstack/neutron-8743-account-create-jsxsw"
Nov 23 16:11:03 crc kubenswrapper[5050]: I1123 16:11:03.476964 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e84edb55-d2c4-4f7d-bff8-e47b020fb8ae-operator-scripts\") pod \"neutron-8743-account-create-jsxsw\" (UID: \"e84edb55-d2c4-4f7d-bff8-e47b020fb8ae\") " pod="openstack/neutron-8743-account-create-jsxsw"
Nov 23 16:11:03 crc kubenswrapper[5050]: I1123 16:11:03.478060 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e84edb55-d2c4-4f7d-bff8-e47b020fb8ae-operator-scripts\") pod \"neutron-8743-account-create-jsxsw\" (UID: \"e84edb55-d2c4-4f7d-bff8-e47b020fb8ae\") " pod="openstack/neutron-8743-account-create-jsxsw"
Nov 23 16:11:03 crc kubenswrapper[5050]: I1123 16:11:03.497376 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9vrbd\" (UniqueName: \"kubernetes.io/projected/e84edb55-d2c4-4f7d-bff8-e47b020fb8ae-kube-api-access-9vrbd\") pod \"neutron-8743-account-create-jsxsw\" (UID: \"e84edb55-d2c4-4f7d-bff8-e47b020fb8ae\") " pod="openstack/neutron-8743-account-create-jsxsw"
Nov 23 16:11:03 crc kubenswrapper[5050]: I1123 16:11:03.517355 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-6w8pk"
Nov 23 16:11:03 crc kubenswrapper[5050]: I1123 16:11:03.608968 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-8743-account-create-jsxsw"
Nov 23 16:11:04 crc kubenswrapper[5050]: I1123 16:11:04.109773 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-6w8pk"]
Nov 23 16:11:04 crc kubenswrapper[5050]: I1123 16:11:04.193674 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-8743-account-create-jsxsw"]
Nov 23 16:11:04 crc kubenswrapper[5050]: W1123 16:11:04.197091 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode84edb55_d2c4_4f7d_bff8_e47b020fb8ae.slice/crio-bc93bef33944bb884b5599cacfd1f80c34eefb1d847178916f55e1d6e4f43c1e WatchSource:0}: Error finding container bc93bef33944bb884b5599cacfd1f80c34eefb1d847178916f55e1d6e4f43c1e: Status 404 returned error can't find the container with id bc93bef33944bb884b5599cacfd1f80c34eefb1d847178916f55e1d6e4f43c1e
Nov 23 16:11:04 crc kubenswrapper[5050]: I1123 16:11:04.778919 5050 generic.go:334] "Generic (PLEG): container finished" podID="469f11cd-455f-4c87-92fd-88a05570426a" containerID="e3222612ce606cca9d830c9a71106e3a15f0f4ad7e1803fa600d8aea026e4915" exitCode=0
Nov 23 16:11:04 crc kubenswrapper[5050]: I1123 16:11:04.779013 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-6w8pk" event={"ID":"469f11cd-455f-4c87-92fd-88a05570426a","Type":"ContainerDied","Data":"e3222612ce606cca9d830c9a71106e3a15f0f4ad7e1803fa600d8aea026e4915"}
Nov 23 16:11:04 crc kubenswrapper[5050]: I1123 16:11:04.779543 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-6w8pk" event={"ID":"469f11cd-455f-4c87-92fd-88a05570426a","Type":"ContainerStarted","Data":"07146de94c6ce5a4aaad4cf27bbb2d8d15bd7f264953b1917701b8717218f909"}
Nov 23 16:11:04 crc kubenswrapper[5050]: I1123 16:11:04.781663 5050 generic.go:334] "Generic (PLEG): container finished" podID="e84edb55-d2c4-4f7d-bff8-e47b020fb8ae" containerID="06eeb66169a349a79bdefa5d2af36c8631843103d54a811997de71f01c5933e6" exitCode=0
Nov 23 16:11:04 crc kubenswrapper[5050]: I1123 16:11:04.781759 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-8743-account-create-jsxsw" event={"ID":"e84edb55-d2c4-4f7d-bff8-e47b020fb8ae","Type":"ContainerDied","Data":"06eeb66169a349a79bdefa5d2af36c8631843103d54a811997de71f01c5933e6"}
Nov 23 16:11:04 crc kubenswrapper[5050]: I1123 16:11:04.781837 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-8743-account-create-jsxsw" event={"ID":"e84edb55-d2c4-4f7d-bff8-e47b020fb8ae","Type":"ContainerStarted","Data":"bc93bef33944bb884b5599cacfd1f80c34eefb1d847178916f55e1d6e4f43c1e"}
Nov 23 16:11:06 crc kubenswrapper[5050]: I1123 16:11:06.149415 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-8743-account-create-jsxsw"
Nov 23 16:11:06 crc kubenswrapper[5050]: I1123 16:11:06.246698 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e84edb55-d2c4-4f7d-bff8-e47b020fb8ae-operator-scripts\") pod \"e84edb55-d2c4-4f7d-bff8-e47b020fb8ae\" (UID: \"e84edb55-d2c4-4f7d-bff8-e47b020fb8ae\") "
Nov 23 16:11:06 crc kubenswrapper[5050]: I1123 16:11:06.246767 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9vrbd\" (UniqueName: \"kubernetes.io/projected/e84edb55-d2c4-4f7d-bff8-e47b020fb8ae-kube-api-access-9vrbd\") pod \"e84edb55-d2c4-4f7d-bff8-e47b020fb8ae\" (UID: \"e84edb55-d2c4-4f7d-bff8-e47b020fb8ae\") "
Nov 23 16:11:06 crc kubenswrapper[5050]: I1123 16:11:06.248096 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e84edb55-d2c4-4f7d-bff8-e47b020fb8ae-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "e84edb55-d2c4-4f7d-bff8-e47b020fb8ae" (UID: "e84edb55-d2c4-4f7d-bff8-e47b020fb8ae"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 23 16:11:06 crc kubenswrapper[5050]: I1123 16:11:06.263744 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e84edb55-d2c4-4f7d-bff8-e47b020fb8ae-kube-api-access-9vrbd" (OuterVolumeSpecName: "kube-api-access-9vrbd") pod "e84edb55-d2c4-4f7d-bff8-e47b020fb8ae" (UID: "e84edb55-d2c4-4f7d-bff8-e47b020fb8ae"). InnerVolumeSpecName "kube-api-access-9vrbd". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 23 16:11:06 crc kubenswrapper[5050]: I1123 16:11:06.336522 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-6w8pk"
Nov 23 16:11:06 crc kubenswrapper[5050]: I1123 16:11:06.349517 5050 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e84edb55-d2c4-4f7d-bff8-e47b020fb8ae-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 23 16:11:06 crc kubenswrapper[5050]: I1123 16:11:06.350075 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9vrbd\" (UniqueName: \"kubernetes.io/projected/e84edb55-d2c4-4f7d-bff8-e47b020fb8ae-kube-api-access-9vrbd\") on node \"crc\" DevicePath \"\""
Nov 23 16:11:06 crc kubenswrapper[5050]: I1123 16:11:06.452824 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2rnjn\" (UniqueName: \"kubernetes.io/projected/469f11cd-455f-4c87-92fd-88a05570426a-kube-api-access-2rnjn\") pod \"469f11cd-455f-4c87-92fd-88a05570426a\" (UID: \"469f11cd-455f-4c87-92fd-88a05570426a\") "
Nov 23 16:11:06 crc kubenswrapper[5050]: I1123 16:11:06.453145 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/469f11cd-455f-4c87-92fd-88a05570426a-operator-scripts\") pod \"469f11cd-455f-4c87-92fd-88a05570426a\" (UID: \"469f11cd-455f-4c87-92fd-88a05570426a\") "
Nov 23 16:11:06 crc kubenswrapper[5050]: I1123 16:11:06.454080 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/469f11cd-455f-4c87-92fd-88a05570426a-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "469f11cd-455f-4c87-92fd-88a05570426a" (UID: "469f11cd-455f-4c87-92fd-88a05570426a"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 23 16:11:06 crc kubenswrapper[5050]: I1123 16:11:06.458870 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/469f11cd-455f-4c87-92fd-88a05570426a-kube-api-access-2rnjn" (OuterVolumeSpecName: "kube-api-access-2rnjn") pod "469f11cd-455f-4c87-92fd-88a05570426a" (UID: "469f11cd-455f-4c87-92fd-88a05570426a"). InnerVolumeSpecName "kube-api-access-2rnjn". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 23 16:11:06 crc kubenswrapper[5050]: I1123 16:11:06.556708 5050 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/469f11cd-455f-4c87-92fd-88a05570426a-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 23 16:11:06 crc kubenswrapper[5050]: I1123 16:11:06.556761 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2rnjn\" (UniqueName: \"kubernetes.io/projected/469f11cd-455f-4c87-92fd-88a05570426a-kube-api-access-2rnjn\") on node \"crc\" DevicePath \"\""
Nov 23 16:11:06 crc kubenswrapper[5050]: I1123 16:11:06.815648 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-6w8pk"
Nov 23 16:11:06 crc kubenswrapper[5050]: I1123 16:11:06.815662 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-6w8pk" event={"ID":"469f11cd-455f-4c87-92fd-88a05570426a","Type":"ContainerDied","Data":"07146de94c6ce5a4aaad4cf27bbb2d8d15bd7f264953b1917701b8717218f909"}
Nov 23 16:11:06 crc kubenswrapper[5050]: I1123 16:11:06.816206 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="07146de94c6ce5a4aaad4cf27bbb2d8d15bd7f264953b1917701b8717218f909"
Nov 23 16:11:06 crc kubenswrapper[5050]: I1123 16:11:06.820825 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-8743-account-create-jsxsw" event={"ID":"e84edb55-d2c4-4f7d-bff8-e47b020fb8ae","Type":"ContainerDied","Data":"bc93bef33944bb884b5599cacfd1f80c34eefb1d847178916f55e1d6e4f43c1e"}
Nov 23 16:11:06 crc kubenswrapper[5050]: I1123 16:11:06.820880 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bc93bef33944bb884b5599cacfd1f80c34eefb1d847178916f55e1d6e4f43c1e"
Nov 23 16:11:06 crc kubenswrapper[5050]: I1123 16:11:06.820890 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-8743-account-create-jsxsw"
Nov 23 16:11:08 crc kubenswrapper[5050]: I1123 16:11:08.583518 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-sync-4gnmq"]
Nov 23 16:11:08 crc kubenswrapper[5050]: E1123 16:11:08.586261 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e84edb55-d2c4-4f7d-bff8-e47b020fb8ae" containerName="mariadb-account-create"
Nov 23 16:11:08 crc kubenswrapper[5050]: I1123 16:11:08.586470 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="e84edb55-d2c4-4f7d-bff8-e47b020fb8ae" containerName="mariadb-account-create"
Nov 23 16:11:08 crc kubenswrapper[5050]: E1123 16:11:08.586657 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="469f11cd-455f-4c87-92fd-88a05570426a" containerName="mariadb-database-create"
Nov 23 16:11:08 crc kubenswrapper[5050]: I1123 16:11:08.586791 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="469f11cd-455f-4c87-92fd-88a05570426a" containerName="mariadb-database-create"
Nov 23 16:11:08 crc kubenswrapper[5050]: I1123 16:11:08.587261 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="e84edb55-d2c4-4f7d-bff8-e47b020fb8ae" containerName="mariadb-account-create"
Nov 23 16:11:08 crc kubenswrapper[5050]: I1123 16:11:08.587620 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="469f11cd-455f-4c87-92fd-88a05570426a" containerName="mariadb-database-create"
Nov 23 16:11:08 crc kubenswrapper[5050]: I1123 16:11:08.589125 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-4gnmq"
Nov 23 16:11:08 crc kubenswrapper[5050]: I1123 16:11:08.594825 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config"
Nov 23 16:11:08 crc kubenswrapper[5050]: I1123 16:11:08.597732 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config"
Nov 23 16:11:08 crc kubenswrapper[5050]: I1123 16:11:08.598808 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-pg4t2"
Nov 23 16:11:08 crc kubenswrapper[5050]: I1123 16:11:08.620976 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-4gnmq"]
Nov 23 16:11:08 crc kubenswrapper[5050]: I1123 16:11:08.710958 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/05d7c58c-c46a-486e-9853-a880b565240c-combined-ca-bundle\") pod \"neutron-db-sync-4gnmq\" (UID: \"05d7c58c-c46a-486e-9853-a880b565240c\") " pod="openstack/neutron-db-sync-4gnmq"
Nov 23 16:11:08 crc kubenswrapper[5050]: I1123 16:11:08.711295 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/05d7c58c-c46a-486e-9853-a880b565240c-config\") pod \"neutron-db-sync-4gnmq\" (UID: \"05d7c58c-c46a-486e-9853-a880b565240c\") " pod="openstack/neutron-db-sync-4gnmq"
Nov 23 16:11:08 crc kubenswrapper[5050]: I1123 16:11:08.711479 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gzslp\" (UniqueName: \"kubernetes.io/projected/05d7c58c-c46a-486e-9853-a880b565240c-kube-api-access-gzslp\") pod \"neutron-db-sync-4gnmq\" (UID: \"05d7c58c-c46a-486e-9853-a880b565240c\") " pod="openstack/neutron-db-sync-4gnmq"
Nov 23 16:11:08 crc kubenswrapper[5050]: I1123 16:11:08.814100 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/05d7c58c-c46a-486e-9853-a880b565240c-config\") pod \"neutron-db-sync-4gnmq\" (UID: \"05d7c58c-c46a-486e-9853-a880b565240c\") " pod="openstack/neutron-db-sync-4gnmq"
Nov 23 16:11:08 crc kubenswrapper[5050]: I1123 16:11:08.814168 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gzslp\" (UniqueName: \"kubernetes.io/projected/05d7c58c-c46a-486e-9853-a880b565240c-kube-api-access-gzslp\") pod \"neutron-db-sync-4gnmq\" (UID: \"05d7c58c-c46a-486e-9853-a880b565240c\") " pod="openstack/neutron-db-sync-4gnmq"
Nov 23 16:11:08 crc kubenswrapper[5050]: I1123 16:11:08.814254 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/05d7c58c-c46a-486e-9853-a880b565240c-combined-ca-bundle\") pod \"neutron-db-sync-4gnmq\" (UID: \"05d7c58c-c46a-486e-9853-a880b565240c\") " pod="openstack/neutron-db-sync-4gnmq"
Nov 23 16:11:08 crc kubenswrapper[5050]: I1123 16:11:08.823824 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/05d7c58c-c46a-486e-9853-a880b565240c-combined-ca-bundle\") pod \"neutron-db-sync-4gnmq\" (UID: \"05d7c58c-c46a-486e-9853-a880b565240c\") " pod="openstack/neutron-db-sync-4gnmq"
Nov 23 16:11:08 crc kubenswrapper[5050]: I1123 16:11:08.824433 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/05d7c58c-c46a-486e-9853-a880b565240c-config\") pod \"neutron-db-sync-4gnmq\" (UID: \"05d7c58c-c46a-486e-9853-a880b565240c\") " pod="openstack/neutron-db-sync-4gnmq"
Nov 23 16:11:08 crc kubenswrapper[5050]: I1123 16:11:08.834495 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gzslp\" (UniqueName: \"kubernetes.io/projected/05d7c58c-c46a-486e-9853-a880b565240c-kube-api-access-gzslp\") pod \"neutron-db-sync-4gnmq\" (UID: \"05d7c58c-c46a-486e-9853-a880b565240c\") " pod="openstack/neutron-db-sync-4gnmq"
Nov 23 16:11:08 crc kubenswrapper[5050]: I1123 16:11:08.920136 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-4gnmq"
Nov 23 16:11:09 crc kubenswrapper[5050]: I1123 16:11:09.395658 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-4gnmq"]
Nov 23 16:11:09 crc kubenswrapper[5050]: I1123 16:11:09.854534 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-4gnmq" event={"ID":"05d7c58c-c46a-486e-9853-a880b565240c","Type":"ContainerStarted","Data":"bc2658aa9d45357ecc17228dc0b9024a120340b6b5351df41f2c2e9536c261b5"}
Nov 23 16:11:09 crc kubenswrapper[5050]: I1123 16:11:09.855036 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-4gnmq" event={"ID":"05d7c58c-c46a-486e-9853-a880b565240c","Type":"ContainerStarted","Data":"bda3ac4ce470a83d406296c0eb6848ef527bf0db55d024059a93a813f1e45901"}
Nov 23 16:11:09 crc kubenswrapper[5050]: I1123 16:11:09.882244 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-db-sync-4gnmq" podStartSLOduration=1.882219567 podStartE2EDuration="1.882219567s" podCreationTimestamp="2025-11-23 16:11:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 16:11:09.871992998 +0000 UTC m=+5365.038989483" watchObservedRunningTime="2025-11-23 16:11:09.882219567 +0000 UTC m=+5365.049216052"
Nov 23 16:11:15 crc kubenswrapper[5050]: I1123 16:11:15.038491 5050 generic.go:334] "Generic (PLEG): container finished" podID="05d7c58c-c46a-486e-9853-a880b565240c" containerID="bc2658aa9d45357ecc17228dc0b9024a120340b6b5351df41f2c2e9536c261b5" exitCode=0
Nov 23 16:11:15 crc kubenswrapper[5050]: I1123 16:11:15.038549 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-4gnmq" event={"ID":"05d7c58c-c46a-486e-9853-a880b565240c","Type":"ContainerDied","Data":"bc2658aa9d45357ecc17228dc0b9024a120340b6b5351df41f2c2e9536c261b5"}
Nov 23 16:11:16 crc kubenswrapper[5050]: I1123 16:11:16.533759 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-4gnmq"
Nov 23 16:11:16 crc kubenswrapper[5050]: I1123 16:11:16.581386 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/05d7c58c-c46a-486e-9853-a880b565240c-config\") pod \"05d7c58c-c46a-486e-9853-a880b565240c\" (UID: \"05d7c58c-c46a-486e-9853-a880b565240c\") "
Nov 23 16:11:16 crc kubenswrapper[5050]: I1123 16:11:16.581835 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gzslp\" (UniqueName: \"kubernetes.io/projected/05d7c58c-c46a-486e-9853-a880b565240c-kube-api-access-gzslp\") pod \"05d7c58c-c46a-486e-9853-a880b565240c\" (UID: \"05d7c58c-c46a-486e-9853-a880b565240c\") "
Nov 23 16:11:16 crc kubenswrapper[5050]: I1123 16:11:16.582057 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/05d7c58c-c46a-486e-9853-a880b565240c-combined-ca-bundle\") pod \"05d7c58c-c46a-486e-9853-a880b565240c\" (UID: \"05d7c58c-c46a-486e-9853-a880b565240c\") "
Nov 23 16:11:16 crc kubenswrapper[5050]: I1123 16:11:16.593709 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/05d7c58c-c46a-486e-9853-a880b565240c-kube-api-access-gzslp" (OuterVolumeSpecName: "kube-api-access-gzslp") pod "05d7c58c-c46a-486e-9853-a880b565240c" (UID: "05d7c58c-c46a-486e-9853-a880b565240c"). InnerVolumeSpecName "kube-api-access-gzslp". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 23 16:11:16 crc kubenswrapper[5050]: I1123 16:11:16.615051 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/05d7c58c-c46a-486e-9853-a880b565240c-config" (OuterVolumeSpecName: "config") pod "05d7c58c-c46a-486e-9853-a880b565240c" (UID: "05d7c58c-c46a-486e-9853-a880b565240c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 23 16:11:16 crc kubenswrapper[5050]: I1123 16:11:16.615993 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/05d7c58c-c46a-486e-9853-a880b565240c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "05d7c58c-c46a-486e-9853-a880b565240c" (UID: "05d7c58c-c46a-486e-9853-a880b565240c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 23 16:11:16 crc kubenswrapper[5050]: I1123 16:11:16.686965 5050 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/05d7c58c-c46a-486e-9853-a880b565240c-config\") on node \"crc\" DevicePath \"\""
Nov 23 16:11:16 crc kubenswrapper[5050]: I1123 16:11:16.687027 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gzslp\" (UniqueName: \"kubernetes.io/projected/05d7c58c-c46a-486e-9853-a880b565240c-kube-api-access-gzslp\") on node \"crc\" DevicePath \"\""
Nov 23 16:11:16 crc kubenswrapper[5050]: I1123 16:11:16.687051 5050 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/05d7c58c-c46a-486e-9853-a880b565240c-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 23 16:11:17 crc kubenswrapper[5050]: I1123 16:11:17.066999 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-4gnmq" event={"ID":"05d7c58c-c46a-486e-9853-a880b565240c","Type":"ContainerDied","Data":"bda3ac4ce470a83d406296c0eb6848ef527bf0db55d024059a93a813f1e45901"}
Nov 23 16:11:17 crc kubenswrapper[5050]: I1123 16:11:17.067053 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bda3ac4ce470a83d406296c0eb6848ef527bf0db55d024059a93a813f1e45901"
Nov 23 16:11:17 crc kubenswrapper[5050]: I1123 16:11:17.067171 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-4gnmq"
Nov 23 16:11:17 crc kubenswrapper[5050]: I1123 16:11:17.363107 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-79b55cc78f-gq4jd"]
Nov 23 16:11:17 crc kubenswrapper[5050]: E1123 16:11:17.365061 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="05d7c58c-c46a-486e-9853-a880b565240c" containerName="neutron-db-sync"
Nov 23 16:11:17 crc kubenswrapper[5050]: I1123 16:11:17.365177 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="05d7c58c-c46a-486e-9853-a880b565240c" containerName="neutron-db-sync"
Nov 23 16:11:17 crc kubenswrapper[5050]: I1123 16:11:17.365592 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="05d7c58c-c46a-486e-9853-a880b565240c" containerName="neutron-db-sync"
Nov 23 16:11:17 crc kubenswrapper[5050]: I1123 16:11:17.367200 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-79b55cc78f-gq4jd"
Nov 23 16:11:17 crc kubenswrapper[5050]: I1123 16:11:17.393683 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-79b55cc78f-gq4jd"]
Nov 23 16:11:17 crc kubenswrapper[5050]: I1123 16:11:17.403530 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f2532463-ea34-450a-9c35-337de82cdef5-dns-svc\") pod \"dnsmasq-dns-79b55cc78f-gq4jd\" (UID: \"f2532463-ea34-450a-9c35-337de82cdef5\") " pod="openstack/dnsmasq-dns-79b55cc78f-gq4jd"
Nov 23 16:11:17 crc kubenswrapper[5050]: I1123 16:11:17.403576 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f2532463-ea34-450a-9c35-337de82cdef5-config\") pod \"dnsmasq-dns-79b55cc78f-gq4jd\" (UID: \"f2532463-ea34-450a-9c35-337de82cdef5\") " pod="openstack/dnsmasq-dns-79b55cc78f-gq4jd"
Nov 23 16:11:17 crc kubenswrapper[5050]: I1123 16:11:17.403621 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f2532463-ea34-450a-9c35-337de82cdef5-ovsdbserver-nb\") pod \"dnsmasq-dns-79b55cc78f-gq4jd\" (UID: \"f2532463-ea34-450a-9c35-337de82cdef5\") " pod="openstack/dnsmasq-dns-79b55cc78f-gq4jd"
Nov 23 16:11:17 crc kubenswrapper[5050]: I1123 16:11:17.403663 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bxrvr\" (UniqueName: \"kubernetes.io/projected/f2532463-ea34-450a-9c35-337de82cdef5-kube-api-access-bxrvr\") pod \"dnsmasq-dns-79b55cc78f-gq4jd\" (UID: \"f2532463-ea34-450a-9c35-337de82cdef5\") " pod="openstack/dnsmasq-dns-79b55cc78f-gq4jd"
Nov 23 16:11:17 crc kubenswrapper[5050]: I1123 16:11:17.403684 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f2532463-ea34-450a-9c35-337de82cdef5-ovsdbserver-sb\") pod \"dnsmasq-dns-79b55cc78f-gq4jd\" (UID: \"f2532463-ea34-450a-9c35-337de82cdef5\") " pod="openstack/dnsmasq-dns-79b55cc78f-gq4jd"
Nov 23 16:11:17 crc kubenswrapper[5050]: I1123 16:11:17.472781 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-58b6b75955-wvrqr"]
Nov 23 16:11:17 crc kubenswrapper[5050]: I1123 16:11:17.476285 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-58b6b75955-wvrqr"
Nov 23 16:11:17 crc kubenswrapper[5050]: I1123 16:11:17.479949 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config"
Nov 23 16:11:17 crc kubenswrapper[5050]: I1123 16:11:17.480625 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config"
Nov 23 16:11:17 crc kubenswrapper[5050]: I1123 16:11:17.480894 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-pg4t2"
Nov 23 16:11:17 crc kubenswrapper[5050]: I1123 16:11:17.507591 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f2532463-ea34-450a-9c35-337de82cdef5-dns-svc\") pod \"dnsmasq-dns-79b55cc78f-gq4jd\" (UID: \"f2532463-ea34-450a-9c35-337de82cdef5\") " pod="openstack/dnsmasq-dns-79b55cc78f-gq4jd"
Nov 23 16:11:17 crc kubenswrapper[5050]: I1123 16:11:17.507664 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f2532463-ea34-450a-9c35-337de82cdef5-config\") pod \"dnsmasq-dns-79b55cc78f-gq4jd\" (UID: \"f2532463-ea34-450a-9c35-337de82cdef5\") " pod="openstack/dnsmasq-dns-79b55cc78f-gq4jd"
Nov 23 16:11:17 crc kubenswrapper[5050]: I1123 16:11:17.507721 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/f8a537cf-2631-4d9c-8cee-4de81f9e53a4-httpd-config\") pod \"neutron-58b6b75955-wvrqr\" (UID: \"f8a537cf-2631-4d9c-8cee-4de81f9e53a4\") " pod="openstack/neutron-58b6b75955-wvrqr"
Nov 23 16:11:17 crc kubenswrapper[5050]: I1123 16:11:17.507762 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f2532463-ea34-450a-9c35-337de82cdef5-ovsdbserver-nb\") pod \"dnsmasq-dns-79b55cc78f-gq4jd\" (UID: \"f2532463-ea34-450a-9c35-337de82cdef5\") " pod="openstack/dnsmasq-dns-79b55cc78f-gq4jd"
Nov 23 16:11:17 crc kubenswrapper[5050]: I1123 16:11:17.507783 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/f8a537cf-2631-4d9c-8cee-4de81f9e53a4-config\") pod \"neutron-58b6b75955-wvrqr\" (UID: \"f8a537cf-2631-4d9c-8cee-4de81f9e53a4\") " pod="openstack/neutron-58b6b75955-wvrqr"
Nov 23 16:11:17 crc kubenswrapper[5050]: I1123 16:11:17.507841 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bxrvr\" (UniqueName: \"kubernetes.io/projected/f2532463-ea34-450a-9c35-337de82cdef5-kube-api-access-bxrvr\") pod \"dnsmasq-dns-79b55cc78f-gq4jd\" (UID: \"f2532463-ea34-450a-9c35-337de82cdef5\") " pod="openstack/dnsmasq-dns-79b55cc78f-gq4jd"
Nov 23 16:11:17 crc kubenswrapper[5050]: I1123 16:11:17.508136 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f2532463-ea34-450a-9c35-337de82cdef5-ovsdbserver-sb\") pod \"dnsmasq-dns-79b55cc78f-gq4jd\" (UID: \"f2532463-ea34-450a-9c35-337de82cdef5\") " pod="openstack/dnsmasq-dns-79b55cc78f-gq4jd"
Nov 23 16:11:17 crc kubenswrapper[5050]: I1123 16:11:17.509684 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f8a537cf-2631-4d9c-8cee-4de81f9e53a4-combined-ca-bundle\") pod \"neutron-58b6b75955-wvrqr\" (UID: \"f8a537cf-2631-4d9c-8cee-4de81f9e53a4\") " pod="openstack/neutron-58b6b75955-wvrqr"
Nov 23 16:11:17 crc kubenswrapper[5050]: I1123 16:11:17.509798 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f2532463-ea34-450a-9c35-337de82cdef5-dns-svc\") pod \"dnsmasq-dns-79b55cc78f-gq4jd\" (UID: \"f2532463-ea34-450a-9c35-337de82cdef5\") " pod="openstack/dnsmasq-dns-79b55cc78f-gq4jd"
Nov 23 16:11:17 crc kubenswrapper[5050]: I1123 16:11:17.510421 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f2532463-ea34-450a-9c35-337de82cdef5-ovsdbserver-sb\") pod \"dnsmasq-dns-79b55cc78f-gq4jd\" (UID: \"f2532463-ea34-450a-9c35-337de82cdef5\") " pod="openstack/dnsmasq-dns-79b55cc78f-gq4jd"
Nov 23 16:11:17 crc kubenswrapper[5050]: I1123 16:11:17.511030 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f2532463-ea34-450a-9c35-337de82cdef5-config\") pod \"dnsmasq-dns-79b55cc78f-gq4jd\" (UID: \"f2532463-ea34-450a-9c35-337de82cdef5\") " pod="openstack/dnsmasq-dns-79b55cc78f-gq4jd"
Nov 23 16:11:17 crc kubenswrapper[5050]: I1123 16:11:17.511192 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f2532463-ea34-450a-9c35-337de82cdef5-ovsdbserver-nb\") pod \"dnsmasq-dns-79b55cc78f-gq4jd\" (UID: \"f2532463-ea34-450a-9c35-337de82cdef5\") " pod="openstack/dnsmasq-dns-79b55cc78f-gq4jd"
Nov 23 16:11:17 crc kubenswrapper[5050]: I1123 16:11:17.521222 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-58b6b75955-wvrqr"]
Nov 23 16:11:17 crc kubenswrapper[5050]: I1123 16:11:17.566621 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bxrvr\" (UniqueName: \"kubernetes.io/projected/f2532463-ea34-450a-9c35-337de82cdef5-kube-api-access-bxrvr\") pod \"dnsmasq-dns-79b55cc78f-gq4jd\" (UID: \"f2532463-ea34-450a-9c35-337de82cdef5\") " pod="openstack/dnsmasq-dns-79b55cc78f-gq4jd"
Nov 23 16:11:17 crc kubenswrapper[5050]: I1123 16:11:17.611922 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l9crz\" (UniqueName: \"kubernetes.io/projected/f8a537cf-2631-4d9c-8cee-4de81f9e53a4-kube-api-access-l9crz\") pod \"neutron-58b6b75955-wvrqr\" (UID: \"f8a537cf-2631-4d9c-8cee-4de81f9e53a4\") " pod="openstack/neutron-58b6b75955-wvrqr"
Nov 23 16:11:17 crc kubenswrapper[5050]: I1123 16:11:17.612024 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f8a537cf-2631-4d9c-8cee-4de81f9e53a4-combined-ca-bundle\") pod \"neutron-58b6b75955-wvrqr\" (UID: \"f8a537cf-2631-4d9c-8cee-4de81f9e53a4\") " pod="openstack/neutron-58b6b75955-wvrqr"
Nov 23 16:11:17 crc kubenswrapper[5050]: I1123 16:11:17.612505 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/f8a537cf-2631-4d9c-8cee-4de81f9e53a4-httpd-config\") pod \"neutron-58b6b75955-wvrqr\" (UID: \"f8a537cf-2631-4d9c-8cee-4de81f9e53a4\") " pod="openstack/neutron-58b6b75955-wvrqr"
Nov 23 16:11:17 crc kubenswrapper[5050]: I1123 16:11:17.612626 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/f8a537cf-2631-4d9c-8cee-4de81f9e53a4-config\") pod \"neutron-58b6b75955-wvrqr\" (UID: \"f8a537cf-2631-4d9c-8cee-4de81f9e53a4\") " pod="openstack/neutron-58b6b75955-wvrqr"
Nov 23 16:11:17 crc kubenswrapper[5050]: I1123 16:11:17.617151 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f8a537cf-2631-4d9c-8cee-4de81f9e53a4-combined-ca-bundle\") pod \"neutron-58b6b75955-wvrqr\" (UID: \"f8a537cf-2631-4d9c-8cee-4de81f9e53a4\") " pod="openstack/neutron-58b6b75955-wvrqr"
Nov 23 16:11:17 crc kubenswrapper[5050]: I1123 16:11:17.618135 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/f8a537cf-2631-4d9c-8cee-4de81f9e53a4-httpd-config\") pod \"neutron-58b6b75955-wvrqr\" (UID: \"f8a537cf-2631-4d9c-8cee-4de81f9e53a4\") " pod="openstack/neutron-58b6b75955-wvrqr"
Nov 23 16:11:17 crc kubenswrapper[5050]: I1123 16:11:17.622044 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/f8a537cf-2631-4d9c-8cee-4de81f9e53a4-config\") pod \"neutron-58b6b75955-wvrqr\" (UID: \"f8a537cf-2631-4d9c-8cee-4de81f9e53a4\") " pod="openstack/neutron-58b6b75955-wvrqr"
Nov 23 16:11:17 crc kubenswrapper[5050]: I1123 16:11:17.687313 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-79b55cc78f-gq4jd"
Nov 23 16:11:17 crc kubenswrapper[5050]: I1123 16:11:17.715156 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l9crz\" (UniqueName: \"kubernetes.io/projected/f8a537cf-2631-4d9c-8cee-4de81f9e53a4-kube-api-access-l9crz\") pod \"neutron-58b6b75955-wvrqr\" (UID: \"f8a537cf-2631-4d9c-8cee-4de81f9e53a4\") " pod="openstack/neutron-58b6b75955-wvrqr"
Nov 23 16:11:17 crc kubenswrapper[5050]: I1123 16:11:17.744586 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l9crz\" (UniqueName: \"kubernetes.io/projected/f8a537cf-2631-4d9c-8cee-4de81f9e53a4-kube-api-access-l9crz\") pod \"neutron-58b6b75955-wvrqr\" (UID: \"f8a537cf-2631-4d9c-8cee-4de81f9e53a4\") " pod="openstack/neutron-58b6b75955-wvrqr"
Nov 23 16:11:17 crc kubenswrapper[5050]: I1123 16:11:17.809486 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-58b6b75955-wvrqr"
Nov 23 16:11:18 crc kubenswrapper[5050]: I1123 16:11:18.215045 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-79b55cc78f-gq4jd"]
Nov 23 16:11:18 crc kubenswrapper[5050]: I1123 16:11:18.447838 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-58b6b75955-wvrqr"]
Nov 23 16:11:19 crc kubenswrapper[5050]: I1123 16:11:19.099241 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-58b6b75955-wvrqr" event={"ID":"f8a537cf-2631-4d9c-8cee-4de81f9e53a4","Type":"ContainerStarted","Data":"8c8595b0b915332fd94fc28d32672fece3f00e56d7be3320c7e828b3a47f8b25"}
Nov 23 16:11:19 crc kubenswrapper[5050]: I1123 16:11:19.099763 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-58b6b75955-wvrqr" event={"ID":"f8a537cf-2631-4d9c-8cee-4de81f9e53a4","Type":"ContainerStarted","Data":"11b55fa301092d22e458dab36a6b29287f5611bcc1ad840a4e0f4e96dc7cff0e"}
Nov 23 16:11:19 crc kubenswrapper[5050]: I1123 16:11:19.100584 5050 generic.go:334] "Generic (PLEG): container finished" podID="f2532463-ea34-450a-9c35-337de82cdef5" containerID="380dc681924a7646a4d712b56c74e3d9b24f950ff21ab0650121b8d90e83673a" exitCode=0
Nov 23 16:11:19 crc kubenswrapper[5050]: I1123 16:11:19.100619 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-79b55cc78f-gq4jd" event={"ID":"f2532463-ea34-450a-9c35-337de82cdef5","Type":"ContainerDied","Data":"380dc681924a7646a4d712b56c74e3d9b24f950ff21ab0650121b8d90e83673a"}
Nov 23 16:11:19 crc kubenswrapper[5050]: I1123 16:11:19.100641 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-79b55cc78f-gq4jd" event={"ID":"f2532463-ea34-450a-9c35-337de82cdef5","Type":"ContainerStarted","Data":"f7a6f7085558c6b05f0324c4751905ebcb9967b3ac10d8189093bd9d3beaa98d"}
Nov 23 16:11:20 crc kubenswrapper[5050]: I1123 16:11:20.116331 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-58b6b75955-wvrqr" event={"ID":"f8a537cf-2631-4d9c-8cee-4de81f9e53a4","Type":"ContainerStarted","Data":"f6f35787294084753841066d045d4a4e73fa917a41c7d374fa0631332c941e48"}
Nov 23 16:11:20 crc kubenswrapper[5050]: I1123 16:11:20.116922 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-58b6b75955-wvrqr"
Nov 23 16:11:20 crc kubenswrapper[5050]: I1123 16:11:20.119852 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-79b55cc78f-gq4jd" event={"ID":"f2532463-ea34-450a-9c35-337de82cdef5","Type":"ContainerStarted","Data":"b81fea2723f6f0fb471e7c6206c7cc5b1cfd453a9eead97ade91ac94b79c878e"}
Nov 23 16:11:20 crc kubenswrapper[5050]: I1123 16:11:20.120368 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-79b55cc78f-gq4jd"
Nov 23 16:11:20 crc kubenswrapper[5050]: I1123 16:11:20.150687 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-58b6b75955-wvrqr" podStartSLOduration=3.150658305 podStartE2EDuration="3.150658305s" podCreationTimestamp="2025-11-23 16:11:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 16:11:20.140343424 +0000 UTC m=+5375.307339919" watchObservedRunningTime="2025-11-23 16:11:20.150658305 +0000 UTC m=+5375.317654790"
Nov 23 16:11:20 crc kubenswrapper[5050]: I1123 16:11:20.186501 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-79b55cc78f-gq4jd" podStartSLOduration=3.186465656 podStartE2EDuration="3.186465656s" podCreationTimestamp="2025-11-23 16:11:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 16:11:20.180358333 +0000 UTC m=+5375.347354828" watchObservedRunningTime="2025-11-23 16:11:20.186465656 +0000 UTC m=+5375.353462141"
Nov 23 16:11:27 crc kubenswrapper[5050]: I1123 16:11:27.689724 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-79b55cc78f-gq4jd"
Nov 23 16:11:27 crc kubenswrapper[5050]: I1123 16:11:27.771298 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-55995c5bd5-m6qlj"]
Nov 23 16:11:27 crc kubenswrapper[5050]: I1123 16:11:27.771636 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-55995c5bd5-m6qlj" podUID="f5a32fd3-ee3a-4733-ab6d-92560b2dbd71" containerName="dnsmasq-dns" containerID="cri-o://d7663179605ac4be880286abb3618037a11f5166eec1e523180355db80284be2" gracePeriod=10
Nov 23 16:11:28 crc kubenswrapper[5050]: I1123 16:11:28.201903 5050 generic.go:334] "Generic (PLEG): container finished" podID="f5a32fd3-ee3a-4733-ab6d-92560b2dbd71" containerID="d7663179605ac4be880286abb3618037a11f5166eec1e523180355db80284be2" exitCode=0
Nov 23 16:11:28 crc kubenswrapper[5050]: I1123 16:11:28.202388 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55995c5bd5-m6qlj" event={"ID":"f5a32fd3-ee3a-4733-ab6d-92560b2dbd71","Type":"ContainerDied","Data":"d7663179605ac4be880286abb3618037a11f5166eec1e523180355db80284be2"}
Nov 23 16:11:28 crc kubenswrapper[5050]: I1123 16:11:28.202415 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55995c5bd5-m6qlj" event={"ID":"f5a32fd3-ee3a-4733-ab6d-92560b2dbd71","Type":"ContainerDied","Data":"8009aca9ffc244c4b0c0ba01ea987e4b043804ee247bcf9a9367aa3efc30998a"}
Nov 23 16:11:28 crc kubenswrapper[5050]: I1123 16:11:28.202426 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8009aca9ffc244c4b0c0ba01ea987e4b043804ee247bcf9a9367aa3efc30998a"
Nov 23 16:11:28 crc kubenswrapper[5050]: I1123 16:11:28.262712 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-55995c5bd5-m6qlj"
Nov 23 16:11:28 crc kubenswrapper[5050]: I1123 16:11:28.356739 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f5a32fd3-ee3a-4733-ab6d-92560b2dbd71-config\") pod \"f5a32fd3-ee3a-4733-ab6d-92560b2dbd71\" (UID: \"f5a32fd3-ee3a-4733-ab6d-92560b2dbd71\") "
Nov 23 16:11:28 crc kubenswrapper[5050]: I1123 16:11:28.357233 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vv7nh\" (UniqueName: \"kubernetes.io/projected/f5a32fd3-ee3a-4733-ab6d-92560b2dbd71-kube-api-access-vv7nh\") pod \"f5a32fd3-ee3a-4733-ab6d-92560b2dbd71\" (UID: \"f5a32fd3-ee3a-4733-ab6d-92560b2dbd71\") "
Nov 23 16:11:28 crc kubenswrapper[5050]: I1123 16:11:28.357373 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f5a32fd3-ee3a-4733-ab6d-92560b2dbd71-ovsdbserver-nb\") pod \"f5a32fd3-ee3a-4733-ab6d-92560b2dbd71\" (UID: \"f5a32fd3-ee3a-4733-ab6d-92560b2dbd71\") "
Nov 23 16:11:28 crc kubenswrapper[5050]: I1123 16:11:28.357612 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f5a32fd3-ee3a-4733-ab6d-92560b2dbd71-ovsdbserver-sb\") pod \"f5a32fd3-ee3a-4733-ab6d-92560b2dbd71\" (UID: \"f5a32fd3-ee3a-4733-ab6d-92560b2dbd71\") "
Nov 23 16:11:28 crc kubenswrapper[5050]: I1123 16:11:28.357820 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f5a32fd3-ee3a-4733-ab6d-92560b2dbd71-dns-svc\") pod \"f5a32fd3-ee3a-4733-ab6d-92560b2dbd71\" (UID: \"f5a32fd3-ee3a-4733-ab6d-92560b2dbd71\") "
Nov 23 16:11:28 crc kubenswrapper[5050]: I1123 16:11:28.387814 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f5a32fd3-ee3a-4733-ab6d-92560b2dbd71-kube-api-access-vv7nh" (OuterVolumeSpecName: "kube-api-access-vv7nh") pod "f5a32fd3-ee3a-4733-ab6d-92560b2dbd71" (UID: "f5a32fd3-ee3a-4733-ab6d-92560b2dbd71"). InnerVolumeSpecName "kube-api-access-vv7nh". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 23 16:11:28 crc kubenswrapper[5050]: I1123 16:11:28.403384 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f5a32fd3-ee3a-4733-ab6d-92560b2dbd71-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "f5a32fd3-ee3a-4733-ab6d-92560b2dbd71" (UID: "f5a32fd3-ee3a-4733-ab6d-92560b2dbd71"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 23 16:11:28 crc kubenswrapper[5050]: I1123 16:11:28.410000 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f5a32fd3-ee3a-4733-ab6d-92560b2dbd71-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "f5a32fd3-ee3a-4733-ab6d-92560b2dbd71" (UID: "f5a32fd3-ee3a-4733-ab6d-92560b2dbd71"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 23 16:11:28 crc kubenswrapper[5050]: I1123 16:11:28.421922 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f5a32fd3-ee3a-4733-ab6d-92560b2dbd71-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "f5a32fd3-ee3a-4733-ab6d-92560b2dbd71" (UID: "f5a32fd3-ee3a-4733-ab6d-92560b2dbd71"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 23 16:11:28 crc kubenswrapper[5050]: I1123 16:11:28.436795 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f5a32fd3-ee3a-4733-ab6d-92560b2dbd71-config" (OuterVolumeSpecName: "config") pod "f5a32fd3-ee3a-4733-ab6d-92560b2dbd71" (UID: "f5a32fd3-ee3a-4733-ab6d-92560b2dbd71"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 23 16:11:28 crc kubenswrapper[5050]: I1123 16:11:28.460494 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vv7nh\" (UniqueName: \"kubernetes.io/projected/f5a32fd3-ee3a-4733-ab6d-92560b2dbd71-kube-api-access-vv7nh\") on node \"crc\" DevicePath \"\""
Nov 23 16:11:28 crc kubenswrapper[5050]: I1123 16:11:28.460548 5050 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f5a32fd3-ee3a-4733-ab6d-92560b2dbd71-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Nov 23 16:11:28 crc kubenswrapper[5050]: I1123 16:11:28.460560 5050 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f5a32fd3-ee3a-4733-ab6d-92560b2dbd71-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Nov 23 16:11:28 crc kubenswrapper[5050]: I1123 16:11:28.460572 5050 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f5a32fd3-ee3a-4733-ab6d-92560b2dbd71-dns-svc\") on node \"crc\" DevicePath \"\""
Nov 23 16:11:28 crc kubenswrapper[5050]: I1123 16:11:28.460584 5050 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f5a32fd3-ee3a-4733-ab6d-92560b2dbd71-config\") on node \"crc\" DevicePath \"\""
Nov 23 16:11:29 crc kubenswrapper[5050]: I1123 16:11:29.210715 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-55995c5bd5-m6qlj"
Nov 23 16:11:29 crc kubenswrapper[5050]: I1123 16:11:29.224062 5050 patch_prober.go:28] interesting pod/machine-config-daemon-hlrlq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 23 16:11:29 crc kubenswrapper[5050]: I1123 16:11:29.224219 5050 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 23 16:11:29 crc kubenswrapper[5050]: I1123 16:11:29.245434 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-55995c5bd5-m6qlj"]
Nov 23 16:11:29 crc kubenswrapper[5050]: I1123 16:11:29.254442 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-55995c5bd5-m6qlj"]
Nov 23 16:11:29 crc kubenswrapper[5050]: I1123 16:11:29.567976 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f5a32fd3-ee3a-4733-ab6d-92560b2dbd71" path="/var/lib/kubelet/pods/f5a32fd3-ee3a-4733-ab6d-92560b2dbd71/volumes"
Nov 23 16:11:33 crc kubenswrapper[5050]: I1123 16:11:33.113388 5050 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-55995c5bd5-m6qlj" podUID="f5a32fd3-ee3a-4733-ab6d-92560b2dbd71" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.1.30:5353: i/o timeout"
Nov 23 16:11:47 crc kubenswrapper[5050]: I1123 16:11:47.831003 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-58b6b75955-wvrqr"
Nov 23 16:11:56 crc kubenswrapper[5050]: I1123 16:11:56.446089 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-create-b5m5p"]
Nov 23 16:11:56 crc kubenswrapper[5050]: E1123 16:11:56.447438 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f5a32fd3-ee3a-4733-ab6d-92560b2dbd71" containerName="dnsmasq-dns"
Nov 23 16:11:56 crc kubenswrapper[5050]: I1123 16:11:56.447461 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="f5a32fd3-ee3a-4733-ab6d-92560b2dbd71" containerName="dnsmasq-dns"
Nov 23 16:11:56 crc kubenswrapper[5050]: E1123 16:11:56.447509 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f5a32fd3-ee3a-4733-ab6d-92560b2dbd71" containerName="init"
Nov 23 16:11:56 crc kubenswrapper[5050]: I1123 16:11:56.447518 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="f5a32fd3-ee3a-4733-ab6d-92560b2dbd71" containerName="init"
Nov 23 16:11:56 crc kubenswrapper[5050]: I1123 16:11:56.448331 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="f5a32fd3-ee3a-4733-ab6d-92560b2dbd71" containerName="dnsmasq-dns"
Nov 23 16:11:56 crc kubenswrapper[5050]: I1123 16:11:56.449223 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-create-b5m5p" Nov 23 16:11:56 crc kubenswrapper[5050]: I1123 16:11:56.457642 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-b5m5p"] Nov 23 16:11:56 crc kubenswrapper[5050]: I1123 16:11:56.519522 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0702c0ea-259a-411a-994b-47da1140eea2-operator-scripts\") pod \"glance-db-create-b5m5p\" (UID: \"0702c0ea-259a-411a-994b-47da1140eea2\") " pod="openstack/glance-db-create-b5m5p" Nov 23 16:11:56 crc kubenswrapper[5050]: I1123 16:11:56.520068 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dwhjl\" (UniqueName: \"kubernetes.io/projected/0702c0ea-259a-411a-994b-47da1140eea2-kube-api-access-dwhjl\") pod \"glance-db-create-b5m5p\" (UID: \"0702c0ea-259a-411a-994b-47da1140eea2\") " pod="openstack/glance-db-create-b5m5p" Nov 23 16:11:56 crc kubenswrapper[5050]: I1123 16:11:56.579353 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-d123-account-create-vrjwv"] Nov 23 16:11:56 crc kubenswrapper[5050]: I1123 16:11:56.581798 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-d123-account-create-vrjwv" Nov 23 16:11:56 crc kubenswrapper[5050]: I1123 16:11:56.584534 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-db-secret" Nov 23 16:11:56 crc kubenswrapper[5050]: I1123 16:11:56.588741 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-d123-account-create-vrjwv"] Nov 23 16:11:56 crc kubenswrapper[5050]: I1123 16:11:56.622374 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dwhjl\" (UniqueName: \"kubernetes.io/projected/0702c0ea-259a-411a-994b-47da1140eea2-kube-api-access-dwhjl\") pod \"glance-db-create-b5m5p\" (UID: \"0702c0ea-259a-411a-994b-47da1140eea2\") " pod="openstack/glance-db-create-b5m5p" Nov 23 16:11:56 crc kubenswrapper[5050]: I1123 16:11:56.622997 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2pdmr\" (UniqueName: \"kubernetes.io/projected/571ac77e-9976-4957-a214-035ad47339d8-kube-api-access-2pdmr\") pod \"glance-d123-account-create-vrjwv\" (UID: \"571ac77e-9976-4957-a214-035ad47339d8\") " pod="openstack/glance-d123-account-create-vrjwv" Nov 23 16:11:56 crc kubenswrapper[5050]: I1123 16:11:56.623508 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0702c0ea-259a-411a-994b-47da1140eea2-operator-scripts\") pod \"glance-db-create-b5m5p\" (UID: \"0702c0ea-259a-411a-994b-47da1140eea2\") " pod="openstack/glance-db-create-b5m5p" Nov 23 16:11:56 crc kubenswrapper[5050]: I1123 16:11:56.624626 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/571ac77e-9976-4957-a214-035ad47339d8-operator-scripts\") pod \"glance-d123-account-create-vrjwv\" (UID: \"571ac77e-9976-4957-a214-035ad47339d8\") " pod="openstack/glance-d123-account-create-vrjwv" Nov 23 16:11:56 crc kubenswrapper[5050]: I1123 16:11:56.624774 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: 
\"kubernetes.io/configmap/0702c0ea-259a-411a-994b-47da1140eea2-operator-scripts\") pod \"glance-db-create-b5m5p\" (UID: \"0702c0ea-259a-411a-994b-47da1140eea2\") " pod="openstack/glance-db-create-b5m5p" Nov 23 16:11:56 crc kubenswrapper[5050]: I1123 16:11:56.646413 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dwhjl\" (UniqueName: \"kubernetes.io/projected/0702c0ea-259a-411a-994b-47da1140eea2-kube-api-access-dwhjl\") pod \"glance-db-create-b5m5p\" (UID: \"0702c0ea-259a-411a-994b-47da1140eea2\") " pod="openstack/glance-db-create-b5m5p" Nov 23 16:11:56 crc kubenswrapper[5050]: I1123 16:11:56.727146 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2pdmr\" (UniqueName: \"kubernetes.io/projected/571ac77e-9976-4957-a214-035ad47339d8-kube-api-access-2pdmr\") pod \"glance-d123-account-create-vrjwv\" (UID: \"571ac77e-9976-4957-a214-035ad47339d8\") " pod="openstack/glance-d123-account-create-vrjwv" Nov 23 16:11:56 crc kubenswrapper[5050]: I1123 16:11:56.727239 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/571ac77e-9976-4957-a214-035ad47339d8-operator-scripts\") pod \"glance-d123-account-create-vrjwv\" (UID: \"571ac77e-9976-4957-a214-035ad47339d8\") " pod="openstack/glance-d123-account-create-vrjwv" Nov 23 16:11:56 crc kubenswrapper[5050]: I1123 16:11:56.728469 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/571ac77e-9976-4957-a214-035ad47339d8-operator-scripts\") pod \"glance-d123-account-create-vrjwv\" (UID: \"571ac77e-9976-4957-a214-035ad47339d8\") " pod="openstack/glance-d123-account-create-vrjwv" Nov 23 16:11:56 crc kubenswrapper[5050]: I1123 16:11:56.771303 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2pdmr\" (UniqueName: \"kubernetes.io/projected/571ac77e-9976-4957-a214-035ad47339d8-kube-api-access-2pdmr\") pod \"glance-d123-account-create-vrjwv\" (UID: \"571ac77e-9976-4957-a214-035ad47339d8\") " pod="openstack/glance-d123-account-create-vrjwv" Nov 23 16:11:56 crc kubenswrapper[5050]: I1123 16:11:56.779026 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-b5m5p" Nov 23 16:11:56 crc kubenswrapper[5050]: I1123 16:11:56.900644 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-d123-account-create-vrjwv" Nov 23 16:11:57 crc kubenswrapper[5050]: I1123 16:11:57.467367 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-b5m5p"] Nov 23 16:11:57 crc kubenswrapper[5050]: I1123 16:11:57.570828 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-b5m5p" event={"ID":"0702c0ea-259a-411a-994b-47da1140eea2","Type":"ContainerStarted","Data":"d17ac5c37c29a48f9330bc0a92865ef7b3a5d58c8f8fa1a297746bc2fd407cd3"} Nov 23 16:11:57 crc kubenswrapper[5050]: W1123 16:11:57.634937 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod571ac77e_9976_4957_a214_035ad47339d8.slice/crio-ce8112265bb552fa8469281cbb8cc155250a657d1ebb2e87556af2de6df4dce1 WatchSource:0}: Error finding container ce8112265bb552fa8469281cbb8cc155250a657d1ebb2e87556af2de6df4dce1: Status 404 returned error can't find the container with id ce8112265bb552fa8469281cbb8cc155250a657d1ebb2e87556af2de6df4dce1 Nov 23 16:11:57 crc kubenswrapper[5050]: I1123 16:11:57.635887 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-d123-account-create-vrjwv"] Nov 23 16:11:58 crc kubenswrapper[5050]: I1123 16:11:58.584979 5050 generic.go:334] "Generic (PLEG): container finished" podID="0702c0ea-259a-411a-994b-47da1140eea2" containerID="f89498a27c03b95319123445e203cf9965a3db21fba29a4926e46a8623aeca3b" exitCode=0 Nov 23 16:11:58 crc kubenswrapper[5050]: I1123 16:11:58.585161 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-b5m5p" event={"ID":"0702c0ea-259a-411a-994b-47da1140eea2","Type":"ContainerDied","Data":"f89498a27c03b95319123445e203cf9965a3db21fba29a4926e46a8623aeca3b"} Nov 23 16:11:58 crc kubenswrapper[5050]: I1123 16:11:58.588295 5050 generic.go:334] "Generic (PLEG): container finished" podID="571ac77e-9976-4957-a214-035ad47339d8" containerID="4ea39488e0657df8a49260e7ffa852bf3494ba997b3337ca5f7daa319b480709" exitCode=0 Nov 23 16:11:58 crc kubenswrapper[5050]: I1123 16:11:58.588434 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-d123-account-create-vrjwv" event={"ID":"571ac77e-9976-4957-a214-035ad47339d8","Type":"ContainerDied","Data":"4ea39488e0657df8a49260e7ffa852bf3494ba997b3337ca5f7daa319b480709"} Nov 23 16:11:58 crc kubenswrapper[5050]: I1123 16:11:58.588512 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-d123-account-create-vrjwv" event={"ID":"571ac77e-9976-4957-a214-035ad47339d8","Type":"ContainerStarted","Data":"ce8112265bb552fa8469281cbb8cc155250a657d1ebb2e87556af2de6df4dce1"} Nov 23 16:11:59 crc kubenswrapper[5050]: I1123 16:11:59.224871 5050 patch_prober.go:28] interesting pod/machine-config-daemon-hlrlq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 23 16:11:59 crc kubenswrapper[5050]: I1123 16:11:59.224973 5050 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 23 16:11:59 crc kubenswrapper[5050]: I1123 16:11:59.225055 5050 kubelet.go:2542] "SyncLoop (probe)" 
probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" Nov 23 16:11:59 crc kubenswrapper[5050]: I1123 16:11:59.226715 5050 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"b87f9772414b4630105844191caea96d4b6e03191eb9f84073781caf3da21f1e"} pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 23 16:11:59 crc kubenswrapper[5050]: I1123 16:11:59.226865 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" containerID="cri-o://b87f9772414b4630105844191caea96d4b6e03191eb9f84073781caf3da21f1e" gracePeriod=600 Nov 23 16:11:59 crc kubenswrapper[5050]: I1123 16:11:59.602550 5050 generic.go:334] "Generic (PLEG): container finished" podID="1d998909-9470-47ef-87e8-b34f0473682f" containerID="b87f9772414b4630105844191caea96d4b6e03191eb9f84073781caf3da21f1e" exitCode=0 Nov 23 16:11:59 crc kubenswrapper[5050]: I1123 16:11:59.602606 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" event={"ID":"1d998909-9470-47ef-87e8-b34f0473682f","Type":"ContainerDied","Data":"b87f9772414b4630105844191caea96d4b6e03191eb9f84073781caf3da21f1e"} Nov 23 16:11:59 crc kubenswrapper[5050]: I1123 16:11:59.603080 5050 scope.go:117] "RemoveContainer" containerID="3fc70d33f56eea45a48b3911893d3dc4f60ad925817000cb324b1423c665b64b" Nov 23 16:12:00 crc kubenswrapper[5050]: I1123 16:12:00.069196 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-b5m5p" Nov 23 16:12:00 crc kubenswrapper[5050]: I1123 16:12:00.084589 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-d123-account-create-vrjwv" Nov 23 16:12:00 crc kubenswrapper[5050]: I1123 16:12:00.208994 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/571ac77e-9976-4957-a214-035ad47339d8-operator-scripts\") pod \"571ac77e-9976-4957-a214-035ad47339d8\" (UID: \"571ac77e-9976-4957-a214-035ad47339d8\") " Nov 23 16:12:00 crc kubenswrapper[5050]: I1123 16:12:00.209115 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2pdmr\" (UniqueName: \"kubernetes.io/projected/571ac77e-9976-4957-a214-035ad47339d8-kube-api-access-2pdmr\") pod \"571ac77e-9976-4957-a214-035ad47339d8\" (UID: \"571ac77e-9976-4957-a214-035ad47339d8\") " Nov 23 16:12:00 crc kubenswrapper[5050]: I1123 16:12:00.209192 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0702c0ea-259a-411a-994b-47da1140eea2-operator-scripts\") pod \"0702c0ea-259a-411a-994b-47da1140eea2\" (UID: \"0702c0ea-259a-411a-994b-47da1140eea2\") " Nov 23 16:12:00 crc kubenswrapper[5050]: I1123 16:12:00.209247 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dwhjl\" (UniqueName: \"kubernetes.io/projected/0702c0ea-259a-411a-994b-47da1140eea2-kube-api-access-dwhjl\") pod \"0702c0ea-259a-411a-994b-47da1140eea2\" (UID: \"0702c0ea-259a-411a-994b-47da1140eea2\") " Nov 23 16:12:00 crc kubenswrapper[5050]: I1123 16:12:00.209557 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/571ac77e-9976-4957-a214-035ad47339d8-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "571ac77e-9976-4957-a214-035ad47339d8" (UID: "571ac77e-9976-4957-a214-035ad47339d8"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 16:12:00 crc kubenswrapper[5050]: I1123 16:12:00.210642 5050 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/571ac77e-9976-4957-a214-035ad47339d8-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 23 16:12:00 crc kubenswrapper[5050]: I1123 16:12:00.210704 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0702c0ea-259a-411a-994b-47da1140eea2-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "0702c0ea-259a-411a-994b-47da1140eea2" (UID: "0702c0ea-259a-411a-994b-47da1140eea2"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 16:12:00 crc kubenswrapper[5050]: I1123 16:12:00.216375 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0702c0ea-259a-411a-994b-47da1140eea2-kube-api-access-dwhjl" (OuterVolumeSpecName: "kube-api-access-dwhjl") pod "0702c0ea-259a-411a-994b-47da1140eea2" (UID: "0702c0ea-259a-411a-994b-47da1140eea2"). InnerVolumeSpecName "kube-api-access-dwhjl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 16:12:00 crc kubenswrapper[5050]: I1123 16:12:00.216438 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/571ac77e-9976-4957-a214-035ad47339d8-kube-api-access-2pdmr" (OuterVolumeSpecName: "kube-api-access-2pdmr") pod "571ac77e-9976-4957-a214-035ad47339d8" (UID: "571ac77e-9976-4957-a214-035ad47339d8"). 
InnerVolumeSpecName "kube-api-access-2pdmr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 16:12:00 crc kubenswrapper[5050]: I1123 16:12:00.344407 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2pdmr\" (UniqueName: \"kubernetes.io/projected/571ac77e-9976-4957-a214-035ad47339d8-kube-api-access-2pdmr\") on node \"crc\" DevicePath \"\"" Nov 23 16:12:00 crc kubenswrapper[5050]: I1123 16:12:00.344510 5050 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0702c0ea-259a-411a-994b-47da1140eea2-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 23 16:12:00 crc kubenswrapper[5050]: I1123 16:12:00.344527 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dwhjl\" (UniqueName: \"kubernetes.io/projected/0702c0ea-259a-411a-994b-47da1140eea2-kube-api-access-dwhjl\") on node \"crc\" DevicePath \"\"" Nov 23 16:12:00 crc kubenswrapper[5050]: I1123 16:12:00.618743 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-b5m5p" event={"ID":"0702c0ea-259a-411a-994b-47da1140eea2","Type":"ContainerDied","Data":"d17ac5c37c29a48f9330bc0a92865ef7b3a5d58c8f8fa1a297746bc2fd407cd3"} Nov 23 16:12:00 crc kubenswrapper[5050]: I1123 16:12:00.618799 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d17ac5c37c29a48f9330bc0a92865ef7b3a5d58c8f8fa1a297746bc2fd407cd3" Nov 23 16:12:00 crc kubenswrapper[5050]: I1123 16:12:00.618870 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-b5m5p" Nov 23 16:12:00 crc kubenswrapper[5050]: I1123 16:12:00.621141 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" event={"ID":"1d998909-9470-47ef-87e8-b34f0473682f","Type":"ContainerStarted","Data":"131b4bb8956deb8c3bd7bd951e055fca3b66c01a0141c3f85cb0b1152065de00"} Nov 23 16:12:00 crc kubenswrapper[5050]: I1123 16:12:00.629052 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-d123-account-create-vrjwv" event={"ID":"571ac77e-9976-4957-a214-035ad47339d8","Type":"ContainerDied","Data":"ce8112265bb552fa8469281cbb8cc155250a657d1ebb2e87556af2de6df4dce1"} Nov 23 16:12:00 crc kubenswrapper[5050]: I1123 16:12:00.629111 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ce8112265bb552fa8469281cbb8cc155250a657d1ebb2e87556af2de6df4dce1" Nov 23 16:12:00 crc kubenswrapper[5050]: I1123 16:12:00.629132 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-d123-account-create-vrjwv" Nov 23 16:12:01 crc kubenswrapper[5050]: I1123 16:12:01.806366 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-sync-rwxg2"] Nov 23 16:12:01 crc kubenswrapper[5050]: E1123 16:12:01.815505 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="571ac77e-9976-4957-a214-035ad47339d8" containerName="mariadb-account-create" Nov 23 16:12:01 crc kubenswrapper[5050]: I1123 16:12:01.815599 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="571ac77e-9976-4957-a214-035ad47339d8" containerName="mariadb-account-create" Nov 23 16:12:01 crc kubenswrapper[5050]: E1123 16:12:01.815695 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0702c0ea-259a-411a-994b-47da1140eea2" containerName="mariadb-database-create" Nov 23 16:12:01 crc kubenswrapper[5050]: I1123 16:12:01.815746 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="0702c0ea-259a-411a-994b-47da1140eea2" containerName="mariadb-database-create" Nov 23 16:12:01 crc kubenswrapper[5050]: I1123 16:12:01.815990 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="571ac77e-9976-4957-a214-035ad47339d8" containerName="mariadb-account-create" Nov 23 16:12:01 crc kubenswrapper[5050]: I1123 16:12:01.816051 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="0702c0ea-259a-411a-994b-47da1140eea2" containerName="mariadb-database-create" Nov 23 16:12:01 crc kubenswrapper[5050]: I1123 16:12:01.816910 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-rwxg2" Nov 23 16:12:01 crc kubenswrapper[5050]: I1123 16:12:01.820184 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-pxmqj" Nov 23 16:12:01 crc kubenswrapper[5050]: I1123 16:12:01.820557 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-config-data" Nov 23 16:12:01 crc kubenswrapper[5050]: I1123 16:12:01.821283 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-rwxg2"] Nov 23 16:12:01 crc kubenswrapper[5050]: I1123 16:12:01.985265 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/94b5a9ae-4d31-4397-bec3-313a3012f2b8-db-sync-config-data\") pod \"glance-db-sync-rwxg2\" (UID: \"94b5a9ae-4d31-4397-bec3-313a3012f2b8\") " pod="openstack/glance-db-sync-rwxg2" Nov 23 16:12:01 crc kubenswrapper[5050]: I1123 16:12:01.985788 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/94b5a9ae-4d31-4397-bec3-313a3012f2b8-config-data\") pod \"glance-db-sync-rwxg2\" (UID: \"94b5a9ae-4d31-4397-bec3-313a3012f2b8\") " pod="openstack/glance-db-sync-rwxg2" Nov 23 16:12:01 crc kubenswrapper[5050]: I1123 16:12:01.986062 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/94b5a9ae-4d31-4397-bec3-313a3012f2b8-combined-ca-bundle\") pod \"glance-db-sync-rwxg2\" (UID: \"94b5a9ae-4d31-4397-bec3-313a3012f2b8\") " pod="openstack/glance-db-sync-rwxg2" Nov 23 16:12:01 crc kubenswrapper[5050]: I1123 16:12:01.986250 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p2q8q\" (UniqueName: 
\"kubernetes.io/projected/94b5a9ae-4d31-4397-bec3-313a3012f2b8-kube-api-access-p2q8q\") pod \"glance-db-sync-rwxg2\" (UID: \"94b5a9ae-4d31-4397-bec3-313a3012f2b8\") " pod="openstack/glance-db-sync-rwxg2" Nov 23 16:12:02 crc kubenswrapper[5050]: I1123 16:12:02.088398 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p2q8q\" (UniqueName: \"kubernetes.io/projected/94b5a9ae-4d31-4397-bec3-313a3012f2b8-kube-api-access-p2q8q\") pod \"glance-db-sync-rwxg2\" (UID: \"94b5a9ae-4d31-4397-bec3-313a3012f2b8\") " pod="openstack/glance-db-sync-rwxg2" Nov 23 16:12:02 crc kubenswrapper[5050]: I1123 16:12:02.088538 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/94b5a9ae-4d31-4397-bec3-313a3012f2b8-db-sync-config-data\") pod \"glance-db-sync-rwxg2\" (UID: \"94b5a9ae-4d31-4397-bec3-313a3012f2b8\") " pod="openstack/glance-db-sync-rwxg2" Nov 23 16:12:02 crc kubenswrapper[5050]: I1123 16:12:02.088565 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/94b5a9ae-4d31-4397-bec3-313a3012f2b8-config-data\") pod \"glance-db-sync-rwxg2\" (UID: \"94b5a9ae-4d31-4397-bec3-313a3012f2b8\") " pod="openstack/glance-db-sync-rwxg2" Nov 23 16:12:02 crc kubenswrapper[5050]: I1123 16:12:02.088628 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/94b5a9ae-4d31-4397-bec3-313a3012f2b8-combined-ca-bundle\") pod \"glance-db-sync-rwxg2\" (UID: \"94b5a9ae-4d31-4397-bec3-313a3012f2b8\") " pod="openstack/glance-db-sync-rwxg2" Nov 23 16:12:02 crc kubenswrapper[5050]: I1123 16:12:02.095948 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/94b5a9ae-4d31-4397-bec3-313a3012f2b8-db-sync-config-data\") pod \"glance-db-sync-rwxg2\" (UID: \"94b5a9ae-4d31-4397-bec3-313a3012f2b8\") " pod="openstack/glance-db-sync-rwxg2" Nov 23 16:12:02 crc kubenswrapper[5050]: I1123 16:12:02.096637 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/94b5a9ae-4d31-4397-bec3-313a3012f2b8-combined-ca-bundle\") pod \"glance-db-sync-rwxg2\" (UID: \"94b5a9ae-4d31-4397-bec3-313a3012f2b8\") " pod="openstack/glance-db-sync-rwxg2" Nov 23 16:12:02 crc kubenswrapper[5050]: I1123 16:12:02.096959 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/94b5a9ae-4d31-4397-bec3-313a3012f2b8-config-data\") pod \"glance-db-sync-rwxg2\" (UID: \"94b5a9ae-4d31-4397-bec3-313a3012f2b8\") " pod="openstack/glance-db-sync-rwxg2" Nov 23 16:12:02 crc kubenswrapper[5050]: I1123 16:12:02.112839 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p2q8q\" (UniqueName: \"kubernetes.io/projected/94b5a9ae-4d31-4397-bec3-313a3012f2b8-kube-api-access-p2q8q\") pod \"glance-db-sync-rwxg2\" (UID: \"94b5a9ae-4d31-4397-bec3-313a3012f2b8\") " pod="openstack/glance-db-sync-rwxg2" Nov 23 16:12:02 crc kubenswrapper[5050]: I1123 16:12:02.164160 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-rwxg2" Nov 23 16:12:02 crc kubenswrapper[5050]: W1123 16:12:02.816157 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod94b5a9ae_4d31_4397_bec3_313a3012f2b8.slice/crio-03c1883267b664bdcbb7646c7d99ff27436e78cf533ff1970f444e57401449da WatchSource:0}: Error finding container 03c1883267b664bdcbb7646c7d99ff27436e78cf533ff1970f444e57401449da: Status 404 returned error can't find the container with id 03c1883267b664bdcbb7646c7d99ff27436e78cf533ff1970f444e57401449da Nov 23 16:12:02 crc kubenswrapper[5050]: I1123 16:12:02.817920 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-rwxg2"] Nov 23 16:12:03 crc kubenswrapper[5050]: I1123 16:12:03.657933 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-rwxg2" event={"ID":"94b5a9ae-4d31-4397-bec3-313a3012f2b8","Type":"ContainerStarted","Data":"6acb5eb612d76d8b9edb1f3a7e88d2fd0a36a42b5a47f8b9f2c6bd3f1cd24b4c"} Nov 23 16:12:03 crc kubenswrapper[5050]: I1123 16:12:03.658461 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-rwxg2" event={"ID":"94b5a9ae-4d31-4397-bec3-313a3012f2b8","Type":"ContainerStarted","Data":"03c1883267b664bdcbb7646c7d99ff27436e78cf533ff1970f444e57401449da"} Nov 23 16:12:03 crc kubenswrapper[5050]: I1123 16:12:03.676501 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-sync-rwxg2" podStartSLOduration=2.676437087 podStartE2EDuration="2.676437087s" podCreationTimestamp="2025-11-23 16:12:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 16:12:03.675659845 +0000 UTC m=+5418.842656420" watchObservedRunningTime="2025-11-23 16:12:03.676437087 +0000 UTC m=+5418.843433582" Nov 23 16:12:07 crc kubenswrapper[5050]: I1123 16:12:07.707742 5050 generic.go:334] "Generic (PLEG): container finished" podID="94b5a9ae-4d31-4397-bec3-313a3012f2b8" containerID="6acb5eb612d76d8b9edb1f3a7e88d2fd0a36a42b5a47f8b9f2c6bd3f1cd24b4c" exitCode=0 Nov 23 16:12:07 crc kubenswrapper[5050]: I1123 16:12:07.707891 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-rwxg2" event={"ID":"94b5a9ae-4d31-4397-bec3-313a3012f2b8","Type":"ContainerDied","Data":"6acb5eb612d76d8b9edb1f3a7e88d2fd0a36a42b5a47f8b9f2c6bd3f1cd24b4c"} Nov 23 16:12:09 crc kubenswrapper[5050]: I1123 16:12:09.127754 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-rwxg2" Nov 23 16:12:09 crc kubenswrapper[5050]: I1123 16:12:09.169652 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p2q8q\" (UniqueName: \"kubernetes.io/projected/94b5a9ae-4d31-4397-bec3-313a3012f2b8-kube-api-access-p2q8q\") pod \"94b5a9ae-4d31-4397-bec3-313a3012f2b8\" (UID: \"94b5a9ae-4d31-4397-bec3-313a3012f2b8\") " Nov 23 16:12:09 crc kubenswrapper[5050]: I1123 16:12:09.169863 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/94b5a9ae-4d31-4397-bec3-313a3012f2b8-config-data\") pod \"94b5a9ae-4d31-4397-bec3-313a3012f2b8\" (UID: \"94b5a9ae-4d31-4397-bec3-313a3012f2b8\") " Nov 23 16:12:09 crc kubenswrapper[5050]: I1123 16:12:09.169932 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/94b5a9ae-4d31-4397-bec3-313a3012f2b8-db-sync-config-data\") pod \"94b5a9ae-4d31-4397-bec3-313a3012f2b8\" (UID: \"94b5a9ae-4d31-4397-bec3-313a3012f2b8\") " Nov 23 16:12:09 crc kubenswrapper[5050]: I1123 16:12:09.170126 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/94b5a9ae-4d31-4397-bec3-313a3012f2b8-combined-ca-bundle\") pod \"94b5a9ae-4d31-4397-bec3-313a3012f2b8\" (UID: \"94b5a9ae-4d31-4397-bec3-313a3012f2b8\") " Nov 23 16:12:09 crc kubenswrapper[5050]: I1123 16:12:09.176488 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/94b5a9ae-4d31-4397-bec3-313a3012f2b8-kube-api-access-p2q8q" (OuterVolumeSpecName: "kube-api-access-p2q8q") pod "94b5a9ae-4d31-4397-bec3-313a3012f2b8" (UID: "94b5a9ae-4d31-4397-bec3-313a3012f2b8"). InnerVolumeSpecName "kube-api-access-p2q8q". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 16:12:09 crc kubenswrapper[5050]: I1123 16:12:09.183662 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/94b5a9ae-4d31-4397-bec3-313a3012f2b8-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "94b5a9ae-4d31-4397-bec3-313a3012f2b8" (UID: "94b5a9ae-4d31-4397-bec3-313a3012f2b8"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:12:09 crc kubenswrapper[5050]: I1123 16:12:09.195119 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/94b5a9ae-4d31-4397-bec3-313a3012f2b8-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "94b5a9ae-4d31-4397-bec3-313a3012f2b8" (UID: "94b5a9ae-4d31-4397-bec3-313a3012f2b8"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:12:09 crc kubenswrapper[5050]: I1123 16:12:09.236591 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/94b5a9ae-4d31-4397-bec3-313a3012f2b8-config-data" (OuterVolumeSpecName: "config-data") pod "94b5a9ae-4d31-4397-bec3-313a3012f2b8" (UID: "94b5a9ae-4d31-4397-bec3-313a3012f2b8"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:12:09 crc kubenswrapper[5050]: I1123 16:12:09.272008 5050 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/94b5a9ae-4d31-4397-bec3-313a3012f2b8-config-data\") on node \"crc\" DevicePath \"\"" Nov 23 16:12:09 crc kubenswrapper[5050]: I1123 16:12:09.272348 5050 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/94b5a9ae-4d31-4397-bec3-313a3012f2b8-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 23 16:12:09 crc kubenswrapper[5050]: I1123 16:12:09.272415 5050 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/94b5a9ae-4d31-4397-bec3-313a3012f2b8-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 16:12:09 crc kubenswrapper[5050]: I1123 16:12:09.272496 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p2q8q\" (UniqueName: \"kubernetes.io/projected/94b5a9ae-4d31-4397-bec3-313a3012f2b8-kube-api-access-p2q8q\") on node \"crc\" DevicePath \"\"" Nov 23 16:12:09 crc kubenswrapper[5050]: I1123 16:12:09.727505 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-rwxg2" event={"ID":"94b5a9ae-4d31-4397-bec3-313a3012f2b8","Type":"ContainerDied","Data":"03c1883267b664bdcbb7646c7d99ff27436e78cf533ff1970f444e57401449da"} Nov 23 16:12:09 crc kubenswrapper[5050]: I1123 16:12:09.727897 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="03c1883267b664bdcbb7646c7d99ff27436e78cf533ff1970f444e57401449da" Nov 23 16:12:09 crc kubenswrapper[5050]: I1123 16:12:09.727850 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-rwxg2" Nov 23 16:12:10 crc kubenswrapper[5050]: I1123 16:12:10.051472 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Nov 23 16:12:10 crc kubenswrapper[5050]: E1123 16:12:10.051889 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="94b5a9ae-4d31-4397-bec3-313a3012f2b8" containerName="glance-db-sync" Nov 23 16:12:10 crc kubenswrapper[5050]: I1123 16:12:10.051900 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="94b5a9ae-4d31-4397-bec3-313a3012f2b8" containerName="glance-db-sync" Nov 23 16:12:10 crc kubenswrapper[5050]: I1123 16:12:10.052085 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="94b5a9ae-4d31-4397-bec3-313a3012f2b8" containerName="glance-db-sync" Nov 23 16:12:10 crc kubenswrapper[5050]: I1123 16:12:10.053774 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 23 16:12:10 crc kubenswrapper[5050]: I1123 16:12:10.057040 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Nov 23 16:12:10 crc kubenswrapper[5050]: I1123 16:12:10.057274 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Nov 23 16:12:10 crc kubenswrapper[5050]: I1123 16:12:10.057476 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Nov 23 16:12:10 crc kubenswrapper[5050]: I1123 16:12:10.057716 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-pxmqj" Nov 23 16:12:10 crc kubenswrapper[5050]: I1123 16:12:10.081302 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 23 16:12:10 crc kubenswrapper[5050]: I1123 16:12:10.093571 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cfc26537-aa18-4da2-bcaf-baedff2b7ba5-scripts\") pod \"glance-default-external-api-0\" (UID: \"cfc26537-aa18-4da2-bcaf-baedff2b7ba5\") " pod="openstack/glance-default-external-api-0" Nov 23 16:12:10 crc kubenswrapper[5050]: I1123 16:12:10.093624 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/cfc26537-aa18-4da2-bcaf-baedff2b7ba5-ceph\") pod \"glance-default-external-api-0\" (UID: \"cfc26537-aa18-4da2-bcaf-baedff2b7ba5\") " pod="openstack/glance-default-external-api-0" Nov 23 16:12:10 crc kubenswrapper[5050]: I1123 16:12:10.093655 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/cfc26537-aa18-4da2-bcaf-baedff2b7ba5-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"cfc26537-aa18-4da2-bcaf-baedff2b7ba5\") " pod="openstack/glance-default-external-api-0" Nov 23 16:12:10 crc kubenswrapper[5050]: I1123 16:12:10.093684 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cfc26537-aa18-4da2-bcaf-baedff2b7ba5-config-data\") pod \"glance-default-external-api-0\" (UID: \"cfc26537-aa18-4da2-bcaf-baedff2b7ba5\") " pod="openstack/glance-default-external-api-0" Nov 23 16:12:10 crc kubenswrapper[5050]: I1123 16:12:10.093721 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cfc26537-aa18-4da2-bcaf-baedff2b7ba5-logs\") pod \"glance-default-external-api-0\" (UID: \"cfc26537-aa18-4da2-bcaf-baedff2b7ba5\") " pod="openstack/glance-default-external-api-0" Nov 23 16:12:10 crc kubenswrapper[5050]: I1123 16:12:10.093744 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tcz7v\" (UniqueName: \"kubernetes.io/projected/cfc26537-aa18-4da2-bcaf-baedff2b7ba5-kube-api-access-tcz7v\") pod \"glance-default-external-api-0\" (UID: \"cfc26537-aa18-4da2-bcaf-baedff2b7ba5\") " pod="openstack/glance-default-external-api-0" Nov 23 16:12:10 crc kubenswrapper[5050]: I1123 16:12:10.093787 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/cfc26537-aa18-4da2-bcaf-baedff2b7ba5-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"cfc26537-aa18-4da2-bcaf-baedff2b7ba5\") " pod="openstack/glance-default-external-api-0" Nov 23 16:12:10 crc kubenswrapper[5050]: I1123 16:12:10.169000 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-85f66497c-m9hrl"] Nov 23 16:12:10 crc kubenswrapper[5050]: I1123 16:12:10.170485 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-85f66497c-m9hrl" Nov 23 16:12:10 crc kubenswrapper[5050]: I1123 16:12:10.186509 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-85f66497c-m9hrl"] Nov 23 16:12:10 crc kubenswrapper[5050]: I1123 16:12:10.198304 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x8blw\" (UniqueName: \"kubernetes.io/projected/e1ac5e24-f6d7-4e99-94fb-4952a7cc06fd-kube-api-access-x8blw\") pod \"dnsmasq-dns-85f66497c-m9hrl\" (UID: \"e1ac5e24-f6d7-4e99-94fb-4952a7cc06fd\") " pod="openstack/dnsmasq-dns-85f66497c-m9hrl" Nov 23 16:12:10 crc kubenswrapper[5050]: I1123 16:12:10.198357 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e1ac5e24-f6d7-4e99-94fb-4952a7cc06fd-ovsdbserver-sb\") pod \"dnsmasq-dns-85f66497c-m9hrl\" (UID: \"e1ac5e24-f6d7-4e99-94fb-4952a7cc06fd\") " pod="openstack/dnsmasq-dns-85f66497c-m9hrl" Nov 23 16:12:10 crc kubenswrapper[5050]: I1123 16:12:10.198581 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cfc26537-aa18-4da2-bcaf-baedff2b7ba5-scripts\") pod \"glance-default-external-api-0\" (UID: \"cfc26537-aa18-4da2-bcaf-baedff2b7ba5\") " pod="openstack/glance-default-external-api-0" Nov 23 16:12:10 crc kubenswrapper[5050]: I1123 16:12:10.199638 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/cfc26537-aa18-4da2-bcaf-baedff2b7ba5-ceph\") pod \"glance-default-external-api-0\" (UID: \"cfc26537-aa18-4da2-bcaf-baedff2b7ba5\") " pod="openstack/glance-default-external-api-0" Nov 23 16:12:10 crc kubenswrapper[5050]: I1123 16:12:10.199696 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/cfc26537-aa18-4da2-bcaf-baedff2b7ba5-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"cfc26537-aa18-4da2-bcaf-baedff2b7ba5\") " pod="openstack/glance-default-external-api-0" Nov 23 16:12:10 crc kubenswrapper[5050]: I1123 16:12:10.199718 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cfc26537-aa18-4da2-bcaf-baedff2b7ba5-config-data\") pod \"glance-default-external-api-0\" (UID: \"cfc26537-aa18-4da2-bcaf-baedff2b7ba5\") " pod="openstack/glance-default-external-api-0" Nov 23 16:12:10 crc kubenswrapper[5050]: I1123 16:12:10.199825 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cfc26537-aa18-4da2-bcaf-baedff2b7ba5-logs\") pod \"glance-default-external-api-0\" (UID: \"cfc26537-aa18-4da2-bcaf-baedff2b7ba5\") " pod="openstack/glance-default-external-api-0" Nov 23 16:12:10 crc kubenswrapper[5050]: I1123 16:12:10.199862 5050 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"kube-api-access-tcz7v\" (UniqueName: \"kubernetes.io/projected/cfc26537-aa18-4da2-bcaf-baedff2b7ba5-kube-api-access-tcz7v\") pod \"glance-default-external-api-0\" (UID: \"cfc26537-aa18-4da2-bcaf-baedff2b7ba5\") " pod="openstack/glance-default-external-api-0" Nov 23 16:12:10 crc kubenswrapper[5050]: I1123 16:12:10.199960 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e1ac5e24-f6d7-4e99-94fb-4952a7cc06fd-ovsdbserver-nb\") pod \"dnsmasq-dns-85f66497c-m9hrl\" (UID: \"e1ac5e24-f6d7-4e99-94fb-4952a7cc06fd\") " pod="openstack/dnsmasq-dns-85f66497c-m9hrl" Nov 23 16:12:10 crc kubenswrapper[5050]: I1123 16:12:10.199995 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e1ac5e24-f6d7-4e99-94fb-4952a7cc06fd-dns-svc\") pod \"dnsmasq-dns-85f66497c-m9hrl\" (UID: \"e1ac5e24-f6d7-4e99-94fb-4952a7cc06fd\") " pod="openstack/dnsmasq-dns-85f66497c-m9hrl" Nov 23 16:12:10 crc kubenswrapper[5050]: I1123 16:12:10.200029 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e1ac5e24-f6d7-4e99-94fb-4952a7cc06fd-config\") pod \"dnsmasq-dns-85f66497c-m9hrl\" (UID: \"e1ac5e24-f6d7-4e99-94fb-4952a7cc06fd\") " pod="openstack/dnsmasq-dns-85f66497c-m9hrl" Nov 23 16:12:10 crc kubenswrapper[5050]: I1123 16:12:10.200072 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cfc26537-aa18-4da2-bcaf-baedff2b7ba5-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"cfc26537-aa18-4da2-bcaf-baedff2b7ba5\") " pod="openstack/glance-default-external-api-0" Nov 23 16:12:10 crc kubenswrapper[5050]: I1123 16:12:10.202016 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/cfc26537-aa18-4da2-bcaf-baedff2b7ba5-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"cfc26537-aa18-4da2-bcaf-baedff2b7ba5\") " pod="openstack/glance-default-external-api-0" Nov 23 16:12:10 crc kubenswrapper[5050]: I1123 16:12:10.202167 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cfc26537-aa18-4da2-bcaf-baedff2b7ba5-logs\") pod \"glance-default-external-api-0\" (UID: \"cfc26537-aa18-4da2-bcaf-baedff2b7ba5\") " pod="openstack/glance-default-external-api-0" Nov 23 16:12:10 crc kubenswrapper[5050]: I1123 16:12:10.213505 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cfc26537-aa18-4da2-bcaf-baedff2b7ba5-scripts\") pod \"glance-default-external-api-0\" (UID: \"cfc26537-aa18-4da2-bcaf-baedff2b7ba5\") " pod="openstack/glance-default-external-api-0" Nov 23 16:12:10 crc kubenswrapper[5050]: I1123 16:12:10.213678 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/cfc26537-aa18-4da2-bcaf-baedff2b7ba5-ceph\") pod \"glance-default-external-api-0\" (UID: \"cfc26537-aa18-4da2-bcaf-baedff2b7ba5\") " pod="openstack/glance-default-external-api-0" Nov 23 16:12:10 crc kubenswrapper[5050]: I1123 16:12:10.213696 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/cfc26537-aa18-4da2-bcaf-baedff2b7ba5-config-data\") pod \"glance-default-external-api-0\" (UID: \"cfc26537-aa18-4da2-bcaf-baedff2b7ba5\") " pod="openstack/glance-default-external-api-0" Nov 23 16:12:10 crc kubenswrapper[5050]: I1123 16:12:10.214186 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cfc26537-aa18-4da2-bcaf-baedff2b7ba5-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"cfc26537-aa18-4da2-bcaf-baedff2b7ba5\") " pod="openstack/glance-default-external-api-0" Nov 23 16:12:10 crc kubenswrapper[5050]: I1123 16:12:10.237491 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tcz7v\" (UniqueName: \"kubernetes.io/projected/cfc26537-aa18-4da2-bcaf-baedff2b7ba5-kube-api-access-tcz7v\") pod \"glance-default-external-api-0\" (UID: \"cfc26537-aa18-4da2-bcaf-baedff2b7ba5\") " pod="openstack/glance-default-external-api-0" Nov 23 16:12:10 crc kubenswrapper[5050]: I1123 16:12:10.302002 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e1ac5e24-f6d7-4e99-94fb-4952a7cc06fd-ovsdbserver-nb\") pod \"dnsmasq-dns-85f66497c-m9hrl\" (UID: \"e1ac5e24-f6d7-4e99-94fb-4952a7cc06fd\") " pod="openstack/dnsmasq-dns-85f66497c-m9hrl" Nov 23 16:12:10 crc kubenswrapper[5050]: I1123 16:12:10.302073 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e1ac5e24-f6d7-4e99-94fb-4952a7cc06fd-dns-svc\") pod \"dnsmasq-dns-85f66497c-m9hrl\" (UID: \"e1ac5e24-f6d7-4e99-94fb-4952a7cc06fd\") " pod="openstack/dnsmasq-dns-85f66497c-m9hrl" Nov 23 16:12:10 crc kubenswrapper[5050]: I1123 16:12:10.302108 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e1ac5e24-f6d7-4e99-94fb-4952a7cc06fd-config\") pod \"dnsmasq-dns-85f66497c-m9hrl\" (UID: \"e1ac5e24-f6d7-4e99-94fb-4952a7cc06fd\") " pod="openstack/dnsmasq-dns-85f66497c-m9hrl" Nov 23 16:12:10 crc kubenswrapper[5050]: I1123 16:12:10.302174 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x8blw\" (UniqueName: \"kubernetes.io/projected/e1ac5e24-f6d7-4e99-94fb-4952a7cc06fd-kube-api-access-x8blw\") pod \"dnsmasq-dns-85f66497c-m9hrl\" (UID: \"e1ac5e24-f6d7-4e99-94fb-4952a7cc06fd\") " pod="openstack/dnsmasq-dns-85f66497c-m9hrl" Nov 23 16:12:10 crc kubenswrapper[5050]: I1123 16:12:10.302214 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e1ac5e24-f6d7-4e99-94fb-4952a7cc06fd-ovsdbserver-sb\") pod \"dnsmasq-dns-85f66497c-m9hrl\" (UID: \"e1ac5e24-f6d7-4e99-94fb-4952a7cc06fd\") " pod="openstack/dnsmasq-dns-85f66497c-m9hrl" Nov 23 16:12:10 crc kubenswrapper[5050]: I1123 16:12:10.303231 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e1ac5e24-f6d7-4e99-94fb-4952a7cc06fd-config\") pod \"dnsmasq-dns-85f66497c-m9hrl\" (UID: \"e1ac5e24-f6d7-4e99-94fb-4952a7cc06fd\") " pod="openstack/dnsmasq-dns-85f66497c-m9hrl" Nov 23 16:12:10 crc kubenswrapper[5050]: I1123 16:12:10.303249 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e1ac5e24-f6d7-4e99-94fb-4952a7cc06fd-ovsdbserver-nb\") pod 
\"dnsmasq-dns-85f66497c-m9hrl\" (UID: \"e1ac5e24-f6d7-4e99-94fb-4952a7cc06fd\") " pod="openstack/dnsmasq-dns-85f66497c-m9hrl" Nov 23 16:12:10 crc kubenswrapper[5050]: I1123 16:12:10.303473 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e1ac5e24-f6d7-4e99-94fb-4952a7cc06fd-dns-svc\") pod \"dnsmasq-dns-85f66497c-m9hrl\" (UID: \"e1ac5e24-f6d7-4e99-94fb-4952a7cc06fd\") " pod="openstack/dnsmasq-dns-85f66497c-m9hrl" Nov 23 16:12:10 crc kubenswrapper[5050]: I1123 16:12:10.303539 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e1ac5e24-f6d7-4e99-94fb-4952a7cc06fd-ovsdbserver-sb\") pod \"dnsmasq-dns-85f66497c-m9hrl\" (UID: \"e1ac5e24-f6d7-4e99-94fb-4952a7cc06fd\") " pod="openstack/dnsmasq-dns-85f66497c-m9hrl" Nov 23 16:12:10 crc kubenswrapper[5050]: I1123 16:12:10.322211 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x8blw\" (UniqueName: \"kubernetes.io/projected/e1ac5e24-f6d7-4e99-94fb-4952a7cc06fd-kube-api-access-x8blw\") pod \"dnsmasq-dns-85f66497c-m9hrl\" (UID: \"e1ac5e24-f6d7-4e99-94fb-4952a7cc06fd\") " pod="openstack/dnsmasq-dns-85f66497c-m9hrl" Nov 23 16:12:10 crc kubenswrapper[5050]: I1123 16:12:10.371069 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 23 16:12:10 crc kubenswrapper[5050]: I1123 16:12:10.374239 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 23 16:12:10 crc kubenswrapper[5050]: I1123 16:12:10.376571 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Nov 23 16:12:10 crc kubenswrapper[5050]: I1123 16:12:10.385406 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 23 16:12:10 crc kubenswrapper[5050]: I1123 16:12:10.386336 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 23 16:12:10 crc kubenswrapper[5050]: I1123 16:12:10.403320 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b42051f4-57e8-41b8-8717-890ed310eaba-config-data\") pod \"glance-default-internal-api-0\" (UID: \"b42051f4-57e8-41b8-8717-890ed310eaba\") " pod="openstack/glance-default-internal-api-0" Nov 23 16:12:10 crc kubenswrapper[5050]: I1123 16:12:10.403379 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5fpl7\" (UniqueName: \"kubernetes.io/projected/b42051f4-57e8-41b8-8717-890ed310eaba-kube-api-access-5fpl7\") pod \"glance-default-internal-api-0\" (UID: \"b42051f4-57e8-41b8-8717-890ed310eaba\") " pod="openstack/glance-default-internal-api-0" Nov 23 16:12:10 crc kubenswrapper[5050]: I1123 16:12:10.403408 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/b42051f4-57e8-41b8-8717-890ed310eaba-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"b42051f4-57e8-41b8-8717-890ed310eaba\") " pod="openstack/glance-default-internal-api-0" Nov 23 16:12:10 crc kubenswrapper[5050]: I1123 16:12:10.403432 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b42051f4-57e8-41b8-8717-890ed310eaba-scripts\") pod \"glance-default-internal-api-0\" (UID: \"b42051f4-57e8-41b8-8717-890ed310eaba\") " pod="openstack/glance-default-internal-api-0" Nov 23 16:12:10 crc kubenswrapper[5050]: I1123 16:12:10.403477 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b42051f4-57e8-41b8-8717-890ed310eaba-logs\") pod \"glance-default-internal-api-0\" (UID: \"b42051f4-57e8-41b8-8717-890ed310eaba\") " pod="openstack/glance-default-internal-api-0" Nov 23 16:12:10 crc kubenswrapper[5050]: I1123 16:12:10.403510 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/b42051f4-57e8-41b8-8717-890ed310eaba-ceph\") pod \"glance-default-internal-api-0\" (UID: \"b42051f4-57e8-41b8-8717-890ed310eaba\") " pod="openstack/glance-default-internal-api-0" Nov 23 16:12:10 crc kubenswrapper[5050]: I1123 16:12:10.403546 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b42051f4-57e8-41b8-8717-890ed310eaba-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"b42051f4-57e8-41b8-8717-890ed310eaba\") " pod="openstack/glance-default-internal-api-0" Nov 23 16:12:10 crc kubenswrapper[5050]: I1123 16:12:10.506945 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b42051f4-57e8-41b8-8717-890ed310eaba-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"b42051f4-57e8-41b8-8717-890ed310eaba\") " pod="openstack/glance-default-internal-api-0" Nov 23 16:12:10 crc kubenswrapper[5050]: I1123 16:12:10.507117 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b42051f4-57e8-41b8-8717-890ed310eaba-config-data\") pod 
\"glance-default-internal-api-0\" (UID: \"b42051f4-57e8-41b8-8717-890ed310eaba\") " pod="openstack/glance-default-internal-api-0" Nov 23 16:12:10 crc kubenswrapper[5050]: I1123 16:12:10.507171 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5fpl7\" (UniqueName: \"kubernetes.io/projected/b42051f4-57e8-41b8-8717-890ed310eaba-kube-api-access-5fpl7\") pod \"glance-default-internal-api-0\" (UID: \"b42051f4-57e8-41b8-8717-890ed310eaba\") " pod="openstack/glance-default-internal-api-0" Nov 23 16:12:10 crc kubenswrapper[5050]: I1123 16:12:10.507207 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/b42051f4-57e8-41b8-8717-890ed310eaba-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"b42051f4-57e8-41b8-8717-890ed310eaba\") " pod="openstack/glance-default-internal-api-0" Nov 23 16:12:10 crc kubenswrapper[5050]: I1123 16:12:10.507239 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b42051f4-57e8-41b8-8717-890ed310eaba-scripts\") pod \"glance-default-internal-api-0\" (UID: \"b42051f4-57e8-41b8-8717-890ed310eaba\") " pod="openstack/glance-default-internal-api-0" Nov 23 16:12:10 crc kubenswrapper[5050]: I1123 16:12:10.507284 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b42051f4-57e8-41b8-8717-890ed310eaba-logs\") pod \"glance-default-internal-api-0\" (UID: \"b42051f4-57e8-41b8-8717-890ed310eaba\") " pod="openstack/glance-default-internal-api-0" Nov 23 16:12:10 crc kubenswrapper[5050]: I1123 16:12:10.507337 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/b42051f4-57e8-41b8-8717-890ed310eaba-ceph\") pod \"glance-default-internal-api-0\" (UID: \"b42051f4-57e8-41b8-8717-890ed310eaba\") " pod="openstack/glance-default-internal-api-0" Nov 23 16:12:10 crc kubenswrapper[5050]: I1123 16:12:10.508433 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/b42051f4-57e8-41b8-8717-890ed310eaba-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"b42051f4-57e8-41b8-8717-890ed310eaba\") " pod="openstack/glance-default-internal-api-0" Nov 23 16:12:10 crc kubenswrapper[5050]: I1123 16:12:10.508670 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b42051f4-57e8-41b8-8717-890ed310eaba-logs\") pod \"glance-default-internal-api-0\" (UID: \"b42051f4-57e8-41b8-8717-890ed310eaba\") " pod="openstack/glance-default-internal-api-0" Nov 23 16:12:10 crc kubenswrapper[5050]: I1123 16:12:10.514420 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b42051f4-57e8-41b8-8717-890ed310eaba-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"b42051f4-57e8-41b8-8717-890ed310eaba\") " pod="openstack/glance-default-internal-api-0" Nov 23 16:12:10 crc kubenswrapper[5050]: I1123 16:12:10.514794 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b42051f4-57e8-41b8-8717-890ed310eaba-config-data\") pod \"glance-default-internal-api-0\" (UID: \"b42051f4-57e8-41b8-8717-890ed310eaba\") " pod="openstack/glance-default-internal-api-0" Nov 23 16:12:10 crc 
kubenswrapper[5050]: I1123 16:12:10.515265 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b42051f4-57e8-41b8-8717-890ed310eaba-scripts\") pod \"glance-default-internal-api-0\" (UID: \"b42051f4-57e8-41b8-8717-890ed310eaba\") " pod="openstack/glance-default-internal-api-0" Nov 23 16:12:10 crc kubenswrapper[5050]: I1123 16:12:10.518632 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/b42051f4-57e8-41b8-8717-890ed310eaba-ceph\") pod \"glance-default-internal-api-0\" (UID: \"b42051f4-57e8-41b8-8717-890ed310eaba\") " pod="openstack/glance-default-internal-api-0" Nov 23 16:12:10 crc kubenswrapper[5050]: I1123 16:12:10.536168 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5fpl7\" (UniqueName: \"kubernetes.io/projected/b42051f4-57e8-41b8-8717-890ed310eaba-kube-api-access-5fpl7\") pod \"glance-default-internal-api-0\" (UID: \"b42051f4-57e8-41b8-8717-890ed310eaba\") " pod="openstack/glance-default-internal-api-0" Nov 23 16:12:10 crc kubenswrapper[5050]: I1123 16:12:10.582345 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-85f66497c-m9hrl" Nov 23 16:12:10 crc kubenswrapper[5050]: I1123 16:12:10.699346 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 23 16:12:11 crc kubenswrapper[5050]: I1123 16:12:11.013255 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 23 16:12:11 crc kubenswrapper[5050]: I1123 16:12:11.061302 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 23 16:12:11 crc kubenswrapper[5050]: I1123 16:12:11.070592 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-85f66497c-m9hrl"] Nov 23 16:12:11 crc kubenswrapper[5050]: I1123 16:12:11.319055 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 23 16:12:11 crc kubenswrapper[5050]: W1123 16:12:11.325727 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb42051f4_57e8_41b8_8717_890ed310eaba.slice/crio-aaa950a1e0a6c04f4ff31e6016d500ca2bd8bd524ec8c52b743ea11b6c9238a1 WatchSource:0}: Error finding container aaa950a1e0a6c04f4ff31e6016d500ca2bd8bd524ec8c52b743ea11b6c9238a1: Status 404 returned error can't find the container with id aaa950a1e0a6c04f4ff31e6016d500ca2bd8bd524ec8c52b743ea11b6c9238a1 Nov 23 16:12:11 crc kubenswrapper[5050]: I1123 16:12:11.822732 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"cfc26537-aa18-4da2-bcaf-baedff2b7ba5","Type":"ContainerStarted","Data":"6979d2cb3bf672e54677d24d239e788cfd2cf31bc31c48a53056676f3ba6c0b2"} Nov 23 16:12:11 crc kubenswrapper[5050]: I1123 16:12:11.823214 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"cfc26537-aa18-4da2-bcaf-baedff2b7ba5","Type":"ContainerStarted","Data":"c125c619b32669921babab4c5860cd23e0265a3135dcf787172c04a2022ec119"} Nov 23 16:12:11 crc kubenswrapper[5050]: I1123 16:12:11.844940 5050 generic.go:334] "Generic (PLEG): container finished" podID="e1ac5e24-f6d7-4e99-94fb-4952a7cc06fd" containerID="8b6511c59b9ded13dc8f8973d2988a403cf7161450962651e53b801a286d9fce" 
exitCode=0 Nov 23 16:12:11 crc kubenswrapper[5050]: I1123 16:12:11.846921 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-85f66497c-m9hrl" event={"ID":"e1ac5e24-f6d7-4e99-94fb-4952a7cc06fd","Type":"ContainerDied","Data":"8b6511c59b9ded13dc8f8973d2988a403cf7161450962651e53b801a286d9fce"} Nov 23 16:12:11 crc kubenswrapper[5050]: I1123 16:12:11.846959 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-85f66497c-m9hrl" event={"ID":"e1ac5e24-f6d7-4e99-94fb-4952a7cc06fd","Type":"ContainerStarted","Data":"5187919526be9913641157db0a3b0c9573a5d82e1084833fd8147993e0d15008"} Nov 23 16:12:11 crc kubenswrapper[5050]: I1123 16:12:11.861340 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"b42051f4-57e8-41b8-8717-890ed310eaba","Type":"ContainerStarted","Data":"aaa950a1e0a6c04f4ff31e6016d500ca2bd8bd524ec8c52b743ea11b6c9238a1"} Nov 23 16:12:12 crc kubenswrapper[5050]: I1123 16:12:12.875852 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"cfc26537-aa18-4da2-bcaf-baedff2b7ba5","Type":"ContainerStarted","Data":"4795be7c7338bdbcac3b7c1d6df99c2ef5adb7bf7bfc9f62caa228879789e237"} Nov 23 16:12:12 crc kubenswrapper[5050]: I1123 16:12:12.876030 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="cfc26537-aa18-4da2-bcaf-baedff2b7ba5" containerName="glance-log" containerID="cri-o://6979d2cb3bf672e54677d24d239e788cfd2cf31bc31c48a53056676f3ba6c0b2" gracePeriod=30 Nov 23 16:12:12 crc kubenswrapper[5050]: I1123 16:12:12.876063 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="cfc26537-aa18-4da2-bcaf-baedff2b7ba5" containerName="glance-httpd" containerID="cri-o://4795be7c7338bdbcac3b7c1d6df99c2ef5adb7bf7bfc9f62caa228879789e237" gracePeriod=30 Nov 23 16:12:12 crc kubenswrapper[5050]: I1123 16:12:12.882515 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-85f66497c-m9hrl" event={"ID":"e1ac5e24-f6d7-4e99-94fb-4952a7cc06fd","Type":"ContainerStarted","Data":"b698ee0e904d845afe64535f9fef08651dd2e75e3cb4da768ffa89d1f59fe66c"} Nov 23 16:12:12 crc kubenswrapper[5050]: I1123 16:12:12.882690 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-85f66497c-m9hrl" Nov 23 16:12:12 crc kubenswrapper[5050]: I1123 16:12:12.888780 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"b42051f4-57e8-41b8-8717-890ed310eaba","Type":"ContainerStarted","Data":"a10163e4cbdcfe451f66990bb963e35381994b37b34fa1b3392283ed116fe333"} Nov 23 16:12:12 crc kubenswrapper[5050]: I1123 16:12:12.888836 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"b42051f4-57e8-41b8-8717-890ed310eaba","Type":"ContainerStarted","Data":"7b2a96ddf06e01e08ea3c43c99bc68a1d7d424717df7f2a9be57f01b5571eecc"} Nov 23 16:12:12 crc kubenswrapper[5050]: I1123 16:12:12.911181 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=2.911156303 podStartE2EDuration="2.911156303s" podCreationTimestamp="2025-11-23 16:12:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 
16:12:12.902295983 +0000 UTC m=+5428.069292478" watchObservedRunningTime="2025-11-23 16:12:12.911156303 +0000 UTC m=+5428.078152798" Nov 23 16:12:12 crc kubenswrapper[5050]: I1123 16:12:12.930153 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-85f66497c-m9hrl" podStartSLOduration=2.930133399 podStartE2EDuration="2.930133399s" podCreationTimestamp="2025-11-23 16:12:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 16:12:12.924177911 +0000 UTC m=+5428.091174406" watchObservedRunningTime="2025-11-23 16:12:12.930133399 +0000 UTC m=+5428.097129894" Nov 23 16:12:12 crc kubenswrapper[5050]: I1123 16:12:12.954174 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=2.954145767 podStartE2EDuration="2.954145767s" podCreationTimestamp="2025-11-23 16:12:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 16:12:12.944805903 +0000 UTC m=+5428.111802388" watchObservedRunningTime="2025-11-23 16:12:12.954145767 +0000 UTC m=+5428.121142252" Nov 23 16:12:13 crc kubenswrapper[5050]: I1123 16:12:13.397753 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 23 16:12:13 crc kubenswrapper[5050]: I1123 16:12:13.575724 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 23 16:12:13 crc kubenswrapper[5050]: I1123 16:12:13.688748 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/cfc26537-aa18-4da2-bcaf-baedff2b7ba5-httpd-run\") pod \"cfc26537-aa18-4da2-bcaf-baedff2b7ba5\" (UID: \"cfc26537-aa18-4da2-bcaf-baedff2b7ba5\") " Nov 23 16:12:13 crc kubenswrapper[5050]: I1123 16:12:13.688845 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cfc26537-aa18-4da2-bcaf-baedff2b7ba5-config-data\") pod \"cfc26537-aa18-4da2-bcaf-baedff2b7ba5\" (UID: \"cfc26537-aa18-4da2-bcaf-baedff2b7ba5\") " Nov 23 16:12:13 crc kubenswrapper[5050]: I1123 16:12:13.688888 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tcz7v\" (UniqueName: \"kubernetes.io/projected/cfc26537-aa18-4da2-bcaf-baedff2b7ba5-kube-api-access-tcz7v\") pod \"cfc26537-aa18-4da2-bcaf-baedff2b7ba5\" (UID: \"cfc26537-aa18-4da2-bcaf-baedff2b7ba5\") " Nov 23 16:12:13 crc kubenswrapper[5050]: I1123 16:12:13.688938 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/cfc26537-aa18-4da2-bcaf-baedff2b7ba5-ceph\") pod \"cfc26537-aa18-4da2-bcaf-baedff2b7ba5\" (UID: \"cfc26537-aa18-4da2-bcaf-baedff2b7ba5\") " Nov 23 16:12:13 crc kubenswrapper[5050]: I1123 16:12:13.690035 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cfc26537-aa18-4da2-bcaf-baedff2b7ba5-combined-ca-bundle\") pod \"cfc26537-aa18-4da2-bcaf-baedff2b7ba5\" (UID: \"cfc26537-aa18-4da2-bcaf-baedff2b7ba5\") " Nov 23 16:12:13 crc kubenswrapper[5050]: I1123 16:12:13.690114 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: 
\"kubernetes.io/empty-dir/cfc26537-aa18-4da2-bcaf-baedff2b7ba5-logs\") pod \"cfc26537-aa18-4da2-bcaf-baedff2b7ba5\" (UID: \"cfc26537-aa18-4da2-bcaf-baedff2b7ba5\") " Nov 23 16:12:13 crc kubenswrapper[5050]: I1123 16:12:13.690231 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cfc26537-aa18-4da2-bcaf-baedff2b7ba5-scripts\") pod \"cfc26537-aa18-4da2-bcaf-baedff2b7ba5\" (UID: \"cfc26537-aa18-4da2-bcaf-baedff2b7ba5\") " Nov 23 16:12:13 crc kubenswrapper[5050]: I1123 16:12:13.689305 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cfc26537-aa18-4da2-bcaf-baedff2b7ba5-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "cfc26537-aa18-4da2-bcaf-baedff2b7ba5" (UID: "cfc26537-aa18-4da2-bcaf-baedff2b7ba5"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 16:12:13 crc kubenswrapper[5050]: I1123 16:12:13.691830 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cfc26537-aa18-4da2-bcaf-baedff2b7ba5-logs" (OuterVolumeSpecName: "logs") pod "cfc26537-aa18-4da2-bcaf-baedff2b7ba5" (UID: "cfc26537-aa18-4da2-bcaf-baedff2b7ba5"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 16:12:13 crc kubenswrapper[5050]: I1123 16:12:13.697795 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cfc26537-aa18-4da2-bcaf-baedff2b7ba5-scripts" (OuterVolumeSpecName: "scripts") pod "cfc26537-aa18-4da2-bcaf-baedff2b7ba5" (UID: "cfc26537-aa18-4da2-bcaf-baedff2b7ba5"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:12:13 crc kubenswrapper[5050]: I1123 16:12:13.697854 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cfc26537-aa18-4da2-bcaf-baedff2b7ba5-ceph" (OuterVolumeSpecName: "ceph") pod "cfc26537-aa18-4da2-bcaf-baedff2b7ba5" (UID: "cfc26537-aa18-4da2-bcaf-baedff2b7ba5"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 16:12:13 crc kubenswrapper[5050]: I1123 16:12:13.704992 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cfc26537-aa18-4da2-bcaf-baedff2b7ba5-kube-api-access-tcz7v" (OuterVolumeSpecName: "kube-api-access-tcz7v") pod "cfc26537-aa18-4da2-bcaf-baedff2b7ba5" (UID: "cfc26537-aa18-4da2-bcaf-baedff2b7ba5"). InnerVolumeSpecName "kube-api-access-tcz7v". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 16:12:13 crc kubenswrapper[5050]: I1123 16:12:13.717014 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cfc26537-aa18-4da2-bcaf-baedff2b7ba5-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "cfc26537-aa18-4da2-bcaf-baedff2b7ba5" (UID: "cfc26537-aa18-4da2-bcaf-baedff2b7ba5"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:12:13 crc kubenswrapper[5050]: I1123 16:12:13.761171 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cfc26537-aa18-4da2-bcaf-baedff2b7ba5-config-data" (OuterVolumeSpecName: "config-data") pod "cfc26537-aa18-4da2-bcaf-baedff2b7ba5" (UID: "cfc26537-aa18-4da2-bcaf-baedff2b7ba5"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:12:13 crc kubenswrapper[5050]: I1123 16:12:13.793231 5050 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/cfc26537-aa18-4da2-bcaf-baedff2b7ba5-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 23 16:12:13 crc kubenswrapper[5050]: I1123 16:12:13.793270 5050 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cfc26537-aa18-4da2-bcaf-baedff2b7ba5-config-data\") on node \"crc\" DevicePath \"\"" Nov 23 16:12:13 crc kubenswrapper[5050]: I1123 16:12:13.793281 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tcz7v\" (UniqueName: \"kubernetes.io/projected/cfc26537-aa18-4da2-bcaf-baedff2b7ba5-kube-api-access-tcz7v\") on node \"crc\" DevicePath \"\"" Nov 23 16:12:13 crc kubenswrapper[5050]: I1123 16:12:13.793294 5050 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/cfc26537-aa18-4da2-bcaf-baedff2b7ba5-ceph\") on node \"crc\" DevicePath \"\"" Nov 23 16:12:13 crc kubenswrapper[5050]: I1123 16:12:13.793307 5050 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cfc26537-aa18-4da2-bcaf-baedff2b7ba5-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 16:12:13 crc kubenswrapper[5050]: I1123 16:12:13.793317 5050 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cfc26537-aa18-4da2-bcaf-baedff2b7ba5-logs\") on node \"crc\" DevicePath \"\"" Nov 23 16:12:13 crc kubenswrapper[5050]: I1123 16:12:13.793326 5050 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cfc26537-aa18-4da2-bcaf-baedff2b7ba5-scripts\") on node \"crc\" DevicePath \"\"" Nov 23 16:12:13 crc kubenswrapper[5050]: I1123 16:12:13.899769 5050 generic.go:334] "Generic (PLEG): container finished" podID="cfc26537-aa18-4da2-bcaf-baedff2b7ba5" containerID="4795be7c7338bdbcac3b7c1d6df99c2ef5adb7bf7bfc9f62caa228879789e237" exitCode=0 Nov 23 16:12:13 crc kubenswrapper[5050]: I1123 16:12:13.899810 5050 generic.go:334] "Generic (PLEG): container finished" podID="cfc26537-aa18-4da2-bcaf-baedff2b7ba5" containerID="6979d2cb3bf672e54677d24d239e788cfd2cf31bc31c48a53056676f3ba6c0b2" exitCode=143 Nov 23 16:12:13 crc kubenswrapper[5050]: I1123 16:12:13.899879 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"cfc26537-aa18-4da2-bcaf-baedff2b7ba5","Type":"ContainerDied","Data":"4795be7c7338bdbcac3b7c1d6df99c2ef5adb7bf7bfc9f62caa228879789e237"} Nov 23 16:12:13 crc kubenswrapper[5050]: I1123 16:12:13.899958 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"cfc26537-aa18-4da2-bcaf-baedff2b7ba5","Type":"ContainerDied","Data":"6979d2cb3bf672e54677d24d239e788cfd2cf31bc31c48a53056676f3ba6c0b2"} Nov 23 16:12:13 crc kubenswrapper[5050]: I1123 16:12:13.899984 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"cfc26537-aa18-4da2-bcaf-baedff2b7ba5","Type":"ContainerDied","Data":"c125c619b32669921babab4c5860cd23e0265a3135dcf787172c04a2022ec119"} Nov 23 16:12:13 crc kubenswrapper[5050]: I1123 16:12:13.900013 5050 scope.go:117] "RemoveContainer" containerID="4795be7c7338bdbcac3b7c1d6df99c2ef5adb7bf7bfc9f62caa228879789e237" Nov 23 16:12:13 crc kubenswrapper[5050]: I1123 
16:12:13.900015 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 23 16:12:13 crc kubenswrapper[5050]: I1123 16:12:13.927880 5050 scope.go:117] "RemoveContainer" containerID="6979d2cb3bf672e54677d24d239e788cfd2cf31bc31c48a53056676f3ba6c0b2" Nov 23 16:12:13 crc kubenswrapper[5050]: I1123 16:12:13.952176 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 23 16:12:13 crc kubenswrapper[5050]: I1123 16:12:13.953549 5050 scope.go:117] "RemoveContainer" containerID="4795be7c7338bdbcac3b7c1d6df99c2ef5adb7bf7bfc9f62caa228879789e237" Nov 23 16:12:13 crc kubenswrapper[5050]: E1123 16:12:13.954114 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4795be7c7338bdbcac3b7c1d6df99c2ef5adb7bf7bfc9f62caa228879789e237\": container with ID starting with 4795be7c7338bdbcac3b7c1d6df99c2ef5adb7bf7bfc9f62caa228879789e237 not found: ID does not exist" containerID="4795be7c7338bdbcac3b7c1d6df99c2ef5adb7bf7bfc9f62caa228879789e237" Nov 23 16:12:13 crc kubenswrapper[5050]: I1123 16:12:13.954165 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4795be7c7338bdbcac3b7c1d6df99c2ef5adb7bf7bfc9f62caa228879789e237"} err="failed to get container status \"4795be7c7338bdbcac3b7c1d6df99c2ef5adb7bf7bfc9f62caa228879789e237\": rpc error: code = NotFound desc = could not find container \"4795be7c7338bdbcac3b7c1d6df99c2ef5adb7bf7bfc9f62caa228879789e237\": container with ID starting with 4795be7c7338bdbcac3b7c1d6df99c2ef5adb7bf7bfc9f62caa228879789e237 not found: ID does not exist" Nov 23 16:12:13 crc kubenswrapper[5050]: I1123 16:12:13.954197 5050 scope.go:117] "RemoveContainer" containerID="6979d2cb3bf672e54677d24d239e788cfd2cf31bc31c48a53056676f3ba6c0b2" Nov 23 16:12:13 crc kubenswrapper[5050]: E1123 16:12:13.954932 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6979d2cb3bf672e54677d24d239e788cfd2cf31bc31c48a53056676f3ba6c0b2\": container with ID starting with 6979d2cb3bf672e54677d24d239e788cfd2cf31bc31c48a53056676f3ba6c0b2 not found: ID does not exist" containerID="6979d2cb3bf672e54677d24d239e788cfd2cf31bc31c48a53056676f3ba6c0b2" Nov 23 16:12:13 crc kubenswrapper[5050]: I1123 16:12:13.954978 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6979d2cb3bf672e54677d24d239e788cfd2cf31bc31c48a53056676f3ba6c0b2"} err="failed to get container status \"6979d2cb3bf672e54677d24d239e788cfd2cf31bc31c48a53056676f3ba6c0b2\": rpc error: code = NotFound desc = could not find container \"6979d2cb3bf672e54677d24d239e788cfd2cf31bc31c48a53056676f3ba6c0b2\": container with ID starting with 6979d2cb3bf672e54677d24d239e788cfd2cf31bc31c48a53056676f3ba6c0b2 not found: ID does not exist" Nov 23 16:12:13 crc kubenswrapper[5050]: I1123 16:12:13.954998 5050 scope.go:117] "RemoveContainer" containerID="4795be7c7338bdbcac3b7c1d6df99c2ef5adb7bf7bfc9f62caa228879789e237" Nov 23 16:12:13 crc kubenswrapper[5050]: I1123 16:12:13.955681 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4795be7c7338bdbcac3b7c1d6df99c2ef5adb7bf7bfc9f62caa228879789e237"} err="failed to get container status \"4795be7c7338bdbcac3b7c1d6df99c2ef5adb7bf7bfc9f62caa228879789e237\": rpc error: code = NotFound desc = could not find container 
\"4795be7c7338bdbcac3b7c1d6df99c2ef5adb7bf7bfc9f62caa228879789e237\": container with ID starting with 4795be7c7338bdbcac3b7c1d6df99c2ef5adb7bf7bfc9f62caa228879789e237 not found: ID does not exist" Nov 23 16:12:13 crc kubenswrapper[5050]: I1123 16:12:13.955742 5050 scope.go:117] "RemoveContainer" containerID="6979d2cb3bf672e54677d24d239e788cfd2cf31bc31c48a53056676f3ba6c0b2" Nov 23 16:12:13 crc kubenswrapper[5050]: I1123 16:12:13.965754 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6979d2cb3bf672e54677d24d239e788cfd2cf31bc31c48a53056676f3ba6c0b2"} err="failed to get container status \"6979d2cb3bf672e54677d24d239e788cfd2cf31bc31c48a53056676f3ba6c0b2\": rpc error: code = NotFound desc = could not find container \"6979d2cb3bf672e54677d24d239e788cfd2cf31bc31c48a53056676f3ba6c0b2\": container with ID starting with 6979d2cb3bf672e54677d24d239e788cfd2cf31bc31c48a53056676f3ba6c0b2 not found: ID does not exist" Nov 23 16:12:13 crc kubenswrapper[5050]: I1123 16:12:13.971476 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 23 16:12:13 crc kubenswrapper[5050]: I1123 16:12:13.979847 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Nov 23 16:12:13 crc kubenswrapper[5050]: E1123 16:12:13.980431 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cfc26537-aa18-4da2-bcaf-baedff2b7ba5" containerName="glance-httpd" Nov 23 16:12:13 crc kubenswrapper[5050]: I1123 16:12:13.980549 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="cfc26537-aa18-4da2-bcaf-baedff2b7ba5" containerName="glance-httpd" Nov 23 16:12:13 crc kubenswrapper[5050]: E1123 16:12:13.980628 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cfc26537-aa18-4da2-bcaf-baedff2b7ba5" containerName="glance-log" Nov 23 16:12:13 crc kubenswrapper[5050]: I1123 16:12:13.980682 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="cfc26537-aa18-4da2-bcaf-baedff2b7ba5" containerName="glance-log" Nov 23 16:12:13 crc kubenswrapper[5050]: I1123 16:12:13.980977 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="cfc26537-aa18-4da2-bcaf-baedff2b7ba5" containerName="glance-log" Nov 23 16:12:13 crc kubenswrapper[5050]: I1123 16:12:13.981059 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="cfc26537-aa18-4da2-bcaf-baedff2b7ba5" containerName="glance-httpd" Nov 23 16:12:13 crc kubenswrapper[5050]: I1123 16:12:13.982373 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 23 16:12:13 crc kubenswrapper[5050]: I1123 16:12:13.985230 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Nov 23 16:12:13 crc kubenswrapper[5050]: I1123 16:12:13.989701 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 23 16:12:14 crc kubenswrapper[5050]: I1123 16:12:14.099875 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2bffs\" (UniqueName: \"kubernetes.io/projected/54c7819b-2cfc-4c76-8777-ec8ca37e418e-kube-api-access-2bffs\") pod \"glance-default-external-api-0\" (UID: \"54c7819b-2cfc-4c76-8777-ec8ca37e418e\") " pod="openstack/glance-default-external-api-0" Nov 23 16:12:14 crc kubenswrapper[5050]: I1123 16:12:14.099995 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/54c7819b-2cfc-4c76-8777-ec8ca37e418e-scripts\") pod \"glance-default-external-api-0\" (UID: \"54c7819b-2cfc-4c76-8777-ec8ca37e418e\") " pod="openstack/glance-default-external-api-0" Nov 23 16:12:14 crc kubenswrapper[5050]: I1123 16:12:14.100047 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/54c7819b-2cfc-4c76-8777-ec8ca37e418e-config-data\") pod \"glance-default-external-api-0\" (UID: \"54c7819b-2cfc-4c76-8777-ec8ca37e418e\") " pod="openstack/glance-default-external-api-0" Nov 23 16:12:14 crc kubenswrapper[5050]: I1123 16:12:14.100071 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/54c7819b-2cfc-4c76-8777-ec8ca37e418e-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"54c7819b-2cfc-4c76-8777-ec8ca37e418e\") " pod="openstack/glance-default-external-api-0" Nov 23 16:12:14 crc kubenswrapper[5050]: I1123 16:12:14.100110 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/54c7819b-2cfc-4c76-8777-ec8ca37e418e-ceph\") pod \"glance-default-external-api-0\" (UID: \"54c7819b-2cfc-4c76-8777-ec8ca37e418e\") " pod="openstack/glance-default-external-api-0" Nov 23 16:12:14 crc kubenswrapper[5050]: I1123 16:12:14.100162 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/54c7819b-2cfc-4c76-8777-ec8ca37e418e-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"54c7819b-2cfc-4c76-8777-ec8ca37e418e\") " pod="openstack/glance-default-external-api-0" Nov 23 16:12:14 crc kubenswrapper[5050]: I1123 16:12:14.100202 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/54c7819b-2cfc-4c76-8777-ec8ca37e418e-logs\") pod \"glance-default-external-api-0\" (UID: \"54c7819b-2cfc-4c76-8777-ec8ca37e418e\") " pod="openstack/glance-default-external-api-0" Nov 23 16:12:14 crc kubenswrapper[5050]: I1123 16:12:14.201847 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/54c7819b-2cfc-4c76-8777-ec8ca37e418e-scripts\") pod \"glance-default-external-api-0\" (UID: \"54c7819b-2cfc-4c76-8777-ec8ca37e418e\") 
" pod="openstack/glance-default-external-api-0" Nov 23 16:12:14 crc kubenswrapper[5050]: I1123 16:12:14.201955 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/54c7819b-2cfc-4c76-8777-ec8ca37e418e-config-data\") pod \"glance-default-external-api-0\" (UID: \"54c7819b-2cfc-4c76-8777-ec8ca37e418e\") " pod="openstack/glance-default-external-api-0" Nov 23 16:12:14 crc kubenswrapper[5050]: I1123 16:12:14.202008 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/54c7819b-2cfc-4c76-8777-ec8ca37e418e-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"54c7819b-2cfc-4c76-8777-ec8ca37e418e\") " pod="openstack/glance-default-external-api-0" Nov 23 16:12:14 crc kubenswrapper[5050]: I1123 16:12:14.202075 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/54c7819b-2cfc-4c76-8777-ec8ca37e418e-ceph\") pod \"glance-default-external-api-0\" (UID: \"54c7819b-2cfc-4c76-8777-ec8ca37e418e\") " pod="openstack/glance-default-external-api-0" Nov 23 16:12:14 crc kubenswrapper[5050]: I1123 16:12:14.202151 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/54c7819b-2cfc-4c76-8777-ec8ca37e418e-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"54c7819b-2cfc-4c76-8777-ec8ca37e418e\") " pod="openstack/glance-default-external-api-0" Nov 23 16:12:14 crc kubenswrapper[5050]: I1123 16:12:14.202205 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/54c7819b-2cfc-4c76-8777-ec8ca37e418e-logs\") pod \"glance-default-external-api-0\" (UID: \"54c7819b-2cfc-4c76-8777-ec8ca37e418e\") " pod="openstack/glance-default-external-api-0" Nov 23 16:12:14 crc kubenswrapper[5050]: I1123 16:12:14.202464 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2bffs\" (UniqueName: \"kubernetes.io/projected/54c7819b-2cfc-4c76-8777-ec8ca37e418e-kube-api-access-2bffs\") pod \"glance-default-external-api-0\" (UID: \"54c7819b-2cfc-4c76-8777-ec8ca37e418e\") " pod="openstack/glance-default-external-api-0" Nov 23 16:12:14 crc kubenswrapper[5050]: I1123 16:12:14.202840 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/54c7819b-2cfc-4c76-8777-ec8ca37e418e-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"54c7819b-2cfc-4c76-8777-ec8ca37e418e\") " pod="openstack/glance-default-external-api-0" Nov 23 16:12:14 crc kubenswrapper[5050]: I1123 16:12:14.203683 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/54c7819b-2cfc-4c76-8777-ec8ca37e418e-logs\") pod \"glance-default-external-api-0\" (UID: \"54c7819b-2cfc-4c76-8777-ec8ca37e418e\") " pod="openstack/glance-default-external-api-0" Nov 23 16:12:14 crc kubenswrapper[5050]: I1123 16:12:14.208722 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/54c7819b-2cfc-4c76-8777-ec8ca37e418e-scripts\") pod \"glance-default-external-api-0\" (UID: \"54c7819b-2cfc-4c76-8777-ec8ca37e418e\") " pod="openstack/glance-default-external-api-0" Nov 23 16:12:14 crc kubenswrapper[5050]: I1123 16:12:14.208872 5050 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/54c7819b-2cfc-4c76-8777-ec8ca37e418e-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"54c7819b-2cfc-4c76-8777-ec8ca37e418e\") " pod="openstack/glance-default-external-api-0" Nov 23 16:12:14 crc kubenswrapper[5050]: I1123 16:12:14.210153 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/54c7819b-2cfc-4c76-8777-ec8ca37e418e-config-data\") pod \"glance-default-external-api-0\" (UID: \"54c7819b-2cfc-4c76-8777-ec8ca37e418e\") " pod="openstack/glance-default-external-api-0" Nov 23 16:12:14 crc kubenswrapper[5050]: I1123 16:12:14.211225 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/54c7819b-2cfc-4c76-8777-ec8ca37e418e-ceph\") pod \"glance-default-external-api-0\" (UID: \"54c7819b-2cfc-4c76-8777-ec8ca37e418e\") " pod="openstack/glance-default-external-api-0" Nov 23 16:12:14 crc kubenswrapper[5050]: I1123 16:12:14.226811 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2bffs\" (UniqueName: \"kubernetes.io/projected/54c7819b-2cfc-4c76-8777-ec8ca37e418e-kube-api-access-2bffs\") pod \"glance-default-external-api-0\" (UID: \"54c7819b-2cfc-4c76-8777-ec8ca37e418e\") " pod="openstack/glance-default-external-api-0" Nov 23 16:12:14 crc kubenswrapper[5050]: I1123 16:12:14.308850 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 23 16:12:14 crc kubenswrapper[5050]: I1123 16:12:14.931523 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="b42051f4-57e8-41b8-8717-890ed310eaba" containerName="glance-log" containerID="cri-o://7b2a96ddf06e01e08ea3c43c99bc68a1d7d424717df7f2a9be57f01b5571eecc" gracePeriod=30 Nov 23 16:12:14 crc kubenswrapper[5050]: I1123 16:12:14.932159 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="b42051f4-57e8-41b8-8717-890ed310eaba" containerName="glance-httpd" containerID="cri-o://a10163e4cbdcfe451f66990bb963e35381994b37b34fa1b3392283ed116fe333" gracePeriod=30 Nov 23 16:12:15 crc kubenswrapper[5050]: I1123 16:12:15.055023 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 23 16:12:15 crc kubenswrapper[5050]: I1123 16:12:15.838395 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cfc26537-aa18-4da2-bcaf-baedff2b7ba5" path="/var/lib/kubelet/pods/cfc26537-aa18-4da2-bcaf-baedff2b7ba5/volumes" Nov 23 16:12:15 crc kubenswrapper[5050]: I1123 16:12:15.950876 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"54c7819b-2cfc-4c76-8777-ec8ca37e418e","Type":"ContainerStarted","Data":"0123ddf672b0c9cd788e8ae216a52c37b57380502b85282f4f6d904a093eade8"} Nov 23 16:12:15 crc kubenswrapper[5050]: I1123 16:12:15.950931 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"54c7819b-2cfc-4c76-8777-ec8ca37e418e","Type":"ContainerStarted","Data":"e32fb52b7342bd2da4178f8ee9f9b81bddff10c652ab4daea2f45296d2f026cc"} Nov 23 16:12:15 crc kubenswrapper[5050]: I1123 16:12:15.953119 5050 generic.go:334] "Generic (PLEG): container finished" podID="b42051f4-57e8-41b8-8717-890ed310eaba" 
containerID="a10163e4cbdcfe451f66990bb963e35381994b37b34fa1b3392283ed116fe333" exitCode=0 Nov 23 16:12:15 crc kubenswrapper[5050]: I1123 16:12:15.953157 5050 generic.go:334] "Generic (PLEG): container finished" podID="b42051f4-57e8-41b8-8717-890ed310eaba" containerID="7b2a96ddf06e01e08ea3c43c99bc68a1d7d424717df7f2a9be57f01b5571eecc" exitCode=143 Nov 23 16:12:15 crc kubenswrapper[5050]: I1123 16:12:15.953190 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"b42051f4-57e8-41b8-8717-890ed310eaba","Type":"ContainerDied","Data":"a10163e4cbdcfe451f66990bb963e35381994b37b34fa1b3392283ed116fe333"} Nov 23 16:12:15 crc kubenswrapper[5050]: I1123 16:12:15.953227 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"b42051f4-57e8-41b8-8717-890ed310eaba","Type":"ContainerDied","Data":"7b2a96ddf06e01e08ea3c43c99bc68a1d7d424717df7f2a9be57f01b5571eecc"} Nov 23 16:12:16 crc kubenswrapper[5050]: I1123 16:12:16.217883 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 23 16:12:16 crc kubenswrapper[5050]: I1123 16:12:16.355859 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b42051f4-57e8-41b8-8717-890ed310eaba-combined-ca-bundle\") pod \"b42051f4-57e8-41b8-8717-890ed310eaba\" (UID: \"b42051f4-57e8-41b8-8717-890ed310eaba\") " Nov 23 16:12:16 crc kubenswrapper[5050]: I1123 16:12:16.355944 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b42051f4-57e8-41b8-8717-890ed310eaba-scripts\") pod \"b42051f4-57e8-41b8-8717-890ed310eaba\" (UID: \"b42051f4-57e8-41b8-8717-890ed310eaba\") " Nov 23 16:12:16 crc kubenswrapper[5050]: I1123 16:12:16.356211 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5fpl7\" (UniqueName: \"kubernetes.io/projected/b42051f4-57e8-41b8-8717-890ed310eaba-kube-api-access-5fpl7\") pod \"b42051f4-57e8-41b8-8717-890ed310eaba\" (UID: \"b42051f4-57e8-41b8-8717-890ed310eaba\") " Nov 23 16:12:16 crc kubenswrapper[5050]: I1123 16:12:16.356272 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b42051f4-57e8-41b8-8717-890ed310eaba-config-data\") pod \"b42051f4-57e8-41b8-8717-890ed310eaba\" (UID: \"b42051f4-57e8-41b8-8717-890ed310eaba\") " Nov 23 16:12:16 crc kubenswrapper[5050]: I1123 16:12:16.356371 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/b42051f4-57e8-41b8-8717-890ed310eaba-httpd-run\") pod \"b42051f4-57e8-41b8-8717-890ed310eaba\" (UID: \"b42051f4-57e8-41b8-8717-890ed310eaba\") " Nov 23 16:12:16 crc kubenswrapper[5050]: I1123 16:12:16.356546 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b42051f4-57e8-41b8-8717-890ed310eaba-logs\") pod \"b42051f4-57e8-41b8-8717-890ed310eaba\" (UID: \"b42051f4-57e8-41b8-8717-890ed310eaba\") " Nov 23 16:12:16 crc kubenswrapper[5050]: I1123 16:12:16.356601 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/b42051f4-57e8-41b8-8717-890ed310eaba-ceph\") pod \"b42051f4-57e8-41b8-8717-890ed310eaba\" (UID: 
\"b42051f4-57e8-41b8-8717-890ed310eaba\") " Nov 23 16:12:16 crc kubenswrapper[5050]: I1123 16:12:16.357042 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b42051f4-57e8-41b8-8717-890ed310eaba-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "b42051f4-57e8-41b8-8717-890ed310eaba" (UID: "b42051f4-57e8-41b8-8717-890ed310eaba"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 16:12:16 crc kubenswrapper[5050]: I1123 16:12:16.357171 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b42051f4-57e8-41b8-8717-890ed310eaba-logs" (OuterVolumeSpecName: "logs") pod "b42051f4-57e8-41b8-8717-890ed310eaba" (UID: "b42051f4-57e8-41b8-8717-890ed310eaba"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 16:12:16 crc kubenswrapper[5050]: I1123 16:12:16.357904 5050 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/b42051f4-57e8-41b8-8717-890ed310eaba-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 23 16:12:16 crc kubenswrapper[5050]: I1123 16:12:16.357933 5050 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b42051f4-57e8-41b8-8717-890ed310eaba-logs\") on node \"crc\" DevicePath \"\"" Nov 23 16:12:16 crc kubenswrapper[5050]: I1123 16:12:16.362654 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b42051f4-57e8-41b8-8717-890ed310eaba-scripts" (OuterVolumeSpecName: "scripts") pod "b42051f4-57e8-41b8-8717-890ed310eaba" (UID: "b42051f4-57e8-41b8-8717-890ed310eaba"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:12:16 crc kubenswrapper[5050]: I1123 16:12:16.363558 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b42051f4-57e8-41b8-8717-890ed310eaba-ceph" (OuterVolumeSpecName: "ceph") pod "b42051f4-57e8-41b8-8717-890ed310eaba" (UID: "b42051f4-57e8-41b8-8717-890ed310eaba"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 16:12:16 crc kubenswrapper[5050]: I1123 16:12:16.363893 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b42051f4-57e8-41b8-8717-890ed310eaba-kube-api-access-5fpl7" (OuterVolumeSpecName: "kube-api-access-5fpl7") pod "b42051f4-57e8-41b8-8717-890ed310eaba" (UID: "b42051f4-57e8-41b8-8717-890ed310eaba"). InnerVolumeSpecName "kube-api-access-5fpl7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 16:12:16 crc kubenswrapper[5050]: I1123 16:12:16.393800 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b42051f4-57e8-41b8-8717-890ed310eaba-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b42051f4-57e8-41b8-8717-890ed310eaba" (UID: "b42051f4-57e8-41b8-8717-890ed310eaba"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:12:16 crc kubenswrapper[5050]: I1123 16:12:16.405290 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b42051f4-57e8-41b8-8717-890ed310eaba-config-data" (OuterVolumeSpecName: "config-data") pod "b42051f4-57e8-41b8-8717-890ed310eaba" (UID: "b42051f4-57e8-41b8-8717-890ed310eaba"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:12:16 crc kubenswrapper[5050]: I1123 16:12:16.460000 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5fpl7\" (UniqueName: \"kubernetes.io/projected/b42051f4-57e8-41b8-8717-890ed310eaba-kube-api-access-5fpl7\") on node \"crc\" DevicePath \"\"" Nov 23 16:12:16 crc kubenswrapper[5050]: I1123 16:12:16.460063 5050 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b42051f4-57e8-41b8-8717-890ed310eaba-config-data\") on node \"crc\" DevicePath \"\"" Nov 23 16:12:16 crc kubenswrapper[5050]: I1123 16:12:16.460078 5050 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/b42051f4-57e8-41b8-8717-890ed310eaba-ceph\") on node \"crc\" DevicePath \"\"" Nov 23 16:12:16 crc kubenswrapper[5050]: I1123 16:12:16.460095 5050 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b42051f4-57e8-41b8-8717-890ed310eaba-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 16:12:16 crc kubenswrapper[5050]: I1123 16:12:16.460106 5050 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b42051f4-57e8-41b8-8717-890ed310eaba-scripts\") on node \"crc\" DevicePath \"\"" Nov 23 16:12:16 crc kubenswrapper[5050]: I1123 16:12:16.966656 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"b42051f4-57e8-41b8-8717-890ed310eaba","Type":"ContainerDied","Data":"aaa950a1e0a6c04f4ff31e6016d500ca2bd8bd524ec8c52b743ea11b6c9238a1"} Nov 23 16:12:16 crc kubenswrapper[5050]: I1123 16:12:16.966707 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 23 16:12:16 crc kubenswrapper[5050]: I1123 16:12:16.967249 5050 scope.go:117] "RemoveContainer" containerID="a10163e4cbdcfe451f66990bb963e35381994b37b34fa1b3392283ed116fe333" Nov 23 16:12:16 crc kubenswrapper[5050]: I1123 16:12:16.973827 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"54c7819b-2cfc-4c76-8777-ec8ca37e418e","Type":"ContainerStarted","Data":"44bfe94e167510c54c0a0ff25886435bcb8b77e0e097f04e95214b3e4cbc1175"} Nov 23 16:12:17 crc kubenswrapper[5050]: I1123 16:12:17.001440 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=4.001414732 podStartE2EDuration="4.001414732s" podCreationTimestamp="2025-11-23 16:12:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 16:12:16.995568607 +0000 UTC m=+5432.162565092" watchObservedRunningTime="2025-11-23 16:12:17.001414732 +0000 UTC m=+5432.168411217" Nov 23 16:12:17 crc kubenswrapper[5050]: I1123 16:12:17.033926 5050 scope.go:117] "RemoveContainer" containerID="7b2a96ddf06e01e08ea3c43c99bc68a1d7d424717df7f2a9be57f01b5571eecc" Nov 23 16:12:17 crc kubenswrapper[5050]: I1123 16:12:17.053263 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 23 16:12:17 crc kubenswrapper[5050]: I1123 16:12:17.066024 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 23 16:12:17 crc kubenswrapper[5050]: I1123 16:12:17.076803 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 23 16:12:17 crc kubenswrapper[5050]: E1123 16:12:17.077217 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b42051f4-57e8-41b8-8717-890ed310eaba" containerName="glance-httpd" Nov 23 16:12:17 crc kubenswrapper[5050]: I1123 16:12:17.077236 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="b42051f4-57e8-41b8-8717-890ed310eaba" containerName="glance-httpd" Nov 23 16:12:17 crc kubenswrapper[5050]: E1123 16:12:17.077255 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b42051f4-57e8-41b8-8717-890ed310eaba" containerName="glance-log" Nov 23 16:12:17 crc kubenswrapper[5050]: I1123 16:12:17.077263 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="b42051f4-57e8-41b8-8717-890ed310eaba" containerName="glance-log" Nov 23 16:12:17 crc kubenswrapper[5050]: I1123 16:12:17.077485 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="b42051f4-57e8-41b8-8717-890ed310eaba" containerName="glance-httpd" Nov 23 16:12:17 crc kubenswrapper[5050]: I1123 16:12:17.077516 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="b42051f4-57e8-41b8-8717-890ed310eaba" containerName="glance-log" Nov 23 16:12:17 crc kubenswrapper[5050]: I1123 16:12:17.078465 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 23 16:12:17 crc kubenswrapper[5050]: I1123 16:12:17.085082 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Nov 23 16:12:17 crc kubenswrapper[5050]: I1123 16:12:17.093688 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 23 16:12:17 crc kubenswrapper[5050]: I1123 16:12:17.171614 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dd6ddc23-bc16-45ee-adc2-4e354e707c2d-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"dd6ddc23-bc16-45ee-adc2-4e354e707c2d\") " pod="openstack/glance-default-internal-api-0" Nov 23 16:12:17 crc kubenswrapper[5050]: I1123 16:12:17.171761 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wtzrr\" (UniqueName: \"kubernetes.io/projected/dd6ddc23-bc16-45ee-adc2-4e354e707c2d-kube-api-access-wtzrr\") pod \"glance-default-internal-api-0\" (UID: \"dd6ddc23-bc16-45ee-adc2-4e354e707c2d\") " pod="openstack/glance-default-internal-api-0" Nov 23 16:12:17 crc kubenswrapper[5050]: I1123 16:12:17.171809 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dd6ddc23-bc16-45ee-adc2-4e354e707c2d-config-data\") pod \"glance-default-internal-api-0\" (UID: \"dd6ddc23-bc16-45ee-adc2-4e354e707c2d\") " pod="openstack/glance-default-internal-api-0" Nov 23 16:12:17 crc kubenswrapper[5050]: I1123 16:12:17.171855 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/dd6ddc23-bc16-45ee-adc2-4e354e707c2d-ceph\") pod \"glance-default-internal-api-0\" (UID: \"dd6ddc23-bc16-45ee-adc2-4e354e707c2d\") " pod="openstack/glance-default-internal-api-0" Nov 23 16:12:17 crc kubenswrapper[5050]: I1123 16:12:17.171881 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dd6ddc23-bc16-45ee-adc2-4e354e707c2d-scripts\") pod \"glance-default-internal-api-0\" (UID: \"dd6ddc23-bc16-45ee-adc2-4e354e707c2d\") " pod="openstack/glance-default-internal-api-0" Nov 23 16:12:17 crc kubenswrapper[5050]: I1123 16:12:17.171985 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/dd6ddc23-bc16-45ee-adc2-4e354e707c2d-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"dd6ddc23-bc16-45ee-adc2-4e354e707c2d\") " pod="openstack/glance-default-internal-api-0" Nov 23 16:12:17 crc kubenswrapper[5050]: I1123 16:12:17.172030 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/dd6ddc23-bc16-45ee-adc2-4e354e707c2d-logs\") pod \"glance-default-internal-api-0\" (UID: \"dd6ddc23-bc16-45ee-adc2-4e354e707c2d\") " pod="openstack/glance-default-internal-api-0" Nov 23 16:12:17 crc kubenswrapper[5050]: I1123 16:12:17.274289 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/dd6ddc23-bc16-45ee-adc2-4e354e707c2d-httpd-run\") pod \"glance-default-internal-api-0\" (UID: 
\"dd6ddc23-bc16-45ee-adc2-4e354e707c2d\") " pod="openstack/glance-default-internal-api-0" Nov 23 16:12:17 crc kubenswrapper[5050]: I1123 16:12:17.274363 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/dd6ddc23-bc16-45ee-adc2-4e354e707c2d-logs\") pod \"glance-default-internal-api-0\" (UID: \"dd6ddc23-bc16-45ee-adc2-4e354e707c2d\") " pod="openstack/glance-default-internal-api-0" Nov 23 16:12:17 crc kubenswrapper[5050]: I1123 16:12:17.274401 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dd6ddc23-bc16-45ee-adc2-4e354e707c2d-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"dd6ddc23-bc16-45ee-adc2-4e354e707c2d\") " pod="openstack/glance-default-internal-api-0" Nov 23 16:12:17 crc kubenswrapper[5050]: I1123 16:12:17.274524 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wtzrr\" (UniqueName: \"kubernetes.io/projected/dd6ddc23-bc16-45ee-adc2-4e354e707c2d-kube-api-access-wtzrr\") pod \"glance-default-internal-api-0\" (UID: \"dd6ddc23-bc16-45ee-adc2-4e354e707c2d\") " pod="openstack/glance-default-internal-api-0" Nov 23 16:12:17 crc kubenswrapper[5050]: I1123 16:12:17.274572 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dd6ddc23-bc16-45ee-adc2-4e354e707c2d-config-data\") pod \"glance-default-internal-api-0\" (UID: \"dd6ddc23-bc16-45ee-adc2-4e354e707c2d\") " pod="openstack/glance-default-internal-api-0" Nov 23 16:12:17 crc kubenswrapper[5050]: I1123 16:12:17.274618 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/dd6ddc23-bc16-45ee-adc2-4e354e707c2d-ceph\") pod \"glance-default-internal-api-0\" (UID: \"dd6ddc23-bc16-45ee-adc2-4e354e707c2d\") " pod="openstack/glance-default-internal-api-0" Nov 23 16:12:17 crc kubenswrapper[5050]: I1123 16:12:17.274646 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dd6ddc23-bc16-45ee-adc2-4e354e707c2d-scripts\") pod \"glance-default-internal-api-0\" (UID: \"dd6ddc23-bc16-45ee-adc2-4e354e707c2d\") " pod="openstack/glance-default-internal-api-0" Nov 23 16:12:17 crc kubenswrapper[5050]: I1123 16:12:17.275969 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/dd6ddc23-bc16-45ee-adc2-4e354e707c2d-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"dd6ddc23-bc16-45ee-adc2-4e354e707c2d\") " pod="openstack/glance-default-internal-api-0" Nov 23 16:12:17 crc kubenswrapper[5050]: I1123 16:12:17.276069 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/dd6ddc23-bc16-45ee-adc2-4e354e707c2d-logs\") pod \"glance-default-internal-api-0\" (UID: \"dd6ddc23-bc16-45ee-adc2-4e354e707c2d\") " pod="openstack/glance-default-internal-api-0" Nov 23 16:12:17 crc kubenswrapper[5050]: I1123 16:12:17.281340 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dd6ddc23-bc16-45ee-adc2-4e354e707c2d-scripts\") pod \"glance-default-internal-api-0\" (UID: \"dd6ddc23-bc16-45ee-adc2-4e354e707c2d\") " pod="openstack/glance-default-internal-api-0" Nov 23 16:12:17 crc kubenswrapper[5050]: I1123 16:12:17.281528 
5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dd6ddc23-bc16-45ee-adc2-4e354e707c2d-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"dd6ddc23-bc16-45ee-adc2-4e354e707c2d\") " pod="openstack/glance-default-internal-api-0" Nov 23 16:12:17 crc kubenswrapper[5050]: I1123 16:12:17.282947 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/dd6ddc23-bc16-45ee-adc2-4e354e707c2d-ceph\") pod \"glance-default-internal-api-0\" (UID: \"dd6ddc23-bc16-45ee-adc2-4e354e707c2d\") " pod="openstack/glance-default-internal-api-0" Nov 23 16:12:17 crc kubenswrapper[5050]: I1123 16:12:17.284387 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dd6ddc23-bc16-45ee-adc2-4e354e707c2d-config-data\") pod \"glance-default-internal-api-0\" (UID: \"dd6ddc23-bc16-45ee-adc2-4e354e707c2d\") " pod="openstack/glance-default-internal-api-0" Nov 23 16:12:17 crc kubenswrapper[5050]: I1123 16:12:17.306492 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wtzrr\" (UniqueName: \"kubernetes.io/projected/dd6ddc23-bc16-45ee-adc2-4e354e707c2d-kube-api-access-wtzrr\") pod \"glance-default-internal-api-0\" (UID: \"dd6ddc23-bc16-45ee-adc2-4e354e707c2d\") " pod="openstack/glance-default-internal-api-0" Nov 23 16:12:17 crc kubenswrapper[5050]: I1123 16:12:17.439604 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 23 16:12:17 crc kubenswrapper[5050]: I1123 16:12:17.565285 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b42051f4-57e8-41b8-8717-890ed310eaba" path="/var/lib/kubelet/pods/b42051f4-57e8-41b8-8717-890ed310eaba/volumes" Nov 23 16:12:18 crc kubenswrapper[5050]: I1123 16:12:18.227362 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 23 16:12:18 crc kubenswrapper[5050]: I1123 16:12:18.996180 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"dd6ddc23-bc16-45ee-adc2-4e354e707c2d","Type":"ContainerStarted","Data":"01a7d8a04e112b855020224eb1edacb1527bc653d06ca87278860e7afd6a3cac"} Nov 23 16:12:18 crc kubenswrapper[5050]: I1123 16:12:18.996727 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"dd6ddc23-bc16-45ee-adc2-4e354e707c2d","Type":"ContainerStarted","Data":"4f8f883108c7994e47117d265eafb1e516c08f05f30f3178f6bddc87826fabfe"} Nov 23 16:12:20 crc kubenswrapper[5050]: I1123 16:12:20.012494 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"dd6ddc23-bc16-45ee-adc2-4e354e707c2d","Type":"ContainerStarted","Data":"d224f6bcfdea9e3c42bcb919fd0586bca9651f6ed43080511d95e49dac735628"} Nov 23 16:12:20 crc kubenswrapper[5050]: I1123 16:12:20.065579 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=3.065542942 podStartE2EDuration="3.065542942s" podCreationTimestamp="2025-11-23 16:12:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 16:12:20.0463349 +0000 UTC m=+5435.213331395" watchObservedRunningTime="2025-11-23 16:12:20.065542942 +0000 
UTC m=+5435.232539467" Nov 23 16:12:20 crc kubenswrapper[5050]: I1123 16:12:20.585890 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-85f66497c-m9hrl" Nov 23 16:12:20 crc kubenswrapper[5050]: I1123 16:12:20.748690 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-79b55cc78f-gq4jd"] Nov 23 16:12:20 crc kubenswrapper[5050]: I1123 16:12:20.749610 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-79b55cc78f-gq4jd" podUID="f2532463-ea34-450a-9c35-337de82cdef5" containerName="dnsmasq-dns" containerID="cri-o://b81fea2723f6f0fb471e7c6206c7cc5b1cfd453a9eead97ade91ac94b79c878e" gracePeriod=10 Nov 23 16:12:21 crc kubenswrapper[5050]: I1123 16:12:21.030818 5050 generic.go:334] "Generic (PLEG): container finished" podID="f2532463-ea34-450a-9c35-337de82cdef5" containerID="b81fea2723f6f0fb471e7c6206c7cc5b1cfd453a9eead97ade91ac94b79c878e" exitCode=0 Nov 23 16:12:21 crc kubenswrapper[5050]: I1123 16:12:21.030894 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-79b55cc78f-gq4jd" event={"ID":"f2532463-ea34-450a-9c35-337de82cdef5","Type":"ContainerDied","Data":"b81fea2723f6f0fb471e7c6206c7cc5b1cfd453a9eead97ade91ac94b79c878e"} Nov 23 16:12:21 crc kubenswrapper[5050]: I1123 16:12:21.286347 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-79b55cc78f-gq4jd" Nov 23 16:12:21 crc kubenswrapper[5050]: I1123 16:12:21.467946 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f2532463-ea34-450a-9c35-337de82cdef5-dns-svc\") pod \"f2532463-ea34-450a-9c35-337de82cdef5\" (UID: \"f2532463-ea34-450a-9c35-337de82cdef5\") " Nov 23 16:12:21 crc kubenswrapper[5050]: I1123 16:12:21.468004 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bxrvr\" (UniqueName: \"kubernetes.io/projected/f2532463-ea34-450a-9c35-337de82cdef5-kube-api-access-bxrvr\") pod \"f2532463-ea34-450a-9c35-337de82cdef5\" (UID: \"f2532463-ea34-450a-9c35-337de82cdef5\") " Nov 23 16:12:21 crc kubenswrapper[5050]: I1123 16:12:21.468112 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f2532463-ea34-450a-9c35-337de82cdef5-config\") pod \"f2532463-ea34-450a-9c35-337de82cdef5\" (UID: \"f2532463-ea34-450a-9c35-337de82cdef5\") " Nov 23 16:12:21 crc kubenswrapper[5050]: I1123 16:12:21.468164 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f2532463-ea34-450a-9c35-337de82cdef5-ovsdbserver-nb\") pod \"f2532463-ea34-450a-9c35-337de82cdef5\" (UID: \"f2532463-ea34-450a-9c35-337de82cdef5\") " Nov 23 16:12:21 crc kubenswrapper[5050]: I1123 16:12:21.468227 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f2532463-ea34-450a-9c35-337de82cdef5-ovsdbserver-sb\") pod \"f2532463-ea34-450a-9c35-337de82cdef5\" (UID: \"f2532463-ea34-450a-9c35-337de82cdef5\") " Nov 23 16:12:21 crc kubenswrapper[5050]: I1123 16:12:21.475805 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f2532463-ea34-450a-9c35-337de82cdef5-kube-api-access-bxrvr" (OuterVolumeSpecName: "kube-api-access-bxrvr") pod "f2532463-ea34-450a-9c35-337de82cdef5" 
(UID: "f2532463-ea34-450a-9c35-337de82cdef5"). InnerVolumeSpecName "kube-api-access-bxrvr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 16:12:21 crc kubenswrapper[5050]: I1123 16:12:21.525603 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f2532463-ea34-450a-9c35-337de82cdef5-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "f2532463-ea34-450a-9c35-337de82cdef5" (UID: "f2532463-ea34-450a-9c35-337de82cdef5"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 16:12:21 crc kubenswrapper[5050]: I1123 16:12:21.532010 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f2532463-ea34-450a-9c35-337de82cdef5-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "f2532463-ea34-450a-9c35-337de82cdef5" (UID: "f2532463-ea34-450a-9c35-337de82cdef5"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 16:12:21 crc kubenswrapper[5050]: I1123 16:12:21.549637 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f2532463-ea34-450a-9c35-337de82cdef5-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "f2532463-ea34-450a-9c35-337de82cdef5" (UID: "f2532463-ea34-450a-9c35-337de82cdef5"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 16:12:21 crc kubenswrapper[5050]: I1123 16:12:21.553027 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f2532463-ea34-450a-9c35-337de82cdef5-config" (OuterVolumeSpecName: "config") pod "f2532463-ea34-450a-9c35-337de82cdef5" (UID: "f2532463-ea34-450a-9c35-337de82cdef5"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 16:12:21 crc kubenswrapper[5050]: I1123 16:12:21.571516 5050 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f2532463-ea34-450a-9c35-337de82cdef5-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 23 16:12:21 crc kubenswrapper[5050]: I1123 16:12:21.571595 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bxrvr\" (UniqueName: \"kubernetes.io/projected/f2532463-ea34-450a-9c35-337de82cdef5-kube-api-access-bxrvr\") on node \"crc\" DevicePath \"\"" Nov 23 16:12:21 crc kubenswrapper[5050]: I1123 16:12:21.571629 5050 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f2532463-ea34-450a-9c35-337de82cdef5-config\") on node \"crc\" DevicePath \"\"" Nov 23 16:12:21 crc kubenswrapper[5050]: I1123 16:12:21.571659 5050 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f2532463-ea34-450a-9c35-337de82cdef5-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 23 16:12:21 crc kubenswrapper[5050]: I1123 16:12:21.571684 5050 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f2532463-ea34-450a-9c35-337de82cdef5-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 23 16:12:22 crc kubenswrapper[5050]: I1123 16:12:22.050469 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-79b55cc78f-gq4jd" event={"ID":"f2532463-ea34-450a-9c35-337de82cdef5","Type":"ContainerDied","Data":"f7a6f7085558c6b05f0324c4751905ebcb9967b3ac10d8189093bd9d3beaa98d"} Nov 23 16:12:22 crc kubenswrapper[5050]: I1123 16:12:22.050564 5050 scope.go:117] "RemoveContainer" containerID="b81fea2723f6f0fb471e7c6206c7cc5b1cfd453a9eead97ade91ac94b79c878e" Nov 23 16:12:22 crc kubenswrapper[5050]: I1123 16:12:22.050617 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-79b55cc78f-gq4jd" Nov 23 16:12:22 crc kubenswrapper[5050]: I1123 16:12:22.090245 5050 scope.go:117] "RemoveContainer" containerID="380dc681924a7646a4d712b56c74e3d9b24f950ff21ab0650121b8d90e83673a" Nov 23 16:12:22 crc kubenswrapper[5050]: I1123 16:12:22.091808 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-79b55cc78f-gq4jd"] Nov 23 16:12:22 crc kubenswrapper[5050]: I1123 16:12:22.101833 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-79b55cc78f-gq4jd"] Nov 23 16:12:23 crc kubenswrapper[5050]: I1123 16:12:23.571257 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f2532463-ea34-450a-9c35-337de82cdef5" path="/var/lib/kubelet/pods/f2532463-ea34-450a-9c35-337de82cdef5/volumes" Nov 23 16:12:24 crc kubenswrapper[5050]: I1123 16:12:24.309709 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 23 16:12:24 crc kubenswrapper[5050]: I1123 16:12:24.309804 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 23 16:12:24 crc kubenswrapper[5050]: I1123 16:12:24.375958 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 23 16:12:24 crc kubenswrapper[5050]: I1123 16:12:24.383039 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 23 16:12:25 crc kubenswrapper[5050]: I1123 16:12:25.097083 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 23 16:12:25 crc kubenswrapper[5050]: I1123 16:12:25.097150 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 23 16:12:27 crc kubenswrapper[5050]: I1123 16:12:27.099089 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 23 16:12:27 crc kubenswrapper[5050]: I1123 16:12:27.126513 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 23 16:12:27 crc kubenswrapper[5050]: I1123 16:12:27.440328 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 23 16:12:27 crc kubenswrapper[5050]: I1123 16:12:27.440883 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 23 16:12:27 crc kubenswrapper[5050]: I1123 16:12:27.483493 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 23 16:12:27 crc kubenswrapper[5050]: I1123 16:12:27.490247 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 23 16:12:28 crc kubenswrapper[5050]: I1123 16:12:28.163768 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 23 16:12:28 crc kubenswrapper[5050]: I1123 16:12:28.164189 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 23 16:12:30 crc kubenswrapper[5050]: I1123 16:12:30.164400 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openstack/glance-default-internal-api-0" Nov 23 16:12:30 crc kubenswrapper[5050]: I1123 16:12:30.181920 5050 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 23 16:12:30 crc kubenswrapper[5050]: I1123 16:12:30.268582 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 23 16:12:39 crc kubenswrapper[5050]: I1123 16:12:39.092818 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-28bd-account-create-jrbzz"] Nov 23 16:12:39 crc kubenswrapper[5050]: E1123 16:12:39.097936 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f2532463-ea34-450a-9c35-337de82cdef5" containerName="init" Nov 23 16:12:39 crc kubenswrapper[5050]: I1123 16:12:39.097963 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="f2532463-ea34-450a-9c35-337de82cdef5" containerName="init" Nov 23 16:12:39 crc kubenswrapper[5050]: E1123 16:12:39.098012 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f2532463-ea34-450a-9c35-337de82cdef5" containerName="dnsmasq-dns" Nov 23 16:12:39 crc kubenswrapper[5050]: I1123 16:12:39.098023 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="f2532463-ea34-450a-9c35-337de82cdef5" containerName="dnsmasq-dns" Nov 23 16:12:39 crc kubenswrapper[5050]: I1123 16:12:39.098384 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="f2532463-ea34-450a-9c35-337de82cdef5" containerName="dnsmasq-dns" Nov 23 16:12:39 crc kubenswrapper[5050]: I1123 16:12:39.099316 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-28bd-account-create-jrbzz" Nov 23 16:12:39 crc kubenswrapper[5050]: I1123 16:12:39.101892 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-db-secret" Nov 23 16:12:39 crc kubenswrapper[5050]: I1123 16:12:39.103321 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-create-4rkt4"] Nov 23 16:12:39 crc kubenswrapper[5050]: I1123 16:12:39.105200 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-create-4rkt4" Nov 23 16:12:39 crc kubenswrapper[5050]: I1123 16:12:39.113503 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-4rkt4"] Nov 23 16:12:39 crc kubenswrapper[5050]: I1123 16:12:39.122887 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-28bd-account-create-jrbzz"] Nov 23 16:12:39 crc kubenswrapper[5050]: I1123 16:12:39.140305 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9qwjp\" (UniqueName: \"kubernetes.io/projected/6bfb09d3-730b-4a95-ac71-574889caabe0-kube-api-access-9qwjp\") pod \"placement-db-create-4rkt4\" (UID: \"6bfb09d3-730b-4a95-ac71-574889caabe0\") " pod="openstack/placement-db-create-4rkt4" Nov 23 16:12:39 crc kubenswrapper[5050]: I1123 16:12:39.140515 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6bfb09d3-730b-4a95-ac71-574889caabe0-operator-scripts\") pod \"placement-db-create-4rkt4\" (UID: \"6bfb09d3-730b-4a95-ac71-574889caabe0\") " pod="openstack/placement-db-create-4rkt4" Nov 23 16:12:39 crc kubenswrapper[5050]: I1123 16:12:39.140603 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d1d2c6c7-ea81-4348-a921-8ab8bf07bd55-operator-scripts\") pod \"placement-28bd-account-create-jrbzz\" (UID: \"d1d2c6c7-ea81-4348-a921-8ab8bf07bd55\") " pod="openstack/placement-28bd-account-create-jrbzz" Nov 23 16:12:39 crc kubenswrapper[5050]: I1123 16:12:39.140658 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nzkv4\" (UniqueName: \"kubernetes.io/projected/d1d2c6c7-ea81-4348-a921-8ab8bf07bd55-kube-api-access-nzkv4\") pod \"placement-28bd-account-create-jrbzz\" (UID: \"d1d2c6c7-ea81-4348-a921-8ab8bf07bd55\") " pod="openstack/placement-28bd-account-create-jrbzz" Nov 23 16:12:39 crc kubenswrapper[5050]: I1123 16:12:39.242815 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9qwjp\" (UniqueName: \"kubernetes.io/projected/6bfb09d3-730b-4a95-ac71-574889caabe0-kube-api-access-9qwjp\") pod \"placement-db-create-4rkt4\" (UID: \"6bfb09d3-730b-4a95-ac71-574889caabe0\") " pod="openstack/placement-db-create-4rkt4" Nov 23 16:12:39 crc kubenswrapper[5050]: I1123 16:12:39.242995 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6bfb09d3-730b-4a95-ac71-574889caabe0-operator-scripts\") pod \"placement-db-create-4rkt4\" (UID: \"6bfb09d3-730b-4a95-ac71-574889caabe0\") " pod="openstack/placement-db-create-4rkt4" Nov 23 16:12:39 crc kubenswrapper[5050]: I1123 16:12:39.243104 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d1d2c6c7-ea81-4348-a921-8ab8bf07bd55-operator-scripts\") pod \"placement-28bd-account-create-jrbzz\" (UID: \"d1d2c6c7-ea81-4348-a921-8ab8bf07bd55\") " pod="openstack/placement-28bd-account-create-jrbzz" Nov 23 16:12:39 crc kubenswrapper[5050]: I1123 16:12:39.243148 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nzkv4\" (UniqueName: \"kubernetes.io/projected/d1d2c6c7-ea81-4348-a921-8ab8bf07bd55-kube-api-access-nzkv4\") pod 
\"placement-28bd-account-create-jrbzz\" (UID: \"d1d2c6c7-ea81-4348-a921-8ab8bf07bd55\") " pod="openstack/placement-28bd-account-create-jrbzz" Nov 23 16:12:39 crc kubenswrapper[5050]: I1123 16:12:39.244718 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d1d2c6c7-ea81-4348-a921-8ab8bf07bd55-operator-scripts\") pod \"placement-28bd-account-create-jrbzz\" (UID: \"d1d2c6c7-ea81-4348-a921-8ab8bf07bd55\") " pod="openstack/placement-28bd-account-create-jrbzz" Nov 23 16:12:39 crc kubenswrapper[5050]: I1123 16:12:39.244880 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6bfb09d3-730b-4a95-ac71-574889caabe0-operator-scripts\") pod \"placement-db-create-4rkt4\" (UID: \"6bfb09d3-730b-4a95-ac71-574889caabe0\") " pod="openstack/placement-db-create-4rkt4" Nov 23 16:12:39 crc kubenswrapper[5050]: I1123 16:12:39.267189 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9qwjp\" (UniqueName: \"kubernetes.io/projected/6bfb09d3-730b-4a95-ac71-574889caabe0-kube-api-access-9qwjp\") pod \"placement-db-create-4rkt4\" (UID: \"6bfb09d3-730b-4a95-ac71-574889caabe0\") " pod="openstack/placement-db-create-4rkt4" Nov 23 16:12:39 crc kubenswrapper[5050]: I1123 16:12:39.271111 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nzkv4\" (UniqueName: \"kubernetes.io/projected/d1d2c6c7-ea81-4348-a921-8ab8bf07bd55-kube-api-access-nzkv4\") pod \"placement-28bd-account-create-jrbzz\" (UID: \"d1d2c6c7-ea81-4348-a921-8ab8bf07bd55\") " pod="openstack/placement-28bd-account-create-jrbzz" Nov 23 16:12:39 crc kubenswrapper[5050]: I1123 16:12:39.431432 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-28bd-account-create-jrbzz" Nov 23 16:12:39 crc kubenswrapper[5050]: I1123 16:12:39.439718 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-create-4rkt4" Nov 23 16:12:39 crc kubenswrapper[5050]: I1123 16:12:39.948371 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-28bd-account-create-jrbzz"] Nov 23 16:12:39 crc kubenswrapper[5050]: W1123 16:12:39.951786 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd1d2c6c7_ea81_4348_a921_8ab8bf07bd55.slice/crio-f8760ac78186a5eb21a9956a36c9f2df5791047e940c0da817f80a758eb46002 WatchSource:0}: Error finding container f8760ac78186a5eb21a9956a36c9f2df5791047e940c0da817f80a758eb46002: Status 404 returned error can't find the container with id f8760ac78186a5eb21a9956a36c9f2df5791047e940c0da817f80a758eb46002 Nov 23 16:12:40 crc kubenswrapper[5050]: I1123 16:12:40.063568 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-4rkt4"] Nov 23 16:12:40 crc kubenswrapper[5050]: W1123 16:12:40.076641 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6bfb09d3_730b_4a95_ac71_574889caabe0.slice/crio-7b90e66935d38aa08aae9d744e28b862786120f956bf993fe14823d532259e33 WatchSource:0}: Error finding container 7b90e66935d38aa08aae9d744e28b862786120f956bf993fe14823d532259e33: Status 404 returned error can't find the container with id 7b90e66935d38aa08aae9d744e28b862786120f956bf993fe14823d532259e33 Nov 23 16:12:40 crc kubenswrapper[5050]: I1123 16:12:40.313372 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-4rkt4" event={"ID":"6bfb09d3-730b-4a95-ac71-574889caabe0","Type":"ContainerStarted","Data":"c8104aa956841f96d01e75b04873b116aeed3e3eed953c633101027759d9fa54"} Nov 23 16:12:40 crc kubenswrapper[5050]: I1123 16:12:40.313840 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-4rkt4" event={"ID":"6bfb09d3-730b-4a95-ac71-574889caabe0","Type":"ContainerStarted","Data":"7b90e66935d38aa08aae9d744e28b862786120f956bf993fe14823d532259e33"} Nov 23 16:12:40 crc kubenswrapper[5050]: I1123 16:12:40.316680 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-28bd-account-create-jrbzz" event={"ID":"d1d2c6c7-ea81-4348-a921-8ab8bf07bd55","Type":"ContainerStarted","Data":"7c8fc80449ca0c19e8661ad6179aa994e213e0322e6fd99d9e8f23105750256b"} Nov 23 16:12:40 crc kubenswrapper[5050]: I1123 16:12:40.316715 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-28bd-account-create-jrbzz" event={"ID":"d1d2c6c7-ea81-4348-a921-8ab8bf07bd55","Type":"ContainerStarted","Data":"f8760ac78186a5eb21a9956a36c9f2df5791047e940c0da817f80a758eb46002"} Nov 23 16:12:40 crc kubenswrapper[5050]: I1123 16:12:40.333911 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-db-create-4rkt4" podStartSLOduration=1.3338866569999999 podStartE2EDuration="1.333886657s" podCreationTimestamp="2025-11-23 16:12:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 16:12:40.328708801 +0000 UTC m=+5455.495705286" watchObservedRunningTime="2025-11-23 16:12:40.333886657 +0000 UTC m=+5455.500883142" Nov 23 16:12:40 crc kubenswrapper[5050]: I1123 16:12:40.359861 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-28bd-account-create-jrbzz" podStartSLOduration=1.359829339 
podStartE2EDuration="1.359829339s" podCreationTimestamp="2025-11-23 16:12:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 16:12:40.350545707 +0000 UTC m=+5455.517542212" watchObservedRunningTime="2025-11-23 16:12:40.359829339 +0000 UTC m=+5455.526825824" Nov 23 16:12:41 crc kubenswrapper[5050]: I1123 16:12:41.329900 5050 generic.go:334] "Generic (PLEG): container finished" podID="6bfb09d3-730b-4a95-ac71-574889caabe0" containerID="c8104aa956841f96d01e75b04873b116aeed3e3eed953c633101027759d9fa54" exitCode=0 Nov 23 16:12:41 crc kubenswrapper[5050]: I1123 16:12:41.330023 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-4rkt4" event={"ID":"6bfb09d3-730b-4a95-ac71-574889caabe0","Type":"ContainerDied","Data":"c8104aa956841f96d01e75b04873b116aeed3e3eed953c633101027759d9fa54"} Nov 23 16:12:41 crc kubenswrapper[5050]: I1123 16:12:41.331409 5050 generic.go:334] "Generic (PLEG): container finished" podID="d1d2c6c7-ea81-4348-a921-8ab8bf07bd55" containerID="7c8fc80449ca0c19e8661ad6179aa994e213e0322e6fd99d9e8f23105750256b" exitCode=0 Nov 23 16:12:41 crc kubenswrapper[5050]: I1123 16:12:41.331489 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-28bd-account-create-jrbzz" event={"ID":"d1d2c6c7-ea81-4348-a921-8ab8bf07bd55","Type":"ContainerDied","Data":"7c8fc80449ca0c19e8661ad6179aa994e213e0322e6fd99d9e8f23105750256b"} Nov 23 16:12:42 crc kubenswrapper[5050]: I1123 16:12:42.884350 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-4rkt4" Nov 23 16:12:42 crc kubenswrapper[5050]: I1123 16:12:42.897827 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-28bd-account-create-jrbzz" Nov 23 16:12:43 crc kubenswrapper[5050]: I1123 16:12:43.041058 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6bfb09d3-730b-4a95-ac71-574889caabe0-operator-scripts\") pod \"6bfb09d3-730b-4a95-ac71-574889caabe0\" (UID: \"6bfb09d3-730b-4a95-ac71-574889caabe0\") " Nov 23 16:12:43 crc kubenswrapper[5050]: I1123 16:12:43.041279 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9qwjp\" (UniqueName: \"kubernetes.io/projected/6bfb09d3-730b-4a95-ac71-574889caabe0-kube-api-access-9qwjp\") pod \"6bfb09d3-730b-4a95-ac71-574889caabe0\" (UID: \"6bfb09d3-730b-4a95-ac71-574889caabe0\") " Nov 23 16:12:43 crc kubenswrapper[5050]: I1123 16:12:43.041412 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d1d2c6c7-ea81-4348-a921-8ab8bf07bd55-operator-scripts\") pod \"d1d2c6c7-ea81-4348-a921-8ab8bf07bd55\" (UID: \"d1d2c6c7-ea81-4348-a921-8ab8bf07bd55\") " Nov 23 16:12:43 crc kubenswrapper[5050]: I1123 16:12:43.041530 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nzkv4\" (UniqueName: \"kubernetes.io/projected/d1d2c6c7-ea81-4348-a921-8ab8bf07bd55-kube-api-access-nzkv4\") pod \"d1d2c6c7-ea81-4348-a921-8ab8bf07bd55\" (UID: \"d1d2c6c7-ea81-4348-a921-8ab8bf07bd55\") " Nov 23 16:12:43 crc kubenswrapper[5050]: I1123 16:12:43.042606 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d1d2c6c7-ea81-4348-a921-8ab8bf07bd55-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "d1d2c6c7-ea81-4348-a921-8ab8bf07bd55" (UID: "d1d2c6c7-ea81-4348-a921-8ab8bf07bd55"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 16:12:43 crc kubenswrapper[5050]: I1123 16:12:43.043017 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6bfb09d3-730b-4a95-ac71-574889caabe0-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "6bfb09d3-730b-4a95-ac71-574889caabe0" (UID: "6bfb09d3-730b-4a95-ac71-574889caabe0"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 16:12:43 crc kubenswrapper[5050]: I1123 16:12:43.048676 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d1d2c6c7-ea81-4348-a921-8ab8bf07bd55-kube-api-access-nzkv4" (OuterVolumeSpecName: "kube-api-access-nzkv4") pod "d1d2c6c7-ea81-4348-a921-8ab8bf07bd55" (UID: "d1d2c6c7-ea81-4348-a921-8ab8bf07bd55"). InnerVolumeSpecName "kube-api-access-nzkv4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 16:12:43 crc kubenswrapper[5050]: I1123 16:12:43.049381 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6bfb09d3-730b-4a95-ac71-574889caabe0-kube-api-access-9qwjp" (OuterVolumeSpecName: "kube-api-access-9qwjp") pod "6bfb09d3-730b-4a95-ac71-574889caabe0" (UID: "6bfb09d3-730b-4a95-ac71-574889caabe0"). InnerVolumeSpecName "kube-api-access-9qwjp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 16:12:43 crc kubenswrapper[5050]: I1123 16:12:43.144839 5050 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6bfb09d3-730b-4a95-ac71-574889caabe0-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 23 16:12:43 crc kubenswrapper[5050]: I1123 16:12:43.144894 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9qwjp\" (UniqueName: \"kubernetes.io/projected/6bfb09d3-730b-4a95-ac71-574889caabe0-kube-api-access-9qwjp\") on node \"crc\" DevicePath \"\"" Nov 23 16:12:43 crc kubenswrapper[5050]: I1123 16:12:43.144917 5050 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d1d2c6c7-ea81-4348-a921-8ab8bf07bd55-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 23 16:12:43 crc kubenswrapper[5050]: I1123 16:12:43.144937 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nzkv4\" (UniqueName: \"kubernetes.io/projected/d1d2c6c7-ea81-4348-a921-8ab8bf07bd55-kube-api-access-nzkv4\") on node \"crc\" DevicePath \"\"" Nov 23 16:12:43 crc kubenswrapper[5050]: I1123 16:12:43.361371 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-4rkt4" event={"ID":"6bfb09d3-730b-4a95-ac71-574889caabe0","Type":"ContainerDied","Data":"7b90e66935d38aa08aae9d744e28b862786120f956bf993fe14823d532259e33"} Nov 23 16:12:43 crc kubenswrapper[5050]: I1123 16:12:43.361491 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7b90e66935d38aa08aae9d744e28b862786120f956bf993fe14823d532259e33" Nov 23 16:12:43 crc kubenswrapper[5050]: I1123 16:12:43.361398 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-4rkt4" Nov 23 16:12:43 crc kubenswrapper[5050]: I1123 16:12:43.374044 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-28bd-account-create-jrbzz" event={"ID":"d1d2c6c7-ea81-4348-a921-8ab8bf07bd55","Type":"ContainerDied","Data":"f8760ac78186a5eb21a9956a36c9f2df5791047e940c0da817f80a758eb46002"} Nov 23 16:12:43 crc kubenswrapper[5050]: I1123 16:12:43.374106 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-28bd-account-create-jrbzz" Nov 23 16:12:43 crc kubenswrapper[5050]: I1123 16:12:43.374131 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f8760ac78186a5eb21a9956a36c9f2df5791047e940c0da817f80a758eb46002" Nov 23 16:12:44 crc kubenswrapper[5050]: I1123 16:12:44.467579 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6cf4bbfb7f-v86xr"] Nov 23 16:12:44 crc kubenswrapper[5050]: E1123 16:12:44.468660 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6bfb09d3-730b-4a95-ac71-574889caabe0" containerName="mariadb-database-create" Nov 23 16:12:44 crc kubenswrapper[5050]: I1123 16:12:44.468690 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="6bfb09d3-730b-4a95-ac71-574889caabe0" containerName="mariadb-database-create" Nov 23 16:12:44 crc kubenswrapper[5050]: E1123 16:12:44.468734 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d1d2c6c7-ea81-4348-a921-8ab8bf07bd55" containerName="mariadb-account-create" Nov 23 16:12:44 crc kubenswrapper[5050]: I1123 16:12:44.468743 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="d1d2c6c7-ea81-4348-a921-8ab8bf07bd55" containerName="mariadb-account-create" Nov 23 16:12:44 crc kubenswrapper[5050]: I1123 16:12:44.469004 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="d1d2c6c7-ea81-4348-a921-8ab8bf07bd55" containerName="mariadb-account-create" Nov 23 16:12:44 crc kubenswrapper[5050]: I1123 16:12:44.469065 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="6bfb09d3-730b-4a95-ac71-574889caabe0" containerName="mariadb-database-create" Nov 23 16:12:44 crc kubenswrapper[5050]: I1123 16:12:44.471500 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6cf4bbfb7f-v86xr" Nov 23 16:12:44 crc kubenswrapper[5050]: I1123 16:12:44.481059 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/515fa021-1f02-423a-91df-2495dbfb5e2f-config\") pod \"dnsmasq-dns-6cf4bbfb7f-v86xr\" (UID: \"515fa021-1f02-423a-91df-2495dbfb5e2f\") " pod="openstack/dnsmasq-dns-6cf4bbfb7f-v86xr" Nov 23 16:12:44 crc kubenswrapper[5050]: I1123 16:12:44.481132 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-87ns8\" (UniqueName: \"kubernetes.io/projected/515fa021-1f02-423a-91df-2495dbfb5e2f-kube-api-access-87ns8\") pod \"dnsmasq-dns-6cf4bbfb7f-v86xr\" (UID: \"515fa021-1f02-423a-91df-2495dbfb5e2f\") " pod="openstack/dnsmasq-dns-6cf4bbfb7f-v86xr" Nov 23 16:12:44 crc kubenswrapper[5050]: I1123 16:12:44.481161 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/515fa021-1f02-423a-91df-2495dbfb5e2f-dns-svc\") pod \"dnsmasq-dns-6cf4bbfb7f-v86xr\" (UID: \"515fa021-1f02-423a-91df-2495dbfb5e2f\") " pod="openstack/dnsmasq-dns-6cf4bbfb7f-v86xr" Nov 23 16:12:44 crc kubenswrapper[5050]: I1123 16:12:44.481210 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/515fa021-1f02-423a-91df-2495dbfb5e2f-ovsdbserver-nb\") pod \"dnsmasq-dns-6cf4bbfb7f-v86xr\" (UID: \"515fa021-1f02-423a-91df-2495dbfb5e2f\") " pod="openstack/dnsmasq-dns-6cf4bbfb7f-v86xr" Nov 23 16:12:44 crc kubenswrapper[5050]: I1123 16:12:44.481265 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/515fa021-1f02-423a-91df-2495dbfb5e2f-ovsdbserver-sb\") pod \"dnsmasq-dns-6cf4bbfb7f-v86xr\" (UID: \"515fa021-1f02-423a-91df-2495dbfb5e2f\") " pod="openstack/dnsmasq-dns-6cf4bbfb7f-v86xr" Nov 23 16:12:44 crc kubenswrapper[5050]: I1123 16:12:44.515044 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6cf4bbfb7f-v86xr"] Nov 23 16:12:44 crc kubenswrapper[5050]: I1123 16:12:44.539820 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-sync-fz8ng"] Nov 23 16:12:44 crc kubenswrapper[5050]: I1123 16:12:44.541950 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-sync-fz8ng" Nov 23 16:12:44 crc kubenswrapper[5050]: I1123 16:12:44.545612 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-pdwn5" Nov 23 16:12:44 crc kubenswrapper[5050]: I1123 16:12:44.545774 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Nov 23 16:12:44 crc kubenswrapper[5050]: I1123 16:12:44.546155 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Nov 23 16:12:44 crc kubenswrapper[5050]: I1123 16:12:44.580968 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-fz8ng"] Nov 23 16:12:44 crc kubenswrapper[5050]: I1123 16:12:44.583724 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/515fa021-1f02-423a-91df-2495dbfb5e2f-dns-svc\") pod \"dnsmasq-dns-6cf4bbfb7f-v86xr\" (UID: \"515fa021-1f02-423a-91df-2495dbfb5e2f\") " pod="openstack/dnsmasq-dns-6cf4bbfb7f-v86xr" Nov 23 16:12:44 crc kubenswrapper[5050]: I1123 16:12:44.583805 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/248de275-6c68-4a0d-934d-af45b35a9320-logs\") pod \"placement-db-sync-fz8ng\" (UID: \"248de275-6c68-4a0d-934d-af45b35a9320\") " pod="openstack/placement-db-sync-fz8ng" Nov 23 16:12:44 crc kubenswrapper[5050]: I1123 16:12:44.583844 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/515fa021-1f02-423a-91df-2495dbfb5e2f-ovsdbserver-nb\") pod \"dnsmasq-dns-6cf4bbfb7f-v86xr\" (UID: \"515fa021-1f02-423a-91df-2495dbfb5e2f\") " pod="openstack/dnsmasq-dns-6cf4bbfb7f-v86xr" Nov 23 16:12:44 crc kubenswrapper[5050]: I1123 16:12:44.583903 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/248de275-6c68-4a0d-934d-af45b35a9320-config-data\") pod \"placement-db-sync-fz8ng\" (UID: \"248de275-6c68-4a0d-934d-af45b35a9320\") " pod="openstack/placement-db-sync-fz8ng" Nov 23 16:12:44 crc kubenswrapper[5050]: I1123 16:12:44.583964 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/248de275-6c68-4a0d-934d-af45b35a9320-combined-ca-bundle\") pod \"placement-db-sync-fz8ng\" (UID: \"248de275-6c68-4a0d-934d-af45b35a9320\") " pod="openstack/placement-db-sync-fz8ng" Nov 23 16:12:44 crc kubenswrapper[5050]: I1123 16:12:44.584006 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/515fa021-1f02-423a-91df-2495dbfb5e2f-ovsdbserver-sb\") pod \"dnsmasq-dns-6cf4bbfb7f-v86xr\" (UID: \"515fa021-1f02-423a-91df-2495dbfb5e2f\") " pod="openstack/dnsmasq-dns-6cf4bbfb7f-v86xr" Nov 23 16:12:44 crc kubenswrapper[5050]: I1123 16:12:44.584087 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rzb58\" (UniqueName: \"kubernetes.io/projected/248de275-6c68-4a0d-934d-af45b35a9320-kube-api-access-rzb58\") pod \"placement-db-sync-fz8ng\" (UID: \"248de275-6c68-4a0d-934d-af45b35a9320\") " pod="openstack/placement-db-sync-fz8ng" Nov 23 16:12:44 crc kubenswrapper[5050]: I1123 16:12:44.584135 5050 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/515fa021-1f02-423a-91df-2495dbfb5e2f-config\") pod \"dnsmasq-dns-6cf4bbfb7f-v86xr\" (UID: \"515fa021-1f02-423a-91df-2495dbfb5e2f\") " pod="openstack/dnsmasq-dns-6cf4bbfb7f-v86xr" Nov 23 16:12:44 crc kubenswrapper[5050]: I1123 16:12:44.584158 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/248de275-6c68-4a0d-934d-af45b35a9320-scripts\") pod \"placement-db-sync-fz8ng\" (UID: \"248de275-6c68-4a0d-934d-af45b35a9320\") " pod="openstack/placement-db-sync-fz8ng" Nov 23 16:12:44 crc kubenswrapper[5050]: I1123 16:12:44.584186 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-87ns8\" (UniqueName: \"kubernetes.io/projected/515fa021-1f02-423a-91df-2495dbfb5e2f-kube-api-access-87ns8\") pod \"dnsmasq-dns-6cf4bbfb7f-v86xr\" (UID: \"515fa021-1f02-423a-91df-2495dbfb5e2f\") " pod="openstack/dnsmasq-dns-6cf4bbfb7f-v86xr" Nov 23 16:12:44 crc kubenswrapper[5050]: I1123 16:12:44.586476 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/515fa021-1f02-423a-91df-2495dbfb5e2f-config\") pod \"dnsmasq-dns-6cf4bbfb7f-v86xr\" (UID: \"515fa021-1f02-423a-91df-2495dbfb5e2f\") " pod="openstack/dnsmasq-dns-6cf4bbfb7f-v86xr" Nov 23 16:12:44 crc kubenswrapper[5050]: I1123 16:12:44.587099 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/515fa021-1f02-423a-91df-2495dbfb5e2f-ovsdbserver-nb\") pod \"dnsmasq-dns-6cf4bbfb7f-v86xr\" (UID: \"515fa021-1f02-423a-91df-2495dbfb5e2f\") " pod="openstack/dnsmasq-dns-6cf4bbfb7f-v86xr" Nov 23 16:12:44 crc kubenswrapper[5050]: I1123 16:12:44.587126 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/515fa021-1f02-423a-91df-2495dbfb5e2f-dns-svc\") pod \"dnsmasq-dns-6cf4bbfb7f-v86xr\" (UID: \"515fa021-1f02-423a-91df-2495dbfb5e2f\") " pod="openstack/dnsmasq-dns-6cf4bbfb7f-v86xr" Nov 23 16:12:44 crc kubenswrapper[5050]: I1123 16:12:44.587587 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/515fa021-1f02-423a-91df-2495dbfb5e2f-ovsdbserver-sb\") pod \"dnsmasq-dns-6cf4bbfb7f-v86xr\" (UID: \"515fa021-1f02-423a-91df-2495dbfb5e2f\") " pod="openstack/dnsmasq-dns-6cf4bbfb7f-v86xr" Nov 23 16:12:44 crc kubenswrapper[5050]: I1123 16:12:44.624694 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-87ns8\" (UniqueName: \"kubernetes.io/projected/515fa021-1f02-423a-91df-2495dbfb5e2f-kube-api-access-87ns8\") pod \"dnsmasq-dns-6cf4bbfb7f-v86xr\" (UID: \"515fa021-1f02-423a-91df-2495dbfb5e2f\") " pod="openstack/dnsmasq-dns-6cf4bbfb7f-v86xr" Nov 23 16:12:44 crc kubenswrapper[5050]: I1123 16:12:44.685489 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/248de275-6c68-4a0d-934d-af45b35a9320-combined-ca-bundle\") pod \"placement-db-sync-fz8ng\" (UID: \"248de275-6c68-4a0d-934d-af45b35a9320\") " pod="openstack/placement-db-sync-fz8ng" Nov 23 16:12:44 crc kubenswrapper[5050]: I1123 16:12:44.685572 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rzb58\" (UniqueName: 
\"kubernetes.io/projected/248de275-6c68-4a0d-934d-af45b35a9320-kube-api-access-rzb58\") pod \"placement-db-sync-fz8ng\" (UID: \"248de275-6c68-4a0d-934d-af45b35a9320\") " pod="openstack/placement-db-sync-fz8ng" Nov 23 16:12:44 crc kubenswrapper[5050]: I1123 16:12:44.685612 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/248de275-6c68-4a0d-934d-af45b35a9320-scripts\") pod \"placement-db-sync-fz8ng\" (UID: \"248de275-6c68-4a0d-934d-af45b35a9320\") " pod="openstack/placement-db-sync-fz8ng" Nov 23 16:12:44 crc kubenswrapper[5050]: I1123 16:12:44.685653 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/248de275-6c68-4a0d-934d-af45b35a9320-logs\") pod \"placement-db-sync-fz8ng\" (UID: \"248de275-6c68-4a0d-934d-af45b35a9320\") " pod="openstack/placement-db-sync-fz8ng" Nov 23 16:12:44 crc kubenswrapper[5050]: I1123 16:12:44.685685 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/248de275-6c68-4a0d-934d-af45b35a9320-config-data\") pod \"placement-db-sync-fz8ng\" (UID: \"248de275-6c68-4a0d-934d-af45b35a9320\") " pod="openstack/placement-db-sync-fz8ng" Nov 23 16:12:44 crc kubenswrapper[5050]: I1123 16:12:44.687032 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/248de275-6c68-4a0d-934d-af45b35a9320-logs\") pod \"placement-db-sync-fz8ng\" (UID: \"248de275-6c68-4a0d-934d-af45b35a9320\") " pod="openstack/placement-db-sync-fz8ng" Nov 23 16:12:44 crc kubenswrapper[5050]: I1123 16:12:44.691066 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/248de275-6c68-4a0d-934d-af45b35a9320-scripts\") pod \"placement-db-sync-fz8ng\" (UID: \"248de275-6c68-4a0d-934d-af45b35a9320\") " pod="openstack/placement-db-sync-fz8ng" Nov 23 16:12:44 crc kubenswrapper[5050]: I1123 16:12:44.692953 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/248de275-6c68-4a0d-934d-af45b35a9320-combined-ca-bundle\") pod \"placement-db-sync-fz8ng\" (UID: \"248de275-6c68-4a0d-934d-af45b35a9320\") " pod="openstack/placement-db-sync-fz8ng" Nov 23 16:12:44 crc kubenswrapper[5050]: I1123 16:12:44.702369 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/248de275-6c68-4a0d-934d-af45b35a9320-config-data\") pod \"placement-db-sync-fz8ng\" (UID: \"248de275-6c68-4a0d-934d-af45b35a9320\") " pod="openstack/placement-db-sync-fz8ng" Nov 23 16:12:44 crc kubenswrapper[5050]: I1123 16:12:44.713236 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rzb58\" (UniqueName: \"kubernetes.io/projected/248de275-6c68-4a0d-934d-af45b35a9320-kube-api-access-rzb58\") pod \"placement-db-sync-fz8ng\" (UID: \"248de275-6c68-4a0d-934d-af45b35a9320\") " pod="openstack/placement-db-sync-fz8ng" Nov 23 16:12:44 crc kubenswrapper[5050]: I1123 16:12:44.816132 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6cf4bbfb7f-v86xr" Nov 23 16:12:44 crc kubenswrapper[5050]: I1123 16:12:44.865039 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-sync-fz8ng" Nov 23 16:12:45 crc kubenswrapper[5050]: I1123 16:12:45.256866 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6cf4bbfb7f-v86xr"] Nov 23 16:12:45 crc kubenswrapper[5050]: I1123 16:12:45.396226 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6cf4bbfb7f-v86xr" event={"ID":"515fa021-1f02-423a-91df-2495dbfb5e2f","Type":"ContainerStarted","Data":"bf27b8ea16a2b862acd3f23375157d410876eb2bc60b1e08c5596b25645e390c"} Nov 23 16:12:45 crc kubenswrapper[5050]: I1123 16:12:45.613695 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-fz8ng"] Nov 23 16:12:46 crc kubenswrapper[5050]: I1123 16:12:46.430272 5050 generic.go:334] "Generic (PLEG): container finished" podID="515fa021-1f02-423a-91df-2495dbfb5e2f" containerID="0ed09b738c44845aff44e59e2d6691123ae25253b595f9203d65428d4580cdea" exitCode=0 Nov 23 16:12:46 crc kubenswrapper[5050]: I1123 16:12:46.430398 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6cf4bbfb7f-v86xr" event={"ID":"515fa021-1f02-423a-91df-2495dbfb5e2f","Type":"ContainerDied","Data":"0ed09b738c44845aff44e59e2d6691123ae25253b595f9203d65428d4580cdea"} Nov 23 16:12:46 crc kubenswrapper[5050]: I1123 16:12:46.437219 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-fz8ng" event={"ID":"248de275-6c68-4a0d-934d-af45b35a9320","Type":"ContainerStarted","Data":"43c8751ce43e53af7dc46687f352a575aa9b444ef0876b79bdf802d51b7ea0d1"} Nov 23 16:12:46 crc kubenswrapper[5050]: I1123 16:12:46.437280 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-fz8ng" event={"ID":"248de275-6c68-4a0d-934d-af45b35a9320","Type":"ContainerStarted","Data":"1c15fac8622e9273e0e0cadb082f0865631b7ca9a8cece8ce3e9b965e38f522e"} Nov 23 16:12:46 crc kubenswrapper[5050]: I1123 16:12:46.498180 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-db-sync-fz8ng" podStartSLOduration=2.498155293 podStartE2EDuration="2.498155293s" podCreationTimestamp="2025-11-23 16:12:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 16:12:46.496663171 +0000 UTC m=+5461.663659666" watchObservedRunningTime="2025-11-23 16:12:46.498155293 +0000 UTC m=+5461.665151788" Nov 23 16:12:47 crc kubenswrapper[5050]: I1123 16:12:47.456852 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6cf4bbfb7f-v86xr" event={"ID":"515fa021-1f02-423a-91df-2495dbfb5e2f","Type":"ContainerStarted","Data":"22ee77d4e7cfee7671ec8e47bda4aefe5c23243a37db03375ff410a00cea304e"} Nov 23 16:12:47 crc kubenswrapper[5050]: I1123 16:12:47.458338 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6cf4bbfb7f-v86xr" Nov 23 16:12:47 crc kubenswrapper[5050]: I1123 16:12:47.461731 5050 generic.go:334] "Generic (PLEG): container finished" podID="248de275-6c68-4a0d-934d-af45b35a9320" containerID="43c8751ce43e53af7dc46687f352a575aa9b444ef0876b79bdf802d51b7ea0d1" exitCode=0 Nov 23 16:12:47 crc kubenswrapper[5050]: I1123 16:12:47.461817 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-fz8ng" event={"ID":"248de275-6c68-4a0d-934d-af45b35a9320","Type":"ContainerDied","Data":"43c8751ce43e53af7dc46687f352a575aa9b444ef0876b79bdf802d51b7ea0d1"} Nov 23 16:12:47 crc kubenswrapper[5050]: 
I1123 16:12:47.508779 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6cf4bbfb7f-v86xr" podStartSLOduration=3.508747532 podStartE2EDuration="3.508747532s" podCreationTimestamp="2025-11-23 16:12:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 16:12:47.498486263 +0000 UTC m=+5462.665482758" watchObservedRunningTime="2025-11-23 16:12:47.508747532 +0000 UTC m=+5462.675744047" Nov 23 16:12:48 crc kubenswrapper[5050]: I1123 16:12:48.895807 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-fz8ng" Nov 23 16:12:48 crc kubenswrapper[5050]: I1123 16:12:48.987583 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rzb58\" (UniqueName: \"kubernetes.io/projected/248de275-6c68-4a0d-934d-af45b35a9320-kube-api-access-rzb58\") pod \"248de275-6c68-4a0d-934d-af45b35a9320\" (UID: \"248de275-6c68-4a0d-934d-af45b35a9320\") " Nov 23 16:12:48 crc kubenswrapper[5050]: I1123 16:12:48.987780 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/248de275-6c68-4a0d-934d-af45b35a9320-combined-ca-bundle\") pod \"248de275-6c68-4a0d-934d-af45b35a9320\" (UID: \"248de275-6c68-4a0d-934d-af45b35a9320\") " Nov 23 16:12:48 crc kubenswrapper[5050]: I1123 16:12:48.987928 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/248de275-6c68-4a0d-934d-af45b35a9320-logs\") pod \"248de275-6c68-4a0d-934d-af45b35a9320\" (UID: \"248de275-6c68-4a0d-934d-af45b35a9320\") " Nov 23 16:12:48 crc kubenswrapper[5050]: I1123 16:12:48.987955 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/248de275-6c68-4a0d-934d-af45b35a9320-scripts\") pod \"248de275-6c68-4a0d-934d-af45b35a9320\" (UID: \"248de275-6c68-4a0d-934d-af45b35a9320\") " Nov 23 16:12:48 crc kubenswrapper[5050]: I1123 16:12:48.988147 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/248de275-6c68-4a0d-934d-af45b35a9320-config-data\") pod \"248de275-6c68-4a0d-934d-af45b35a9320\" (UID: \"248de275-6c68-4a0d-934d-af45b35a9320\") " Nov 23 16:12:48 crc kubenswrapper[5050]: I1123 16:12:48.988915 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/248de275-6c68-4a0d-934d-af45b35a9320-logs" (OuterVolumeSpecName: "logs") pod "248de275-6c68-4a0d-934d-af45b35a9320" (UID: "248de275-6c68-4a0d-934d-af45b35a9320"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 16:12:48 crc kubenswrapper[5050]: I1123 16:12:48.996541 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/248de275-6c68-4a0d-934d-af45b35a9320-scripts" (OuterVolumeSpecName: "scripts") pod "248de275-6c68-4a0d-934d-af45b35a9320" (UID: "248de275-6c68-4a0d-934d-af45b35a9320"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:12:48 crc kubenswrapper[5050]: I1123 16:12:48.996598 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/248de275-6c68-4a0d-934d-af45b35a9320-kube-api-access-rzb58" (OuterVolumeSpecName: "kube-api-access-rzb58") pod "248de275-6c68-4a0d-934d-af45b35a9320" (UID: "248de275-6c68-4a0d-934d-af45b35a9320"). InnerVolumeSpecName "kube-api-access-rzb58". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 16:12:49 crc kubenswrapper[5050]: I1123 16:12:49.016747 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/248de275-6c68-4a0d-934d-af45b35a9320-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "248de275-6c68-4a0d-934d-af45b35a9320" (UID: "248de275-6c68-4a0d-934d-af45b35a9320"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:12:49 crc kubenswrapper[5050]: I1123 16:12:49.037276 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/248de275-6c68-4a0d-934d-af45b35a9320-config-data" (OuterVolumeSpecName: "config-data") pod "248de275-6c68-4a0d-934d-af45b35a9320" (UID: "248de275-6c68-4a0d-934d-af45b35a9320"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:12:49 crc kubenswrapper[5050]: I1123 16:12:49.092280 5050 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/248de275-6c68-4a0d-934d-af45b35a9320-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 16:12:49 crc kubenswrapper[5050]: I1123 16:12:49.092352 5050 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/248de275-6c68-4a0d-934d-af45b35a9320-logs\") on node \"crc\" DevicePath \"\"" Nov 23 16:12:49 crc kubenswrapper[5050]: I1123 16:12:49.092374 5050 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/248de275-6c68-4a0d-934d-af45b35a9320-scripts\") on node \"crc\" DevicePath \"\"" Nov 23 16:12:49 crc kubenswrapper[5050]: I1123 16:12:49.092393 5050 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/248de275-6c68-4a0d-934d-af45b35a9320-config-data\") on node \"crc\" DevicePath \"\"" Nov 23 16:12:49 crc kubenswrapper[5050]: I1123 16:12:49.092414 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rzb58\" (UniqueName: \"kubernetes.io/projected/248de275-6c68-4a0d-934d-af45b35a9320-kube-api-access-rzb58\") on node \"crc\" DevicePath \"\"" Nov 23 16:12:49 crc kubenswrapper[5050]: I1123 16:12:49.494758 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-sync-fz8ng" Nov 23 16:12:49 crc kubenswrapper[5050]: I1123 16:12:49.494759 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-fz8ng" event={"ID":"248de275-6c68-4a0d-934d-af45b35a9320","Type":"ContainerDied","Data":"1c15fac8622e9273e0e0cadb082f0865631b7ca9a8cece8ce3e9b965e38f522e"} Nov 23 16:12:49 crc kubenswrapper[5050]: I1123 16:12:49.494959 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1c15fac8622e9273e0e0cadb082f0865631b7ca9a8cece8ce3e9b965e38f522e" Nov 23 16:12:50 crc kubenswrapper[5050]: I1123 16:12:50.026857 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-678f8c5968-b8ddf"] Nov 23 16:12:50 crc kubenswrapper[5050]: E1123 16:12:50.027876 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="248de275-6c68-4a0d-934d-af45b35a9320" containerName="placement-db-sync" Nov 23 16:12:50 crc kubenswrapper[5050]: I1123 16:12:50.027900 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="248de275-6c68-4a0d-934d-af45b35a9320" containerName="placement-db-sync" Nov 23 16:12:50 crc kubenswrapper[5050]: I1123 16:12:50.028160 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="248de275-6c68-4a0d-934d-af45b35a9320" containerName="placement-db-sync" Nov 23 16:12:50 crc kubenswrapper[5050]: I1123 16:12:50.029773 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-678f8c5968-b8ddf" Nov 23 16:12:50 crc kubenswrapper[5050]: I1123 16:12:50.032743 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Nov 23 16:12:50 crc kubenswrapper[5050]: I1123 16:12:50.033200 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-pdwn5" Nov 23 16:12:50 crc kubenswrapper[5050]: I1123 16:12:50.033547 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Nov 23 16:12:50 crc kubenswrapper[5050]: I1123 16:12:50.071294 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-678f8c5968-b8ddf"] Nov 23 16:12:50 crc kubenswrapper[5050]: I1123 16:12:50.111850 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/70eba0ae-5c52-4839-a886-3d2a8d989ab5-logs\") pod \"placement-678f8c5968-b8ddf\" (UID: \"70eba0ae-5c52-4839-a886-3d2a8d989ab5\") " pod="openstack/placement-678f8c5968-b8ddf" Nov 23 16:12:50 crc kubenswrapper[5050]: I1123 16:12:50.111903 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/70eba0ae-5c52-4839-a886-3d2a8d989ab5-scripts\") pod \"placement-678f8c5968-b8ddf\" (UID: \"70eba0ae-5c52-4839-a886-3d2a8d989ab5\") " pod="openstack/placement-678f8c5968-b8ddf" Nov 23 16:12:50 crc kubenswrapper[5050]: I1123 16:12:50.111973 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/70eba0ae-5c52-4839-a886-3d2a8d989ab5-config-data\") pod \"placement-678f8c5968-b8ddf\" (UID: \"70eba0ae-5c52-4839-a886-3d2a8d989ab5\") " pod="openstack/placement-678f8c5968-b8ddf" Nov 23 16:12:50 crc kubenswrapper[5050]: I1123 16:12:50.112030 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/70eba0ae-5c52-4839-a886-3d2a8d989ab5-combined-ca-bundle\") pod \"placement-678f8c5968-b8ddf\" (UID: \"70eba0ae-5c52-4839-a886-3d2a8d989ab5\") " pod="openstack/placement-678f8c5968-b8ddf" Nov 23 16:12:50 crc kubenswrapper[5050]: I1123 16:12:50.112051 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4jktw\" (UniqueName: \"kubernetes.io/projected/70eba0ae-5c52-4839-a886-3d2a8d989ab5-kube-api-access-4jktw\") pod \"placement-678f8c5968-b8ddf\" (UID: \"70eba0ae-5c52-4839-a886-3d2a8d989ab5\") " pod="openstack/placement-678f8c5968-b8ddf" Nov 23 16:12:50 crc kubenswrapper[5050]: I1123 16:12:50.213202 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/70eba0ae-5c52-4839-a886-3d2a8d989ab5-logs\") pod \"placement-678f8c5968-b8ddf\" (UID: \"70eba0ae-5c52-4839-a886-3d2a8d989ab5\") " pod="openstack/placement-678f8c5968-b8ddf" Nov 23 16:12:50 crc kubenswrapper[5050]: I1123 16:12:50.213582 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/70eba0ae-5c52-4839-a886-3d2a8d989ab5-scripts\") pod \"placement-678f8c5968-b8ddf\" (UID: \"70eba0ae-5c52-4839-a886-3d2a8d989ab5\") " pod="openstack/placement-678f8c5968-b8ddf" Nov 23 16:12:50 crc kubenswrapper[5050]: I1123 16:12:50.213696 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/70eba0ae-5c52-4839-a886-3d2a8d989ab5-config-data\") pod \"placement-678f8c5968-b8ddf\" (UID: \"70eba0ae-5c52-4839-a886-3d2a8d989ab5\") " pod="openstack/placement-678f8c5968-b8ddf" Nov 23 16:12:50 crc kubenswrapper[5050]: I1123 16:12:50.213811 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/70eba0ae-5c52-4839-a886-3d2a8d989ab5-combined-ca-bundle\") pod \"placement-678f8c5968-b8ddf\" (UID: \"70eba0ae-5c52-4839-a886-3d2a8d989ab5\") " pod="openstack/placement-678f8c5968-b8ddf" Nov 23 16:12:50 crc kubenswrapper[5050]: I1123 16:12:50.213895 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4jktw\" (UniqueName: \"kubernetes.io/projected/70eba0ae-5c52-4839-a886-3d2a8d989ab5-kube-api-access-4jktw\") pod \"placement-678f8c5968-b8ddf\" (UID: \"70eba0ae-5c52-4839-a886-3d2a8d989ab5\") " pod="openstack/placement-678f8c5968-b8ddf" Nov 23 16:12:50 crc kubenswrapper[5050]: I1123 16:12:50.213992 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/70eba0ae-5c52-4839-a886-3d2a8d989ab5-logs\") pod \"placement-678f8c5968-b8ddf\" (UID: \"70eba0ae-5c52-4839-a886-3d2a8d989ab5\") " pod="openstack/placement-678f8c5968-b8ddf" Nov 23 16:12:50 crc kubenswrapper[5050]: I1123 16:12:50.220427 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/70eba0ae-5c52-4839-a886-3d2a8d989ab5-combined-ca-bundle\") pod \"placement-678f8c5968-b8ddf\" (UID: \"70eba0ae-5c52-4839-a886-3d2a8d989ab5\") " pod="openstack/placement-678f8c5968-b8ddf" Nov 23 16:12:50 crc kubenswrapper[5050]: I1123 16:12:50.220677 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/70eba0ae-5c52-4839-a886-3d2a8d989ab5-scripts\") pod 
\"placement-678f8c5968-b8ddf\" (UID: \"70eba0ae-5c52-4839-a886-3d2a8d989ab5\") " pod="openstack/placement-678f8c5968-b8ddf" Nov 23 16:12:50 crc kubenswrapper[5050]: I1123 16:12:50.221633 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/70eba0ae-5c52-4839-a886-3d2a8d989ab5-config-data\") pod \"placement-678f8c5968-b8ddf\" (UID: \"70eba0ae-5c52-4839-a886-3d2a8d989ab5\") " pod="openstack/placement-678f8c5968-b8ddf" Nov 23 16:12:50 crc kubenswrapper[5050]: I1123 16:12:50.231367 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4jktw\" (UniqueName: \"kubernetes.io/projected/70eba0ae-5c52-4839-a886-3d2a8d989ab5-kube-api-access-4jktw\") pod \"placement-678f8c5968-b8ddf\" (UID: \"70eba0ae-5c52-4839-a886-3d2a8d989ab5\") " pod="openstack/placement-678f8c5968-b8ddf" Nov 23 16:12:50 crc kubenswrapper[5050]: I1123 16:12:50.376544 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-678f8c5968-b8ddf" Nov 23 16:12:50 crc kubenswrapper[5050]: W1123 16:12:50.905362 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod70eba0ae_5c52_4839_a886_3d2a8d989ab5.slice/crio-b7e1caf3331df1416234d922477b20c683a9482b9afe7c531ac0983b1323da32 WatchSource:0}: Error finding container b7e1caf3331df1416234d922477b20c683a9482b9afe7c531ac0983b1323da32: Status 404 returned error can't find the container with id b7e1caf3331df1416234d922477b20c683a9482b9afe7c531ac0983b1323da32 Nov 23 16:12:50 crc kubenswrapper[5050]: I1123 16:12:50.908329 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-678f8c5968-b8ddf"] Nov 23 16:12:51 crc kubenswrapper[5050]: I1123 16:12:51.520055 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-678f8c5968-b8ddf" event={"ID":"70eba0ae-5c52-4839-a886-3d2a8d989ab5","Type":"ContainerStarted","Data":"d22b6539a6f385c52d5178803352103f8472a92b162473f2966959cec63bc17b"} Nov 23 16:12:51 crc kubenswrapper[5050]: I1123 16:12:51.521938 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-678f8c5968-b8ddf" event={"ID":"70eba0ae-5c52-4839-a886-3d2a8d989ab5","Type":"ContainerStarted","Data":"4e6daf4355773bd2e2c5299085ca3219a932a50c93532311b5d63369bc35d3bb"} Nov 23 16:12:51 crc kubenswrapper[5050]: I1123 16:12:51.522082 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-678f8c5968-b8ddf" Nov 23 16:12:51 crc kubenswrapper[5050]: I1123 16:12:51.522172 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-678f8c5968-b8ddf" Nov 23 16:12:51 crc kubenswrapper[5050]: I1123 16:12:51.522264 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-678f8c5968-b8ddf" event={"ID":"70eba0ae-5c52-4839-a886-3d2a8d989ab5","Type":"ContainerStarted","Data":"b7e1caf3331df1416234d922477b20c683a9482b9afe7c531ac0983b1323da32"} Nov 23 16:12:54 crc kubenswrapper[5050]: I1123 16:12:54.817985 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6cf4bbfb7f-v86xr" Nov 23 16:12:54 crc kubenswrapper[5050]: I1123 16:12:54.857474 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-678f8c5968-b8ddf" podStartSLOduration=5.857431816 podStartE2EDuration="5.857431816s" podCreationTimestamp="2025-11-23 16:12:49 +0000 UTC" 
firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 16:12:51.5483378 +0000 UTC m=+5466.715334285" watchObservedRunningTime="2025-11-23 16:12:54.857431816 +0000 UTC m=+5470.024428301" Nov 23 16:12:54 crc kubenswrapper[5050]: I1123 16:12:54.958483 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-85f66497c-m9hrl"] Nov 23 16:12:54 crc kubenswrapper[5050]: I1123 16:12:54.959129 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-85f66497c-m9hrl" podUID="e1ac5e24-f6d7-4e99-94fb-4952a7cc06fd" containerName="dnsmasq-dns" containerID="cri-o://b698ee0e904d845afe64535f9fef08651dd2e75e3cb4da768ffa89d1f59fe66c" gracePeriod=10 Nov 23 16:12:55 crc kubenswrapper[5050]: I1123 16:12:55.421567 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-85f66497c-m9hrl" Nov 23 16:12:55 crc kubenswrapper[5050]: I1123 16:12:55.556841 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e1ac5e24-f6d7-4e99-94fb-4952a7cc06fd-dns-svc\") pod \"e1ac5e24-f6d7-4e99-94fb-4952a7cc06fd\" (UID: \"e1ac5e24-f6d7-4e99-94fb-4952a7cc06fd\") " Nov 23 16:12:55 crc kubenswrapper[5050]: I1123 16:12:55.557034 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e1ac5e24-f6d7-4e99-94fb-4952a7cc06fd-ovsdbserver-sb\") pod \"e1ac5e24-f6d7-4e99-94fb-4952a7cc06fd\" (UID: \"e1ac5e24-f6d7-4e99-94fb-4952a7cc06fd\") " Nov 23 16:12:55 crc kubenswrapper[5050]: I1123 16:12:55.557219 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e1ac5e24-f6d7-4e99-94fb-4952a7cc06fd-config\") pod \"e1ac5e24-f6d7-4e99-94fb-4952a7cc06fd\" (UID: \"e1ac5e24-f6d7-4e99-94fb-4952a7cc06fd\") " Nov 23 16:12:55 crc kubenswrapper[5050]: I1123 16:12:55.558095 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e1ac5e24-f6d7-4e99-94fb-4952a7cc06fd-ovsdbserver-nb\") pod \"e1ac5e24-f6d7-4e99-94fb-4952a7cc06fd\" (UID: \"e1ac5e24-f6d7-4e99-94fb-4952a7cc06fd\") " Nov 23 16:12:55 crc kubenswrapper[5050]: I1123 16:12:55.558146 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x8blw\" (UniqueName: \"kubernetes.io/projected/e1ac5e24-f6d7-4e99-94fb-4952a7cc06fd-kube-api-access-x8blw\") pod \"e1ac5e24-f6d7-4e99-94fb-4952a7cc06fd\" (UID: \"e1ac5e24-f6d7-4e99-94fb-4952a7cc06fd\") " Nov 23 16:12:55 crc kubenswrapper[5050]: I1123 16:12:55.565696 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e1ac5e24-f6d7-4e99-94fb-4952a7cc06fd-kube-api-access-x8blw" (OuterVolumeSpecName: "kube-api-access-x8blw") pod "e1ac5e24-f6d7-4e99-94fb-4952a7cc06fd" (UID: "e1ac5e24-f6d7-4e99-94fb-4952a7cc06fd"). InnerVolumeSpecName "kube-api-access-x8blw". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 16:12:55 crc kubenswrapper[5050]: I1123 16:12:55.586938 5050 generic.go:334] "Generic (PLEG): container finished" podID="e1ac5e24-f6d7-4e99-94fb-4952a7cc06fd" containerID="b698ee0e904d845afe64535f9fef08651dd2e75e3cb4da768ffa89d1f59fe66c" exitCode=0 Nov 23 16:12:55 crc kubenswrapper[5050]: I1123 16:12:55.587102 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-85f66497c-m9hrl" Nov 23 16:12:55 crc kubenswrapper[5050]: I1123 16:12:55.615497 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e1ac5e24-f6d7-4e99-94fb-4952a7cc06fd-config" (OuterVolumeSpecName: "config") pod "e1ac5e24-f6d7-4e99-94fb-4952a7cc06fd" (UID: "e1ac5e24-f6d7-4e99-94fb-4952a7cc06fd"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 16:12:55 crc kubenswrapper[5050]: I1123 16:12:55.620255 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e1ac5e24-f6d7-4e99-94fb-4952a7cc06fd-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "e1ac5e24-f6d7-4e99-94fb-4952a7cc06fd" (UID: "e1ac5e24-f6d7-4e99-94fb-4952a7cc06fd"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 16:12:55 crc kubenswrapper[5050]: I1123 16:12:55.626140 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e1ac5e24-f6d7-4e99-94fb-4952a7cc06fd-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "e1ac5e24-f6d7-4e99-94fb-4952a7cc06fd" (UID: "e1ac5e24-f6d7-4e99-94fb-4952a7cc06fd"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 16:12:55 crc kubenswrapper[5050]: I1123 16:12:55.634266 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e1ac5e24-f6d7-4e99-94fb-4952a7cc06fd-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "e1ac5e24-f6d7-4e99-94fb-4952a7cc06fd" (UID: "e1ac5e24-f6d7-4e99-94fb-4952a7cc06fd"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 16:12:55 crc kubenswrapper[5050]: I1123 16:12:55.661049 5050 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e1ac5e24-f6d7-4e99-94fb-4952a7cc06fd-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 23 16:12:55 crc kubenswrapper[5050]: I1123 16:12:55.661093 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x8blw\" (UniqueName: \"kubernetes.io/projected/e1ac5e24-f6d7-4e99-94fb-4952a7cc06fd-kube-api-access-x8blw\") on node \"crc\" DevicePath \"\"" Nov 23 16:12:55 crc kubenswrapper[5050]: I1123 16:12:55.661109 5050 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e1ac5e24-f6d7-4e99-94fb-4952a7cc06fd-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 23 16:12:55 crc kubenswrapper[5050]: I1123 16:12:55.661121 5050 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e1ac5e24-f6d7-4e99-94fb-4952a7cc06fd-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 23 16:12:55 crc kubenswrapper[5050]: I1123 16:12:55.661132 5050 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e1ac5e24-f6d7-4e99-94fb-4952a7cc06fd-config\") on node \"crc\" DevicePath \"\"" Nov 23 16:12:55 crc kubenswrapper[5050]: I1123 16:12:55.672810 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-85f66497c-m9hrl" event={"ID":"e1ac5e24-f6d7-4e99-94fb-4952a7cc06fd","Type":"ContainerDied","Data":"b698ee0e904d845afe64535f9fef08651dd2e75e3cb4da768ffa89d1f59fe66c"} Nov 23 16:12:55 crc kubenswrapper[5050]: I1123 16:12:55.672879 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-85f66497c-m9hrl" event={"ID":"e1ac5e24-f6d7-4e99-94fb-4952a7cc06fd","Type":"ContainerDied","Data":"5187919526be9913641157db0a3b0c9573a5d82e1084833fd8147993e0d15008"} Nov 23 16:12:55 crc kubenswrapper[5050]: I1123 16:12:55.672909 5050 scope.go:117] "RemoveContainer" containerID="b698ee0e904d845afe64535f9fef08651dd2e75e3cb4da768ffa89d1f59fe66c" Nov 23 16:12:55 crc kubenswrapper[5050]: I1123 16:12:55.702310 5050 scope.go:117] "RemoveContainer" containerID="8b6511c59b9ded13dc8f8973d2988a403cf7161450962651e53b801a286d9fce" Nov 23 16:12:55 crc kubenswrapper[5050]: I1123 16:12:55.726040 5050 scope.go:117] "RemoveContainer" containerID="b698ee0e904d845afe64535f9fef08651dd2e75e3cb4da768ffa89d1f59fe66c" Nov 23 16:12:55 crc kubenswrapper[5050]: E1123 16:12:55.726707 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b698ee0e904d845afe64535f9fef08651dd2e75e3cb4da768ffa89d1f59fe66c\": container with ID starting with b698ee0e904d845afe64535f9fef08651dd2e75e3cb4da768ffa89d1f59fe66c not found: ID does not exist" containerID="b698ee0e904d845afe64535f9fef08651dd2e75e3cb4da768ffa89d1f59fe66c" Nov 23 16:12:55 crc kubenswrapper[5050]: I1123 16:12:55.726761 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b698ee0e904d845afe64535f9fef08651dd2e75e3cb4da768ffa89d1f59fe66c"} err="failed to get container status \"b698ee0e904d845afe64535f9fef08651dd2e75e3cb4da768ffa89d1f59fe66c\": rpc error: code = NotFound desc = could not find container \"b698ee0e904d845afe64535f9fef08651dd2e75e3cb4da768ffa89d1f59fe66c\": container with ID starting with 
b698ee0e904d845afe64535f9fef08651dd2e75e3cb4da768ffa89d1f59fe66c not found: ID does not exist" Nov 23 16:12:55 crc kubenswrapper[5050]: I1123 16:12:55.726798 5050 scope.go:117] "RemoveContainer" containerID="8b6511c59b9ded13dc8f8973d2988a403cf7161450962651e53b801a286d9fce" Nov 23 16:12:55 crc kubenswrapper[5050]: E1123 16:12:55.727276 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8b6511c59b9ded13dc8f8973d2988a403cf7161450962651e53b801a286d9fce\": container with ID starting with 8b6511c59b9ded13dc8f8973d2988a403cf7161450962651e53b801a286d9fce not found: ID does not exist" containerID="8b6511c59b9ded13dc8f8973d2988a403cf7161450962651e53b801a286d9fce" Nov 23 16:12:55 crc kubenswrapper[5050]: I1123 16:12:55.727328 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8b6511c59b9ded13dc8f8973d2988a403cf7161450962651e53b801a286d9fce"} err="failed to get container status \"8b6511c59b9ded13dc8f8973d2988a403cf7161450962651e53b801a286d9fce\": rpc error: code = NotFound desc = could not find container \"8b6511c59b9ded13dc8f8973d2988a403cf7161450962651e53b801a286d9fce\": container with ID starting with 8b6511c59b9ded13dc8f8973d2988a403cf7161450962651e53b801a286d9fce not found: ID does not exist" Nov 23 16:12:55 crc kubenswrapper[5050]: I1123 16:12:55.923895 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-85f66497c-m9hrl"] Nov 23 16:12:55 crc kubenswrapper[5050]: I1123 16:12:55.937957 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-85f66497c-m9hrl"] Nov 23 16:12:57 crc kubenswrapper[5050]: I1123 16:12:57.569762 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e1ac5e24-f6d7-4e99-94fb-4952a7cc06fd" path="/var/lib/kubelet/pods/e1ac5e24-f6d7-4e99-94fb-4952a7cc06fd/volumes" Nov 23 16:13:07 crc kubenswrapper[5050]: I1123 16:13:07.114075 5050 scope.go:117] "RemoveContainer" containerID="d3458d224bbbc0fc13beedd14a1d143aa5979480bbaa400bb95019b33647770a" Nov 23 16:13:07 crc kubenswrapper[5050]: I1123 16:13:07.150401 5050 scope.go:117] "RemoveContainer" containerID="ea31dc6519aca59a5126623a1db0829c746dad379b16e78288734c0aa8eadf6c" Nov 23 16:13:21 crc kubenswrapper[5050]: I1123 16:13:21.386588 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-678f8c5968-b8ddf" Nov 23 16:13:21 crc kubenswrapper[5050]: I1123 16:13:21.488860 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-678f8c5968-b8ddf" Nov 23 16:13:47 crc kubenswrapper[5050]: I1123 16:13:47.399160 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-db-create-rmlzd"] Nov 23 16:13:47 crc kubenswrapper[5050]: E1123 16:13:47.400427 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e1ac5e24-f6d7-4e99-94fb-4952a7cc06fd" containerName="dnsmasq-dns" Nov 23 16:13:47 crc kubenswrapper[5050]: I1123 16:13:47.400459 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="e1ac5e24-f6d7-4e99-94fb-4952a7cc06fd" containerName="dnsmasq-dns" Nov 23 16:13:47 crc kubenswrapper[5050]: E1123 16:13:47.400484 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e1ac5e24-f6d7-4e99-94fb-4952a7cc06fd" containerName="init" Nov 23 16:13:47 crc kubenswrapper[5050]: I1123 16:13:47.400491 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="e1ac5e24-f6d7-4e99-94fb-4952a7cc06fd" containerName="init" Nov 23 
16:13:47 crc kubenswrapper[5050]: I1123 16:13:47.400678 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="e1ac5e24-f6d7-4e99-94fb-4952a7cc06fd" containerName="dnsmasq-dns" Nov 23 16:13:47 crc kubenswrapper[5050]: I1123 16:13:47.401401 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-rmlzd" Nov 23 16:13:47 crc kubenswrapper[5050]: I1123 16:13:47.412795 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-rmlzd"] Nov 23 16:13:47 crc kubenswrapper[5050]: I1123 16:13:47.474200 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9jlm7\" (UniqueName: \"kubernetes.io/projected/18ab378a-a758-455c-9350-8a610f30c66d-kube-api-access-9jlm7\") pod \"nova-api-db-create-rmlzd\" (UID: \"18ab378a-a758-455c-9350-8a610f30c66d\") " pod="openstack/nova-api-db-create-rmlzd" Nov 23 16:13:47 crc kubenswrapper[5050]: I1123 16:13:47.474252 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/18ab378a-a758-455c-9350-8a610f30c66d-operator-scripts\") pod \"nova-api-db-create-rmlzd\" (UID: \"18ab378a-a758-455c-9350-8a610f30c66d\") " pod="openstack/nova-api-db-create-rmlzd" Nov 23 16:13:47 crc kubenswrapper[5050]: I1123 16:13:47.487330 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-db-create-p2fgp"] Nov 23 16:13:47 crc kubenswrapper[5050]: I1123 16:13:47.488744 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-p2fgp" Nov 23 16:13:47 crc kubenswrapper[5050]: I1123 16:13:47.501531 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-p2fgp"] Nov 23 16:13:47 crc kubenswrapper[5050]: I1123 16:13:47.578095 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2c9bl\" (UniqueName: \"kubernetes.io/projected/5b75904b-a477-4659-9bc3-0f9771466735-kube-api-access-2c9bl\") pod \"nova-cell0-db-create-p2fgp\" (UID: \"5b75904b-a477-4659-9bc3-0f9771466735\") " pod="openstack/nova-cell0-db-create-p2fgp" Nov 23 16:13:47 crc kubenswrapper[5050]: I1123 16:13:47.578228 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9jlm7\" (UniqueName: \"kubernetes.io/projected/18ab378a-a758-455c-9350-8a610f30c66d-kube-api-access-9jlm7\") pod \"nova-api-db-create-rmlzd\" (UID: \"18ab378a-a758-455c-9350-8a610f30c66d\") " pod="openstack/nova-api-db-create-rmlzd" Nov 23 16:13:47 crc kubenswrapper[5050]: I1123 16:13:47.578282 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/18ab378a-a758-455c-9350-8a610f30c66d-operator-scripts\") pod \"nova-api-db-create-rmlzd\" (UID: \"18ab378a-a758-455c-9350-8a610f30c66d\") " pod="openstack/nova-api-db-create-rmlzd" Nov 23 16:13:47 crc kubenswrapper[5050]: I1123 16:13:47.578343 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5b75904b-a477-4659-9bc3-0f9771466735-operator-scripts\") pod \"nova-cell0-db-create-p2fgp\" (UID: \"5b75904b-a477-4659-9bc3-0f9771466735\") " pod="openstack/nova-cell0-db-create-p2fgp" Nov 23 16:13:47 crc kubenswrapper[5050]: I1123 16:13:47.580364 5050 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/18ab378a-a758-455c-9350-8a610f30c66d-operator-scripts\") pod \"nova-api-db-create-rmlzd\" (UID: \"18ab378a-a758-455c-9350-8a610f30c66d\") " pod="openstack/nova-api-db-create-rmlzd" Nov 23 16:13:47 crc kubenswrapper[5050]: I1123 16:13:47.591913 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-2b01-account-create-wk29k"] Nov 23 16:13:47 crc kubenswrapper[5050]: I1123 16:13:47.593326 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-2b01-account-create-wk29k" Nov 23 16:13:47 crc kubenswrapper[5050]: I1123 16:13:47.595813 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-db-secret" Nov 23 16:13:47 crc kubenswrapper[5050]: I1123 16:13:47.603301 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9jlm7\" (UniqueName: \"kubernetes.io/projected/18ab378a-a758-455c-9350-8a610f30c66d-kube-api-access-9jlm7\") pod \"nova-api-db-create-rmlzd\" (UID: \"18ab378a-a758-455c-9350-8a610f30c66d\") " pod="openstack/nova-api-db-create-rmlzd" Nov 23 16:13:47 crc kubenswrapper[5050]: I1123 16:13:47.622805 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-2b01-account-create-wk29k"] Nov 23 16:13:47 crc kubenswrapper[5050]: I1123 16:13:47.688924 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q9k7r\" (UniqueName: \"kubernetes.io/projected/c91600e9-e5ac-4426-bcbf-42cc78069c6a-kube-api-access-q9k7r\") pod \"nova-api-2b01-account-create-wk29k\" (UID: \"c91600e9-e5ac-4426-bcbf-42cc78069c6a\") " pod="openstack/nova-api-2b01-account-create-wk29k" Nov 23 16:13:47 crc kubenswrapper[5050]: I1123 16:13:47.689047 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2c9bl\" (UniqueName: \"kubernetes.io/projected/5b75904b-a477-4659-9bc3-0f9771466735-kube-api-access-2c9bl\") pod \"nova-cell0-db-create-p2fgp\" (UID: \"5b75904b-a477-4659-9bc3-0f9771466735\") " pod="openstack/nova-cell0-db-create-p2fgp" Nov 23 16:13:47 crc kubenswrapper[5050]: I1123 16:13:47.689083 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c91600e9-e5ac-4426-bcbf-42cc78069c6a-operator-scripts\") pod \"nova-api-2b01-account-create-wk29k\" (UID: \"c91600e9-e5ac-4426-bcbf-42cc78069c6a\") " pod="openstack/nova-api-2b01-account-create-wk29k" Nov 23 16:13:47 crc kubenswrapper[5050]: I1123 16:13:47.689190 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5b75904b-a477-4659-9bc3-0f9771466735-operator-scripts\") pod \"nova-cell0-db-create-p2fgp\" (UID: \"5b75904b-a477-4659-9bc3-0f9771466735\") " pod="openstack/nova-cell0-db-create-p2fgp" Nov 23 16:13:47 crc kubenswrapper[5050]: I1123 16:13:47.690430 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5b75904b-a477-4659-9bc3-0f9771466735-operator-scripts\") pod \"nova-cell0-db-create-p2fgp\" (UID: \"5b75904b-a477-4659-9bc3-0f9771466735\") " pod="openstack/nova-cell0-db-create-p2fgp" Nov 23 16:13:47 crc kubenswrapper[5050]: I1123 16:13:47.704396 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-db-create-gl7j8"] Nov 23 
16:13:47 crc kubenswrapper[5050]: I1123 16:13:47.705955 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-gl7j8" Nov 23 16:13:47 crc kubenswrapper[5050]: I1123 16:13:47.711334 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2c9bl\" (UniqueName: \"kubernetes.io/projected/5b75904b-a477-4659-9bc3-0f9771466735-kube-api-access-2c9bl\") pod \"nova-cell0-db-create-p2fgp\" (UID: \"5b75904b-a477-4659-9bc3-0f9771466735\") " pod="openstack/nova-cell0-db-create-p2fgp" Nov 23 16:13:47 crc kubenswrapper[5050]: I1123 16:13:47.718016 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-gl7j8"] Nov 23 16:13:47 crc kubenswrapper[5050]: I1123 16:13:47.748358 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-rmlzd" Nov 23 16:13:47 crc kubenswrapper[5050]: I1123 16:13:47.795735 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c91600e9-e5ac-4426-bcbf-42cc78069c6a-operator-scripts\") pod \"nova-api-2b01-account-create-wk29k\" (UID: \"c91600e9-e5ac-4426-bcbf-42cc78069c6a\") " pod="openstack/nova-api-2b01-account-create-wk29k" Nov 23 16:13:47 crc kubenswrapper[5050]: I1123 16:13:47.796380 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/47528bce-fa2a-4f34-bdb3-cd44a57be5ef-operator-scripts\") pod \"nova-cell1-db-create-gl7j8\" (UID: \"47528bce-fa2a-4f34-bdb3-cd44a57be5ef\") " pod="openstack/nova-cell1-db-create-gl7j8" Nov 23 16:13:47 crc kubenswrapper[5050]: I1123 16:13:47.796651 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q9k7r\" (UniqueName: \"kubernetes.io/projected/c91600e9-e5ac-4426-bcbf-42cc78069c6a-kube-api-access-q9k7r\") pod \"nova-api-2b01-account-create-wk29k\" (UID: \"c91600e9-e5ac-4426-bcbf-42cc78069c6a\") " pod="openstack/nova-api-2b01-account-create-wk29k" Nov 23 16:13:47 crc kubenswrapper[5050]: I1123 16:13:47.796781 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c91600e9-e5ac-4426-bcbf-42cc78069c6a-operator-scripts\") pod \"nova-api-2b01-account-create-wk29k\" (UID: \"c91600e9-e5ac-4426-bcbf-42cc78069c6a\") " pod="openstack/nova-api-2b01-account-create-wk29k" Nov 23 16:13:47 crc kubenswrapper[5050]: I1123 16:13:47.797114 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r2j7p\" (UniqueName: \"kubernetes.io/projected/47528bce-fa2a-4f34-bdb3-cd44a57be5ef-kube-api-access-r2j7p\") pod \"nova-cell1-db-create-gl7j8\" (UID: \"47528bce-fa2a-4f34-bdb3-cd44a57be5ef\") " pod="openstack/nova-cell1-db-create-gl7j8" Nov 23 16:13:47 crc kubenswrapper[5050]: I1123 16:13:47.798709 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-6645-account-create-4zgnc"] Nov 23 16:13:47 crc kubenswrapper[5050]: I1123 16:13:47.800283 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-6645-account-create-4zgnc" Nov 23 16:13:47 crc kubenswrapper[5050]: I1123 16:13:47.807788 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-db-secret" Nov 23 16:13:47 crc kubenswrapper[5050]: I1123 16:13:47.815872 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-p2fgp" Nov 23 16:13:47 crc kubenswrapper[5050]: I1123 16:13:47.826583 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-6645-account-create-4zgnc"] Nov 23 16:13:47 crc kubenswrapper[5050]: I1123 16:13:47.831217 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q9k7r\" (UniqueName: \"kubernetes.io/projected/c91600e9-e5ac-4426-bcbf-42cc78069c6a-kube-api-access-q9k7r\") pod \"nova-api-2b01-account-create-wk29k\" (UID: \"c91600e9-e5ac-4426-bcbf-42cc78069c6a\") " pod="openstack/nova-api-2b01-account-create-wk29k" Nov 23 16:13:47 crc kubenswrapper[5050]: I1123 16:13:47.894433 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-9cac-account-create-m4bh5"] Nov 23 16:13:47 crc kubenswrapper[5050]: I1123 16:13:47.896210 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-9cac-account-create-m4bh5" Nov 23 16:13:47 crc kubenswrapper[5050]: I1123 16:13:47.899145 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d3f67377-c66e-4af9-b42f-185208ef4cb6-operator-scripts\") pod \"nova-cell0-6645-account-create-4zgnc\" (UID: \"d3f67377-c66e-4af9-b42f-185208ef4cb6\") " pod="openstack/nova-cell0-6645-account-create-4zgnc" Nov 23 16:13:47 crc kubenswrapper[5050]: I1123 16:13:47.899300 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/47528bce-fa2a-4f34-bdb3-cd44a57be5ef-operator-scripts\") pod \"nova-cell1-db-create-gl7j8\" (UID: \"47528bce-fa2a-4f34-bdb3-cd44a57be5ef\") " pod="openstack/nova-cell1-db-create-gl7j8" Nov 23 16:13:47 crc kubenswrapper[5050]: I1123 16:13:47.899353 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r2j7p\" (UniqueName: \"kubernetes.io/projected/47528bce-fa2a-4f34-bdb3-cd44a57be5ef-kube-api-access-r2j7p\") pod \"nova-cell1-db-create-gl7j8\" (UID: \"47528bce-fa2a-4f34-bdb3-cd44a57be5ef\") " pod="openstack/nova-cell1-db-create-gl7j8" Nov 23 16:13:47 crc kubenswrapper[5050]: I1123 16:13:47.899474 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nz76r\" (UniqueName: \"kubernetes.io/projected/d3f67377-c66e-4af9-b42f-185208ef4cb6-kube-api-access-nz76r\") pod \"nova-cell0-6645-account-create-4zgnc\" (UID: \"d3f67377-c66e-4af9-b42f-185208ef4cb6\") " pod="openstack/nova-cell0-6645-account-create-4zgnc" Nov 23 16:13:47 crc kubenswrapper[5050]: I1123 16:13:47.901122 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/47528bce-fa2a-4f34-bdb3-cd44a57be5ef-operator-scripts\") pod \"nova-cell1-db-create-gl7j8\" (UID: \"47528bce-fa2a-4f34-bdb3-cd44a57be5ef\") " pod="openstack/nova-cell1-db-create-gl7j8" Nov 23 16:13:47 crc kubenswrapper[5050]: I1123 16:13:47.903992 5050 reflector.go:368] Caches populated for *v1.Secret from 
object-"openstack"/"nova-cell1-db-secret" Nov 23 16:13:47 crc kubenswrapper[5050]: I1123 16:13:47.927889 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-9cac-account-create-m4bh5"] Nov 23 16:13:47 crc kubenswrapper[5050]: I1123 16:13:47.930614 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r2j7p\" (UniqueName: \"kubernetes.io/projected/47528bce-fa2a-4f34-bdb3-cd44a57be5ef-kube-api-access-r2j7p\") pod \"nova-cell1-db-create-gl7j8\" (UID: \"47528bce-fa2a-4f34-bdb3-cd44a57be5ef\") " pod="openstack/nova-cell1-db-create-gl7j8" Nov 23 16:13:47 crc kubenswrapper[5050]: I1123 16:13:47.950926 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-2b01-account-create-wk29k" Nov 23 16:13:48 crc kubenswrapper[5050]: I1123 16:13:48.001850 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nz76r\" (UniqueName: \"kubernetes.io/projected/d3f67377-c66e-4af9-b42f-185208ef4cb6-kube-api-access-nz76r\") pod \"nova-cell0-6645-account-create-4zgnc\" (UID: \"d3f67377-c66e-4af9-b42f-185208ef4cb6\") " pod="openstack/nova-cell0-6645-account-create-4zgnc" Nov 23 16:13:48 crc kubenswrapper[5050]: I1123 16:13:48.001938 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/dff083ef-35fc-44a9-92f2-e884a936cc27-operator-scripts\") pod \"nova-cell1-9cac-account-create-m4bh5\" (UID: \"dff083ef-35fc-44a9-92f2-e884a936cc27\") " pod="openstack/nova-cell1-9cac-account-create-m4bh5" Nov 23 16:13:48 crc kubenswrapper[5050]: I1123 16:13:48.002015 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d3f67377-c66e-4af9-b42f-185208ef4cb6-operator-scripts\") pod \"nova-cell0-6645-account-create-4zgnc\" (UID: \"d3f67377-c66e-4af9-b42f-185208ef4cb6\") " pod="openstack/nova-cell0-6645-account-create-4zgnc" Nov 23 16:13:48 crc kubenswrapper[5050]: I1123 16:13:48.002063 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pfm6t\" (UniqueName: \"kubernetes.io/projected/dff083ef-35fc-44a9-92f2-e884a936cc27-kube-api-access-pfm6t\") pod \"nova-cell1-9cac-account-create-m4bh5\" (UID: \"dff083ef-35fc-44a9-92f2-e884a936cc27\") " pod="openstack/nova-cell1-9cac-account-create-m4bh5" Nov 23 16:13:48 crc kubenswrapper[5050]: I1123 16:13:48.003190 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d3f67377-c66e-4af9-b42f-185208ef4cb6-operator-scripts\") pod \"nova-cell0-6645-account-create-4zgnc\" (UID: \"d3f67377-c66e-4af9-b42f-185208ef4cb6\") " pod="openstack/nova-cell0-6645-account-create-4zgnc" Nov 23 16:13:48 crc kubenswrapper[5050]: I1123 16:13:48.031242 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nz76r\" (UniqueName: \"kubernetes.io/projected/d3f67377-c66e-4af9-b42f-185208ef4cb6-kube-api-access-nz76r\") pod \"nova-cell0-6645-account-create-4zgnc\" (UID: \"d3f67377-c66e-4af9-b42f-185208ef4cb6\") " pod="openstack/nova-cell0-6645-account-create-4zgnc" Nov 23 16:13:48 crc kubenswrapper[5050]: I1123 16:13:48.081294 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-db-create-gl7j8" Nov 23 16:13:48 crc kubenswrapper[5050]: I1123 16:13:48.107484 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/dff083ef-35fc-44a9-92f2-e884a936cc27-operator-scripts\") pod \"nova-cell1-9cac-account-create-m4bh5\" (UID: \"dff083ef-35fc-44a9-92f2-e884a936cc27\") " pod="openstack/nova-cell1-9cac-account-create-m4bh5" Nov 23 16:13:48 crc kubenswrapper[5050]: I1123 16:13:48.107632 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pfm6t\" (UniqueName: \"kubernetes.io/projected/dff083ef-35fc-44a9-92f2-e884a936cc27-kube-api-access-pfm6t\") pod \"nova-cell1-9cac-account-create-m4bh5\" (UID: \"dff083ef-35fc-44a9-92f2-e884a936cc27\") " pod="openstack/nova-cell1-9cac-account-create-m4bh5" Nov 23 16:13:48 crc kubenswrapper[5050]: I1123 16:13:48.108467 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/dff083ef-35fc-44a9-92f2-e884a936cc27-operator-scripts\") pod \"nova-cell1-9cac-account-create-m4bh5\" (UID: \"dff083ef-35fc-44a9-92f2-e884a936cc27\") " pod="openstack/nova-cell1-9cac-account-create-m4bh5" Nov 23 16:13:48 crc kubenswrapper[5050]: I1123 16:13:48.132374 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pfm6t\" (UniqueName: \"kubernetes.io/projected/dff083ef-35fc-44a9-92f2-e884a936cc27-kube-api-access-pfm6t\") pod \"nova-cell1-9cac-account-create-m4bh5\" (UID: \"dff083ef-35fc-44a9-92f2-e884a936cc27\") " pod="openstack/nova-cell1-9cac-account-create-m4bh5" Nov 23 16:13:48 crc kubenswrapper[5050]: I1123 16:13:48.251181 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-6645-account-create-4zgnc" Nov 23 16:13:48 crc kubenswrapper[5050]: I1123 16:13:48.272481 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-9cac-account-create-m4bh5" Nov 23 16:13:48 crc kubenswrapper[5050]: I1123 16:13:48.304487 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-rmlzd"] Nov 23 16:13:48 crc kubenswrapper[5050]: I1123 16:13:48.464477 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-p2fgp"] Nov 23 16:13:48 crc kubenswrapper[5050]: W1123 16:13:48.472013 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5b75904b_a477_4659_9bc3_0f9771466735.slice/crio-de1375c9c2e9c4bf88bbb6d487bf67bd56a25fc33c45459fddce02375d27873a WatchSource:0}: Error finding container de1375c9c2e9c4bf88bbb6d487bf67bd56a25fc33c45459fddce02375d27873a: Status 404 returned error can't find the container with id de1375c9c2e9c4bf88bbb6d487bf67bd56a25fc33c45459fddce02375d27873a Nov 23 16:13:48 crc kubenswrapper[5050]: I1123 16:13:48.581345 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-2b01-account-create-wk29k"] Nov 23 16:13:48 crc kubenswrapper[5050]: I1123 16:13:48.680387 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-gl7j8"] Nov 23 16:13:48 crc kubenswrapper[5050]: W1123 16:13:48.693379 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod47528bce_fa2a_4f34_bdb3_cd44a57be5ef.slice/crio-34810a148d0d68167219b283c75fe7e212bfabd3c8c63cc107e224310f43a107 WatchSource:0}: Error finding container 34810a148d0d68167219b283c75fe7e212bfabd3c8c63cc107e224310f43a107: Status 404 returned error can't find the container with id 34810a148d0d68167219b283c75fe7e212bfabd3c8c63cc107e224310f43a107 Nov 23 16:13:48 crc kubenswrapper[5050]: I1123 16:13:48.846017 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-6645-account-create-4zgnc"] Nov 23 16:13:48 crc kubenswrapper[5050]: I1123 16:13:48.858620 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-9cac-account-create-m4bh5"] Nov 23 16:13:48 crc kubenswrapper[5050]: W1123 16:13:48.886119 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd3f67377_c66e_4af9_b42f_185208ef4cb6.slice/crio-97b764ef877290e0803b3d5aa96b603893eff43a7825f75848800882681159f8 WatchSource:0}: Error finding container 97b764ef877290e0803b3d5aa96b603893eff43a7825f75848800882681159f8: Status 404 returned error can't find the container with id 97b764ef877290e0803b3d5aa96b603893eff43a7825f75848800882681159f8 Nov 23 16:13:48 crc kubenswrapper[5050]: W1123 16:13:48.890802 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddff083ef_35fc_44a9_92f2_e884a936cc27.slice/crio-e0e4343d11ec7d6a101bb88d453991d11a6fd0f7de584211edaf2ece04ef1e2f WatchSource:0}: Error finding container e0e4343d11ec7d6a101bb88d453991d11a6fd0f7de584211edaf2ece04ef1e2f: Status 404 returned error can't find the container with id e0e4343d11ec7d6a101bb88d453991d11a6fd0f7de584211edaf2ece04ef1e2f Nov 23 16:13:49 crc kubenswrapper[5050]: I1123 16:13:49.286852 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-6645-account-create-4zgnc" 
event={"ID":"d3f67377-c66e-4af9-b42f-185208ef4cb6","Type":"ContainerStarted","Data":"08686e8ee77d5026ee88b2bd0fb7f63d84555c48d0740fc16832a37a5ddf94c7"} Nov 23 16:13:49 crc kubenswrapper[5050]: I1123 16:13:49.287636 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-6645-account-create-4zgnc" event={"ID":"d3f67377-c66e-4af9-b42f-185208ef4cb6","Type":"ContainerStarted","Data":"97b764ef877290e0803b3d5aa96b603893eff43a7825f75848800882681159f8"} Nov 23 16:13:49 crc kubenswrapper[5050]: I1123 16:13:49.292309 5050 generic.go:334] "Generic (PLEG): container finished" podID="18ab378a-a758-455c-9350-8a610f30c66d" containerID="56572cfb16a7a993f68dcbedf5b4a4a8e376cbe720373063fcaab98ff2f182c0" exitCode=0 Nov 23 16:13:49 crc kubenswrapper[5050]: I1123 16:13:49.292390 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-rmlzd" event={"ID":"18ab378a-a758-455c-9350-8a610f30c66d","Type":"ContainerDied","Data":"56572cfb16a7a993f68dcbedf5b4a4a8e376cbe720373063fcaab98ff2f182c0"} Nov 23 16:13:49 crc kubenswrapper[5050]: I1123 16:13:49.292426 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-rmlzd" event={"ID":"18ab378a-a758-455c-9350-8a610f30c66d","Type":"ContainerStarted","Data":"56a0c5400c94741841ce03fb12c31bad6af125804c057e5e790fe5b7c4a08045"} Nov 23 16:13:49 crc kubenswrapper[5050]: I1123 16:13:49.296157 5050 generic.go:334] "Generic (PLEG): container finished" podID="5b75904b-a477-4659-9bc3-0f9771466735" containerID="e34077fda1bb8f1b76d792a2c067477a67654fa7320df8f07c1309e670558ff0" exitCode=0 Nov 23 16:13:49 crc kubenswrapper[5050]: I1123 16:13:49.296259 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-p2fgp" event={"ID":"5b75904b-a477-4659-9bc3-0f9771466735","Type":"ContainerDied","Data":"e34077fda1bb8f1b76d792a2c067477a67654fa7320df8f07c1309e670558ff0"} Nov 23 16:13:49 crc kubenswrapper[5050]: I1123 16:13:49.296284 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-p2fgp" event={"ID":"5b75904b-a477-4659-9bc3-0f9771466735","Type":"ContainerStarted","Data":"de1375c9c2e9c4bf88bbb6d487bf67bd56a25fc33c45459fddce02375d27873a"} Nov 23 16:13:49 crc kubenswrapper[5050]: I1123 16:13:49.299178 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-2b01-account-create-wk29k" event={"ID":"c91600e9-e5ac-4426-bcbf-42cc78069c6a","Type":"ContainerStarted","Data":"063fc696506451a17f5c6024b739ce49418ba8b685a0a1e208cecba6b5c8ed02"} Nov 23 16:13:49 crc kubenswrapper[5050]: I1123 16:13:49.299389 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-2b01-account-create-wk29k" event={"ID":"c91600e9-e5ac-4426-bcbf-42cc78069c6a","Type":"ContainerStarted","Data":"a806ef20fdd1cb38a2ccd8440c1ee204c1388b2195d985074d5fcb59378ca07e"} Nov 23 16:13:49 crc kubenswrapper[5050]: I1123 16:13:49.301243 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-9cac-account-create-m4bh5" event={"ID":"dff083ef-35fc-44a9-92f2-e884a936cc27","Type":"ContainerStarted","Data":"88fa9adc47e25e304dacc4150a0980a9e11406b7edb82da752f7bdde29111eb2"} Nov 23 16:13:49 crc kubenswrapper[5050]: I1123 16:13:49.301266 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-9cac-account-create-m4bh5" event={"ID":"dff083ef-35fc-44a9-92f2-e884a936cc27","Type":"ContainerStarted","Data":"e0e4343d11ec7d6a101bb88d453991d11a6fd0f7de584211edaf2ece04ef1e2f"} Nov 23 16:13:49 crc 
kubenswrapper[5050]: I1123 16:13:49.307990 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-gl7j8" event={"ID":"47528bce-fa2a-4f34-bdb3-cd44a57be5ef","Type":"ContainerStarted","Data":"5bb900dd47b5d925af4de3b58077019a8802d7af78107df419ec6c3627896256"} Nov 23 16:13:49 crc kubenswrapper[5050]: I1123 16:13:49.309499 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-gl7j8" event={"ID":"47528bce-fa2a-4f34-bdb3-cd44a57be5ef","Type":"ContainerStarted","Data":"34810a148d0d68167219b283c75fe7e212bfabd3c8c63cc107e224310f43a107"} Nov 23 16:13:49 crc kubenswrapper[5050]: I1123 16:13:49.310258 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-6645-account-create-4zgnc" podStartSLOduration=2.310239766 podStartE2EDuration="2.310239766s" podCreationTimestamp="2025-11-23 16:13:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 16:13:49.303073164 +0000 UTC m=+5524.470069649" watchObservedRunningTime="2025-11-23 16:13:49.310239766 +0000 UTC m=+5524.477236241" Nov 23 16:13:49 crc kubenswrapper[5050]: I1123 16:13:49.344305 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-9cac-account-create-m4bh5" podStartSLOduration=2.344280027 podStartE2EDuration="2.344280027s" podCreationTimestamp="2025-11-23 16:13:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 16:13:49.340572892 +0000 UTC m=+5524.507569377" watchObservedRunningTime="2025-11-23 16:13:49.344280027 +0000 UTC m=+5524.511276512" Nov 23 16:13:49 crc kubenswrapper[5050]: I1123 16:13:49.409403 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-db-create-gl7j8" podStartSLOduration=2.409369754 podStartE2EDuration="2.409369754s" podCreationTimestamp="2025-11-23 16:13:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 16:13:49.398353903 +0000 UTC m=+5524.565350388" watchObservedRunningTime="2025-11-23 16:13:49.409369754 +0000 UTC m=+5524.576366239" Nov 23 16:13:50 crc kubenswrapper[5050]: I1123 16:13:50.325623 5050 generic.go:334] "Generic (PLEG): container finished" podID="d3f67377-c66e-4af9-b42f-185208ef4cb6" containerID="08686e8ee77d5026ee88b2bd0fb7f63d84555c48d0740fc16832a37a5ddf94c7" exitCode=0 Nov 23 16:13:50 crc kubenswrapper[5050]: I1123 16:13:50.325745 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-6645-account-create-4zgnc" event={"ID":"d3f67377-c66e-4af9-b42f-185208ef4cb6","Type":"ContainerDied","Data":"08686e8ee77d5026ee88b2bd0fb7f63d84555c48d0740fc16832a37a5ddf94c7"} Nov 23 16:13:50 crc kubenswrapper[5050]: I1123 16:13:50.329620 5050 generic.go:334] "Generic (PLEG): container finished" podID="c91600e9-e5ac-4426-bcbf-42cc78069c6a" containerID="063fc696506451a17f5c6024b739ce49418ba8b685a0a1e208cecba6b5c8ed02" exitCode=0 Nov 23 16:13:50 crc kubenswrapper[5050]: I1123 16:13:50.329694 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-2b01-account-create-wk29k" event={"ID":"c91600e9-e5ac-4426-bcbf-42cc78069c6a","Type":"ContainerDied","Data":"063fc696506451a17f5c6024b739ce49418ba8b685a0a1e208cecba6b5c8ed02"} Nov 23 16:13:50 crc kubenswrapper[5050]: I1123 16:13:50.332865 5050 
generic.go:334] "Generic (PLEG): container finished" podID="dff083ef-35fc-44a9-92f2-e884a936cc27" containerID="88fa9adc47e25e304dacc4150a0980a9e11406b7edb82da752f7bdde29111eb2" exitCode=0 Nov 23 16:13:50 crc kubenswrapper[5050]: I1123 16:13:50.332966 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-9cac-account-create-m4bh5" event={"ID":"dff083ef-35fc-44a9-92f2-e884a936cc27","Type":"ContainerDied","Data":"88fa9adc47e25e304dacc4150a0980a9e11406b7edb82da752f7bdde29111eb2"} Nov 23 16:13:50 crc kubenswrapper[5050]: I1123 16:13:50.336760 5050 generic.go:334] "Generic (PLEG): container finished" podID="47528bce-fa2a-4f34-bdb3-cd44a57be5ef" containerID="5bb900dd47b5d925af4de3b58077019a8802d7af78107df419ec6c3627896256" exitCode=0 Nov 23 16:13:50 crc kubenswrapper[5050]: I1123 16:13:50.336867 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-gl7j8" event={"ID":"47528bce-fa2a-4f34-bdb3-cd44a57be5ef","Type":"ContainerDied","Data":"5bb900dd47b5d925af4de3b58077019a8802d7af78107df419ec6c3627896256"} Nov 23 16:13:50 crc kubenswrapper[5050]: I1123 16:13:50.816781 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-2b01-account-create-wk29k" Nov 23 16:13:50 crc kubenswrapper[5050]: I1123 16:13:50.865874 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c91600e9-e5ac-4426-bcbf-42cc78069c6a-operator-scripts\") pod \"c91600e9-e5ac-4426-bcbf-42cc78069c6a\" (UID: \"c91600e9-e5ac-4426-bcbf-42cc78069c6a\") " Nov 23 16:13:50 crc kubenswrapper[5050]: I1123 16:13:50.865958 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q9k7r\" (UniqueName: \"kubernetes.io/projected/c91600e9-e5ac-4426-bcbf-42cc78069c6a-kube-api-access-q9k7r\") pod \"c91600e9-e5ac-4426-bcbf-42cc78069c6a\" (UID: \"c91600e9-e5ac-4426-bcbf-42cc78069c6a\") " Nov 23 16:13:50 crc kubenswrapper[5050]: I1123 16:13:50.866984 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c91600e9-e5ac-4426-bcbf-42cc78069c6a-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "c91600e9-e5ac-4426-bcbf-42cc78069c6a" (UID: "c91600e9-e5ac-4426-bcbf-42cc78069c6a"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 16:13:50 crc kubenswrapper[5050]: I1123 16:13:50.900733 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c91600e9-e5ac-4426-bcbf-42cc78069c6a-kube-api-access-q9k7r" (OuterVolumeSpecName: "kube-api-access-q9k7r") pod "c91600e9-e5ac-4426-bcbf-42cc78069c6a" (UID: "c91600e9-e5ac-4426-bcbf-42cc78069c6a"). InnerVolumeSpecName "kube-api-access-q9k7r". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 16:13:50 crc kubenswrapper[5050]: I1123 16:13:50.968842 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q9k7r\" (UniqueName: \"kubernetes.io/projected/c91600e9-e5ac-4426-bcbf-42cc78069c6a-kube-api-access-q9k7r\") on node \"crc\" DevicePath \"\"" Nov 23 16:13:50 crc kubenswrapper[5050]: I1123 16:13:50.968879 5050 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c91600e9-e5ac-4426-bcbf-42cc78069c6a-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 23 16:13:50 crc kubenswrapper[5050]: I1123 16:13:50.979722 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-p2fgp" Nov 23 16:13:50 crc kubenswrapper[5050]: I1123 16:13:50.987151 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-rmlzd" Nov 23 16:13:51 crc kubenswrapper[5050]: I1123 16:13:51.070593 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5b75904b-a477-4659-9bc3-0f9771466735-operator-scripts\") pod \"5b75904b-a477-4659-9bc3-0f9771466735\" (UID: \"5b75904b-a477-4659-9bc3-0f9771466735\") " Nov 23 16:13:51 crc kubenswrapper[5050]: I1123 16:13:51.070690 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/18ab378a-a758-455c-9350-8a610f30c66d-operator-scripts\") pod \"18ab378a-a758-455c-9350-8a610f30c66d\" (UID: \"18ab378a-a758-455c-9350-8a610f30c66d\") " Nov 23 16:13:51 crc kubenswrapper[5050]: I1123 16:13:51.070780 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2c9bl\" (UniqueName: \"kubernetes.io/projected/5b75904b-a477-4659-9bc3-0f9771466735-kube-api-access-2c9bl\") pod \"5b75904b-a477-4659-9bc3-0f9771466735\" (UID: \"5b75904b-a477-4659-9bc3-0f9771466735\") " Nov 23 16:13:51 crc kubenswrapper[5050]: I1123 16:13:51.070876 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9jlm7\" (UniqueName: \"kubernetes.io/projected/18ab378a-a758-455c-9350-8a610f30c66d-kube-api-access-9jlm7\") pod \"18ab378a-a758-455c-9350-8a610f30c66d\" (UID: \"18ab378a-a758-455c-9350-8a610f30c66d\") " Nov 23 16:13:51 crc kubenswrapper[5050]: I1123 16:13:51.071260 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/18ab378a-a758-455c-9350-8a610f30c66d-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "18ab378a-a758-455c-9350-8a610f30c66d" (UID: "18ab378a-a758-455c-9350-8a610f30c66d"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 16:13:51 crc kubenswrapper[5050]: I1123 16:13:51.071495 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5b75904b-a477-4659-9bc3-0f9771466735-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "5b75904b-a477-4659-9bc3-0f9771466735" (UID: "5b75904b-a477-4659-9bc3-0f9771466735"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 16:13:51 crc kubenswrapper[5050]: I1123 16:13:51.071984 5050 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5b75904b-a477-4659-9bc3-0f9771466735-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 23 16:13:51 crc kubenswrapper[5050]: I1123 16:13:51.072012 5050 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/18ab378a-a758-455c-9350-8a610f30c66d-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 23 16:13:51 crc kubenswrapper[5050]: I1123 16:13:51.076386 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b75904b-a477-4659-9bc3-0f9771466735-kube-api-access-2c9bl" (OuterVolumeSpecName: "kube-api-access-2c9bl") pod "5b75904b-a477-4659-9bc3-0f9771466735" (UID: "5b75904b-a477-4659-9bc3-0f9771466735"). InnerVolumeSpecName "kube-api-access-2c9bl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 16:13:51 crc kubenswrapper[5050]: I1123 16:13:51.077233 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/18ab378a-a758-455c-9350-8a610f30c66d-kube-api-access-9jlm7" (OuterVolumeSpecName: "kube-api-access-9jlm7") pod "18ab378a-a758-455c-9350-8a610f30c66d" (UID: "18ab378a-a758-455c-9350-8a610f30c66d"). InnerVolumeSpecName "kube-api-access-9jlm7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 16:13:51 crc kubenswrapper[5050]: I1123 16:13:51.173644 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2c9bl\" (UniqueName: \"kubernetes.io/projected/5b75904b-a477-4659-9bc3-0f9771466735-kube-api-access-2c9bl\") on node \"crc\" DevicePath \"\"" Nov 23 16:13:51 crc kubenswrapper[5050]: I1123 16:13:51.173694 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9jlm7\" (UniqueName: \"kubernetes.io/projected/18ab378a-a758-455c-9350-8a610f30c66d-kube-api-access-9jlm7\") on node \"crc\" DevicePath \"\"" Nov 23 16:13:51 crc kubenswrapper[5050]: I1123 16:13:51.350726 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-2b01-account-create-wk29k" event={"ID":"c91600e9-e5ac-4426-bcbf-42cc78069c6a","Type":"ContainerDied","Data":"a806ef20fdd1cb38a2ccd8440c1ee204c1388b2195d985074d5fcb59378ca07e"} Nov 23 16:13:51 crc kubenswrapper[5050]: I1123 16:13:51.350789 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-2b01-account-create-wk29k" Nov 23 16:13:51 crc kubenswrapper[5050]: I1123 16:13:51.350825 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a806ef20fdd1cb38a2ccd8440c1ee204c1388b2195d985074d5fcb59378ca07e" Nov 23 16:13:51 crc kubenswrapper[5050]: I1123 16:13:51.353197 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-rmlzd" event={"ID":"18ab378a-a758-455c-9350-8a610f30c66d","Type":"ContainerDied","Data":"56a0c5400c94741841ce03fb12c31bad6af125804c057e5e790fe5b7c4a08045"} Nov 23 16:13:51 crc kubenswrapper[5050]: I1123 16:13:51.353246 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="56a0c5400c94741841ce03fb12c31bad6af125804c057e5e790fe5b7c4a08045" Nov 23 16:13:51 crc kubenswrapper[5050]: I1123 16:13:51.353251 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-db-create-rmlzd" Nov 23 16:13:51 crc kubenswrapper[5050]: I1123 16:13:51.356263 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-p2fgp" event={"ID":"5b75904b-a477-4659-9bc3-0f9771466735","Type":"ContainerDied","Data":"de1375c9c2e9c4bf88bbb6d487bf67bd56a25fc33c45459fddce02375d27873a"} Nov 23 16:13:51 crc kubenswrapper[5050]: I1123 16:13:51.356326 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-p2fgp" Nov 23 16:13:51 crc kubenswrapper[5050]: I1123 16:13:51.356344 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="de1375c9c2e9c4bf88bbb6d487bf67bd56a25fc33c45459fddce02375d27873a" Nov 23 16:13:51 crc kubenswrapper[5050]: I1123 16:13:51.829475 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-9cac-account-create-m4bh5" Nov 23 16:13:51 crc kubenswrapper[5050]: I1123 16:13:51.970890 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-gl7j8" Nov 23 16:13:51 crc kubenswrapper[5050]: I1123 16:13:51.978629 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-6645-account-create-4zgnc" Nov 23 16:13:52 crc kubenswrapper[5050]: I1123 16:13:52.016934 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r2j7p\" (UniqueName: \"kubernetes.io/projected/47528bce-fa2a-4f34-bdb3-cd44a57be5ef-kube-api-access-r2j7p\") pod \"47528bce-fa2a-4f34-bdb3-cd44a57be5ef\" (UID: \"47528bce-fa2a-4f34-bdb3-cd44a57be5ef\") " Nov 23 16:13:52 crc kubenswrapper[5050]: I1123 16:13:52.017021 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d3f67377-c66e-4af9-b42f-185208ef4cb6-operator-scripts\") pod \"d3f67377-c66e-4af9-b42f-185208ef4cb6\" (UID: \"d3f67377-c66e-4af9-b42f-185208ef4cb6\") " Nov 23 16:13:52 crc kubenswrapper[5050]: I1123 16:13:52.017115 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/47528bce-fa2a-4f34-bdb3-cd44a57be5ef-operator-scripts\") pod \"47528bce-fa2a-4f34-bdb3-cd44a57be5ef\" (UID: \"47528bce-fa2a-4f34-bdb3-cd44a57be5ef\") " Nov 23 16:13:52 crc kubenswrapper[5050]: I1123 16:13:52.017135 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/dff083ef-35fc-44a9-92f2-e884a936cc27-operator-scripts\") pod \"dff083ef-35fc-44a9-92f2-e884a936cc27\" (UID: \"dff083ef-35fc-44a9-92f2-e884a936cc27\") " Nov 23 16:13:52 crc kubenswrapper[5050]: I1123 16:13:52.017157 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pfm6t\" (UniqueName: \"kubernetes.io/projected/dff083ef-35fc-44a9-92f2-e884a936cc27-kube-api-access-pfm6t\") pod \"dff083ef-35fc-44a9-92f2-e884a936cc27\" (UID: \"dff083ef-35fc-44a9-92f2-e884a936cc27\") " Nov 23 16:13:52 crc kubenswrapper[5050]: I1123 16:13:52.017351 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nz76r\" (UniqueName: \"kubernetes.io/projected/d3f67377-c66e-4af9-b42f-185208ef4cb6-kube-api-access-nz76r\") pod \"d3f67377-c66e-4af9-b42f-185208ef4cb6\" (UID: \"d3f67377-c66e-4af9-b42f-185208ef4cb6\") " Nov 23 
Nov 23 16:13:52 crc kubenswrapper[5050]: I1123 16:13:52.018255 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/47528bce-fa2a-4f34-bdb3-cd44a57be5ef-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "47528bce-fa2a-4f34-bdb3-cd44a57be5ef" (UID: "47528bce-fa2a-4f34-bdb3-cd44a57be5ef"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 23 16:13:52 crc kubenswrapper[5050]: I1123 16:13:52.018895 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dff083ef-35fc-44a9-92f2-e884a936cc27-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "dff083ef-35fc-44a9-92f2-e884a936cc27" (UID: "dff083ef-35fc-44a9-92f2-e884a936cc27"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 23 16:13:52 crc kubenswrapper[5050]: I1123 16:13:52.027213 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/47528bce-fa2a-4f34-bdb3-cd44a57be5ef-kube-api-access-r2j7p" (OuterVolumeSpecName: "kube-api-access-r2j7p") pod "47528bce-fa2a-4f34-bdb3-cd44a57be5ef" (UID: "47528bce-fa2a-4f34-bdb3-cd44a57be5ef"). InnerVolumeSpecName "kube-api-access-r2j7p". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 23 16:13:52 crc kubenswrapper[5050]: I1123 16:13:52.027786 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d3f67377-c66e-4af9-b42f-185208ef4cb6-kube-api-access-nz76r" (OuterVolumeSpecName: "kube-api-access-nz76r") pod "d3f67377-c66e-4af9-b42f-185208ef4cb6" (UID: "d3f67377-c66e-4af9-b42f-185208ef4cb6"). InnerVolumeSpecName "kube-api-access-nz76r". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 23 16:13:52 crc kubenswrapper[5050]: I1123 16:13:52.027919 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d3f67377-c66e-4af9-b42f-185208ef4cb6-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "d3f67377-c66e-4af9-b42f-185208ef4cb6" (UID: "d3f67377-c66e-4af9-b42f-185208ef4cb6"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 23 16:13:52 crc kubenswrapper[5050]: I1123 16:13:52.028606 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dff083ef-35fc-44a9-92f2-e884a936cc27-kube-api-access-pfm6t" (OuterVolumeSpecName: "kube-api-access-pfm6t") pod "dff083ef-35fc-44a9-92f2-e884a936cc27" (UID: "dff083ef-35fc-44a9-92f2-e884a936cc27"). InnerVolumeSpecName "kube-api-access-pfm6t". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 23 16:13:52 crc kubenswrapper[5050]: I1123 16:13:52.120567 5050 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/47528bce-fa2a-4f34-bdb3-cd44a57be5ef-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 23 16:13:52 crc kubenswrapper[5050]: I1123 16:13:52.120615 5050 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/dff083ef-35fc-44a9-92f2-e884a936cc27-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 23 16:13:52 crc kubenswrapper[5050]: I1123 16:13:52.120630 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pfm6t\" (UniqueName: \"kubernetes.io/projected/dff083ef-35fc-44a9-92f2-e884a936cc27-kube-api-access-pfm6t\") on node \"crc\" DevicePath \"\""
Nov 23 16:13:52 crc kubenswrapper[5050]: I1123 16:13:52.120647 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nz76r\" (UniqueName: \"kubernetes.io/projected/d3f67377-c66e-4af9-b42f-185208ef4cb6-kube-api-access-nz76r\") on node \"crc\" DevicePath \"\""
Nov 23 16:13:52 crc kubenswrapper[5050]: I1123 16:13:52.120661 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r2j7p\" (UniqueName: \"kubernetes.io/projected/47528bce-fa2a-4f34-bdb3-cd44a57be5ef-kube-api-access-r2j7p\") on node \"crc\" DevicePath \"\""
Nov 23 16:13:52 crc kubenswrapper[5050]: I1123 16:13:52.120674 5050 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d3f67377-c66e-4af9-b42f-185208ef4cb6-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 23 16:13:52 crc kubenswrapper[5050]: I1123 16:13:52.372606 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-gl7j8"
Nov 23 16:13:52 crc kubenswrapper[5050]: I1123 16:13:52.372610 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-gl7j8" event={"ID":"47528bce-fa2a-4f34-bdb3-cd44a57be5ef","Type":"ContainerDied","Data":"34810a148d0d68167219b283c75fe7e212bfabd3c8c63cc107e224310f43a107"}
Nov 23 16:13:52 crc kubenswrapper[5050]: I1123 16:13:52.372778 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="34810a148d0d68167219b283c75fe7e212bfabd3c8c63cc107e224310f43a107"
Nov 23 16:13:52 crc kubenswrapper[5050]: I1123 16:13:52.377435 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-6645-account-create-4zgnc" event={"ID":"d3f67377-c66e-4af9-b42f-185208ef4cb6","Type":"ContainerDied","Data":"97b764ef877290e0803b3d5aa96b603893eff43a7825f75848800882681159f8"}
Nov 23 16:13:52 crc kubenswrapper[5050]: I1123 16:13:52.377575 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="97b764ef877290e0803b3d5aa96b603893eff43a7825f75848800882681159f8"
Nov 23 16:13:52 crc kubenswrapper[5050]: I1123 16:13:52.377502 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-6645-account-create-4zgnc"
Need to start a new one" pod="openstack/nova-cell0-6645-account-create-4zgnc" Nov 23 16:13:52 crc kubenswrapper[5050]: I1123 16:13:52.380126 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-9cac-account-create-m4bh5" event={"ID":"dff083ef-35fc-44a9-92f2-e884a936cc27","Type":"ContainerDied","Data":"e0e4343d11ec7d6a101bb88d453991d11a6fd0f7de584211edaf2ece04ef1e2f"} Nov 23 16:13:52 crc kubenswrapper[5050]: I1123 16:13:52.380154 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e0e4343d11ec7d6a101bb88d453991d11a6fd0f7de584211edaf2ece04ef1e2f" Nov 23 16:13:52 crc kubenswrapper[5050]: I1123 16:13:52.380244 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-9cac-account-create-m4bh5" Nov 23 16:13:58 crc kubenswrapper[5050]: I1123 16:13:58.015907 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-db-sync-9skqn"] Nov 23 16:13:58 crc kubenswrapper[5050]: E1123 16:13:58.017136 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="47528bce-fa2a-4f34-bdb3-cd44a57be5ef" containerName="mariadb-database-create" Nov 23 16:13:58 crc kubenswrapper[5050]: I1123 16:13:58.017157 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="47528bce-fa2a-4f34-bdb3-cd44a57be5ef" containerName="mariadb-database-create" Nov 23 16:13:58 crc kubenswrapper[5050]: E1123 16:13:58.017181 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5b75904b-a477-4659-9bc3-0f9771466735" containerName="mariadb-database-create" Nov 23 16:13:58 crc kubenswrapper[5050]: I1123 16:13:58.017187 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="5b75904b-a477-4659-9bc3-0f9771466735" containerName="mariadb-database-create" Nov 23 16:13:58 crc kubenswrapper[5050]: E1123 16:13:58.017199 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="18ab378a-a758-455c-9350-8a610f30c66d" containerName="mariadb-database-create" Nov 23 16:13:58 crc kubenswrapper[5050]: I1123 16:13:58.017207 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="18ab378a-a758-455c-9350-8a610f30c66d" containerName="mariadb-database-create" Nov 23 16:13:58 crc kubenswrapper[5050]: E1123 16:13:58.017227 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d3f67377-c66e-4af9-b42f-185208ef4cb6" containerName="mariadb-account-create" Nov 23 16:13:58 crc kubenswrapper[5050]: I1123 16:13:58.017235 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="d3f67377-c66e-4af9-b42f-185208ef4cb6" containerName="mariadb-account-create" Nov 23 16:13:58 crc kubenswrapper[5050]: E1123 16:13:58.017258 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c91600e9-e5ac-4426-bcbf-42cc78069c6a" containerName="mariadb-account-create" Nov 23 16:13:58 crc kubenswrapper[5050]: I1123 16:13:58.017264 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="c91600e9-e5ac-4426-bcbf-42cc78069c6a" containerName="mariadb-account-create" Nov 23 16:13:58 crc kubenswrapper[5050]: E1123 16:13:58.017274 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dff083ef-35fc-44a9-92f2-e884a936cc27" containerName="mariadb-account-create" Nov 23 16:13:58 crc kubenswrapper[5050]: I1123 16:13:58.017279 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="dff083ef-35fc-44a9-92f2-e884a936cc27" containerName="mariadb-account-create" Nov 23 16:13:58 crc kubenswrapper[5050]: I1123 16:13:58.017517 5050 memory_manager.go:354] 
"RemoveStaleState removing state" podUID="dff083ef-35fc-44a9-92f2-e884a936cc27" containerName="mariadb-account-create" Nov 23 16:13:58 crc kubenswrapper[5050]: I1123 16:13:58.017537 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="c91600e9-e5ac-4426-bcbf-42cc78069c6a" containerName="mariadb-account-create" Nov 23 16:13:58 crc kubenswrapper[5050]: I1123 16:13:58.017551 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="5b75904b-a477-4659-9bc3-0f9771466735" containerName="mariadb-database-create" Nov 23 16:13:58 crc kubenswrapper[5050]: I1123 16:13:58.017566 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="d3f67377-c66e-4af9-b42f-185208ef4cb6" containerName="mariadb-account-create" Nov 23 16:13:58 crc kubenswrapper[5050]: I1123 16:13:58.017578 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="47528bce-fa2a-4f34-bdb3-cd44a57be5ef" containerName="mariadb-database-create" Nov 23 16:13:58 crc kubenswrapper[5050]: I1123 16:13:58.017592 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="18ab378a-a758-455c-9350-8a610f30c66d" containerName="mariadb-database-create" Nov 23 16:13:58 crc kubenswrapper[5050]: I1123 16:13:58.018411 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-9skqn" Nov 23 16:13:58 crc kubenswrapper[5050]: I1123 16:13:58.021987 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-scripts" Nov 23 16:13:58 crc kubenswrapper[5050]: I1123 16:13:58.022212 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-nfvzh" Nov 23 16:13:58 crc kubenswrapper[5050]: I1123 16:13:58.029519 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Nov 23 16:13:58 crc kubenswrapper[5050]: I1123 16:13:58.031705 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-9skqn"] Nov 23 16:13:58 crc kubenswrapper[5050]: I1123 16:13:58.161974 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8k4ft\" (UniqueName: \"kubernetes.io/projected/10796220-fe20-4f66-b1bf-08bd71375d85-kube-api-access-8k4ft\") pod \"nova-cell0-conductor-db-sync-9skqn\" (UID: \"10796220-fe20-4f66-b1bf-08bd71375d85\") " pod="openstack/nova-cell0-conductor-db-sync-9skqn" Nov 23 16:13:58 crc kubenswrapper[5050]: I1123 16:13:58.162962 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/10796220-fe20-4f66-b1bf-08bd71375d85-config-data\") pod \"nova-cell0-conductor-db-sync-9skqn\" (UID: \"10796220-fe20-4f66-b1bf-08bd71375d85\") " pod="openstack/nova-cell0-conductor-db-sync-9skqn" Nov 23 16:13:58 crc kubenswrapper[5050]: I1123 16:13:58.163116 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/10796220-fe20-4f66-b1bf-08bd71375d85-scripts\") pod \"nova-cell0-conductor-db-sync-9skqn\" (UID: \"10796220-fe20-4f66-b1bf-08bd71375d85\") " pod="openstack/nova-cell0-conductor-db-sync-9skqn" Nov 23 16:13:58 crc kubenswrapper[5050]: I1123 16:13:58.163301 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/10796220-fe20-4f66-b1bf-08bd71375d85-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-9skqn\" (UID: \"10796220-fe20-4f66-b1bf-08bd71375d85\") " pod="openstack/nova-cell0-conductor-db-sync-9skqn" Nov 23 16:13:58 crc kubenswrapper[5050]: I1123 16:13:58.265066 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/10796220-fe20-4f66-b1bf-08bd71375d85-config-data\") pod \"nova-cell0-conductor-db-sync-9skqn\" (UID: \"10796220-fe20-4f66-b1bf-08bd71375d85\") " pod="openstack/nova-cell0-conductor-db-sync-9skqn" Nov 23 16:13:58 crc kubenswrapper[5050]: I1123 16:13:58.265144 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/10796220-fe20-4f66-b1bf-08bd71375d85-scripts\") pod \"nova-cell0-conductor-db-sync-9skqn\" (UID: \"10796220-fe20-4f66-b1bf-08bd71375d85\") " pod="openstack/nova-cell0-conductor-db-sync-9skqn" Nov 23 16:13:58 crc kubenswrapper[5050]: I1123 16:13:58.265218 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/10796220-fe20-4f66-b1bf-08bd71375d85-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-9skqn\" (UID: \"10796220-fe20-4f66-b1bf-08bd71375d85\") " pod="openstack/nova-cell0-conductor-db-sync-9skqn" Nov 23 16:13:58 crc kubenswrapper[5050]: I1123 16:13:58.265267 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8k4ft\" (UniqueName: \"kubernetes.io/projected/10796220-fe20-4f66-b1bf-08bd71375d85-kube-api-access-8k4ft\") pod \"nova-cell0-conductor-db-sync-9skqn\" (UID: \"10796220-fe20-4f66-b1bf-08bd71375d85\") " pod="openstack/nova-cell0-conductor-db-sync-9skqn" Nov 23 16:13:58 crc kubenswrapper[5050]: I1123 16:13:58.274167 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/10796220-fe20-4f66-b1bf-08bd71375d85-scripts\") pod \"nova-cell0-conductor-db-sync-9skqn\" (UID: \"10796220-fe20-4f66-b1bf-08bd71375d85\") " pod="openstack/nova-cell0-conductor-db-sync-9skqn" Nov 23 16:13:58 crc kubenswrapper[5050]: I1123 16:13:58.274928 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/10796220-fe20-4f66-b1bf-08bd71375d85-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-9skqn\" (UID: \"10796220-fe20-4f66-b1bf-08bd71375d85\") " pod="openstack/nova-cell0-conductor-db-sync-9skqn" Nov 23 16:13:58 crc kubenswrapper[5050]: I1123 16:13:58.275324 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/10796220-fe20-4f66-b1bf-08bd71375d85-config-data\") pod \"nova-cell0-conductor-db-sync-9skqn\" (UID: \"10796220-fe20-4f66-b1bf-08bd71375d85\") " pod="openstack/nova-cell0-conductor-db-sync-9skqn" Nov 23 16:13:58 crc kubenswrapper[5050]: I1123 16:13:58.292122 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8k4ft\" (UniqueName: \"kubernetes.io/projected/10796220-fe20-4f66-b1bf-08bd71375d85-kube-api-access-8k4ft\") pod \"nova-cell0-conductor-db-sync-9skqn\" (UID: \"10796220-fe20-4f66-b1bf-08bd71375d85\") " pod="openstack/nova-cell0-conductor-db-sync-9skqn" Nov 23 16:13:58 crc kubenswrapper[5050]: I1123 16:13:58.344050 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-9skqn" Nov 23 16:13:58 crc kubenswrapper[5050]: I1123 16:13:58.883269 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-9skqn"] Nov 23 16:13:59 crc kubenswrapper[5050]: I1123 16:13:59.225217 5050 patch_prober.go:28] interesting pod/machine-config-daemon-hlrlq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 23 16:13:59 crc kubenswrapper[5050]: I1123 16:13:59.225733 5050 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 23 16:13:59 crc kubenswrapper[5050]: I1123 16:13:59.461144 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-9skqn" event={"ID":"10796220-fe20-4f66-b1bf-08bd71375d85","Type":"ContainerStarted","Data":"8debc2c09a4a9d9beba47e046e4496565d9478c2cd6ef9a058b6c604b2132e22"} Nov 23 16:13:59 crc kubenswrapper[5050]: I1123 16:13:59.461207 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-9skqn" event={"ID":"10796220-fe20-4f66-b1bf-08bd71375d85","Type":"ContainerStarted","Data":"28fb2feb3651948fbfd17ebbb2f992482ad64eabccb2e3be529f118172c706cb"} Nov 23 16:13:59 crc kubenswrapper[5050]: I1123 16:13:59.487992 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-db-sync-9skqn" podStartSLOduration=2.487820219 podStartE2EDuration="2.487820219s" podCreationTimestamp="2025-11-23 16:13:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 16:13:59.480505952 +0000 UTC m=+5534.647502467" watchObservedRunningTime="2025-11-23 16:13:59.487820219 +0000 UTC m=+5534.654816744" Nov 23 16:14:05 crc kubenswrapper[5050]: I1123 16:14:05.567021 5050 generic.go:334] "Generic (PLEG): container finished" podID="10796220-fe20-4f66-b1bf-08bd71375d85" containerID="8debc2c09a4a9d9beba47e046e4496565d9478c2cd6ef9a058b6c604b2132e22" exitCode=0 Nov 23 16:14:05 crc kubenswrapper[5050]: I1123 16:14:05.570169 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-9skqn" event={"ID":"10796220-fe20-4f66-b1bf-08bd71375d85","Type":"ContainerDied","Data":"8debc2c09a4a9d9beba47e046e4496565d9478c2cd6ef9a058b6c604b2132e22"} Nov 23 16:14:07 crc kubenswrapper[5050]: I1123 16:14:07.067278 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-9skqn" Nov 23 16:14:07 crc kubenswrapper[5050]: I1123 16:14:07.177176 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/10796220-fe20-4f66-b1bf-08bd71375d85-config-data\") pod \"10796220-fe20-4f66-b1bf-08bd71375d85\" (UID: \"10796220-fe20-4f66-b1bf-08bd71375d85\") " Nov 23 16:14:07 crc kubenswrapper[5050]: I1123 16:14:07.177902 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/10796220-fe20-4f66-b1bf-08bd71375d85-scripts\") pod \"10796220-fe20-4f66-b1bf-08bd71375d85\" (UID: \"10796220-fe20-4f66-b1bf-08bd71375d85\") " Nov 23 16:14:07 crc kubenswrapper[5050]: I1123 16:14:07.177951 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8k4ft\" (UniqueName: \"kubernetes.io/projected/10796220-fe20-4f66-b1bf-08bd71375d85-kube-api-access-8k4ft\") pod \"10796220-fe20-4f66-b1bf-08bd71375d85\" (UID: \"10796220-fe20-4f66-b1bf-08bd71375d85\") " Nov 23 16:14:07 crc kubenswrapper[5050]: I1123 16:14:07.178100 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/10796220-fe20-4f66-b1bf-08bd71375d85-combined-ca-bundle\") pod \"10796220-fe20-4f66-b1bf-08bd71375d85\" (UID: \"10796220-fe20-4f66-b1bf-08bd71375d85\") " Nov 23 16:14:07 crc kubenswrapper[5050]: I1123 16:14:07.186653 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/10796220-fe20-4f66-b1bf-08bd71375d85-scripts" (OuterVolumeSpecName: "scripts") pod "10796220-fe20-4f66-b1bf-08bd71375d85" (UID: "10796220-fe20-4f66-b1bf-08bd71375d85"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:14:07 crc kubenswrapper[5050]: I1123 16:14:07.186998 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/10796220-fe20-4f66-b1bf-08bd71375d85-kube-api-access-8k4ft" (OuterVolumeSpecName: "kube-api-access-8k4ft") pod "10796220-fe20-4f66-b1bf-08bd71375d85" (UID: "10796220-fe20-4f66-b1bf-08bd71375d85"). InnerVolumeSpecName "kube-api-access-8k4ft". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 16:14:07 crc kubenswrapper[5050]: I1123 16:14:07.218695 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/10796220-fe20-4f66-b1bf-08bd71375d85-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "10796220-fe20-4f66-b1bf-08bd71375d85" (UID: "10796220-fe20-4f66-b1bf-08bd71375d85"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:14:07 crc kubenswrapper[5050]: I1123 16:14:07.220288 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/10796220-fe20-4f66-b1bf-08bd71375d85-config-data" (OuterVolumeSpecName: "config-data") pod "10796220-fe20-4f66-b1bf-08bd71375d85" (UID: "10796220-fe20-4f66-b1bf-08bd71375d85"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:14:07 crc kubenswrapper[5050]: I1123 16:14:07.283804 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8k4ft\" (UniqueName: \"kubernetes.io/projected/10796220-fe20-4f66-b1bf-08bd71375d85-kube-api-access-8k4ft\") on node \"crc\" DevicePath \"\"" Nov 23 16:14:07 crc kubenswrapper[5050]: I1123 16:14:07.283857 5050 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/10796220-fe20-4f66-b1bf-08bd71375d85-scripts\") on node \"crc\" DevicePath \"\"" Nov 23 16:14:07 crc kubenswrapper[5050]: I1123 16:14:07.283872 5050 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/10796220-fe20-4f66-b1bf-08bd71375d85-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 16:14:07 crc kubenswrapper[5050]: I1123 16:14:07.283883 5050 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/10796220-fe20-4f66-b1bf-08bd71375d85-config-data\") on node \"crc\" DevicePath \"\"" Nov 23 16:14:07 crc kubenswrapper[5050]: I1123 16:14:07.371921 5050 scope.go:117] "RemoveContainer" containerID="c78974c1b9e1482068cc20864fb7d5529b2859fbbe8ae659ce48b90e90212f4b" Nov 23 16:14:07 crc kubenswrapper[5050]: I1123 16:14:07.407771 5050 scope.go:117] "RemoveContainer" containerID="7a12a36cb5e094a99ccf3f2d057dbaded80340ff9f4bf98a7cd0236583d0a156" Nov 23 16:14:07 crc kubenswrapper[5050]: I1123 16:14:07.448156 5050 scope.go:117] "RemoveContainer" containerID="c60f0a4489aca68db2f287a17d908c0fd6c671e48582e0301d53ce00eb4ac8d3" Nov 23 16:14:07 crc kubenswrapper[5050]: I1123 16:14:07.596506 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-9skqn" event={"ID":"10796220-fe20-4f66-b1bf-08bd71375d85","Type":"ContainerDied","Data":"28fb2feb3651948fbfd17ebbb2f992482ad64eabccb2e3be529f118172c706cb"} Nov 23 16:14:07 crc kubenswrapper[5050]: I1123 16:14:07.596559 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="28fb2feb3651948fbfd17ebbb2f992482ad64eabccb2e3be529f118172c706cb" Nov 23 16:14:07 crc kubenswrapper[5050]: I1123 16:14:07.596649 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-9skqn" Nov 23 16:14:07 crc kubenswrapper[5050]: I1123 16:14:07.704321 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 23 16:14:07 crc kubenswrapper[5050]: E1123 16:14:07.704933 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="10796220-fe20-4f66-b1bf-08bd71375d85" containerName="nova-cell0-conductor-db-sync" Nov 23 16:14:07 crc kubenswrapper[5050]: I1123 16:14:07.704957 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="10796220-fe20-4f66-b1bf-08bd71375d85" containerName="nova-cell0-conductor-db-sync" Nov 23 16:14:07 crc kubenswrapper[5050]: I1123 16:14:07.705201 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="10796220-fe20-4f66-b1bf-08bd71375d85" containerName="nova-cell0-conductor-db-sync" Nov 23 16:14:07 crc kubenswrapper[5050]: I1123 16:14:07.706114 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 23 16:14:07 crc kubenswrapper[5050]: I1123 16:14:07.709515 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-nfvzh" Nov 23 16:14:07 crc kubenswrapper[5050]: I1123 16:14:07.713954 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Nov 23 16:14:07 crc kubenswrapper[5050]: I1123 16:14:07.729427 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 23 16:14:07 crc kubenswrapper[5050]: I1123 16:14:07.794007 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bddd9398-a4d1-45ef-b671-8d370afc13a3-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"bddd9398-a4d1-45ef-b671-8d370afc13a3\") " pod="openstack/nova-cell0-conductor-0" Nov 23 16:14:07 crc kubenswrapper[5050]: I1123 16:14:07.794290 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bddd9398-a4d1-45ef-b671-8d370afc13a3-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"bddd9398-a4d1-45ef-b671-8d370afc13a3\") " pod="openstack/nova-cell0-conductor-0" Nov 23 16:14:07 crc kubenswrapper[5050]: I1123 16:14:07.794385 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qnpdx\" (UniqueName: \"kubernetes.io/projected/bddd9398-a4d1-45ef-b671-8d370afc13a3-kube-api-access-qnpdx\") pod \"nova-cell0-conductor-0\" (UID: \"bddd9398-a4d1-45ef-b671-8d370afc13a3\") " pod="openstack/nova-cell0-conductor-0" Nov 23 16:14:07 crc kubenswrapper[5050]: I1123 16:14:07.895766 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qnpdx\" (UniqueName: \"kubernetes.io/projected/bddd9398-a4d1-45ef-b671-8d370afc13a3-kube-api-access-qnpdx\") pod \"nova-cell0-conductor-0\" (UID: \"bddd9398-a4d1-45ef-b671-8d370afc13a3\") " pod="openstack/nova-cell0-conductor-0" Nov 23 16:14:07 crc kubenswrapper[5050]: I1123 16:14:07.895886 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bddd9398-a4d1-45ef-b671-8d370afc13a3-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"bddd9398-a4d1-45ef-b671-8d370afc13a3\") " pod="openstack/nova-cell0-conductor-0" Nov 23 16:14:07 crc kubenswrapper[5050]: I1123 16:14:07.895965 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bddd9398-a4d1-45ef-b671-8d370afc13a3-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"bddd9398-a4d1-45ef-b671-8d370afc13a3\") " pod="openstack/nova-cell0-conductor-0" Nov 23 16:14:07 crc kubenswrapper[5050]: I1123 16:14:07.901372 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bddd9398-a4d1-45ef-b671-8d370afc13a3-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"bddd9398-a4d1-45ef-b671-8d370afc13a3\") " pod="openstack/nova-cell0-conductor-0" Nov 23 16:14:07 crc kubenswrapper[5050]: I1123 16:14:07.902468 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bddd9398-a4d1-45ef-b671-8d370afc13a3-config-data\") pod \"nova-cell0-conductor-0\" 
(UID: \"bddd9398-a4d1-45ef-b671-8d370afc13a3\") " pod="openstack/nova-cell0-conductor-0" Nov 23 16:14:07 crc kubenswrapper[5050]: I1123 16:14:07.918232 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qnpdx\" (UniqueName: \"kubernetes.io/projected/bddd9398-a4d1-45ef-b671-8d370afc13a3-kube-api-access-qnpdx\") pod \"nova-cell0-conductor-0\" (UID: \"bddd9398-a4d1-45ef-b671-8d370afc13a3\") " pod="openstack/nova-cell0-conductor-0" Nov 23 16:14:08 crc kubenswrapper[5050]: I1123 16:14:08.027240 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 23 16:14:08 crc kubenswrapper[5050]: I1123 16:14:08.516838 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 23 16:14:08 crc kubenswrapper[5050]: I1123 16:14:08.611319 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"bddd9398-a4d1-45ef-b671-8d370afc13a3","Type":"ContainerStarted","Data":"fe5e0c9aa04021e00a2200be51525c47098f3c14fae93917ec67a41e49b32e50"} Nov 23 16:14:09 crc kubenswrapper[5050]: I1123 16:14:09.634858 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"bddd9398-a4d1-45ef-b671-8d370afc13a3","Type":"ContainerStarted","Data":"ae548bb41956ddfb6a2967b0fae62592d8080c28444f96a3eb8e2bfe0a16faa0"} Nov 23 16:14:09 crc kubenswrapper[5050]: I1123 16:14:09.635780 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell0-conductor-0" Nov 23 16:14:09 crc kubenswrapper[5050]: I1123 16:14:09.662316 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-0" podStartSLOduration=2.662286774 podStartE2EDuration="2.662286774s" podCreationTimestamp="2025-11-23 16:14:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 16:14:09.658909609 +0000 UTC m=+5544.825906114" watchObservedRunningTime="2025-11-23 16:14:09.662286774 +0000 UTC m=+5544.829283279" Nov 23 16:14:13 crc kubenswrapper[5050]: I1123 16:14:13.060413 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell0-conductor-0" Nov 23 16:14:13 crc kubenswrapper[5050]: I1123 16:14:13.545743 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-cell-mapping-q5mm8"] Nov 23 16:14:13 crc kubenswrapper[5050]: I1123 16:14:13.547232 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-q5mm8" Nov 23 16:14:13 crc kubenswrapper[5050]: I1123 16:14:13.552772 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-config-data" Nov 23 16:14:13 crc kubenswrapper[5050]: I1123 16:14:13.552831 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-scripts" Nov 23 16:14:13 crc kubenswrapper[5050]: I1123 16:14:13.565030 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-q5mm8"] Nov 23 16:14:13 crc kubenswrapper[5050]: I1123 16:14:13.630909 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e17c8304-cb0e-4fc5-b97f-17d6b65a4e13-config-data\") pod \"nova-cell0-cell-mapping-q5mm8\" (UID: \"e17c8304-cb0e-4fc5-b97f-17d6b65a4e13\") " pod="openstack/nova-cell0-cell-mapping-q5mm8" Nov 23 16:14:13 crc kubenswrapper[5050]: I1123 16:14:13.631118 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e17c8304-cb0e-4fc5-b97f-17d6b65a4e13-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-q5mm8\" (UID: \"e17c8304-cb0e-4fc5-b97f-17d6b65a4e13\") " pod="openstack/nova-cell0-cell-mapping-q5mm8" Nov 23 16:14:13 crc kubenswrapper[5050]: I1123 16:14:13.631169 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e17c8304-cb0e-4fc5-b97f-17d6b65a4e13-scripts\") pod \"nova-cell0-cell-mapping-q5mm8\" (UID: \"e17c8304-cb0e-4fc5-b97f-17d6b65a4e13\") " pod="openstack/nova-cell0-cell-mapping-q5mm8" Nov 23 16:14:13 crc kubenswrapper[5050]: I1123 16:14:13.631297 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-glp2m\" (UniqueName: \"kubernetes.io/projected/e17c8304-cb0e-4fc5-b97f-17d6b65a4e13-kube-api-access-glp2m\") pod \"nova-cell0-cell-mapping-q5mm8\" (UID: \"e17c8304-cb0e-4fc5-b97f-17d6b65a4e13\") " pod="openstack/nova-cell0-cell-mapping-q5mm8" Nov 23 16:14:13 crc kubenswrapper[5050]: I1123 16:14:13.735873 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-glp2m\" (UniqueName: \"kubernetes.io/projected/e17c8304-cb0e-4fc5-b97f-17d6b65a4e13-kube-api-access-glp2m\") pod \"nova-cell0-cell-mapping-q5mm8\" (UID: \"e17c8304-cb0e-4fc5-b97f-17d6b65a4e13\") " pod="openstack/nova-cell0-cell-mapping-q5mm8" Nov 23 16:14:13 crc kubenswrapper[5050]: I1123 16:14:13.736040 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e17c8304-cb0e-4fc5-b97f-17d6b65a4e13-config-data\") pod \"nova-cell0-cell-mapping-q5mm8\" (UID: \"e17c8304-cb0e-4fc5-b97f-17d6b65a4e13\") " pod="openstack/nova-cell0-cell-mapping-q5mm8" Nov 23 16:14:13 crc kubenswrapper[5050]: I1123 16:14:13.736137 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e17c8304-cb0e-4fc5-b97f-17d6b65a4e13-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-q5mm8\" (UID: \"e17c8304-cb0e-4fc5-b97f-17d6b65a4e13\") " pod="openstack/nova-cell0-cell-mapping-q5mm8" Nov 23 16:14:13 crc kubenswrapper[5050]: I1123 16:14:13.736209 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" 
(UniqueName: \"kubernetes.io/secret/e17c8304-cb0e-4fc5-b97f-17d6b65a4e13-scripts\") pod \"nova-cell0-cell-mapping-q5mm8\" (UID: \"e17c8304-cb0e-4fc5-b97f-17d6b65a4e13\") " pod="openstack/nova-cell0-cell-mapping-q5mm8" Nov 23 16:14:13 crc kubenswrapper[5050]: I1123 16:14:13.770792 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 23 16:14:13 crc kubenswrapper[5050]: I1123 16:14:13.781334 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e17c8304-cb0e-4fc5-b97f-17d6b65a4e13-scripts\") pod \"nova-cell0-cell-mapping-q5mm8\" (UID: \"e17c8304-cb0e-4fc5-b97f-17d6b65a4e13\") " pod="openstack/nova-cell0-cell-mapping-q5mm8" Nov 23 16:14:13 crc kubenswrapper[5050]: I1123 16:14:13.793177 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 23 16:14:13 crc kubenswrapper[5050]: I1123 16:14:13.800943 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-glp2m\" (UniqueName: \"kubernetes.io/projected/e17c8304-cb0e-4fc5-b97f-17d6b65a4e13-kube-api-access-glp2m\") pod \"nova-cell0-cell-mapping-q5mm8\" (UID: \"e17c8304-cb0e-4fc5-b97f-17d6b65a4e13\") " pod="openstack/nova-cell0-cell-mapping-q5mm8" Nov 23 16:14:13 crc kubenswrapper[5050]: I1123 16:14:13.803132 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e17c8304-cb0e-4fc5-b97f-17d6b65a4e13-config-data\") pod \"nova-cell0-cell-mapping-q5mm8\" (UID: \"e17c8304-cb0e-4fc5-b97f-17d6b65a4e13\") " pod="openstack/nova-cell0-cell-mapping-q5mm8" Nov 23 16:14:13 crc kubenswrapper[5050]: I1123 16:14:13.803236 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 23 16:14:13 crc kubenswrapper[5050]: I1123 16:14:13.810626 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 23 16:14:13 crc kubenswrapper[5050]: I1123 16:14:13.814436 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e17c8304-cb0e-4fc5-b97f-17d6b65a4e13-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-q5mm8\" (UID: \"e17c8304-cb0e-4fc5-b97f-17d6b65a4e13\") " pod="openstack/nova-cell0-cell-mapping-q5mm8" Nov 23 16:14:13 crc kubenswrapper[5050]: I1123 16:14:13.828069 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 23 16:14:13 crc kubenswrapper[5050]: I1123 16:14:13.829886 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 23 16:14:13 crc kubenswrapper[5050]: I1123 16:14:13.840564 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 23 16:14:13 crc kubenswrapper[5050]: I1123 16:14:13.847120 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 23 16:14:13 crc kubenswrapper[5050]: I1123 16:14:13.878913 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-q5mm8" Nov 23 16:14:13 crc kubenswrapper[5050]: I1123 16:14:13.920218 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 23 16:14:13 crc kubenswrapper[5050]: I1123 16:14:13.921988 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 23 16:14:13 crc kubenswrapper[5050]: I1123 16:14:13.930101 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 23 16:14:13 crc kubenswrapper[5050]: I1123 16:14:13.945559 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/45984aec-f81e-44de-a1f3-cb3dfe1f8d27-config-data\") pod \"nova-scheduler-0\" (UID: \"45984aec-f81e-44de-a1f3-cb3dfe1f8d27\") " pod="openstack/nova-scheduler-0" Nov 23 16:14:13 crc kubenswrapper[5050]: I1123 16:14:13.945619 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9d185f9e-5f51-4519-8fe2-4700c0b6c256-config-data\") pod \"nova-api-0\" (UID: \"9d185f9e-5f51-4519-8fe2-4700c0b6c256\") " pod="openstack/nova-api-0" Nov 23 16:14:13 crc kubenswrapper[5050]: I1123 16:14:13.945680 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/45984aec-f81e-44de-a1f3-cb3dfe1f8d27-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"45984aec-f81e-44de-a1f3-cb3dfe1f8d27\") " pod="openstack/nova-scheduler-0" Nov 23 16:14:13 crc kubenswrapper[5050]: I1123 16:14:13.945705 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cnjf9\" (UniqueName: \"kubernetes.io/projected/9d185f9e-5f51-4519-8fe2-4700c0b6c256-kube-api-access-cnjf9\") pod \"nova-api-0\" (UID: \"9d185f9e-5f51-4519-8fe2-4700c0b6c256\") " pod="openstack/nova-api-0" Nov 23 16:14:13 crc kubenswrapper[5050]: I1123 16:14:13.945736 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d185f9e-5f51-4519-8fe2-4700c0b6c256-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"9d185f9e-5f51-4519-8fe2-4700c0b6c256\") " pod="openstack/nova-api-0" Nov 23 16:14:13 crc kubenswrapper[5050]: I1123 16:14:13.945780 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9d185f9e-5f51-4519-8fe2-4700c0b6c256-logs\") pod \"nova-api-0\" (UID: \"9d185f9e-5f51-4519-8fe2-4700c0b6c256\") " pod="openstack/nova-api-0" Nov 23 16:14:13 crc kubenswrapper[5050]: I1123 16:14:13.945825 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7q976\" (UniqueName: \"kubernetes.io/projected/45984aec-f81e-44de-a1f3-cb3dfe1f8d27-kube-api-access-7q976\") pod \"nova-scheduler-0\" (UID: \"45984aec-f81e-44de-a1f3-cb3dfe1f8d27\") " pod="openstack/nova-scheduler-0" Nov 23 16:14:13 crc kubenswrapper[5050]: I1123 16:14:13.949432 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 23 16:14:14 crc kubenswrapper[5050]: I1123 16:14:14.037981 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 23 16:14:14 crc kubenswrapper[5050]: I1123 16:14:14.041126 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 23 16:14:14 crc kubenswrapper[5050]: I1123 16:14:14.047352 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 23 16:14:14 crc kubenswrapper[5050]: I1123 16:14:14.047927 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/db3e5993-2352-41db-902d-7b646bc63c54-logs\") pod \"nova-metadata-0\" (UID: \"db3e5993-2352-41db-902d-7b646bc63c54\") " pod="openstack/nova-metadata-0" Nov 23 16:14:14 crc kubenswrapper[5050]: I1123 16:14:14.048057 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/45984aec-f81e-44de-a1f3-cb3dfe1f8d27-config-data\") pod \"nova-scheduler-0\" (UID: \"45984aec-f81e-44de-a1f3-cb3dfe1f8d27\") " pod="openstack/nova-scheduler-0" Nov 23 16:14:14 crc kubenswrapper[5050]: I1123 16:14:14.048152 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9d185f9e-5f51-4519-8fe2-4700c0b6c256-config-data\") pod \"nova-api-0\" (UID: \"9d185f9e-5f51-4519-8fe2-4700c0b6c256\") " pod="openstack/nova-api-0" Nov 23 16:14:14 crc kubenswrapper[5050]: I1123 16:14:14.048266 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/db3e5993-2352-41db-902d-7b646bc63c54-config-data\") pod \"nova-metadata-0\" (UID: \"db3e5993-2352-41db-902d-7b646bc63c54\") " pod="openstack/nova-metadata-0" Nov 23 16:14:14 crc kubenswrapper[5050]: I1123 16:14:14.048666 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/45984aec-f81e-44de-a1f3-cb3dfe1f8d27-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"45984aec-f81e-44de-a1f3-cb3dfe1f8d27\") " pod="openstack/nova-scheduler-0" Nov 23 16:14:14 crc kubenswrapper[5050]: I1123 16:14:14.048764 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/db3e5993-2352-41db-902d-7b646bc63c54-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"db3e5993-2352-41db-902d-7b646bc63c54\") " pod="openstack/nova-metadata-0" Nov 23 16:14:14 crc kubenswrapper[5050]: I1123 16:14:14.048868 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cnjf9\" (UniqueName: \"kubernetes.io/projected/9d185f9e-5f51-4519-8fe2-4700c0b6c256-kube-api-access-cnjf9\") pod \"nova-api-0\" (UID: \"9d185f9e-5f51-4519-8fe2-4700c0b6c256\") " pod="openstack/nova-api-0" Nov 23 16:14:14 crc kubenswrapper[5050]: I1123 16:14:14.048959 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d185f9e-5f51-4519-8fe2-4700c0b6c256-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"9d185f9e-5f51-4519-8fe2-4700c0b6c256\") " pod="openstack/nova-api-0" Nov 23 16:14:14 crc kubenswrapper[5050]: I1123 16:14:14.049104 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9d185f9e-5f51-4519-8fe2-4700c0b6c256-logs\") pod \"nova-api-0\" (UID: \"9d185f9e-5f51-4519-8fe2-4700c0b6c256\") " pod="openstack/nova-api-0" Nov 23 16:14:14 crc kubenswrapper[5050]: I1123 16:14:14.049197 5050 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qh5ln\" (UniqueName: \"kubernetes.io/projected/db3e5993-2352-41db-902d-7b646bc63c54-kube-api-access-qh5ln\") pod \"nova-metadata-0\" (UID: \"db3e5993-2352-41db-902d-7b646bc63c54\") " pod="openstack/nova-metadata-0" Nov 23 16:14:14 crc kubenswrapper[5050]: I1123 16:14:14.049323 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7q976\" (UniqueName: \"kubernetes.io/projected/45984aec-f81e-44de-a1f3-cb3dfe1f8d27-kube-api-access-7q976\") pod \"nova-scheduler-0\" (UID: \"45984aec-f81e-44de-a1f3-cb3dfe1f8d27\") " pod="openstack/nova-scheduler-0" Nov 23 16:14:14 crc kubenswrapper[5050]: I1123 16:14:14.055734 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9d185f9e-5f51-4519-8fe2-4700c0b6c256-logs\") pod \"nova-api-0\" (UID: \"9d185f9e-5f51-4519-8fe2-4700c0b6c256\") " pod="openstack/nova-api-0" Nov 23 16:14:14 crc kubenswrapper[5050]: I1123 16:14:14.055903 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Nov 23 16:14:14 crc kubenswrapper[5050]: I1123 16:14:14.060799 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9d185f9e-5f51-4519-8fe2-4700c0b6c256-config-data\") pod \"nova-api-0\" (UID: \"9d185f9e-5f51-4519-8fe2-4700c0b6c256\") " pod="openstack/nova-api-0" Nov 23 16:14:14 crc kubenswrapper[5050]: I1123 16:14:14.061569 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d185f9e-5f51-4519-8fe2-4700c0b6c256-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"9d185f9e-5f51-4519-8fe2-4700c0b6c256\") " pod="openstack/nova-api-0" Nov 23 16:14:14 crc kubenswrapper[5050]: I1123 16:14:14.064987 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7786bd4bc7-kqvfg"] Nov 23 16:14:14 crc kubenswrapper[5050]: I1123 16:14:14.066879 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7786bd4bc7-kqvfg" Nov 23 16:14:14 crc kubenswrapper[5050]: I1123 16:14:14.071239 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/45984aec-f81e-44de-a1f3-cb3dfe1f8d27-config-data\") pod \"nova-scheduler-0\" (UID: \"45984aec-f81e-44de-a1f3-cb3dfe1f8d27\") " pod="openstack/nova-scheduler-0" Nov 23 16:14:14 crc kubenswrapper[5050]: I1123 16:14:14.075922 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/45984aec-f81e-44de-a1f3-cb3dfe1f8d27-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"45984aec-f81e-44de-a1f3-cb3dfe1f8d27\") " pod="openstack/nova-scheduler-0" Nov 23 16:14:14 crc kubenswrapper[5050]: I1123 16:14:14.099363 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cnjf9\" (UniqueName: \"kubernetes.io/projected/9d185f9e-5f51-4519-8fe2-4700c0b6c256-kube-api-access-cnjf9\") pod \"nova-api-0\" (UID: \"9d185f9e-5f51-4519-8fe2-4700c0b6c256\") " pod="openstack/nova-api-0" Nov 23 16:14:14 crc kubenswrapper[5050]: I1123 16:14:14.099911 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7q976\" (UniqueName: \"kubernetes.io/projected/45984aec-f81e-44de-a1f3-cb3dfe1f8d27-kube-api-access-7q976\") pod \"nova-scheduler-0\" (UID: \"45984aec-f81e-44de-a1f3-cb3dfe1f8d27\") " pod="openstack/nova-scheduler-0" Nov 23 16:14:14 crc kubenswrapper[5050]: I1123 16:14:14.124987 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7786bd4bc7-kqvfg"] Nov 23 16:14:14 crc kubenswrapper[5050]: I1123 16:14:14.151582 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d01a8a6c-70b5-47ec-a11b-303c4cb986cd-ovsdbserver-nb\") pod \"dnsmasq-dns-7786bd4bc7-kqvfg\" (UID: \"d01a8a6c-70b5-47ec-a11b-303c4cb986cd\") " pod="openstack/dnsmasq-dns-7786bd4bc7-kqvfg" Nov 23 16:14:14 crc kubenswrapper[5050]: I1123 16:14:14.152142 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d01a8a6c-70b5-47ec-a11b-303c4cb986cd-dns-svc\") pod \"dnsmasq-dns-7786bd4bc7-kqvfg\" (UID: \"d01a8a6c-70b5-47ec-a11b-303c4cb986cd\") " pod="openstack/dnsmasq-dns-7786bd4bc7-kqvfg" Nov 23 16:14:14 crc kubenswrapper[5050]: I1123 16:14:14.152288 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/db3e5993-2352-41db-902d-7b646bc63c54-logs\") pod \"nova-metadata-0\" (UID: \"db3e5993-2352-41db-902d-7b646bc63c54\") " pod="openstack/nova-metadata-0" Nov 23 16:14:14 crc kubenswrapper[5050]: I1123 16:14:14.152327 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d01a8a6c-70b5-47ec-a11b-303c4cb986cd-config\") pod \"dnsmasq-dns-7786bd4bc7-kqvfg\" (UID: \"d01a8a6c-70b5-47ec-a11b-303c4cb986cd\") " pod="openstack/dnsmasq-dns-7786bd4bc7-kqvfg" Nov 23 16:14:14 crc kubenswrapper[5050]: I1123 16:14:14.152477 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b67f9b82-2a40-46a6-af4f-1439ba842a82-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: 
\"b67f9b82-2a40-46a6-af4f-1439ba842a82\") " pod="openstack/nova-cell1-novncproxy-0" Nov 23 16:14:14 crc kubenswrapper[5050]: I1123 16:14:14.152508 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/db3e5993-2352-41db-902d-7b646bc63c54-config-data\") pod \"nova-metadata-0\" (UID: \"db3e5993-2352-41db-902d-7b646bc63c54\") " pod="openstack/nova-metadata-0" Nov 23 16:14:14 crc kubenswrapper[5050]: I1123 16:14:14.152567 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d01a8a6c-70b5-47ec-a11b-303c4cb986cd-ovsdbserver-sb\") pod \"dnsmasq-dns-7786bd4bc7-kqvfg\" (UID: \"d01a8a6c-70b5-47ec-a11b-303c4cb986cd\") " pod="openstack/dnsmasq-dns-7786bd4bc7-kqvfg" Nov 23 16:14:14 crc kubenswrapper[5050]: I1123 16:14:14.152587 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/db3e5993-2352-41db-902d-7b646bc63c54-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"db3e5993-2352-41db-902d-7b646bc63c54\") " pod="openstack/nova-metadata-0" Nov 23 16:14:14 crc kubenswrapper[5050]: I1123 16:14:14.152659 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4xzhz\" (UniqueName: \"kubernetes.io/projected/d01a8a6c-70b5-47ec-a11b-303c4cb986cd-kube-api-access-4xzhz\") pod \"dnsmasq-dns-7786bd4bc7-kqvfg\" (UID: \"d01a8a6c-70b5-47ec-a11b-303c4cb986cd\") " pod="openstack/dnsmasq-dns-7786bd4bc7-kqvfg" Nov 23 16:14:14 crc kubenswrapper[5050]: I1123 16:14:14.152719 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qh5ln\" (UniqueName: \"kubernetes.io/projected/db3e5993-2352-41db-902d-7b646bc63c54-kube-api-access-qh5ln\") pod \"nova-metadata-0\" (UID: \"db3e5993-2352-41db-902d-7b646bc63c54\") " pod="openstack/nova-metadata-0" Nov 23 16:14:14 crc kubenswrapper[5050]: I1123 16:14:14.152743 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b67f9b82-2a40-46a6-af4f-1439ba842a82-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"b67f9b82-2a40-46a6-af4f-1439ba842a82\") " pod="openstack/nova-cell1-novncproxy-0" Nov 23 16:14:14 crc kubenswrapper[5050]: I1123 16:14:14.152790 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dx6zz\" (UniqueName: \"kubernetes.io/projected/b67f9b82-2a40-46a6-af4f-1439ba842a82-kube-api-access-dx6zz\") pod \"nova-cell1-novncproxy-0\" (UID: \"b67f9b82-2a40-46a6-af4f-1439ba842a82\") " pod="openstack/nova-cell1-novncproxy-0" Nov 23 16:14:14 crc kubenswrapper[5050]: I1123 16:14:14.154263 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/db3e5993-2352-41db-902d-7b646bc63c54-logs\") pod \"nova-metadata-0\" (UID: \"db3e5993-2352-41db-902d-7b646bc63c54\") " pod="openstack/nova-metadata-0" Nov 23 16:14:14 crc kubenswrapper[5050]: I1123 16:14:14.163473 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/db3e5993-2352-41db-902d-7b646bc63c54-config-data\") pod \"nova-metadata-0\" (UID: \"db3e5993-2352-41db-902d-7b646bc63c54\") " pod="openstack/nova-metadata-0" Nov 23 16:14:14 crc kubenswrapper[5050]: 
I1123 16:14:14.163949 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/db3e5993-2352-41db-902d-7b646bc63c54-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"db3e5993-2352-41db-902d-7b646bc63c54\") " pod="openstack/nova-metadata-0" Nov 23 16:14:14 crc kubenswrapper[5050]: I1123 16:14:14.180761 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qh5ln\" (UniqueName: \"kubernetes.io/projected/db3e5993-2352-41db-902d-7b646bc63c54-kube-api-access-qh5ln\") pod \"nova-metadata-0\" (UID: \"db3e5993-2352-41db-902d-7b646bc63c54\") " pod="openstack/nova-metadata-0" Nov 23 16:14:14 crc kubenswrapper[5050]: I1123 16:14:14.255058 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d01a8a6c-70b5-47ec-a11b-303c4cb986cd-config\") pod \"dnsmasq-dns-7786bd4bc7-kqvfg\" (UID: \"d01a8a6c-70b5-47ec-a11b-303c4cb986cd\") " pod="openstack/dnsmasq-dns-7786bd4bc7-kqvfg" Nov 23 16:14:14 crc kubenswrapper[5050]: I1123 16:14:14.255117 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b67f9b82-2a40-46a6-af4f-1439ba842a82-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"b67f9b82-2a40-46a6-af4f-1439ba842a82\") " pod="openstack/nova-cell1-novncproxy-0" Nov 23 16:14:14 crc kubenswrapper[5050]: I1123 16:14:14.255168 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d01a8a6c-70b5-47ec-a11b-303c4cb986cd-ovsdbserver-sb\") pod \"dnsmasq-dns-7786bd4bc7-kqvfg\" (UID: \"d01a8a6c-70b5-47ec-a11b-303c4cb986cd\") " pod="openstack/dnsmasq-dns-7786bd4bc7-kqvfg" Nov 23 16:14:14 crc kubenswrapper[5050]: I1123 16:14:14.255201 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4xzhz\" (UniqueName: \"kubernetes.io/projected/d01a8a6c-70b5-47ec-a11b-303c4cb986cd-kube-api-access-4xzhz\") pod \"dnsmasq-dns-7786bd4bc7-kqvfg\" (UID: \"d01a8a6c-70b5-47ec-a11b-303c4cb986cd\") " pod="openstack/dnsmasq-dns-7786bd4bc7-kqvfg" Nov 23 16:14:14 crc kubenswrapper[5050]: I1123 16:14:14.255243 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b67f9b82-2a40-46a6-af4f-1439ba842a82-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"b67f9b82-2a40-46a6-af4f-1439ba842a82\") " pod="openstack/nova-cell1-novncproxy-0" Nov 23 16:14:14 crc kubenswrapper[5050]: I1123 16:14:14.255275 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dx6zz\" (UniqueName: \"kubernetes.io/projected/b67f9b82-2a40-46a6-af4f-1439ba842a82-kube-api-access-dx6zz\") pod \"nova-cell1-novncproxy-0\" (UID: \"b67f9b82-2a40-46a6-af4f-1439ba842a82\") " pod="openstack/nova-cell1-novncproxy-0" Nov 23 16:14:14 crc kubenswrapper[5050]: I1123 16:14:14.255297 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d01a8a6c-70b5-47ec-a11b-303c4cb986cd-ovsdbserver-nb\") pod \"dnsmasq-dns-7786bd4bc7-kqvfg\" (UID: \"d01a8a6c-70b5-47ec-a11b-303c4cb986cd\") " pod="openstack/dnsmasq-dns-7786bd4bc7-kqvfg" Nov 23 16:14:14 crc kubenswrapper[5050]: I1123 16:14:14.255316 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" 
(UniqueName: \"kubernetes.io/configmap/d01a8a6c-70b5-47ec-a11b-303c4cb986cd-dns-svc\") pod \"dnsmasq-dns-7786bd4bc7-kqvfg\" (UID: \"d01a8a6c-70b5-47ec-a11b-303c4cb986cd\") " pod="openstack/dnsmasq-dns-7786bd4bc7-kqvfg" Nov 23 16:14:14 crc kubenswrapper[5050]: I1123 16:14:14.256212 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d01a8a6c-70b5-47ec-a11b-303c4cb986cd-config\") pod \"dnsmasq-dns-7786bd4bc7-kqvfg\" (UID: \"d01a8a6c-70b5-47ec-a11b-303c4cb986cd\") " pod="openstack/dnsmasq-dns-7786bd4bc7-kqvfg" Nov 23 16:14:14 crc kubenswrapper[5050]: I1123 16:14:14.257172 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d01a8a6c-70b5-47ec-a11b-303c4cb986cd-ovsdbserver-nb\") pod \"dnsmasq-dns-7786bd4bc7-kqvfg\" (UID: \"d01a8a6c-70b5-47ec-a11b-303c4cb986cd\") " pod="openstack/dnsmasq-dns-7786bd4bc7-kqvfg" Nov 23 16:14:14 crc kubenswrapper[5050]: I1123 16:14:14.258789 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b67f9b82-2a40-46a6-af4f-1439ba842a82-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"b67f9b82-2a40-46a6-af4f-1439ba842a82\") " pod="openstack/nova-cell1-novncproxy-0" Nov 23 16:14:14 crc kubenswrapper[5050]: I1123 16:14:14.260862 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d01a8a6c-70b5-47ec-a11b-303c4cb986cd-ovsdbserver-sb\") pod \"dnsmasq-dns-7786bd4bc7-kqvfg\" (UID: \"d01a8a6c-70b5-47ec-a11b-303c4cb986cd\") " pod="openstack/dnsmasq-dns-7786bd4bc7-kqvfg" Nov 23 16:14:14 crc kubenswrapper[5050]: I1123 16:14:14.262166 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d01a8a6c-70b5-47ec-a11b-303c4cb986cd-dns-svc\") pod \"dnsmasq-dns-7786bd4bc7-kqvfg\" (UID: \"d01a8a6c-70b5-47ec-a11b-303c4cb986cd\") " pod="openstack/dnsmasq-dns-7786bd4bc7-kqvfg" Nov 23 16:14:14 crc kubenswrapper[5050]: I1123 16:14:14.264239 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b67f9b82-2a40-46a6-af4f-1439ba842a82-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"b67f9b82-2a40-46a6-af4f-1439ba842a82\") " pod="openstack/nova-cell1-novncproxy-0" Nov 23 16:14:14 crc kubenswrapper[5050]: I1123 16:14:14.295234 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dx6zz\" (UniqueName: \"kubernetes.io/projected/b67f9b82-2a40-46a6-af4f-1439ba842a82-kube-api-access-dx6zz\") pod \"nova-cell1-novncproxy-0\" (UID: \"b67f9b82-2a40-46a6-af4f-1439ba842a82\") " pod="openstack/nova-cell1-novncproxy-0" Nov 23 16:14:14 crc kubenswrapper[5050]: I1123 16:14:14.301602 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4xzhz\" (UniqueName: \"kubernetes.io/projected/d01a8a6c-70b5-47ec-a11b-303c4cb986cd-kube-api-access-4xzhz\") pod \"dnsmasq-dns-7786bd4bc7-kqvfg\" (UID: \"d01a8a6c-70b5-47ec-a11b-303c4cb986cd\") " pod="openstack/dnsmasq-dns-7786bd4bc7-kqvfg" Nov 23 16:14:14 crc kubenswrapper[5050]: I1123 16:14:14.308841 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 23 16:14:14 crc kubenswrapper[5050]: I1123 16:14:14.326816 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 23 16:14:14 crc kubenswrapper[5050]: I1123 16:14:14.349080 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 23 16:14:14 crc kubenswrapper[5050]: I1123 16:14:14.393215 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 23 16:14:14 crc kubenswrapper[5050]: I1123 16:14:14.435003 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7786bd4bc7-kqvfg" Nov 23 16:14:14 crc kubenswrapper[5050]: I1123 16:14:14.526420 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-q5mm8"] Nov 23 16:14:14 crc kubenswrapper[5050]: I1123 16:14:14.716106 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-q5mm8" event={"ID":"e17c8304-cb0e-4fc5-b97f-17d6b65a4e13","Type":"ContainerStarted","Data":"63a93c888d65f338f6b4f0aa75ea2b7c2e60d1c826ad21928b967bf66ce1647d"} Nov 23 16:14:14 crc kubenswrapper[5050]: I1123 16:14:14.881938 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-db-sync-pznlh"] Nov 23 16:14:14 crc kubenswrapper[5050]: I1123 16:14:14.883598 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-pznlh" Nov 23 16:14:14 crc kubenswrapper[5050]: I1123 16:14:14.886037 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-scripts" Nov 23 16:14:14 crc kubenswrapper[5050]: I1123 16:14:14.891379 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-pznlh"] Nov 23 16:14:14 crc kubenswrapper[5050]: I1123 16:14:14.891625 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Nov 23 16:14:14 crc kubenswrapper[5050]: I1123 16:14:14.969778 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 23 16:14:14 crc kubenswrapper[5050]: I1123 16:14:14.979328 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ccbcaa1b-3eea-47c1-8622-b881afcdaa7d-config-data\") pod \"nova-cell1-conductor-db-sync-pznlh\" (UID: \"ccbcaa1b-3eea-47c1-8622-b881afcdaa7d\") " pod="openstack/nova-cell1-conductor-db-sync-pznlh" Nov 23 16:14:14 crc kubenswrapper[5050]: I1123 16:14:14.979396 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ccbcaa1b-3eea-47c1-8622-b881afcdaa7d-scripts\") pod \"nova-cell1-conductor-db-sync-pznlh\" (UID: \"ccbcaa1b-3eea-47c1-8622-b881afcdaa7d\") " pod="openstack/nova-cell1-conductor-db-sync-pznlh" Nov 23 16:14:14 crc kubenswrapper[5050]: I1123 16:14:14.979429 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ccbcaa1b-3eea-47c1-8622-b881afcdaa7d-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-pznlh\" (UID: \"ccbcaa1b-3eea-47c1-8622-b881afcdaa7d\") " pod="openstack/nova-cell1-conductor-db-sync-pznlh" Nov 23 16:14:14 crc kubenswrapper[5050]: I1123 16:14:14.979477 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fpwmp\" (UniqueName: 
\"kubernetes.io/projected/ccbcaa1b-3eea-47c1-8622-b881afcdaa7d-kube-api-access-fpwmp\") pod \"nova-cell1-conductor-db-sync-pznlh\" (UID: \"ccbcaa1b-3eea-47c1-8622-b881afcdaa7d\") " pod="openstack/nova-cell1-conductor-db-sync-pznlh" Nov 23 16:14:15 crc kubenswrapper[5050]: I1123 16:14:15.085367 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ccbcaa1b-3eea-47c1-8622-b881afcdaa7d-config-data\") pod \"nova-cell1-conductor-db-sync-pznlh\" (UID: \"ccbcaa1b-3eea-47c1-8622-b881afcdaa7d\") " pod="openstack/nova-cell1-conductor-db-sync-pznlh" Nov 23 16:14:15 crc kubenswrapper[5050]: I1123 16:14:15.085428 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ccbcaa1b-3eea-47c1-8622-b881afcdaa7d-scripts\") pod \"nova-cell1-conductor-db-sync-pznlh\" (UID: \"ccbcaa1b-3eea-47c1-8622-b881afcdaa7d\") " pod="openstack/nova-cell1-conductor-db-sync-pznlh" Nov 23 16:14:15 crc kubenswrapper[5050]: I1123 16:14:15.085646 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ccbcaa1b-3eea-47c1-8622-b881afcdaa7d-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-pznlh\" (UID: \"ccbcaa1b-3eea-47c1-8622-b881afcdaa7d\") " pod="openstack/nova-cell1-conductor-db-sync-pznlh" Nov 23 16:14:15 crc kubenswrapper[5050]: I1123 16:14:15.085666 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fpwmp\" (UniqueName: \"kubernetes.io/projected/ccbcaa1b-3eea-47c1-8622-b881afcdaa7d-kube-api-access-fpwmp\") pod \"nova-cell1-conductor-db-sync-pznlh\" (UID: \"ccbcaa1b-3eea-47c1-8622-b881afcdaa7d\") " pod="openstack/nova-cell1-conductor-db-sync-pznlh" Nov 23 16:14:15 crc kubenswrapper[5050]: I1123 16:14:15.099738 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ccbcaa1b-3eea-47c1-8622-b881afcdaa7d-scripts\") pod \"nova-cell1-conductor-db-sync-pznlh\" (UID: \"ccbcaa1b-3eea-47c1-8622-b881afcdaa7d\") " pod="openstack/nova-cell1-conductor-db-sync-pznlh" Nov 23 16:14:15 crc kubenswrapper[5050]: I1123 16:14:15.107364 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ccbcaa1b-3eea-47c1-8622-b881afcdaa7d-config-data\") pod \"nova-cell1-conductor-db-sync-pznlh\" (UID: \"ccbcaa1b-3eea-47c1-8622-b881afcdaa7d\") " pod="openstack/nova-cell1-conductor-db-sync-pznlh" Nov 23 16:14:15 crc kubenswrapper[5050]: I1123 16:14:15.108178 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ccbcaa1b-3eea-47c1-8622-b881afcdaa7d-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-pznlh\" (UID: \"ccbcaa1b-3eea-47c1-8622-b881afcdaa7d\") " pod="openstack/nova-cell1-conductor-db-sync-pznlh" Nov 23 16:14:15 crc kubenswrapper[5050]: I1123 16:14:15.109577 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fpwmp\" (UniqueName: \"kubernetes.io/projected/ccbcaa1b-3eea-47c1-8622-b881afcdaa7d-kube-api-access-fpwmp\") pod \"nova-cell1-conductor-db-sync-pznlh\" (UID: \"ccbcaa1b-3eea-47c1-8622-b881afcdaa7d\") " pod="openstack/nova-cell1-conductor-db-sync-pznlh" Nov 23 16:14:15 crc kubenswrapper[5050]: I1123 16:14:15.111243 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 23 
16:14:15 crc kubenswrapper[5050]: I1123 16:14:15.140964 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 23 16:14:15 crc kubenswrapper[5050]: I1123 16:14:15.208353 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-pznlh" Nov 23 16:14:15 crc kubenswrapper[5050]: I1123 16:14:15.250140 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 23 16:14:15 crc kubenswrapper[5050]: I1123 16:14:15.265685 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7786bd4bc7-kqvfg"] Nov 23 16:14:15 crc kubenswrapper[5050]: W1123 16:14:15.284826 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd01a8a6c_70b5_47ec_a11b_303c4cb986cd.slice/crio-4e9ad49505d451aef6ba6ce0a02e899969a8530f9dca11d229d495feb1656102 WatchSource:0}: Error finding container 4e9ad49505d451aef6ba6ce0a02e899969a8530f9dca11d229d495feb1656102: Status 404 returned error can't find the container with id 4e9ad49505d451aef6ba6ce0a02e899969a8530f9dca11d229d495feb1656102 Nov 23 16:14:15 crc kubenswrapper[5050]: I1123 16:14:15.737636 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-pznlh"] Nov 23 16:14:15 crc kubenswrapper[5050]: I1123 16:14:15.741295 5050 generic.go:334] "Generic (PLEG): container finished" podID="d01a8a6c-70b5-47ec-a11b-303c4cb986cd" containerID="e2a40ca89884aa8b907acd55d8f67046a5abdb05207b259410bbd46bfa258ce3" exitCode=0 Nov 23 16:14:15 crc kubenswrapper[5050]: I1123 16:14:15.741359 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7786bd4bc7-kqvfg" event={"ID":"d01a8a6c-70b5-47ec-a11b-303c4cb986cd","Type":"ContainerDied","Data":"e2a40ca89884aa8b907acd55d8f67046a5abdb05207b259410bbd46bfa258ce3"} Nov 23 16:14:15 crc kubenswrapper[5050]: I1123 16:14:15.741389 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7786bd4bc7-kqvfg" event={"ID":"d01a8a6c-70b5-47ec-a11b-303c4cb986cd","Type":"ContainerStarted","Data":"4e9ad49505d451aef6ba6ce0a02e899969a8530f9dca11d229d495feb1656102"} Nov 23 16:14:15 crc kubenswrapper[5050]: I1123 16:14:15.765369 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"45984aec-f81e-44de-a1f3-cb3dfe1f8d27","Type":"ContainerStarted","Data":"a3fe00a082e84da528b621ea0550508537aa9293496595fd053f0b80402f8937"} Nov 23 16:14:15 crc kubenswrapper[5050]: I1123 16:14:15.765435 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"45984aec-f81e-44de-a1f3-cb3dfe1f8d27","Type":"ContainerStarted","Data":"1a0d22bc7a93a42887f0605017994c5488af242e85f40e55a32aaca8579a2984"} Nov 23 16:14:15 crc kubenswrapper[5050]: I1123 16:14:15.784383 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"db3e5993-2352-41db-902d-7b646bc63c54","Type":"ContainerStarted","Data":"c0f7133b79b3b894ed9e597032cfdfbbc0e6702826c5040b393756a23c6539ea"} Nov 23 16:14:15 crc kubenswrapper[5050]: I1123 16:14:15.784787 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"db3e5993-2352-41db-902d-7b646bc63c54","Type":"ContainerStarted","Data":"c099cb16b2a6eee564628253af440add7cf392441cdd709b4879c4fb3ba994cc"} Nov 23 16:14:15 crc kubenswrapper[5050]: I1123 16:14:15.797322 5050 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"9d185f9e-5f51-4519-8fe2-4700c0b6c256","Type":"ContainerStarted","Data":"4af2f8c17211fe5c9f021e5cc95f8117ceb1f0b6c0132c59df0bd9f71a8b50a2"} Nov 23 16:14:15 crc kubenswrapper[5050]: I1123 16:14:15.797381 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"9d185f9e-5f51-4519-8fe2-4700c0b6c256","Type":"ContainerStarted","Data":"7035a08f3f17519852399129d1558b2291be907d5f656114e8d926c79f7d2156"} Nov 23 16:14:15 crc kubenswrapper[5050]: I1123 16:14:15.797397 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"9d185f9e-5f51-4519-8fe2-4700c0b6c256","Type":"ContainerStarted","Data":"e3081f79213a688f789dd3fe90a85133a9162aa881549f85137ade811551f0f4"} Nov 23 16:14:15 crc kubenswrapper[5050]: I1123 16:14:15.800097 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"b67f9b82-2a40-46a6-af4f-1439ba842a82","Type":"ContainerStarted","Data":"4a2b66faf2c54e742a5a9153f39b99cc972ff08e36dec093cf99963511f79456"} Nov 23 16:14:15 crc kubenswrapper[5050]: I1123 16:14:15.800138 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"b67f9b82-2a40-46a6-af4f-1439ba842a82","Type":"ContainerStarted","Data":"dc4dcc025151a7e51a2cf78eb1886c71d14779f1d7da3c7e65d9b8d09102dc4a"} Nov 23 16:14:15 crc kubenswrapper[5050]: I1123 16:14:15.801604 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-q5mm8" event={"ID":"e17c8304-cb0e-4fc5-b97f-17d6b65a4e13","Type":"ContainerStarted","Data":"05fc1817af6846b0b98226a6fe360837816f34f6615eeae2e10dc0b4828f6a8d"} Nov 23 16:14:15 crc kubenswrapper[5050]: I1123 16:14:15.840495 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.840469454 podStartE2EDuration="2.840469454s" podCreationTimestamp="2025-11-23 16:14:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 16:14:15.794841746 +0000 UTC m=+5550.961838231" watchObservedRunningTime="2025-11-23 16:14:15.840469454 +0000 UTC m=+5551.007465939" Nov 23 16:14:15 crc kubenswrapper[5050]: I1123 16:14:15.845460 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.845430944 podStartE2EDuration="2.845430944s" podCreationTimestamp="2025-11-23 16:14:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 16:14:15.813227515 +0000 UTC m=+5550.980224000" watchObservedRunningTime="2025-11-23 16:14:15.845430944 +0000 UTC m=+5551.012427429" Nov 23 16:14:15 crc kubenswrapper[5050]: I1123 16:14:15.892823 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.892797081 podStartE2EDuration="2.892797081s" podCreationTimestamp="2025-11-23 16:14:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 16:14:15.834626509 +0000 UTC m=+5551.001622994" watchObservedRunningTime="2025-11-23 16:14:15.892797081 +0000 UTC m=+5551.059793566" Nov 23 16:14:15 crc kubenswrapper[5050]: I1123 16:14:15.896322 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack/nova-cell0-cell-mapping-q5mm8" podStartSLOduration=2.8963107 podStartE2EDuration="2.8963107s" podCreationTimestamp="2025-11-23 16:14:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 16:14:15.855810287 +0000 UTC m=+5551.022806772" watchObservedRunningTime="2025-11-23 16:14:15.8963107 +0000 UTC m=+5551.063307185" Nov 23 16:14:15 crc kubenswrapper[5050]: I1123 16:14:15.913737 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.913693561 podStartE2EDuration="2.913693561s" podCreationTimestamp="2025-11-23 16:14:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 16:14:15.877058857 +0000 UTC m=+5551.044055342" watchObservedRunningTime="2025-11-23 16:14:15.913693561 +0000 UTC m=+5551.080690046" Nov 23 16:14:16 crc kubenswrapper[5050]: I1123 16:14:16.812412 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-pznlh" event={"ID":"ccbcaa1b-3eea-47c1-8622-b881afcdaa7d","Type":"ContainerStarted","Data":"73fb1d109e600928c168e991d60d8b4d275f6b0c7b32501b0b32357b0eae60d5"} Nov 23 16:14:16 crc kubenswrapper[5050]: I1123 16:14:16.812761 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-pznlh" event={"ID":"ccbcaa1b-3eea-47c1-8622-b881afcdaa7d","Type":"ContainerStarted","Data":"39473c03ed277fd5b6d0911007b64512405faf8b6725f992258d0a37cf1e0d30"} Nov 23 16:14:16 crc kubenswrapper[5050]: I1123 16:14:16.819417 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7786bd4bc7-kqvfg" event={"ID":"d01a8a6c-70b5-47ec-a11b-303c4cb986cd","Type":"ContainerStarted","Data":"6cd0583fd5de83374a868ba75fe75fbfe9a69bf8ab41e6a6bf694bf3c2227515"} Nov 23 16:14:16 crc kubenswrapper[5050]: I1123 16:14:16.820398 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-7786bd4bc7-kqvfg" Nov 23 16:14:16 crc kubenswrapper[5050]: I1123 16:14:16.824491 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"db3e5993-2352-41db-902d-7b646bc63c54","Type":"ContainerStarted","Data":"cdb6a0a782c32ebe569131cd812b79b91e886414c8238139c2b579b09a61c60e"} Nov 23 16:14:16 crc kubenswrapper[5050]: I1123 16:14:16.836799 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-db-sync-pznlh" podStartSLOduration=2.83676693 podStartE2EDuration="2.83676693s" podCreationTimestamp="2025-11-23 16:14:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 16:14:16.832219811 +0000 UTC m=+5551.999216326" watchObservedRunningTime="2025-11-23 16:14:16.83676693 +0000 UTC m=+5552.003763415" Nov 23 16:14:16 crc kubenswrapper[5050]: I1123 16:14:16.869084 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-7786bd4bc7-kqvfg" podStartSLOduration=2.869053871 podStartE2EDuration="2.869053871s" podCreationTimestamp="2025-11-23 16:14:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 16:14:16.851033192 +0000 UTC m=+5552.018029677" watchObservedRunningTime="2025-11-23 16:14:16.869053871 +0000 UTC 
m=+5552.036050356" Nov 23 16:14:18 crc kubenswrapper[5050]: I1123 16:14:18.852489 5050 generic.go:334] "Generic (PLEG): container finished" podID="ccbcaa1b-3eea-47c1-8622-b881afcdaa7d" containerID="73fb1d109e600928c168e991d60d8b4d275f6b0c7b32501b0b32357b0eae60d5" exitCode=0 Nov 23 16:14:18 crc kubenswrapper[5050]: I1123 16:14:18.852567 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-pznlh" event={"ID":"ccbcaa1b-3eea-47c1-8622-b881afcdaa7d","Type":"ContainerDied","Data":"73fb1d109e600928c168e991d60d8b4d275f6b0c7b32501b0b32357b0eae60d5"} Nov 23 16:14:19 crc kubenswrapper[5050]: I1123 16:14:19.328522 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Nov 23 16:14:19 crc kubenswrapper[5050]: I1123 16:14:19.349659 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 23 16:14:19 crc kubenswrapper[5050]: I1123 16:14:19.349715 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 23 16:14:19 crc kubenswrapper[5050]: I1123 16:14:19.394181 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Nov 23 16:14:20 crc kubenswrapper[5050]: I1123 16:14:20.315642 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-pznlh" Nov 23 16:14:20 crc kubenswrapper[5050]: I1123 16:14:20.421543 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fpwmp\" (UniqueName: \"kubernetes.io/projected/ccbcaa1b-3eea-47c1-8622-b881afcdaa7d-kube-api-access-fpwmp\") pod \"ccbcaa1b-3eea-47c1-8622-b881afcdaa7d\" (UID: \"ccbcaa1b-3eea-47c1-8622-b881afcdaa7d\") " Nov 23 16:14:20 crc kubenswrapper[5050]: I1123 16:14:20.422344 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ccbcaa1b-3eea-47c1-8622-b881afcdaa7d-scripts\") pod \"ccbcaa1b-3eea-47c1-8622-b881afcdaa7d\" (UID: \"ccbcaa1b-3eea-47c1-8622-b881afcdaa7d\") " Nov 23 16:14:20 crc kubenswrapper[5050]: I1123 16:14:20.422431 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ccbcaa1b-3eea-47c1-8622-b881afcdaa7d-config-data\") pod \"ccbcaa1b-3eea-47c1-8622-b881afcdaa7d\" (UID: \"ccbcaa1b-3eea-47c1-8622-b881afcdaa7d\") " Nov 23 16:14:20 crc kubenswrapper[5050]: I1123 16:14:20.422612 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ccbcaa1b-3eea-47c1-8622-b881afcdaa7d-combined-ca-bundle\") pod \"ccbcaa1b-3eea-47c1-8622-b881afcdaa7d\" (UID: \"ccbcaa1b-3eea-47c1-8622-b881afcdaa7d\") " Nov 23 16:14:20 crc kubenswrapper[5050]: I1123 16:14:20.432192 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ccbcaa1b-3eea-47c1-8622-b881afcdaa7d-scripts" (OuterVolumeSpecName: "scripts") pod "ccbcaa1b-3eea-47c1-8622-b881afcdaa7d" (UID: "ccbcaa1b-3eea-47c1-8622-b881afcdaa7d"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:14:20 crc kubenswrapper[5050]: I1123 16:14:20.432635 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ccbcaa1b-3eea-47c1-8622-b881afcdaa7d-kube-api-access-fpwmp" (OuterVolumeSpecName: "kube-api-access-fpwmp") pod "ccbcaa1b-3eea-47c1-8622-b881afcdaa7d" (UID: "ccbcaa1b-3eea-47c1-8622-b881afcdaa7d"). InnerVolumeSpecName "kube-api-access-fpwmp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 16:14:20 crc kubenswrapper[5050]: I1123 16:14:20.480094 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ccbcaa1b-3eea-47c1-8622-b881afcdaa7d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ccbcaa1b-3eea-47c1-8622-b881afcdaa7d" (UID: "ccbcaa1b-3eea-47c1-8622-b881afcdaa7d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:14:20 crc kubenswrapper[5050]: I1123 16:14:20.482158 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ccbcaa1b-3eea-47c1-8622-b881afcdaa7d-config-data" (OuterVolumeSpecName: "config-data") pod "ccbcaa1b-3eea-47c1-8622-b881afcdaa7d" (UID: "ccbcaa1b-3eea-47c1-8622-b881afcdaa7d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:14:20 crc kubenswrapper[5050]: I1123 16:14:20.525093 5050 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ccbcaa1b-3eea-47c1-8622-b881afcdaa7d-config-data\") on node \"crc\" DevicePath \"\"" Nov 23 16:14:20 crc kubenswrapper[5050]: I1123 16:14:20.525146 5050 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ccbcaa1b-3eea-47c1-8622-b881afcdaa7d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 16:14:20 crc kubenswrapper[5050]: I1123 16:14:20.525160 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fpwmp\" (UniqueName: \"kubernetes.io/projected/ccbcaa1b-3eea-47c1-8622-b881afcdaa7d-kube-api-access-fpwmp\") on node \"crc\" DevicePath \"\"" Nov 23 16:14:20 crc kubenswrapper[5050]: I1123 16:14:20.525176 5050 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ccbcaa1b-3eea-47c1-8622-b881afcdaa7d-scripts\") on node \"crc\" DevicePath \"\"" Nov 23 16:14:20 crc kubenswrapper[5050]: I1123 16:14:20.884335 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-pznlh" event={"ID":"ccbcaa1b-3eea-47c1-8622-b881afcdaa7d","Type":"ContainerDied","Data":"39473c03ed277fd5b6d0911007b64512405faf8b6725f992258d0a37cf1e0d30"} Nov 23 16:14:20 crc kubenswrapper[5050]: I1123 16:14:20.884392 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-pznlh" Nov 23 16:14:20 crc kubenswrapper[5050]: I1123 16:14:20.884408 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="39473c03ed277fd5b6d0911007b64512405faf8b6725f992258d0a37cf1e0d30" Nov 23 16:14:20 crc kubenswrapper[5050]: I1123 16:14:20.887498 5050 generic.go:334] "Generic (PLEG): container finished" podID="e17c8304-cb0e-4fc5-b97f-17d6b65a4e13" containerID="05fc1817af6846b0b98226a6fe360837816f34f6615eeae2e10dc0b4828f6a8d" exitCode=0 Nov 23 16:14:20 crc kubenswrapper[5050]: I1123 16:14:20.887598 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-q5mm8" event={"ID":"e17c8304-cb0e-4fc5-b97f-17d6b65a4e13","Type":"ContainerDied","Data":"05fc1817af6846b0b98226a6fe360837816f34f6615eeae2e10dc0b4828f6a8d"} Nov 23 16:14:21 crc kubenswrapper[5050]: I1123 16:14:21.034782 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 23 16:14:21 crc kubenswrapper[5050]: E1123 16:14:21.035356 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ccbcaa1b-3eea-47c1-8622-b881afcdaa7d" containerName="nova-cell1-conductor-db-sync" Nov 23 16:14:21 crc kubenswrapper[5050]: I1123 16:14:21.035382 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="ccbcaa1b-3eea-47c1-8622-b881afcdaa7d" containerName="nova-cell1-conductor-db-sync" Nov 23 16:14:21 crc kubenswrapper[5050]: I1123 16:14:21.035622 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="ccbcaa1b-3eea-47c1-8622-b881afcdaa7d" containerName="nova-cell1-conductor-db-sync" Nov 23 16:14:21 crc kubenswrapper[5050]: I1123 16:14:21.036421 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 23 16:14:21 crc kubenswrapper[5050]: I1123 16:14:21.046155 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Nov 23 16:14:21 crc kubenswrapper[5050]: I1123 16:14:21.048057 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 23 16:14:21 crc kubenswrapper[5050]: I1123 16:14:21.138773 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b8269f32-ce74-4977-aa1b-e7324d6d9935-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"b8269f32-ce74-4977-aa1b-e7324d6d9935\") " pod="openstack/nova-cell1-conductor-0" Nov 23 16:14:21 crc kubenswrapper[5050]: I1123 16:14:21.138935 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b8269f32-ce74-4977-aa1b-e7324d6d9935-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"b8269f32-ce74-4977-aa1b-e7324d6d9935\") " pod="openstack/nova-cell1-conductor-0" Nov 23 16:14:21 crc kubenswrapper[5050]: I1123 16:14:21.138997 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gjqdm\" (UniqueName: \"kubernetes.io/projected/b8269f32-ce74-4977-aa1b-e7324d6d9935-kube-api-access-gjqdm\") pod \"nova-cell1-conductor-0\" (UID: \"b8269f32-ce74-4977-aa1b-e7324d6d9935\") " pod="openstack/nova-cell1-conductor-0" Nov 23 16:14:21 crc kubenswrapper[5050]: I1123 16:14:21.243141 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/b8269f32-ce74-4977-aa1b-e7324d6d9935-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"b8269f32-ce74-4977-aa1b-e7324d6d9935\") " pod="openstack/nova-cell1-conductor-0" Nov 23 16:14:21 crc kubenswrapper[5050]: I1123 16:14:21.243318 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gjqdm\" (UniqueName: \"kubernetes.io/projected/b8269f32-ce74-4977-aa1b-e7324d6d9935-kube-api-access-gjqdm\") pod \"nova-cell1-conductor-0\" (UID: \"b8269f32-ce74-4977-aa1b-e7324d6d9935\") " pod="openstack/nova-cell1-conductor-0" Nov 23 16:14:21 crc kubenswrapper[5050]: I1123 16:14:21.244262 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b8269f32-ce74-4977-aa1b-e7324d6d9935-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"b8269f32-ce74-4977-aa1b-e7324d6d9935\") " pod="openstack/nova-cell1-conductor-0" Nov 23 16:14:21 crc kubenswrapper[5050]: I1123 16:14:21.250182 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b8269f32-ce74-4977-aa1b-e7324d6d9935-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"b8269f32-ce74-4977-aa1b-e7324d6d9935\") " pod="openstack/nova-cell1-conductor-0" Nov 23 16:14:21 crc kubenswrapper[5050]: I1123 16:14:21.250971 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b8269f32-ce74-4977-aa1b-e7324d6d9935-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"b8269f32-ce74-4977-aa1b-e7324d6d9935\") " pod="openstack/nova-cell1-conductor-0" Nov 23 16:14:21 crc kubenswrapper[5050]: I1123 16:14:21.270322 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gjqdm\" (UniqueName: \"kubernetes.io/projected/b8269f32-ce74-4977-aa1b-e7324d6d9935-kube-api-access-gjqdm\") pod \"nova-cell1-conductor-0\" (UID: \"b8269f32-ce74-4977-aa1b-e7324d6d9935\") " pod="openstack/nova-cell1-conductor-0" Nov 23 16:14:21 crc kubenswrapper[5050]: I1123 16:14:21.359559 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 23 16:14:21 crc kubenswrapper[5050]: I1123 16:14:21.906337 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 23 16:14:21 crc kubenswrapper[5050]: W1123 16:14:21.907179 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb8269f32_ce74_4977_aa1b_e7324d6d9935.slice/crio-b75dd840f0c346435d79616b1598b40d84b456e8a7976d025f3e9bd6602b1897 WatchSource:0}: Error finding container b75dd840f0c346435d79616b1598b40d84b456e8a7976d025f3e9bd6602b1897: Status 404 returned error can't find the container with id b75dd840f0c346435d79616b1598b40d84b456e8a7976d025f3e9bd6602b1897 Nov 23 16:14:22 crc kubenswrapper[5050]: I1123 16:14:22.210908 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-q5mm8" Nov 23 16:14:22 crc kubenswrapper[5050]: I1123 16:14:22.268358 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e17c8304-cb0e-4fc5-b97f-17d6b65a4e13-combined-ca-bundle\") pod \"e17c8304-cb0e-4fc5-b97f-17d6b65a4e13\" (UID: \"e17c8304-cb0e-4fc5-b97f-17d6b65a4e13\") " Nov 23 16:14:22 crc kubenswrapper[5050]: I1123 16:14:22.268801 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e17c8304-cb0e-4fc5-b97f-17d6b65a4e13-config-data\") pod \"e17c8304-cb0e-4fc5-b97f-17d6b65a4e13\" (UID: \"e17c8304-cb0e-4fc5-b97f-17d6b65a4e13\") " Nov 23 16:14:22 crc kubenswrapper[5050]: I1123 16:14:22.269031 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-glp2m\" (UniqueName: \"kubernetes.io/projected/e17c8304-cb0e-4fc5-b97f-17d6b65a4e13-kube-api-access-glp2m\") pod \"e17c8304-cb0e-4fc5-b97f-17d6b65a4e13\" (UID: \"e17c8304-cb0e-4fc5-b97f-17d6b65a4e13\") " Nov 23 16:14:22 crc kubenswrapper[5050]: I1123 16:14:22.269298 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e17c8304-cb0e-4fc5-b97f-17d6b65a4e13-scripts\") pod \"e17c8304-cb0e-4fc5-b97f-17d6b65a4e13\" (UID: \"e17c8304-cb0e-4fc5-b97f-17d6b65a4e13\") " Nov 23 16:14:22 crc kubenswrapper[5050]: I1123 16:14:22.282506 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e17c8304-cb0e-4fc5-b97f-17d6b65a4e13-scripts" (OuterVolumeSpecName: "scripts") pod "e17c8304-cb0e-4fc5-b97f-17d6b65a4e13" (UID: "e17c8304-cb0e-4fc5-b97f-17d6b65a4e13"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:14:22 crc kubenswrapper[5050]: I1123 16:14:22.287267 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e17c8304-cb0e-4fc5-b97f-17d6b65a4e13-kube-api-access-glp2m" (OuterVolumeSpecName: "kube-api-access-glp2m") pod "e17c8304-cb0e-4fc5-b97f-17d6b65a4e13" (UID: "e17c8304-cb0e-4fc5-b97f-17d6b65a4e13"). InnerVolumeSpecName "kube-api-access-glp2m". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 16:14:22 crc kubenswrapper[5050]: I1123 16:14:22.300705 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e17c8304-cb0e-4fc5-b97f-17d6b65a4e13-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e17c8304-cb0e-4fc5-b97f-17d6b65a4e13" (UID: "e17c8304-cb0e-4fc5-b97f-17d6b65a4e13"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:14:22 crc kubenswrapper[5050]: I1123 16:14:22.301623 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e17c8304-cb0e-4fc5-b97f-17d6b65a4e13-config-data" (OuterVolumeSpecName: "config-data") pod "e17c8304-cb0e-4fc5-b97f-17d6b65a4e13" (UID: "e17c8304-cb0e-4fc5-b97f-17d6b65a4e13"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:14:22 crc kubenswrapper[5050]: I1123 16:14:22.372555 5050 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e17c8304-cb0e-4fc5-b97f-17d6b65a4e13-scripts\") on node \"crc\" DevicePath \"\"" Nov 23 16:14:22 crc kubenswrapper[5050]: I1123 16:14:22.372599 5050 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e17c8304-cb0e-4fc5-b97f-17d6b65a4e13-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 16:14:22 crc kubenswrapper[5050]: I1123 16:14:22.372613 5050 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e17c8304-cb0e-4fc5-b97f-17d6b65a4e13-config-data\") on node \"crc\" DevicePath \"\"" Nov 23 16:14:22 crc kubenswrapper[5050]: I1123 16:14:22.372624 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-glp2m\" (UniqueName: \"kubernetes.io/projected/e17c8304-cb0e-4fc5-b97f-17d6b65a4e13-kube-api-access-glp2m\") on node \"crc\" DevicePath \"\"" Nov 23 16:14:22 crc kubenswrapper[5050]: I1123 16:14:22.922619 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-q5mm8" Nov 23 16:14:22 crc kubenswrapper[5050]: I1123 16:14:22.922613 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-q5mm8" event={"ID":"e17c8304-cb0e-4fc5-b97f-17d6b65a4e13","Type":"ContainerDied","Data":"63a93c888d65f338f6b4f0aa75ea2b7c2e60d1c826ad21928b967bf66ce1647d"} Nov 23 16:14:22 crc kubenswrapper[5050]: I1123 16:14:22.923250 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="63a93c888d65f338f6b4f0aa75ea2b7c2e60d1c826ad21928b967bf66ce1647d" Nov 23 16:14:22 crc kubenswrapper[5050]: I1123 16:14:22.927064 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"b8269f32-ce74-4977-aa1b-e7324d6d9935","Type":"ContainerStarted","Data":"9ab652d7f87e93ebb30e4dff16ee739ee0e999fbf81494e2affcaf7c5b2080a0"} Nov 23 16:14:22 crc kubenswrapper[5050]: I1123 16:14:22.927135 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"b8269f32-ce74-4977-aa1b-e7324d6d9935","Type":"ContainerStarted","Data":"b75dd840f0c346435d79616b1598b40d84b456e8a7976d025f3e9bd6602b1897"} Nov 23 16:14:22 crc kubenswrapper[5050]: I1123 16:14:22.927794 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-conductor-0" Nov 23 16:14:22 crc kubenswrapper[5050]: I1123 16:14:22.969010 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-0" podStartSLOduration=2.968973981 podStartE2EDuration="2.968973981s" podCreationTimestamp="2025-11-23 16:14:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 16:14:22.96609548 +0000 UTC m=+5558.133092045" watchObservedRunningTime="2025-11-23 16:14:22.968973981 +0000 UTC m=+5558.135970506" Nov 23 16:14:23 crc kubenswrapper[5050]: I1123 16:14:23.125741 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 23 16:14:23 crc kubenswrapper[5050]: I1123 16:14:23.126133 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="9d185f9e-5f51-4519-8fe2-4700c0b6c256" 
containerName="nova-api-log" containerID="cri-o://7035a08f3f17519852399129d1558b2291be907d5f656114e8d926c79f7d2156" gracePeriod=30 Nov 23 16:14:23 crc kubenswrapper[5050]: I1123 16:14:23.126380 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="9d185f9e-5f51-4519-8fe2-4700c0b6c256" containerName="nova-api-api" containerID="cri-o://4af2f8c17211fe5c9f021e5cc95f8117ceb1f0b6c0132c59df0bd9f71a8b50a2" gracePeriod=30 Nov 23 16:14:23 crc kubenswrapper[5050]: I1123 16:14:23.145912 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 23 16:14:23 crc kubenswrapper[5050]: I1123 16:14:23.147291 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="45984aec-f81e-44de-a1f3-cb3dfe1f8d27" containerName="nova-scheduler-scheduler" containerID="cri-o://a3fe00a082e84da528b621ea0550508537aa9293496595fd053f0b80402f8937" gracePeriod=30 Nov 23 16:14:23 crc kubenswrapper[5050]: I1123 16:14:23.174313 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 23 16:14:23 crc kubenswrapper[5050]: I1123 16:14:23.174653 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="db3e5993-2352-41db-902d-7b646bc63c54" containerName="nova-metadata-log" containerID="cri-o://c0f7133b79b3b894ed9e597032cfdfbbc0e6702826c5040b393756a23c6539ea" gracePeriod=30 Nov 23 16:14:23 crc kubenswrapper[5050]: I1123 16:14:23.175031 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="db3e5993-2352-41db-902d-7b646bc63c54" containerName="nova-metadata-metadata" containerID="cri-o://cdb6a0a782c32ebe569131cd812b79b91e886414c8238139c2b579b09a61c60e" gracePeriod=30 Nov 23 16:14:23 crc kubenswrapper[5050]: I1123 16:14:23.830485 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 23 16:14:23 crc kubenswrapper[5050]: I1123 16:14:23.837377 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 23 16:14:23 crc kubenswrapper[5050]: I1123 16:14:23.917634 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/db3e5993-2352-41db-902d-7b646bc63c54-config-data\") pod \"db3e5993-2352-41db-902d-7b646bc63c54\" (UID: \"db3e5993-2352-41db-902d-7b646bc63c54\") " Nov 23 16:14:23 crc kubenswrapper[5050]: I1123 16:14:23.917746 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d185f9e-5f51-4519-8fe2-4700c0b6c256-combined-ca-bundle\") pod \"9d185f9e-5f51-4519-8fe2-4700c0b6c256\" (UID: \"9d185f9e-5f51-4519-8fe2-4700c0b6c256\") " Nov 23 16:14:23 crc kubenswrapper[5050]: I1123 16:14:23.917788 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9d185f9e-5f51-4519-8fe2-4700c0b6c256-logs\") pod \"9d185f9e-5f51-4519-8fe2-4700c0b6c256\" (UID: \"9d185f9e-5f51-4519-8fe2-4700c0b6c256\") " Nov 23 16:14:23 crc kubenswrapper[5050]: I1123 16:14:23.917840 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qh5ln\" (UniqueName: \"kubernetes.io/projected/db3e5993-2352-41db-902d-7b646bc63c54-kube-api-access-qh5ln\") pod \"db3e5993-2352-41db-902d-7b646bc63c54\" (UID: \"db3e5993-2352-41db-902d-7b646bc63c54\") " Nov 23 16:14:23 crc kubenswrapper[5050]: I1123 16:14:23.917966 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cnjf9\" (UniqueName: \"kubernetes.io/projected/9d185f9e-5f51-4519-8fe2-4700c0b6c256-kube-api-access-cnjf9\") pod \"9d185f9e-5f51-4519-8fe2-4700c0b6c256\" (UID: \"9d185f9e-5f51-4519-8fe2-4700c0b6c256\") " Nov 23 16:14:23 crc kubenswrapper[5050]: I1123 16:14:23.918012 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9d185f9e-5f51-4519-8fe2-4700c0b6c256-config-data\") pod \"9d185f9e-5f51-4519-8fe2-4700c0b6c256\" (UID: \"9d185f9e-5f51-4519-8fe2-4700c0b6c256\") " Nov 23 16:14:23 crc kubenswrapper[5050]: I1123 16:14:23.918150 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/db3e5993-2352-41db-902d-7b646bc63c54-logs\") pod \"db3e5993-2352-41db-902d-7b646bc63c54\" (UID: \"db3e5993-2352-41db-902d-7b646bc63c54\") " Nov 23 16:14:23 crc kubenswrapper[5050]: I1123 16:14:23.918217 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/db3e5993-2352-41db-902d-7b646bc63c54-combined-ca-bundle\") pod \"db3e5993-2352-41db-902d-7b646bc63c54\" (UID: \"db3e5993-2352-41db-902d-7b646bc63c54\") " Nov 23 16:14:23 crc kubenswrapper[5050]: I1123 16:14:23.918839 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9d185f9e-5f51-4519-8fe2-4700c0b6c256-logs" (OuterVolumeSpecName: "logs") pod "9d185f9e-5f51-4519-8fe2-4700c0b6c256" (UID: "9d185f9e-5f51-4519-8fe2-4700c0b6c256"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 23 16:14:23 crc kubenswrapper[5050]: I1123 16:14:23.919056 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/db3e5993-2352-41db-902d-7b646bc63c54-logs" (OuterVolumeSpecName: "logs") pod "db3e5993-2352-41db-902d-7b646bc63c54" (UID: "db3e5993-2352-41db-902d-7b646bc63c54"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 23 16:14:23 crc kubenswrapper[5050]: I1123 16:14:23.924641 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/db3e5993-2352-41db-902d-7b646bc63c54-kube-api-access-qh5ln" (OuterVolumeSpecName: "kube-api-access-qh5ln") pod "db3e5993-2352-41db-902d-7b646bc63c54" (UID: "db3e5993-2352-41db-902d-7b646bc63c54"). InnerVolumeSpecName "kube-api-access-qh5ln". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 23 16:14:23 crc kubenswrapper[5050]: I1123 16:14:23.926600 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d185f9e-5f51-4519-8fe2-4700c0b6c256-kube-api-access-cnjf9" (OuterVolumeSpecName: "kube-api-access-cnjf9") pod "9d185f9e-5f51-4519-8fe2-4700c0b6c256" (UID: "9d185f9e-5f51-4519-8fe2-4700c0b6c256"). InnerVolumeSpecName "kube-api-access-cnjf9". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 23 16:14:23 crc kubenswrapper[5050]: I1123 16:14:23.942868 5050 generic.go:334] "Generic (PLEG): container finished" podID="db3e5993-2352-41db-902d-7b646bc63c54" containerID="cdb6a0a782c32ebe569131cd812b79b91e886414c8238139c2b579b09a61c60e" exitCode=0
Nov 23 16:14:23 crc kubenswrapper[5050]: I1123 16:14:23.942906 5050 generic.go:334] "Generic (PLEG): container finished" podID="db3e5993-2352-41db-902d-7b646bc63c54" containerID="c0f7133b79b3b894ed9e597032cfdfbbc0e6702826c5040b393756a23c6539ea" exitCode=143
Nov 23 16:14:23 crc kubenswrapper[5050]: I1123 16:14:23.942989 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"db3e5993-2352-41db-902d-7b646bc63c54","Type":"ContainerDied","Data":"cdb6a0a782c32ebe569131cd812b79b91e886414c8238139c2b579b09a61c60e"}
Nov 23 16:14:23 crc kubenswrapper[5050]: I1123 16:14:23.943027 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"db3e5993-2352-41db-902d-7b646bc63c54","Type":"ContainerDied","Data":"c0f7133b79b3b894ed9e597032cfdfbbc0e6702826c5040b393756a23c6539ea"}
Nov 23 16:14:23 crc kubenswrapper[5050]: I1123 16:14:23.943042 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"db3e5993-2352-41db-902d-7b646bc63c54","Type":"ContainerDied","Data":"c099cb16b2a6eee564628253af440add7cf392441cdd709b4879c4fb3ba994cc"}
Nov 23 16:14:23 crc kubenswrapper[5050]: I1123 16:14:23.943082 5050 scope.go:117] "RemoveContainer" containerID="cdb6a0a782c32ebe569131cd812b79b91e886414c8238139c2b579b09a61c60e"
Nov 23 16:14:23 crc kubenswrapper[5050]: I1123 16:14:23.943195 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Nov 23 16:14:23 crc kubenswrapper[5050]: I1123 16:14:23.946862 5050 generic.go:334] "Generic (PLEG): container finished" podID="9d185f9e-5f51-4519-8fe2-4700c0b6c256" containerID="4af2f8c17211fe5c9f021e5cc95f8117ceb1f0b6c0132c59df0bd9f71a8b50a2" exitCode=0
Nov 23 16:14:23 crc kubenswrapper[5050]: I1123 16:14:23.946894 5050 generic.go:334] "Generic (PLEG): container finished" podID="9d185f9e-5f51-4519-8fe2-4700c0b6c256" containerID="7035a08f3f17519852399129d1558b2291be907d5f656114e8d926c79f7d2156" exitCode=143
Nov 23 16:14:23 crc kubenswrapper[5050]: I1123 16:14:23.947213 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Nov 23 16:14:23 crc kubenswrapper[5050]: I1123 16:14:23.947589 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"9d185f9e-5f51-4519-8fe2-4700c0b6c256","Type":"ContainerDied","Data":"4af2f8c17211fe5c9f021e5cc95f8117ceb1f0b6c0132c59df0bd9f71a8b50a2"}
Nov 23 16:14:23 crc kubenswrapper[5050]: I1123 16:14:23.947717 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"9d185f9e-5f51-4519-8fe2-4700c0b6c256","Type":"ContainerDied","Data":"7035a08f3f17519852399129d1558b2291be907d5f656114e8d926c79f7d2156"}
Nov 23 16:14:23 crc kubenswrapper[5050]: I1123 16:14:23.947794 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"9d185f9e-5f51-4519-8fe2-4700c0b6c256","Type":"ContainerDied","Data":"e3081f79213a688f789dd3fe90a85133a9162aa881549f85137ade811551f0f4"}
Nov 23 16:14:23 crc kubenswrapper[5050]: I1123 16:14:23.951149 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/db3e5993-2352-41db-902d-7b646bc63c54-config-data" (OuterVolumeSpecName: "config-data") pod "db3e5993-2352-41db-902d-7b646bc63c54" (UID: "db3e5993-2352-41db-902d-7b646bc63c54"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 23 16:14:23 crc kubenswrapper[5050]: I1123 16:14:23.953110 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d185f9e-5f51-4519-8fe2-4700c0b6c256-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9d185f9e-5f51-4519-8fe2-4700c0b6c256" (UID: "9d185f9e-5f51-4519-8fe2-4700c0b6c256"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 23 16:14:23 crc kubenswrapper[5050]: I1123 16:14:23.957244 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/db3e5993-2352-41db-902d-7b646bc63c54-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "db3e5993-2352-41db-902d-7b646bc63c54" (UID: "db3e5993-2352-41db-902d-7b646bc63c54"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 23 16:14:23 crc kubenswrapper[5050]: I1123 16:14:23.957688 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d185f9e-5f51-4519-8fe2-4700c0b6c256-config-data" (OuterVolumeSpecName: "config-data") pod "9d185f9e-5f51-4519-8fe2-4700c0b6c256" (UID: "9d185f9e-5f51-4519-8fe2-4700c0b6c256"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 23 16:14:23 crc kubenswrapper[5050]: I1123 16:14:23.967948 5050 scope.go:117] "RemoveContainer" containerID="c0f7133b79b3b894ed9e597032cfdfbbc0e6702826c5040b393756a23c6539ea"
Nov 23 16:14:23 crc kubenswrapper[5050]: I1123 16:14:23.993058 5050 scope.go:117] "RemoveContainer" containerID="cdb6a0a782c32ebe569131cd812b79b91e886414c8238139c2b579b09a61c60e"
Nov 23 16:14:23 crc kubenswrapper[5050]: E1123 16:14:23.993768 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cdb6a0a782c32ebe569131cd812b79b91e886414c8238139c2b579b09a61c60e\": container with ID starting with cdb6a0a782c32ebe569131cd812b79b91e886414c8238139c2b579b09a61c60e not found: ID does not exist" containerID="cdb6a0a782c32ebe569131cd812b79b91e886414c8238139c2b579b09a61c60e"
Nov 23 16:14:23 crc kubenswrapper[5050]: I1123 16:14:23.993837 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cdb6a0a782c32ebe569131cd812b79b91e886414c8238139c2b579b09a61c60e"} err="failed to get container status \"cdb6a0a782c32ebe569131cd812b79b91e886414c8238139c2b579b09a61c60e\": rpc error: code = NotFound desc = could not find container \"cdb6a0a782c32ebe569131cd812b79b91e886414c8238139c2b579b09a61c60e\": container with ID starting with cdb6a0a782c32ebe569131cd812b79b91e886414c8238139c2b579b09a61c60e not found: ID does not exist"
Nov 23 16:14:23 crc kubenswrapper[5050]: I1123 16:14:23.993874 5050 scope.go:117] "RemoveContainer" containerID="c0f7133b79b3b894ed9e597032cfdfbbc0e6702826c5040b393756a23c6539ea"
Nov 23 16:14:23 crc kubenswrapper[5050]: E1123 16:14:23.994359 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c0f7133b79b3b894ed9e597032cfdfbbc0e6702826c5040b393756a23c6539ea\": container with ID starting with c0f7133b79b3b894ed9e597032cfdfbbc0e6702826c5040b393756a23c6539ea not found: ID does not exist" containerID="c0f7133b79b3b894ed9e597032cfdfbbc0e6702826c5040b393756a23c6539ea"
Nov 23 16:14:23 crc kubenswrapper[5050]: I1123 16:14:23.994400 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c0f7133b79b3b894ed9e597032cfdfbbc0e6702826c5040b393756a23c6539ea"} err="failed to get container status \"c0f7133b79b3b894ed9e597032cfdfbbc0e6702826c5040b393756a23c6539ea\": rpc error: code = NotFound desc = could not find container \"c0f7133b79b3b894ed9e597032cfdfbbc0e6702826c5040b393756a23c6539ea\": container with ID starting with c0f7133b79b3b894ed9e597032cfdfbbc0e6702826c5040b393756a23c6539ea not found: ID does not exist"
Nov 23 16:14:23 crc kubenswrapper[5050]: I1123 16:14:23.994431 5050 scope.go:117] "RemoveContainer" containerID="cdb6a0a782c32ebe569131cd812b79b91e886414c8238139c2b579b09a61c60e"
Nov 23 16:14:23 crc kubenswrapper[5050]: I1123 16:14:23.994791 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cdb6a0a782c32ebe569131cd812b79b91e886414c8238139c2b579b09a61c60e"} err="failed to get container status \"cdb6a0a782c32ebe569131cd812b79b91e886414c8238139c2b579b09a61c60e\": rpc error: code = NotFound desc = could not find container \"cdb6a0a782c32ebe569131cd812b79b91e886414c8238139c2b579b09a61c60e\": container with ID starting with cdb6a0a782c32ebe569131cd812b79b91e886414c8238139c2b579b09a61c60e not found: ID does not exist"
Nov 23 16:14:23 crc kubenswrapper[5050]: I1123 16:14:23.994812 5050 scope.go:117] "RemoveContainer" containerID="c0f7133b79b3b894ed9e597032cfdfbbc0e6702826c5040b393756a23c6539ea"
Nov 23 16:14:23 crc kubenswrapper[5050]: I1123 16:14:23.995145 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c0f7133b79b3b894ed9e597032cfdfbbc0e6702826c5040b393756a23c6539ea"} err="failed to get container status \"c0f7133b79b3b894ed9e597032cfdfbbc0e6702826c5040b393756a23c6539ea\": rpc error: code = NotFound desc = could not find container \"c0f7133b79b3b894ed9e597032cfdfbbc0e6702826c5040b393756a23c6539ea\": container with ID starting with c0f7133b79b3b894ed9e597032cfdfbbc0e6702826c5040b393756a23c6539ea not found: ID does not exist"
Nov 23 16:14:23 crc kubenswrapper[5050]: I1123 16:14:23.995203 5050 scope.go:117] "RemoveContainer" containerID="4af2f8c17211fe5c9f021e5cc95f8117ceb1f0b6c0132c59df0bd9f71a8b50a2"
Nov 23 16:14:24 crc kubenswrapper[5050]: I1123 16:14:24.018985 5050 scope.go:117] "RemoveContainer" containerID="7035a08f3f17519852399129d1558b2291be907d5f656114e8d926c79f7d2156"
Nov 23 16:14:24 crc kubenswrapper[5050]: I1123 16:14:24.020261 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cnjf9\" (UniqueName: \"kubernetes.io/projected/9d185f9e-5f51-4519-8fe2-4700c0b6c256-kube-api-access-cnjf9\") on node \"crc\" DevicePath \"\""
Nov 23 16:14:24 crc kubenswrapper[5050]: I1123 16:14:24.020297 5050 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9d185f9e-5f51-4519-8fe2-4700c0b6c256-config-data\") on node \"crc\" DevicePath \"\""
Nov 23 16:14:24 crc kubenswrapper[5050]: I1123 16:14:24.020310 5050 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/db3e5993-2352-41db-902d-7b646bc63c54-logs\") on node \"crc\" DevicePath \"\""
Nov 23 16:14:24 crc kubenswrapper[5050]: I1123 16:14:24.020327 5050 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/db3e5993-2352-41db-902d-7b646bc63c54-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 23 16:14:24 crc kubenswrapper[5050]: I1123 16:14:24.020342 5050 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/db3e5993-2352-41db-902d-7b646bc63c54-config-data\") on node \"crc\" DevicePath \"\""
Nov 23 16:14:24 crc kubenswrapper[5050]: I1123 16:14:24.020353 5050 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d185f9e-5f51-4519-8fe2-4700c0b6c256-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 23 16:14:24 crc kubenswrapper[5050]: I1123 16:14:24.020363 5050 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9d185f9e-5f51-4519-8fe2-4700c0b6c256-logs\") on node \"crc\" DevicePath \"\""
Nov 23 16:14:24 crc kubenswrapper[5050]: I1123 16:14:24.020374 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qh5ln\" (UniqueName: \"kubernetes.io/projected/db3e5993-2352-41db-902d-7b646bc63c54-kube-api-access-qh5ln\") on node \"crc\" DevicePath \"\""
Nov 23 16:14:24 crc kubenswrapper[5050]: I1123 16:14:24.038151 5050 scope.go:117] "RemoveContainer" containerID="4af2f8c17211fe5c9f021e5cc95f8117ceb1f0b6c0132c59df0bd9f71a8b50a2"
Nov 23 16:14:24 crc kubenswrapper[5050]: E1123 16:14:24.038719 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4af2f8c17211fe5c9f021e5cc95f8117ceb1f0b6c0132c59df0bd9f71a8b50a2\": container with ID starting with 4af2f8c17211fe5c9f021e5cc95f8117ceb1f0b6c0132c59df0bd9f71a8b50a2 not found: ID does not exist" containerID="4af2f8c17211fe5c9f021e5cc95f8117ceb1f0b6c0132c59df0bd9f71a8b50a2"
Nov 23 16:14:24 crc kubenswrapper[5050]: I1123 16:14:24.038780 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4af2f8c17211fe5c9f021e5cc95f8117ceb1f0b6c0132c59df0bd9f71a8b50a2"} err="failed to get container status \"4af2f8c17211fe5c9f021e5cc95f8117ceb1f0b6c0132c59df0bd9f71a8b50a2\": rpc error: code = NotFound desc = could not find container \"4af2f8c17211fe5c9f021e5cc95f8117ceb1f0b6c0132c59df0bd9f71a8b50a2\": container with ID starting with 4af2f8c17211fe5c9f021e5cc95f8117ceb1f0b6c0132c59df0bd9f71a8b50a2 not found: ID does not exist"
Nov 23 16:14:24 crc kubenswrapper[5050]: I1123 16:14:24.038818 5050 scope.go:117] "RemoveContainer" containerID="7035a08f3f17519852399129d1558b2291be907d5f656114e8d926c79f7d2156"
Nov 23 16:14:24 crc kubenswrapper[5050]: E1123 16:14:24.039248 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7035a08f3f17519852399129d1558b2291be907d5f656114e8d926c79f7d2156\": container with ID starting with 7035a08f3f17519852399129d1558b2291be907d5f656114e8d926c79f7d2156 not found: ID does not exist" containerID="7035a08f3f17519852399129d1558b2291be907d5f656114e8d926c79f7d2156"
Nov 23 16:14:24 crc kubenswrapper[5050]: I1123 16:14:24.039295 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7035a08f3f17519852399129d1558b2291be907d5f656114e8d926c79f7d2156"} err="failed to get container status \"7035a08f3f17519852399129d1558b2291be907d5f656114e8d926c79f7d2156\": rpc error: code = NotFound desc = could not find container \"7035a08f3f17519852399129d1558b2291be907d5f656114e8d926c79f7d2156\": container with ID starting with 7035a08f3f17519852399129d1558b2291be907d5f656114e8d926c79f7d2156 not found: ID does not exist"
Nov 23 16:14:24 crc kubenswrapper[5050]: I1123 16:14:24.039311 5050 scope.go:117] "RemoveContainer" containerID="4af2f8c17211fe5c9f021e5cc95f8117ceb1f0b6c0132c59df0bd9f71a8b50a2"
Nov 23 16:14:24 crc kubenswrapper[5050]: I1123 16:14:24.039681 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4af2f8c17211fe5c9f021e5cc95f8117ceb1f0b6c0132c59df0bd9f71a8b50a2"} err="failed to get container status \"4af2f8c17211fe5c9f021e5cc95f8117ceb1f0b6c0132c59df0bd9f71a8b50a2\": rpc error: code = NotFound desc = could not find container \"4af2f8c17211fe5c9f021e5cc95f8117ceb1f0b6c0132c59df0bd9f71a8b50a2\": container with ID starting with 4af2f8c17211fe5c9f021e5cc95f8117ceb1f0b6c0132c59df0bd9f71a8b50a2 not found: ID does not exist"
Nov 23 16:14:24 crc kubenswrapper[5050]: I1123 16:14:24.039705 5050 scope.go:117] "RemoveContainer" containerID="7035a08f3f17519852399129d1558b2291be907d5f656114e8d926c79f7d2156"
Nov 23 16:14:24 crc kubenswrapper[5050]: I1123 16:14:24.040019 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7035a08f3f17519852399129d1558b2291be907d5f656114e8d926c79f7d2156"} err="failed to get container status \"7035a08f3f17519852399129d1558b2291be907d5f656114e8d926c79f7d2156\": rpc error: code = NotFound desc = could not find container \"7035a08f3f17519852399129d1558b2291be907d5f656114e8d926c79f7d2156\": container with ID starting with 7035a08f3f17519852399129d1558b2291be907d5f656114e8d926c79f7d2156 not found: ID does not exist"
Nov 23 16:14:24 crc kubenswrapper[5050]: I1123 16:14:24.302711 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"]
Nov 23 16:14:24 crc kubenswrapper[5050]: I1123 16:14:24.317517 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"]
Nov 23 16:14:24 crc kubenswrapper[5050]: I1123 16:14:24.325270 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"]
Nov 23 16:14:24 crc kubenswrapper[5050]: I1123 16:14:24.339761 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"]
Nov 23 16:14:24 crc kubenswrapper[5050]: I1123 16:14:24.353522 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"]
Nov 23 16:14:24 crc kubenswrapper[5050]: E1123 16:14:24.354163 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e17c8304-cb0e-4fc5-b97f-17d6b65a4e13" containerName="nova-manage"
Nov 23 16:14:24 crc kubenswrapper[5050]: I1123 16:14:24.354187 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="e17c8304-cb0e-4fc5-b97f-17d6b65a4e13" containerName="nova-manage"
Nov 23 16:14:24 crc kubenswrapper[5050]: E1123 16:14:24.354214 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="db3e5993-2352-41db-902d-7b646bc63c54" containerName="nova-metadata-log"
Nov 23 16:14:24 crc kubenswrapper[5050]: I1123 16:14:24.354223 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="db3e5993-2352-41db-902d-7b646bc63c54" containerName="nova-metadata-log"
Nov 23 16:14:24 crc kubenswrapper[5050]: E1123 16:14:24.354252 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9d185f9e-5f51-4519-8fe2-4700c0b6c256" containerName="nova-api-log"
Nov 23 16:14:24 crc kubenswrapper[5050]: I1123 16:14:24.354261 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="9d185f9e-5f51-4519-8fe2-4700c0b6c256" containerName="nova-api-log"
Nov 23 16:14:24 crc kubenswrapper[5050]: E1123 16:14:24.354279 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9d185f9e-5f51-4519-8fe2-4700c0b6c256" containerName="nova-api-api"
Nov 23 16:14:24 crc kubenswrapper[5050]: I1123 16:14:24.354287 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="9d185f9e-5f51-4519-8fe2-4700c0b6c256" containerName="nova-api-api"
Nov 23 16:14:24 crc kubenswrapper[5050]: E1123 16:14:24.354299 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="db3e5993-2352-41db-902d-7b646bc63c54" containerName="nova-metadata-metadata"
Nov 23 16:14:24 crc kubenswrapper[5050]: I1123 16:14:24.354306 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="db3e5993-2352-41db-902d-7b646bc63c54" containerName="nova-metadata-metadata"
Nov 23 16:14:24 crc kubenswrapper[5050]: I1123 16:14:24.354589 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="db3e5993-2352-41db-902d-7b646bc63c54" containerName="nova-metadata-metadata"
Nov 23 16:14:24 crc kubenswrapper[5050]: I1123 16:14:24.354613 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="db3e5993-2352-41db-902d-7b646bc63c54" containerName="nova-metadata-log"
Nov 23 16:14:24 crc kubenswrapper[5050]: I1123 16:14:24.354640 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="9d185f9e-5f51-4519-8fe2-4700c0b6c256" containerName="nova-api-api"
Nov 23 16:14:24 crc kubenswrapper[5050]: I1123 16:14:24.354655 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="9d185f9e-5f51-4519-8fe2-4700c0b6c256" containerName="nova-api-log"
Nov 23 16:14:24 crc kubenswrapper[5050]: I1123 16:14:24.354672 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="e17c8304-cb0e-4fc5-b97f-17d6b65a4e13" containerName="nova-manage"
Nov 23 16:14:24 crc kubenswrapper[5050]: I1123 16:14:24.356207 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Nov 23 16:14:24 crc kubenswrapper[5050]: I1123 16:14:24.360718 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data"
Nov 23 16:14:24 crc kubenswrapper[5050]: I1123 16:14:24.375526 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"]
Nov 23 16:14:24 crc kubenswrapper[5050]: I1123 16:14:24.398627 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"]
Nov 23 16:14:24 crc kubenswrapper[5050]: I1123 16:14:24.402989 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-cell1-novncproxy-0"
Nov 23 16:14:24 crc kubenswrapper[5050]: I1123 16:14:24.405663 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Nov 23 16:14:24 crc kubenswrapper[5050]: I1123 16:14:24.411913 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data"
Nov 23 16:14:24 crc kubenswrapper[5050]: I1123 16:14:24.418469 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-cell1-novncproxy-0"
Nov 23 16:14:24 crc kubenswrapper[5050]: I1123 16:14:24.420555 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"]
Nov 23 16:14:24 crc kubenswrapper[5050]: I1123 16:14:24.438791 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-7786bd4bc7-kqvfg"
Nov 23 16:14:24 crc kubenswrapper[5050]: I1123 16:14:24.456816 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d55bx\" (UniqueName: \"kubernetes.io/projected/c2cb92ab-819c-437a-bbb2-d875552ba38f-kube-api-access-d55bx\") pod \"nova-metadata-0\" (UID: \"c2cb92ab-819c-437a-bbb2-d875552ba38f\") " pod="openstack/nova-metadata-0"
Nov 23 16:14:24 crc kubenswrapper[5050]: I1123 16:14:24.456900 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c2cb92ab-819c-437a-bbb2-d875552ba38f-config-data\") pod \"nova-metadata-0\" (UID: \"c2cb92ab-819c-437a-bbb2-d875552ba38f\") " pod="openstack/nova-metadata-0"
Nov 23 16:14:24 crc kubenswrapper[5050]: I1123 16:14:24.456947 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c2cb92ab-819c-437a-bbb2-d875552ba38f-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"c2cb92ab-819c-437a-bbb2-d875552ba38f\") " pod="openstack/nova-metadata-0"
Nov 23 16:14:24 crc kubenswrapper[5050]: I1123 16:14:24.457187 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c2cb92ab-819c-437a-bbb2-d875552ba38f-logs\") pod \"nova-metadata-0\" (UID: \"c2cb92ab-819c-437a-bbb2-d875552ba38f\") " pod="openstack/nova-metadata-0"
Nov 23 16:14:24 crc kubenswrapper[5050]: I1123 16:14:24.504866 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6cf4bbfb7f-v86xr"]
Nov 23 16:14:24 crc kubenswrapper[5050]: I1123 16:14:24.505467 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6cf4bbfb7f-v86xr" podUID="515fa021-1f02-423a-91df-2495dbfb5e2f" containerName="dnsmasq-dns" containerID="cri-o://22ee77d4e7cfee7671ec8e47bda4aefe5c23243a37db03375ff410a00cea304e" gracePeriod=10
Nov 23 16:14:24 crc kubenswrapper[5050]: I1123 16:14:24.560217 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1e49c51f-a38c-410a-98c1-39d92784d4eb-config-data\") pod \"nova-api-0\" (UID: \"1e49c51f-a38c-410a-98c1-39d92784d4eb\") " pod="openstack/nova-api-0"
Nov 23 16:14:24 crc kubenswrapper[5050]: I1123 16:14:24.560334 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c2cb92ab-819c-437a-bbb2-d875552ba38f-config-data\") pod \"nova-metadata-0\" (UID: \"c2cb92ab-819c-437a-bbb2-d875552ba38f\") " pod="openstack/nova-metadata-0"
Nov 23 16:14:24 crc kubenswrapper[5050]: I1123 16:14:24.560404 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c2cb92ab-819c-437a-bbb2-d875552ba38f-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"c2cb92ab-819c-437a-bbb2-d875552ba38f\") " pod="openstack/nova-metadata-0"
Nov 23 16:14:24 crc kubenswrapper[5050]: I1123 16:14:24.560437 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1e49c51f-a38c-410a-98c1-39d92784d4eb-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"1e49c51f-a38c-410a-98c1-39d92784d4eb\") " pod="openstack/nova-api-0"
Nov 23 16:14:24 crc kubenswrapper[5050]: I1123 16:14:24.560573 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1e49c51f-a38c-410a-98c1-39d92784d4eb-logs\") pod \"nova-api-0\" (UID: \"1e49c51f-a38c-410a-98c1-39d92784d4eb\") " pod="openstack/nova-api-0"
Nov 23 16:14:24 crc kubenswrapper[5050]: I1123 16:14:24.560709 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bc8x5\" (UniqueName: \"kubernetes.io/projected/1e49c51f-a38c-410a-98c1-39d92784d4eb-kube-api-access-bc8x5\") pod \"nova-api-0\" (UID: \"1e49c51f-a38c-410a-98c1-39d92784d4eb\") " pod="openstack/nova-api-0"
Nov 23 16:14:24 crc kubenswrapper[5050]: I1123 16:14:24.560783 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c2cb92ab-819c-437a-bbb2-d875552ba38f-logs\") pod \"nova-metadata-0\" (UID: \"c2cb92ab-819c-437a-bbb2-d875552ba38f\") " pod="openstack/nova-metadata-0"
Nov 23 16:14:24 crc kubenswrapper[5050]: I1123 16:14:24.560898 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d55bx\" (UniqueName: \"kubernetes.io/projected/c2cb92ab-819c-437a-bbb2-d875552ba38f-kube-api-access-d55bx\") pod \"nova-metadata-0\" (UID: \"c2cb92ab-819c-437a-bbb2-d875552ba38f\") " pod="openstack/nova-metadata-0"
Nov 23 16:14:24 crc kubenswrapper[5050]: I1123 16:14:24.561891 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c2cb92ab-819c-437a-bbb2-d875552ba38f-logs\") pod \"nova-metadata-0\" (UID: \"c2cb92ab-819c-437a-bbb2-d875552ba38f\") " pod="openstack/nova-metadata-0"
Nov 23 16:14:24 crc kubenswrapper[5050]: I1123 16:14:24.566585 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c2cb92ab-819c-437a-bbb2-d875552ba38f-config-data\") pod \"nova-metadata-0\" (UID: \"c2cb92ab-819c-437a-bbb2-d875552ba38f\") " pod="openstack/nova-metadata-0"
Nov 23 16:14:24 crc kubenswrapper[5050]: I1123 16:14:24.567085 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c2cb92ab-819c-437a-bbb2-d875552ba38f-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"c2cb92ab-819c-437a-bbb2-d875552ba38f\") " pod="openstack/nova-metadata-0"
Nov 23 16:14:24 crc kubenswrapper[5050]: I1123 16:14:24.580963 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d55bx\" (UniqueName: \"kubernetes.io/projected/c2cb92ab-819c-437a-bbb2-d875552ba38f-kube-api-access-d55bx\") pod \"nova-metadata-0\" (UID: \"c2cb92ab-819c-437a-bbb2-d875552ba38f\") " pod="openstack/nova-metadata-0"
Nov 23 16:14:24 crc kubenswrapper[5050]: I1123 16:14:24.669822 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1e49c51f-a38c-410a-98c1-39d92784d4eb-config-data\") pod \"nova-api-0\" (UID: \"1e49c51f-a38c-410a-98c1-39d92784d4eb\") " pod="openstack/nova-api-0"
Nov 23 16:14:24 crc kubenswrapper[5050]: I1123 16:14:24.670259 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1e49c51f-a38c-410a-98c1-39d92784d4eb-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"1e49c51f-a38c-410a-98c1-39d92784d4eb\") " pod="openstack/nova-api-0"
Nov 23 16:14:24 crc kubenswrapper[5050]: I1123 16:14:24.670384 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1e49c51f-a38c-410a-98c1-39d92784d4eb-logs\") pod \"nova-api-0\" (UID: \"1e49c51f-a38c-410a-98c1-39d92784d4eb\") " pod="openstack/nova-api-0"
Nov 23 16:14:24 crc kubenswrapper[5050]: I1123 16:14:24.670520 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bc8x5\" (UniqueName: \"kubernetes.io/projected/1e49c51f-a38c-410a-98c1-39d92784d4eb-kube-api-access-bc8x5\") pod \"nova-api-0\" (UID: \"1e49c51f-a38c-410a-98c1-39d92784d4eb\") " pod="openstack/nova-api-0"
Nov 23 16:14:24 crc kubenswrapper[5050]: I1123 16:14:24.674926 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1e49c51f-a38c-410a-98c1-39d92784d4eb-logs\") pod \"nova-api-0\" (UID: \"1e49c51f-a38c-410a-98c1-39d92784d4eb\") " pod="openstack/nova-api-0"
Nov 23 16:14:24 crc kubenswrapper[5050]: I1123 16:14:24.675742 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1e49c51f-a38c-410a-98c1-39d92784d4eb-config-data\") pod \"nova-api-0\" (UID: \"1e49c51f-a38c-410a-98c1-39d92784d4eb\") " pod="openstack/nova-api-0"
Nov 23 16:14:24 crc kubenswrapper[5050]: I1123 16:14:24.681979 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1e49c51f-a38c-410a-98c1-39d92784d4eb-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"1e49c51f-a38c-410a-98c1-39d92784d4eb\") " pod="openstack/nova-api-0"
Nov 23 16:14:24 crc kubenswrapper[5050]: I1123 16:14:24.710093 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Nov 23 16:14:24 crc kubenswrapper[5050]: I1123 16:14:24.727654 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bc8x5\" (UniqueName: \"kubernetes.io/projected/1e49c51f-a38c-410a-98c1-39d92784d4eb-kube-api-access-bc8x5\") pod \"nova-api-0\" (UID: \"1e49c51f-a38c-410a-98c1-39d92784d4eb\") " pod="openstack/nova-api-0"
Nov 23 16:14:24 crc kubenswrapper[5050]: I1123 16:14:24.741725 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Nov 23 16:14:24 crc kubenswrapper[5050]: I1123 16:14:24.816935 5050 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-6cf4bbfb7f-v86xr" podUID="515fa021-1f02-423a-91df-2495dbfb5e2f" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.1.47:5353: connect: connection refused"
Nov 23 16:14:24 crc kubenswrapper[5050]: I1123 16:14:24.989304 5050 generic.go:334] "Generic (PLEG): container finished" podID="515fa021-1f02-423a-91df-2495dbfb5e2f" containerID="22ee77d4e7cfee7671ec8e47bda4aefe5c23243a37db03375ff410a00cea304e" exitCode=0
Nov 23 16:14:24 crc kubenswrapper[5050]: I1123 16:14:24.989546 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6cf4bbfb7f-v86xr" event={"ID":"515fa021-1f02-423a-91df-2495dbfb5e2f","Type":"ContainerDied","Data":"22ee77d4e7cfee7671ec8e47bda4aefe5c23243a37db03375ff410a00cea304e"}
Nov 23 16:14:25 crc kubenswrapper[5050]: I1123 16:14:25.009409 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-novncproxy-0"
Nov 23 16:14:25 crc kubenswrapper[5050]: I1123 16:14:25.148502 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6cf4bbfb7f-v86xr"
Nov 23 16:14:25 crc kubenswrapper[5050]: I1123 16:14:25.287068 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/515fa021-1f02-423a-91df-2495dbfb5e2f-dns-svc\") pod \"515fa021-1f02-423a-91df-2495dbfb5e2f\" (UID: \"515fa021-1f02-423a-91df-2495dbfb5e2f\") "
Nov 23 16:14:25 crc kubenswrapper[5050]: I1123 16:14:25.287183 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/515fa021-1f02-423a-91df-2495dbfb5e2f-ovsdbserver-sb\") pod \"515fa021-1f02-423a-91df-2495dbfb5e2f\" (UID: \"515fa021-1f02-423a-91df-2495dbfb5e2f\") "
Nov 23 16:14:25 crc kubenswrapper[5050]: I1123 16:14:25.287318 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/515fa021-1f02-423a-91df-2495dbfb5e2f-config\") pod \"515fa021-1f02-423a-91df-2495dbfb5e2f\" (UID: \"515fa021-1f02-423a-91df-2495dbfb5e2f\") "
Nov 23 16:14:25 crc kubenswrapper[5050]: I1123 16:14:25.287373 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/515fa021-1f02-423a-91df-2495dbfb5e2f-ovsdbserver-nb\") pod \"515fa021-1f02-423a-91df-2495dbfb5e2f\" (UID: \"515fa021-1f02-423a-91df-2495dbfb5e2f\") "
Nov 23 16:14:25 crc kubenswrapper[5050]: I1123 16:14:25.287410 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-87ns8\" (UniqueName: \"kubernetes.io/projected/515fa021-1f02-423a-91df-2495dbfb5e2f-kube-api-access-87ns8\") pod \"515fa021-1f02-423a-91df-2495dbfb5e2f\" (UID: \"515fa021-1f02-423a-91df-2495dbfb5e2f\") "
Nov 23 16:14:25 crc kubenswrapper[5050]: I1123 16:14:25.296345 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/515fa021-1f02-423a-91df-2495dbfb5e2f-kube-api-access-87ns8" (OuterVolumeSpecName: "kube-api-access-87ns8") pod "515fa021-1f02-423a-91df-2495dbfb5e2f" (UID: "515fa021-1f02-423a-91df-2495dbfb5e2f"). InnerVolumeSpecName "kube-api-access-87ns8". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 23 16:14:25 crc kubenswrapper[5050]: I1123 16:14:25.354528 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/515fa021-1f02-423a-91df-2495dbfb5e2f-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "515fa021-1f02-423a-91df-2495dbfb5e2f" (UID: "515fa021-1f02-423a-91df-2495dbfb5e2f"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 23 16:14:25 crc kubenswrapper[5050]: I1123 16:14:25.374519 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"]
Nov 23 16:14:25 crc kubenswrapper[5050]: I1123 16:14:25.378258 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/515fa021-1f02-423a-91df-2495dbfb5e2f-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "515fa021-1f02-423a-91df-2495dbfb5e2f" (UID: "515fa021-1f02-423a-91df-2495dbfb5e2f"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 23 16:14:25 crc kubenswrapper[5050]: I1123 16:14:25.383539 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/515fa021-1f02-423a-91df-2495dbfb5e2f-config" (OuterVolumeSpecName: "config") pod "515fa021-1f02-423a-91df-2495dbfb5e2f" (UID: "515fa021-1f02-423a-91df-2495dbfb5e2f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 23 16:14:25 crc kubenswrapper[5050]: W1123 16:14:25.384222 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1e49c51f_a38c_410a_98c1_39d92784d4eb.slice/crio-d8f06a98fbd5d242140a29842154dcc83151510d95f60bc18467acd45cb8c451 WatchSource:0}: Error finding container d8f06a98fbd5d242140a29842154dcc83151510d95f60bc18467acd45cb8c451: Status 404 returned error can't find the container with id d8f06a98fbd5d242140a29842154dcc83151510d95f60bc18467acd45cb8c451
Nov 23 16:14:25 crc kubenswrapper[5050]: I1123 16:14:25.389668 5050 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/515fa021-1f02-423a-91df-2495dbfb5e2f-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Nov 23 16:14:25 crc kubenswrapper[5050]: I1123 16:14:25.389704 5050 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/515fa021-1f02-423a-91df-2495dbfb5e2f-config\") on node \"crc\" DevicePath \"\""
Nov 23 16:14:25 crc kubenswrapper[5050]: I1123 16:14:25.389714 5050 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/515fa021-1f02-423a-91df-2495dbfb5e2f-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Nov 23 16:14:25 crc kubenswrapper[5050]: I1123 16:14:25.389726 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-87ns8\" (UniqueName: \"kubernetes.io/projected/515fa021-1f02-423a-91df-2495dbfb5e2f-kube-api-access-87ns8\") on node \"crc\" DevicePath \"\""
Nov 23 16:14:25 crc kubenswrapper[5050]: I1123 16:14:25.403268 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/515fa021-1f02-423a-91df-2495dbfb5e2f-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "515fa021-1f02-423a-91df-2495dbfb5e2f" (UID: "515fa021-1f02-423a-91df-2495dbfb5e2f"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 23 16:14:25 crc kubenswrapper[5050]: I1123 16:14:25.456329 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"]
Nov 23 16:14:25 crc kubenswrapper[5050]: I1123 16:14:25.493592 5050 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/515fa021-1f02-423a-91df-2495dbfb5e2f-dns-svc\") on node \"crc\" DevicePath \"\""
Nov 23 16:14:25 crc kubenswrapper[5050]: I1123 16:14:25.565709 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d185f9e-5f51-4519-8fe2-4700c0b6c256" path="/var/lib/kubelet/pods/9d185f9e-5f51-4519-8fe2-4700c0b6c256/volumes"
Nov 23 16:14:25 crc kubenswrapper[5050]: I1123 16:14:25.566329 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="db3e5993-2352-41db-902d-7b646bc63c54" path="/var/lib/kubelet/pods/db3e5993-2352-41db-902d-7b646bc63c54/volumes"
Nov 23 16:14:26 crc kubenswrapper[5050]: I1123 16:14:26.005143 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6cf4bbfb7f-v86xr" event={"ID":"515fa021-1f02-423a-91df-2495dbfb5e2f","Type":"ContainerDied","Data":"bf27b8ea16a2b862acd3f23375157d410876eb2bc60b1e08c5596b25645e390c"}
Nov 23 16:14:26 crc kubenswrapper[5050]: I1123 16:14:26.005498 5050 scope.go:117] "RemoveContainer" containerID="22ee77d4e7cfee7671ec8e47bda4aefe5c23243a37db03375ff410a00cea304e"
Nov 23 16:14:26 crc kubenswrapper[5050]: I1123 16:14:26.005284 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6cf4bbfb7f-v86xr"
Nov 23 16:14:26 crc kubenswrapper[5050]: I1123 16:14:26.014030 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"1e49c51f-a38c-410a-98c1-39d92784d4eb","Type":"ContainerStarted","Data":"9a18699848dd6e5ad0c5e1a3d342c04f8d6b6f2a6bb8e2f69d52ad54001b32a1"}
Nov 23 16:14:26 crc kubenswrapper[5050]: I1123 16:14:26.014068 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"1e49c51f-a38c-410a-98c1-39d92784d4eb","Type":"ContainerStarted","Data":"d22af8bdb1c1472263d6b1a48924c623b56baa63b8d02b5350a63356ef6f336f"}
Nov 23 16:14:26 crc kubenswrapper[5050]: I1123 16:14:26.014086 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"1e49c51f-a38c-410a-98c1-39d92784d4eb","Type":"ContainerStarted","Data":"d8f06a98fbd5d242140a29842154dcc83151510d95f60bc18467acd45cb8c451"}
Nov 23 16:14:26 crc kubenswrapper[5050]: I1123 16:14:26.032043 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"c2cb92ab-819c-437a-bbb2-d875552ba38f","Type":"ContainerStarted","Data":"8c6a539226f2db8bbacbfef333c5b628046a9e7d8b3ce7e7f77410882543729e"}
Nov 23 16:14:26 crc kubenswrapper[5050]: I1123 16:14:26.032104 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"c2cb92ab-819c-437a-bbb2-d875552ba38f","Type":"ContainerStarted","Data":"d3b6cf58cf5efa14bc8029cad5c42c4ed0b3eb6844c597c1e28f40c96567d5ab"}
Nov 23 16:14:26 crc kubenswrapper[5050]: I1123 16:14:26.032124 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"c2cb92ab-819c-437a-bbb2-d875552ba38f","Type":"ContainerStarted","Data":"811605e59a36ac173054e8ce3042b1b318f6bb3d76995cf64eebf3a1d0cf4248"}
Nov 23 16:14:26 crc kubenswrapper[5050]: I1123 16:14:26.035359 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6cf4bbfb7f-v86xr"]
Nov 23 16:14:26 crc kubenswrapper[5050]: I1123 16:14:26.042131 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6cf4bbfb7f-v86xr"]
Nov 23 16:14:26 crc kubenswrapper[5050]: I1123 16:14:26.055160 5050 scope.go:117] "RemoveContainer" containerID="0ed09b738c44845aff44e59e2d6691123ae25253b595f9203d65428d4580cdea"
Nov 23 16:14:26 crc kubenswrapper[5050]: I1123 16:14:26.068766 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.068740347 podStartE2EDuration="2.068740347s" podCreationTimestamp="2025-11-23 16:14:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 16:14:26.063077637 +0000 UTC m=+5561.230074142" watchObservedRunningTime="2025-11-23 16:14:26.068740347 +0000 UTC m=+5561.235736832"
Nov 23 16:14:26 crc kubenswrapper[5050]: I1123 16:14:26.095664 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.095643697 podStartE2EDuration="2.095643697s" podCreationTimestamp="2025-11-23 16:14:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 16:14:26.087036604 +0000 UTC m=+5561.254033089" watchObservedRunningTime="2025-11-23 16:14:26.095643697 +0000 UTC m=+5561.262640182"
Nov 23 16:14:27 crc kubenswrapper[5050]: I1123 16:14:27.047094 5050 generic.go:334] "Generic (PLEG): container finished" podID="45984aec-f81e-44de-a1f3-cb3dfe1f8d27" containerID="a3fe00a082e84da528b621ea0550508537aa9293496595fd053f0b80402f8937" exitCode=0
Nov 23 16:14:27 crc kubenswrapper[5050]: I1123 16:14:27.047312 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"45984aec-f81e-44de-a1f3-cb3dfe1f8d27","Type":"ContainerDied","Data":"a3fe00a082e84da528b621ea0550508537aa9293496595fd053f0b80402f8937"}
Nov 23 16:14:27 crc kubenswrapper[5050]: I1123 16:14:27.157379 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Nov 23 16:14:27 crc kubenswrapper[5050]: I1123 16:14:27.249197 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7q976\" (UniqueName: \"kubernetes.io/projected/45984aec-f81e-44de-a1f3-cb3dfe1f8d27-kube-api-access-7q976\") pod \"45984aec-f81e-44de-a1f3-cb3dfe1f8d27\" (UID: \"45984aec-f81e-44de-a1f3-cb3dfe1f8d27\") "
Nov 23 16:14:27 crc kubenswrapper[5050]: I1123 16:14:27.249580 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/45984aec-f81e-44de-a1f3-cb3dfe1f8d27-config-data\") pod \"45984aec-f81e-44de-a1f3-cb3dfe1f8d27\" (UID: \"45984aec-f81e-44de-a1f3-cb3dfe1f8d27\") "
Nov 23 16:14:27 crc kubenswrapper[5050]: I1123 16:14:27.249738 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/45984aec-f81e-44de-a1f3-cb3dfe1f8d27-combined-ca-bundle\") pod \"45984aec-f81e-44de-a1f3-cb3dfe1f8d27\" (UID: \"45984aec-f81e-44de-a1f3-cb3dfe1f8d27\") "
Nov 23 16:14:27 crc kubenswrapper[5050]: I1123 16:14:27.257040 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/45984aec-f81e-44de-a1f3-cb3dfe1f8d27-kube-api-access-7q976" (OuterVolumeSpecName: "kube-api-access-7q976") pod "45984aec-f81e-44de-a1f3-cb3dfe1f8d27" (UID: "45984aec-f81e-44de-a1f3-cb3dfe1f8d27"). InnerVolumeSpecName "kube-api-access-7q976". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 23 16:14:27 crc kubenswrapper[5050]: I1123 16:14:27.287463 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/45984aec-f81e-44de-a1f3-cb3dfe1f8d27-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "45984aec-f81e-44de-a1f3-cb3dfe1f8d27" (UID: "45984aec-f81e-44de-a1f3-cb3dfe1f8d27"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 23 16:14:27 crc kubenswrapper[5050]: I1123 16:14:27.298963 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/45984aec-f81e-44de-a1f3-cb3dfe1f8d27-config-data" (OuterVolumeSpecName: "config-data") pod "45984aec-f81e-44de-a1f3-cb3dfe1f8d27" (UID: "45984aec-f81e-44de-a1f3-cb3dfe1f8d27"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 23 16:14:27 crc kubenswrapper[5050]: I1123 16:14:27.353651 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7q976\" (UniqueName: \"kubernetes.io/projected/45984aec-f81e-44de-a1f3-cb3dfe1f8d27-kube-api-access-7q976\") on node \"crc\" DevicePath \"\""
Nov 23 16:14:27 crc kubenswrapper[5050]: I1123 16:14:27.353777 5050 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/45984aec-f81e-44de-a1f3-cb3dfe1f8d27-config-data\") on node \"crc\" DevicePath \"\""
Nov 23 16:14:27 crc kubenswrapper[5050]: I1123 16:14:27.353813 5050 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/45984aec-f81e-44de-a1f3-cb3dfe1f8d27-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 23 16:14:27 crc kubenswrapper[5050]: I1123 16:14:27.569744 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="515fa021-1f02-423a-91df-2495dbfb5e2f" path="/var/lib/kubelet/pods/515fa021-1f02-423a-91df-2495dbfb5e2f/volumes"
Nov 23 16:14:28 crc kubenswrapper[5050]: I1123 16:14:28.060345 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"45984aec-f81e-44de-a1f3-cb3dfe1f8d27","Type":"ContainerDied","Data":"1a0d22bc7a93a42887f0605017994c5488af242e85f40e55a32aaca8579a2984"}
Nov 23 16:14:28 crc kubenswrapper[5050]: I1123 16:14:28.060427 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Nov 23 16:14:28 crc kubenswrapper[5050]: I1123 16:14:28.060940 5050 scope.go:117] "RemoveContainer" containerID="a3fe00a082e84da528b621ea0550508537aa9293496595fd053f0b80402f8937"
Nov 23 16:14:28 crc kubenswrapper[5050]: I1123 16:14:28.087520 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"]
Nov 23 16:14:28 crc kubenswrapper[5050]: I1123 16:14:28.096626 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"]
Nov 23 16:14:28 crc kubenswrapper[5050]: I1123 16:14:28.114710 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"]
Nov 23 16:14:28 crc kubenswrapper[5050]: E1123 16:14:28.115222 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="45984aec-f81e-44de-a1f3-cb3dfe1f8d27" containerName="nova-scheduler-scheduler"
Nov 23 16:14:28 crc kubenswrapper[5050]: I1123 16:14:28.115241 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="45984aec-f81e-44de-a1f3-cb3dfe1f8d27" containerName="nova-scheduler-scheduler"
Nov 23 16:14:28 crc kubenswrapper[5050]: E1123 16:14:28.115265 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="515fa021-1f02-423a-91df-2495dbfb5e2f" containerName="dnsmasq-dns"
Nov 23 16:14:28 crc kubenswrapper[5050]: I1123 16:14:28.115273 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="515fa021-1f02-423a-91df-2495dbfb5e2f" containerName="dnsmasq-dns"
Nov 23 16:14:28 crc kubenswrapper[5050]: E1123 16:14:28.115288 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="515fa021-1f02-423a-91df-2495dbfb5e2f" containerName="init"
Nov 23 16:14:28 crc kubenswrapper[5050]: I1123 16:14:28.115295 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="515fa021-1f02-423a-91df-2495dbfb5e2f" containerName="init"
Nov 23 16:14:28 crc kubenswrapper[5050]: I1123 16:14:28.115498 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="515fa021-1f02-423a-91df-2495dbfb5e2f" containerName="dnsmasq-dns"
Nov 23 16:14:28 crc kubenswrapper[5050]: I1123 16:14:28.115518 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="45984aec-f81e-44de-a1f3-cb3dfe1f8d27" containerName="nova-scheduler-scheduler"
Nov 23 16:14:28 crc kubenswrapper[5050]: I1123 16:14:28.116274 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Nov 23 16:14:28 crc kubenswrapper[5050]: I1123 16:14:28.122289 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data"
Nov 23 16:14:28 crc kubenswrapper[5050]: I1123 16:14:28.140842 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"]
Nov 23 16:14:28 crc kubenswrapper[5050]: I1123 16:14:28.278145 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ad227747-4a44-4fc2-be9e-64f911bdd923-config-data\") pod \"nova-scheduler-0\" (UID: \"ad227747-4a44-4fc2-be9e-64f911bdd923\") " pod="openstack/nova-scheduler-0"
Nov 23 16:14:28 crc kubenswrapper[5050]: I1123 16:14:28.278246 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gshkj\" (UniqueName: \"kubernetes.io/projected/ad227747-4a44-4fc2-be9e-64f911bdd923-kube-api-access-gshkj\") pod \"nova-scheduler-0\" (UID: \"ad227747-4a44-4fc2-be9e-64f911bdd923\") " pod="openstack/nova-scheduler-0"
Nov 23 16:14:28 crc kubenswrapper[5050]: I1123 16:14:28.278324 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ad227747-4a44-4fc2-be9e-64f911bdd923-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"ad227747-4a44-4fc2-be9e-64f911bdd923\") " pod="openstack/nova-scheduler-0"
Nov 23 16:14:28 crc kubenswrapper[5050]: I1123 16:14:28.380549 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ad227747-4a44-4fc2-be9e-64f911bdd923-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"ad227747-4a44-4fc2-be9e-64f911bdd923\") " pod="openstack/nova-scheduler-0"
Nov 23 16:14:28 crc kubenswrapper[5050]: I1123 16:14:28.380662 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ad227747-4a44-4fc2-be9e-64f911bdd923-config-data\") pod \"nova-scheduler-0\" (UID: \"ad227747-4a44-4fc2-be9e-64f911bdd923\") " pod="openstack/nova-scheduler-0"
Nov 23 16:14:28 crc kubenswrapper[5050]: I1123 16:14:28.380739 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gshkj\" (UniqueName: \"kubernetes.io/projected/ad227747-4a44-4fc2-be9e-64f911bdd923-kube-api-access-gshkj\") pod \"nova-scheduler-0\" (UID: \"ad227747-4a44-4fc2-be9e-64f911bdd923\") " pod="openstack/nova-scheduler-0"
Nov 23 16:14:28 crc kubenswrapper[5050]: I1123 16:14:28.392358 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ad227747-4a44-4fc2-be9e-64f911bdd923-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"ad227747-4a44-4fc2-be9e-64f911bdd923\") " pod="openstack/nova-scheduler-0"
Nov 23 16:14:28 crc kubenswrapper[5050]: I1123 16:14:28.399573 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ad227747-4a44-4fc2-be9e-64f911bdd923-config-data\") pod \"nova-scheduler-0\" (UID: \"ad227747-4a44-4fc2-be9e-64f911bdd923\") " pod="openstack/nova-scheduler-0"
Nov 23 16:14:28 crc kubenswrapper[5050]: I1123 16:14:28.403789 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gshkj\" (UniqueName: \"kubernetes.io/projected/ad227747-4a44-4fc2-be9e-64f911bdd923-kube-api-access-gshkj\") pod \"nova-scheduler-0\" (UID: \"ad227747-4a44-4fc2-be9e-64f911bdd923\") " pod="openstack/nova-scheduler-0"
Nov 23 16:14:28 crc kubenswrapper[5050]: I1123 16:14:28.450264 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Nov 23 16:14:28 crc kubenswrapper[5050]: I1123 16:14:28.969823 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"]
Nov 23 16:14:29 crc kubenswrapper[5050]: I1123 16:14:29.072338 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"ad227747-4a44-4fc2-be9e-64f911bdd923","Type":"ContainerStarted","Data":"db6a6cfd6f5dbb494b99ec28d0eef4d8c69c0df418eb4e981e35b493f3bfa4cf"}
Nov 23 16:14:29 crc kubenswrapper[5050]: I1123 16:14:29.224883 5050 patch_prober.go:28] interesting pod/machine-config-daemon-hlrlq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 23 16:14:29 crc kubenswrapper[5050]: I1123 16:14:29.224991 5050 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 23 16:14:29 crc kubenswrapper[5050]: I1123 16:14:29.571068 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="45984aec-f81e-44de-a1f3-cb3dfe1f8d27" path="/var/lib/kubelet/pods/45984aec-f81e-44de-a1f3-cb3dfe1f8d27/volumes"
Nov 23 16:14:29 crc kubenswrapper[5050]: I1123 16:14:29.711107 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0"
Nov 23 16:14:29 crc kubenswrapper[5050]: I1123 16:14:29.711201 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0"
Nov 23 16:14:30 crc kubenswrapper[5050]: I1123 16:14:30.087956 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"ad227747-4a44-4fc2-be9e-64f911bdd923","Type":"ContainerStarted","Data":"c6bb79375086dd527994f11bb4946119ce1e75f49d786a78d088d1fc09d0bf0b"}
Nov 23 16:14:30 crc kubenswrapper[5050]: I1123 16:14:30.125666 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.125639713 podStartE2EDuration="2.125639713s" podCreationTimestamp="2025-11-23 16:14:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 16:14:30.119755557 +0000 UTC m=+5565.286752052" watchObservedRunningTime="2025-11-23 16:14:30.125639713 +0000 UTC m=+5565.292636208"
Nov 23 16:14:31 crc kubenswrapper[5050]: I1123 16:14:31.393645 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-conductor-0"
Nov 23 16:14:31 crc kubenswrapper[5050]: I1123 16:14:31.934571 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-cell-mapping-5m4kg"]
Nov 23 16:14:31 crc kubenswrapper[5050]: I1123 16:14:31.936575 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-5m4kg"
Nov 23 16:14:31 crc kubenswrapper[5050]: I1123 16:14:31.939099 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-config-data"
Nov 23 16:14:31 crc kubenswrapper[5050]: I1123 16:14:31.939427 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-scripts"
Nov 23 16:14:31 crc kubenswrapper[5050]: I1123 16:14:31.957623 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-5m4kg"]
Nov 23 16:14:31 crc kubenswrapper[5050]: I1123 16:14:31.987152 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hx486\" (UniqueName: \"kubernetes.io/projected/a73e0803-4e30-45bd-9006-d229e8108430-kube-api-access-hx486\") pod \"nova-cell1-cell-mapping-5m4kg\" (UID: \"a73e0803-4e30-45bd-9006-d229e8108430\") " pod="openstack/nova-cell1-cell-mapping-5m4kg"
Nov 23 16:14:31 crc kubenswrapper[5050]: I1123 16:14:31.987239 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a73e0803-4e30-45bd-9006-d229e8108430-scripts\") pod \"nova-cell1-cell-mapping-5m4kg\" (UID: \"a73e0803-4e30-45bd-9006-d229e8108430\") " pod="openstack/nova-cell1-cell-mapping-5m4kg"
Nov 23 16:14:31 crc kubenswrapper[5050]: I1123 16:14:31.987265 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a73e0803-4e30-45bd-9006-d229e8108430-config-data\") pod \"nova-cell1-cell-mapping-5m4kg\" (UID: \"a73e0803-4e30-45bd-9006-d229e8108430\") " pod="openstack/nova-cell1-cell-mapping-5m4kg"
Nov 23 16:14:31 crc kubenswrapper[5050]: I1123 16:14:31.987286 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a73e0803-4e30-45bd-9006-d229e8108430-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-5m4kg\" (UID: \"a73e0803-4e30-45bd-9006-d229e8108430\") " pod="openstack/nova-cell1-cell-mapping-5m4kg"
Nov 23 16:14:32 crc kubenswrapper[5050]: I1123 16:14:32.088090 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hx486\" (UniqueName: \"kubernetes.io/projected/a73e0803-4e30-45bd-9006-d229e8108430-kube-api-access-hx486\") pod \"nova-cell1-cell-mapping-5m4kg\" (UID: \"a73e0803-4e30-45bd-9006-d229e8108430\") " pod="openstack/nova-cell1-cell-mapping-5m4kg"
Nov 23 16:14:32 crc kubenswrapper[5050]: I1123 16:14:32.088180 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a73e0803-4e30-45bd-9006-d229e8108430-scripts\") pod \"nova-cell1-cell-mapping-5m4kg\" (UID: \"a73e0803-4e30-45bd-9006-d229e8108430\") " pod="openstack/nova-cell1-cell-mapping-5m4kg"
Nov 23 16:14:32 crc kubenswrapper[5050]: I1123 16:14:32.088203 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a73e0803-4e30-45bd-9006-d229e8108430-config-data\") pod \"nova-cell1-cell-mapping-5m4kg\" (UID: \"a73e0803-4e30-45bd-9006-d229e8108430\") " pod="openstack/nova-cell1-cell-mapping-5m4kg"
Nov 23 16:14:32 crc kubenswrapper[5050]: I1123 16:14:32.088228 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a73e0803-4e30-45bd-9006-d229e8108430-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-5m4kg\" (UID: \"a73e0803-4e30-45bd-9006-d229e8108430\") " pod="openstack/nova-cell1-cell-mapping-5m4kg"
Nov 23 16:14:32 crc kubenswrapper[5050]: I1123 16:14:32.098958 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a73e0803-4e30-45bd-9006-d229e8108430-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-5m4kg\" (UID: \"a73e0803-4e30-45bd-9006-d229e8108430\") " pod="openstack/nova-cell1-cell-mapping-5m4kg"
Nov 23 16:14:32 crc kubenswrapper[5050]: I1123 16:14:32.105172 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a73e0803-4e30-45bd-9006-d229e8108430-scripts\") pod \"nova-cell1-cell-mapping-5m4kg\" (UID: \"a73e0803-4e30-45bd-9006-d229e8108430\") " pod="openstack/nova-cell1-cell-mapping-5m4kg"
Nov 23 16:14:32 crc kubenswrapper[5050]: I1123 16:14:32.105584 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hx486\" (UniqueName: \"kubernetes.io/projected/a73e0803-4e30-45bd-9006-d229e8108430-kube-api-access-hx486\") pod \"nova-cell1-cell-mapping-5m4kg\" (UID: \"a73e0803-4e30-45bd-9006-d229e8108430\") " pod="openstack/nova-cell1-cell-mapping-5m4kg"
Nov 23 16:14:32 crc kubenswrapper[5050]: I1123 16:14:32.112984 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a73e0803-4e30-45bd-9006-d229e8108430-config-data\") pod \"nova-cell1-cell-mapping-5m4kg\" (UID: \"a73e0803-4e30-45bd-9006-d229e8108430\") " pod="openstack/nova-cell1-cell-mapping-5m4kg"
Nov 23 16:14:32 crc kubenswrapper[5050]: I1123 16:14:32.262545 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-5m4kg"
Nov 23 16:14:33 crc kubenswrapper[5050]: I1123 16:14:32.778597 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-5m4kg"]
Nov 23 16:14:33 crc kubenswrapper[5050]: I1123 16:14:33.120826 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-5m4kg" event={"ID":"a73e0803-4e30-45bd-9006-d229e8108430","Type":"ContainerStarted","Data":"9d6ee5d8dc6993110b46bea1096907946feea512d71596392ccd6a9c01aabe22"}
Nov 23 16:14:33 crc kubenswrapper[5050]: I1123 16:14:33.121287 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-5m4kg" event={"ID":"a73e0803-4e30-45bd-9006-d229e8108430","Type":"ContainerStarted","Data":"aace79558a960b42e65f5aa357ad446cbe98175819bdff8b310c5648ff6c0523"}
Nov 23 16:14:33 crc kubenswrapper[5050]: I1123 16:14:33.148952 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-cell-mapping-5m4kg" podStartSLOduration=2.14893293 podStartE2EDuration="2.14893293s" podCreationTimestamp="2025-11-23 16:14:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 16:14:33.145062751 +0000 UTC m=+5568.312059256" watchObservedRunningTime="2025-11-23 16:14:33.14893293 +0000 UTC m=+5568.315929415"
Nov 23 16:14:33 crc kubenswrapper[5050]: I1123 16:14:33.450845 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0"
Nov 23 16:14:34 crc kubenswrapper[5050]: I1123 16:14:34.711808 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0"
Nov 23 16:14:34 crc kubenswrapper[5050]: I1123 16:14:34.714516 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0"
Nov 23 16:14:34 crc kubenswrapper[5050]: I1123 16:14:34.743285 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0"
Nov 23 16:14:34 crc kubenswrapper[5050]: I1123 16:14:34.743391 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0"
Nov 23 16:14:35 crc kubenswrapper[5050]: I1123 16:14:35.756102 5050 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="c2cb92ab-819c-437a-bbb2-d875552ba38f" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"http://10.217.1.66:8775/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Nov 23 16:14:35 crc kubenswrapper[5050]: I1123 16:14:35.879681 5050 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="1e49c51f-a38c-410a-98c1-39d92784d4eb" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.1.67:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Nov 23 16:14:35 crc kubenswrapper[5050]: I1123 16:14:35.879767 5050 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="c2cb92ab-819c-437a-bbb2-d875552ba38f" containerName="nova-metadata-log" probeResult="failure" output="Get \"http://10.217.1.66:8775/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Nov 23 16:14:35 crc kubenswrapper[5050]: I1123 16:14:35.879874 5050 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="1e49c51f-a38c-410a-98c1-39d92784d4eb" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.1.67:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Nov 23 16:14:38 crc kubenswrapper[5050]: I1123 16:14:38.190351 5050 generic.go:334] "Generic (PLEG): container finished" podID="a73e0803-4e30-45bd-9006-d229e8108430" containerID="9d6ee5d8dc6993110b46bea1096907946feea512d71596392ccd6a9c01aabe22" exitCode=0
Nov 23 16:14:38 crc kubenswrapper[5050]: I1123 16:14:38.190482 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-5m4kg" event={"ID":"a73e0803-4e30-45bd-9006-d229e8108430","Type":"ContainerDied","Data":"9d6ee5d8dc6993110b46bea1096907946feea512d71596392ccd6a9c01aabe22"}
Nov 23 16:14:38 crc kubenswrapper[5050]: I1123 16:14:38.450961 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0"
Nov 23 16:14:38 crc kubenswrapper[5050]: I1123 16:14:38.501187 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0"
Nov 23 16:14:39 crc kubenswrapper[5050]: I1123 16:14:39.237609 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0"
Nov 23 16:14:39 crc kubenswrapper[5050]: I1123 16:14:39.601506 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-5m4kg"
Nov 23 16:14:39 crc kubenswrapper[5050]: I1123 16:14:39.790943 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hx486\" (UniqueName: \"kubernetes.io/projected/a73e0803-4e30-45bd-9006-d229e8108430-kube-api-access-hx486\") pod \"a73e0803-4e30-45bd-9006-d229e8108430\" (UID: \"a73e0803-4e30-45bd-9006-d229e8108430\") "
Nov 23 16:14:39 crc kubenswrapper[5050]: I1123 16:14:39.791062 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a73e0803-4e30-45bd-9006-d229e8108430-combined-ca-bundle\") pod \"a73e0803-4e30-45bd-9006-d229e8108430\" (UID: \"a73e0803-4e30-45bd-9006-d229e8108430\") "
Nov 23 16:14:39 crc kubenswrapper[5050]: I1123 16:14:39.791116 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a73e0803-4e30-45bd-9006-d229e8108430-scripts\") pod \"a73e0803-4e30-45bd-9006-d229e8108430\" (UID: \"a73e0803-4e30-45bd-9006-d229e8108430\") "
Nov 23 16:14:39 crc kubenswrapper[5050]: I1123 16:14:39.791179 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a73e0803-4e30-45bd-9006-d229e8108430-config-data\") pod \"a73e0803-4e30-45bd-9006-d229e8108430\" (UID: \"a73e0803-4e30-45bd-9006-d229e8108430\") "
Nov 23 16:14:39 crc kubenswrapper[5050]: I1123 16:14:39.800558 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a73e0803-4e30-45bd-9006-d229e8108430-scripts" (OuterVolumeSpecName: "scripts") pod "a73e0803-4e30-45bd-9006-d229e8108430" (UID: "a73e0803-4e30-45bd-9006-d229e8108430"). InnerVolumeSpecName "scripts".
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:14:39 crc kubenswrapper[5050]: I1123 16:14:39.802520 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a73e0803-4e30-45bd-9006-d229e8108430-kube-api-access-hx486" (OuterVolumeSpecName: "kube-api-access-hx486") pod "a73e0803-4e30-45bd-9006-d229e8108430" (UID: "a73e0803-4e30-45bd-9006-d229e8108430"). InnerVolumeSpecName "kube-api-access-hx486". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 16:14:39 crc kubenswrapper[5050]: I1123 16:14:39.830684 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a73e0803-4e30-45bd-9006-d229e8108430-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a73e0803-4e30-45bd-9006-d229e8108430" (UID: "a73e0803-4e30-45bd-9006-d229e8108430"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:14:39 crc kubenswrapper[5050]: I1123 16:14:39.836235 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a73e0803-4e30-45bd-9006-d229e8108430-config-data" (OuterVolumeSpecName: "config-data") pod "a73e0803-4e30-45bd-9006-d229e8108430" (UID: "a73e0803-4e30-45bd-9006-d229e8108430"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:14:39 crc kubenswrapper[5050]: I1123 16:14:39.894828 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hx486\" (UniqueName: \"kubernetes.io/projected/a73e0803-4e30-45bd-9006-d229e8108430-kube-api-access-hx486\") on node \"crc\" DevicePath \"\"" Nov 23 16:14:39 crc kubenswrapper[5050]: I1123 16:14:39.894868 5050 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a73e0803-4e30-45bd-9006-d229e8108430-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 16:14:39 crc kubenswrapper[5050]: I1123 16:14:39.894880 5050 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a73e0803-4e30-45bd-9006-d229e8108430-scripts\") on node \"crc\" DevicePath \"\"" Nov 23 16:14:39 crc kubenswrapper[5050]: I1123 16:14:39.894894 5050 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a73e0803-4e30-45bd-9006-d229e8108430-config-data\") on node \"crc\" DevicePath \"\"" Nov 23 16:14:40 crc kubenswrapper[5050]: I1123 16:14:40.213540 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-5m4kg" event={"ID":"a73e0803-4e30-45bd-9006-d229e8108430","Type":"ContainerDied","Data":"aace79558a960b42e65f5aa357ad446cbe98175819bdff8b310c5648ff6c0523"} Nov 23 16:14:40 crc kubenswrapper[5050]: I1123 16:14:40.213944 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="aace79558a960b42e65f5aa357ad446cbe98175819bdff8b310c5648ff6c0523" Nov 23 16:14:40 crc kubenswrapper[5050]: I1123 16:14:40.213692 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-5m4kg" Nov 23 16:14:40 crc kubenswrapper[5050]: I1123 16:14:40.440087 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 23 16:14:40 crc kubenswrapper[5050]: I1123 16:14:40.440619 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="1e49c51f-a38c-410a-98c1-39d92784d4eb" containerName="nova-api-log" containerID="cri-o://d22af8bdb1c1472263d6b1a48924c623b56baa63b8d02b5350a63356ef6f336f" gracePeriod=30 Nov 23 16:14:40 crc kubenswrapper[5050]: I1123 16:14:40.440715 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="1e49c51f-a38c-410a-98c1-39d92784d4eb" containerName="nova-api-api" containerID="cri-o://9a18699848dd6e5ad0c5e1a3d342c04f8d6b6f2a6bb8e2f69d52ad54001b32a1" gracePeriod=30 Nov 23 16:14:40 crc kubenswrapper[5050]: I1123 16:14:40.514015 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 23 16:14:40 crc kubenswrapper[5050]: I1123 16:14:40.602282 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 23 16:14:40 crc kubenswrapper[5050]: I1123 16:14:40.602574 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="c2cb92ab-819c-437a-bbb2-d875552ba38f" containerName="nova-metadata-log" containerID="cri-o://d3b6cf58cf5efa14bc8029cad5c42c4ed0b3eb6844c597c1e28f40c96567d5ab" gracePeriod=30 Nov 23 16:14:40 crc kubenswrapper[5050]: I1123 16:14:40.602716 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="c2cb92ab-819c-437a-bbb2-d875552ba38f" containerName="nova-metadata-metadata" containerID="cri-o://8c6a539226f2db8bbacbfef333c5b628046a9e7d8b3ce7e7f77410882543729e" gracePeriod=30 Nov 23 16:14:41 crc kubenswrapper[5050]: I1123 16:14:41.229431 5050 generic.go:334] "Generic (PLEG): container finished" podID="1e49c51f-a38c-410a-98c1-39d92784d4eb" containerID="d22af8bdb1c1472263d6b1a48924c623b56baa63b8d02b5350a63356ef6f336f" exitCode=143 Nov 23 16:14:41 crc kubenswrapper[5050]: I1123 16:14:41.229486 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"1e49c51f-a38c-410a-98c1-39d92784d4eb","Type":"ContainerDied","Data":"d22af8bdb1c1472263d6b1a48924c623b56baa63b8d02b5350a63356ef6f336f"} Nov 23 16:14:41 crc kubenswrapper[5050]: I1123 16:14:41.232109 5050 generic.go:334] "Generic (PLEG): container finished" podID="c2cb92ab-819c-437a-bbb2-d875552ba38f" containerID="d3b6cf58cf5efa14bc8029cad5c42c4ed0b3eb6844c597c1e28f40c96567d5ab" exitCode=143 Nov 23 16:14:41 crc kubenswrapper[5050]: I1123 16:14:41.232186 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"c2cb92ab-819c-437a-bbb2-d875552ba38f","Type":"ContainerDied","Data":"d3b6cf58cf5efa14bc8029cad5c42c4ed0b3eb6844c597c1e28f40c96567d5ab"} Nov 23 16:14:41 crc kubenswrapper[5050]: I1123 16:14:41.232311 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="ad227747-4a44-4fc2-be9e-64f911bdd923" containerName="nova-scheduler-scheduler" containerID="cri-o://c6bb79375086dd527994f11bb4946119ce1e75f49d786a78d088d1fc09d0bf0b" gracePeriod=30 Nov 23 16:14:43 crc kubenswrapper[5050]: E1123 16:14:43.455051 5050 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command 
error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="c6bb79375086dd527994f11bb4946119ce1e75f49d786a78d088d1fc09d0bf0b" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 23 16:14:43 crc kubenswrapper[5050]: E1123 16:14:43.458543 5050 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="c6bb79375086dd527994f11bb4946119ce1e75f49d786a78d088d1fc09d0bf0b" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 23 16:14:43 crc kubenswrapper[5050]: E1123 16:14:43.460838 5050 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="c6bb79375086dd527994f11bb4946119ce1e75f49d786a78d088d1fc09d0bf0b" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 23 16:14:43 crc kubenswrapper[5050]: E1123 16:14:43.460928 5050 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="ad227747-4a44-4fc2-be9e-64f911bdd923" containerName="nova-scheduler-scheduler" Nov 23 16:14:44 crc kubenswrapper[5050]: I1123 16:14:44.227115 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 23 16:14:44 crc kubenswrapper[5050]: I1123 16:14:44.233462 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 23 16:14:44 crc kubenswrapper[5050]: I1123 16:14:44.290030 5050 generic.go:334] "Generic (PLEG): container finished" podID="c2cb92ab-819c-437a-bbb2-d875552ba38f" containerID="8c6a539226f2db8bbacbfef333c5b628046a9e7d8b3ce7e7f77410882543729e" exitCode=0 Nov 23 16:14:44 crc kubenswrapper[5050]: I1123 16:14:44.290132 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"c2cb92ab-819c-437a-bbb2-d875552ba38f","Type":"ContainerDied","Data":"8c6a539226f2db8bbacbfef333c5b628046a9e7d8b3ce7e7f77410882543729e"} Nov 23 16:14:44 crc kubenswrapper[5050]: I1123 16:14:44.290186 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"c2cb92ab-819c-437a-bbb2-d875552ba38f","Type":"ContainerDied","Data":"811605e59a36ac173054e8ce3042b1b318f6bb3d76995cf64eebf3a1d0cf4248"} Nov 23 16:14:44 crc kubenswrapper[5050]: I1123 16:14:44.290209 5050 scope.go:117] "RemoveContainer" containerID="8c6a539226f2db8bbacbfef333c5b628046a9e7d8b3ce7e7f77410882543729e" Nov 23 16:14:44 crc kubenswrapper[5050]: I1123 16:14:44.290623 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 23 16:14:44 crc kubenswrapper[5050]: I1123 16:14:44.298065 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d55bx\" (UniqueName: \"kubernetes.io/projected/c2cb92ab-819c-437a-bbb2-d875552ba38f-kube-api-access-d55bx\") pod \"c2cb92ab-819c-437a-bbb2-d875552ba38f\" (UID: \"c2cb92ab-819c-437a-bbb2-d875552ba38f\") " Nov 23 16:14:44 crc kubenswrapper[5050]: I1123 16:14:44.298333 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bc8x5\" (UniqueName: \"kubernetes.io/projected/1e49c51f-a38c-410a-98c1-39d92784d4eb-kube-api-access-bc8x5\") pod \"1e49c51f-a38c-410a-98c1-39d92784d4eb\" (UID: \"1e49c51f-a38c-410a-98c1-39d92784d4eb\") " Nov 23 16:14:44 crc kubenswrapper[5050]: I1123 16:14:44.298506 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1e49c51f-a38c-410a-98c1-39d92784d4eb-combined-ca-bundle\") pod \"1e49c51f-a38c-410a-98c1-39d92784d4eb\" (UID: \"1e49c51f-a38c-410a-98c1-39d92784d4eb\") " Nov 23 16:14:44 crc kubenswrapper[5050]: I1123 16:14:44.298591 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c2cb92ab-819c-437a-bbb2-d875552ba38f-logs\") pod \"c2cb92ab-819c-437a-bbb2-d875552ba38f\" (UID: \"c2cb92ab-819c-437a-bbb2-d875552ba38f\") " Nov 23 16:14:44 crc kubenswrapper[5050]: I1123 16:14:44.298745 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1e49c51f-a38c-410a-98c1-39d92784d4eb-logs\") pod \"1e49c51f-a38c-410a-98c1-39d92784d4eb\" (UID: \"1e49c51f-a38c-410a-98c1-39d92784d4eb\") " Nov 23 16:14:44 crc kubenswrapper[5050]: I1123 16:14:44.299144 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1e49c51f-a38c-410a-98c1-39d92784d4eb-config-data\") pod \"1e49c51f-a38c-410a-98c1-39d92784d4eb\" (UID: \"1e49c51f-a38c-410a-98c1-39d92784d4eb\") " Nov 23 16:14:44 crc kubenswrapper[5050]: I1123 16:14:44.299306 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c2cb92ab-819c-437a-bbb2-d875552ba38f-config-data\") pod \"c2cb92ab-819c-437a-bbb2-d875552ba38f\" (UID: \"c2cb92ab-819c-437a-bbb2-d875552ba38f\") " Nov 23 16:14:44 crc kubenswrapper[5050]: I1123 16:14:44.299394 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c2cb92ab-819c-437a-bbb2-d875552ba38f-combined-ca-bundle\") pod \"c2cb92ab-819c-437a-bbb2-d875552ba38f\" (UID: \"c2cb92ab-819c-437a-bbb2-d875552ba38f\") " Nov 23 16:14:44 crc kubenswrapper[5050]: I1123 16:14:44.300839 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c2cb92ab-819c-437a-bbb2-d875552ba38f-logs" (OuterVolumeSpecName: "logs") pod "c2cb92ab-819c-437a-bbb2-d875552ba38f" (UID: "c2cb92ab-819c-437a-bbb2-d875552ba38f"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 16:14:44 crc kubenswrapper[5050]: I1123 16:14:44.300871 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1e49c51f-a38c-410a-98c1-39d92784d4eb-logs" (OuterVolumeSpecName: "logs") pod "1e49c51f-a38c-410a-98c1-39d92784d4eb" (UID: "1e49c51f-a38c-410a-98c1-39d92784d4eb"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 16:14:44 crc kubenswrapper[5050]: I1123 16:14:44.301056 5050 generic.go:334] "Generic (PLEG): container finished" podID="1e49c51f-a38c-410a-98c1-39d92784d4eb" containerID="9a18699848dd6e5ad0c5e1a3d342c04f8d6b6f2a6bb8e2f69d52ad54001b32a1" exitCode=0 Nov 23 16:14:44 crc kubenswrapper[5050]: I1123 16:14:44.301125 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"1e49c51f-a38c-410a-98c1-39d92784d4eb","Type":"ContainerDied","Data":"9a18699848dd6e5ad0c5e1a3d342c04f8d6b6f2a6bb8e2f69d52ad54001b32a1"} Nov 23 16:14:44 crc kubenswrapper[5050]: I1123 16:14:44.301162 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"1e49c51f-a38c-410a-98c1-39d92784d4eb","Type":"ContainerDied","Data":"d8f06a98fbd5d242140a29842154dcc83151510d95f60bc18467acd45cb8c451"} Nov 23 16:14:44 crc kubenswrapper[5050]: I1123 16:14:44.301253 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 23 16:14:44 crc kubenswrapper[5050]: I1123 16:14:44.301538 5050 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1e49c51f-a38c-410a-98c1-39d92784d4eb-logs\") on node \"crc\" DevicePath \"\"" Nov 23 16:14:44 crc kubenswrapper[5050]: I1123 16:14:44.301561 5050 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c2cb92ab-819c-437a-bbb2-d875552ba38f-logs\") on node \"crc\" DevicePath \"\"" Nov 23 16:14:44 crc kubenswrapper[5050]: I1123 16:14:44.306771 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c2cb92ab-819c-437a-bbb2-d875552ba38f-kube-api-access-d55bx" (OuterVolumeSpecName: "kube-api-access-d55bx") pod "c2cb92ab-819c-437a-bbb2-d875552ba38f" (UID: "c2cb92ab-819c-437a-bbb2-d875552ba38f"). InnerVolumeSpecName "kube-api-access-d55bx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 16:14:44 crc kubenswrapper[5050]: I1123 16:14:44.315322 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1e49c51f-a38c-410a-98c1-39d92784d4eb-kube-api-access-bc8x5" (OuterVolumeSpecName: "kube-api-access-bc8x5") pod "1e49c51f-a38c-410a-98c1-39d92784d4eb" (UID: "1e49c51f-a38c-410a-98c1-39d92784d4eb"). InnerVolumeSpecName "kube-api-access-bc8x5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 16:14:44 crc kubenswrapper[5050]: I1123 16:14:44.332178 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c2cb92ab-819c-437a-bbb2-d875552ba38f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c2cb92ab-819c-437a-bbb2-d875552ba38f" (UID: "c2cb92ab-819c-437a-bbb2-d875552ba38f"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:14:44 crc kubenswrapper[5050]: I1123 16:14:44.334748 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1e49c51f-a38c-410a-98c1-39d92784d4eb-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1e49c51f-a38c-410a-98c1-39d92784d4eb" (UID: "1e49c51f-a38c-410a-98c1-39d92784d4eb"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:14:44 crc kubenswrapper[5050]: I1123 16:14:44.336073 5050 scope.go:117] "RemoveContainer" containerID="d3b6cf58cf5efa14bc8029cad5c42c4ed0b3eb6844c597c1e28f40c96567d5ab" Nov 23 16:14:44 crc kubenswrapper[5050]: I1123 16:14:44.352223 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c2cb92ab-819c-437a-bbb2-d875552ba38f-config-data" (OuterVolumeSpecName: "config-data") pod "c2cb92ab-819c-437a-bbb2-d875552ba38f" (UID: "c2cb92ab-819c-437a-bbb2-d875552ba38f"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:14:44 crc kubenswrapper[5050]: I1123 16:14:44.354236 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1e49c51f-a38c-410a-98c1-39d92784d4eb-config-data" (OuterVolumeSpecName: "config-data") pod "1e49c51f-a38c-410a-98c1-39d92784d4eb" (UID: "1e49c51f-a38c-410a-98c1-39d92784d4eb"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:14:44 crc kubenswrapper[5050]: I1123 16:14:44.359383 5050 scope.go:117] "RemoveContainer" containerID="8c6a539226f2db8bbacbfef333c5b628046a9e7d8b3ce7e7f77410882543729e" Nov 23 16:14:44 crc kubenswrapper[5050]: E1123 16:14:44.360164 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8c6a539226f2db8bbacbfef333c5b628046a9e7d8b3ce7e7f77410882543729e\": container with ID starting with 8c6a539226f2db8bbacbfef333c5b628046a9e7d8b3ce7e7f77410882543729e not found: ID does not exist" containerID="8c6a539226f2db8bbacbfef333c5b628046a9e7d8b3ce7e7f77410882543729e" Nov 23 16:14:44 crc kubenswrapper[5050]: I1123 16:14:44.360312 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8c6a539226f2db8bbacbfef333c5b628046a9e7d8b3ce7e7f77410882543729e"} err="failed to get container status \"8c6a539226f2db8bbacbfef333c5b628046a9e7d8b3ce7e7f77410882543729e\": rpc error: code = NotFound desc = could not find container \"8c6a539226f2db8bbacbfef333c5b628046a9e7d8b3ce7e7f77410882543729e\": container with ID starting with 8c6a539226f2db8bbacbfef333c5b628046a9e7d8b3ce7e7f77410882543729e not found: ID does not exist" Nov 23 16:14:44 crc kubenswrapper[5050]: I1123 16:14:44.360441 5050 scope.go:117] "RemoveContainer" containerID="d3b6cf58cf5efa14bc8029cad5c42c4ed0b3eb6844c597c1e28f40c96567d5ab" Nov 23 16:14:44 crc kubenswrapper[5050]: E1123 16:14:44.360995 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d3b6cf58cf5efa14bc8029cad5c42c4ed0b3eb6844c597c1e28f40c96567d5ab\": container with ID starting with d3b6cf58cf5efa14bc8029cad5c42c4ed0b3eb6844c597c1e28f40c96567d5ab not found: ID does not exist" containerID="d3b6cf58cf5efa14bc8029cad5c42c4ed0b3eb6844c597c1e28f40c96567d5ab" Nov 23 16:14:44 crc kubenswrapper[5050]: I1123 16:14:44.361026 5050 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"d3b6cf58cf5efa14bc8029cad5c42c4ed0b3eb6844c597c1e28f40c96567d5ab"} err="failed to get container status \"d3b6cf58cf5efa14bc8029cad5c42c4ed0b3eb6844c597c1e28f40c96567d5ab\": rpc error: code = NotFound desc = could not find container \"d3b6cf58cf5efa14bc8029cad5c42c4ed0b3eb6844c597c1e28f40c96567d5ab\": container with ID starting with d3b6cf58cf5efa14bc8029cad5c42c4ed0b3eb6844c597c1e28f40c96567d5ab not found: ID does not exist" Nov 23 16:14:44 crc kubenswrapper[5050]: I1123 16:14:44.361040 5050 scope.go:117] "RemoveContainer" containerID="9a18699848dd6e5ad0c5e1a3d342c04f8d6b6f2a6bb8e2f69d52ad54001b32a1" Nov 23 16:14:44 crc kubenswrapper[5050]: I1123 16:14:44.391737 5050 scope.go:117] "RemoveContainer" containerID="d22af8bdb1c1472263d6b1a48924c623b56baa63b8d02b5350a63356ef6f336f" Nov 23 16:14:44 crc kubenswrapper[5050]: I1123 16:14:44.402927 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bc8x5\" (UniqueName: \"kubernetes.io/projected/1e49c51f-a38c-410a-98c1-39d92784d4eb-kube-api-access-bc8x5\") on node \"crc\" DevicePath \"\"" Nov 23 16:14:44 crc kubenswrapper[5050]: I1123 16:14:44.402967 5050 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1e49c51f-a38c-410a-98c1-39d92784d4eb-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 16:14:44 crc kubenswrapper[5050]: I1123 16:14:44.402978 5050 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1e49c51f-a38c-410a-98c1-39d92784d4eb-config-data\") on node \"crc\" DevicePath \"\"" Nov 23 16:14:44 crc kubenswrapper[5050]: I1123 16:14:44.402987 5050 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c2cb92ab-819c-437a-bbb2-d875552ba38f-config-data\") on node \"crc\" DevicePath \"\"" Nov 23 16:14:44 crc kubenswrapper[5050]: I1123 16:14:44.402996 5050 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c2cb92ab-819c-437a-bbb2-d875552ba38f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 16:14:44 crc kubenswrapper[5050]: I1123 16:14:44.403008 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d55bx\" (UniqueName: \"kubernetes.io/projected/c2cb92ab-819c-437a-bbb2-d875552ba38f-kube-api-access-d55bx\") on node \"crc\" DevicePath \"\"" Nov 23 16:14:44 crc kubenswrapper[5050]: I1123 16:14:44.419658 5050 scope.go:117] "RemoveContainer" containerID="9a18699848dd6e5ad0c5e1a3d342c04f8d6b6f2a6bb8e2f69d52ad54001b32a1" Nov 23 16:14:44 crc kubenswrapper[5050]: E1123 16:14:44.420406 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9a18699848dd6e5ad0c5e1a3d342c04f8d6b6f2a6bb8e2f69d52ad54001b32a1\": container with ID starting with 9a18699848dd6e5ad0c5e1a3d342c04f8d6b6f2a6bb8e2f69d52ad54001b32a1 not found: ID does not exist" containerID="9a18699848dd6e5ad0c5e1a3d342c04f8d6b6f2a6bb8e2f69d52ad54001b32a1" Nov 23 16:14:44 crc kubenswrapper[5050]: I1123 16:14:44.420630 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9a18699848dd6e5ad0c5e1a3d342c04f8d6b6f2a6bb8e2f69d52ad54001b32a1"} err="failed to get container status \"9a18699848dd6e5ad0c5e1a3d342c04f8d6b6f2a6bb8e2f69d52ad54001b32a1\": rpc error: code = NotFound desc = could not find container 
\"9a18699848dd6e5ad0c5e1a3d342c04f8d6b6f2a6bb8e2f69d52ad54001b32a1\": container with ID starting with 9a18699848dd6e5ad0c5e1a3d342c04f8d6b6f2a6bb8e2f69d52ad54001b32a1 not found: ID does not exist" Nov 23 16:14:44 crc kubenswrapper[5050]: I1123 16:14:44.420781 5050 scope.go:117] "RemoveContainer" containerID="d22af8bdb1c1472263d6b1a48924c623b56baa63b8d02b5350a63356ef6f336f" Nov 23 16:14:44 crc kubenswrapper[5050]: E1123 16:14:44.421506 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d22af8bdb1c1472263d6b1a48924c623b56baa63b8d02b5350a63356ef6f336f\": container with ID starting with d22af8bdb1c1472263d6b1a48924c623b56baa63b8d02b5350a63356ef6f336f not found: ID does not exist" containerID="d22af8bdb1c1472263d6b1a48924c623b56baa63b8d02b5350a63356ef6f336f" Nov 23 16:14:44 crc kubenswrapper[5050]: I1123 16:14:44.421565 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d22af8bdb1c1472263d6b1a48924c623b56baa63b8d02b5350a63356ef6f336f"} err="failed to get container status \"d22af8bdb1c1472263d6b1a48924c623b56baa63b8d02b5350a63356ef6f336f\": rpc error: code = NotFound desc = could not find container \"d22af8bdb1c1472263d6b1a48924c623b56baa63b8d02b5350a63356ef6f336f\": container with ID starting with d22af8bdb1c1472263d6b1a48924c623b56baa63b8d02b5350a63356ef6f336f not found: ID does not exist" Nov 23 16:14:44 crc kubenswrapper[5050]: I1123 16:14:44.656774 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 23 16:14:44 crc kubenswrapper[5050]: I1123 16:14:44.685959 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 23 16:14:44 crc kubenswrapper[5050]: I1123 16:14:44.713252 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 23 16:14:44 crc kubenswrapper[5050]: E1123 16:14:44.722603 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c2cb92ab-819c-437a-bbb2-d875552ba38f" containerName="nova-metadata-log" Nov 23 16:14:44 crc kubenswrapper[5050]: I1123 16:14:44.722650 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="c2cb92ab-819c-437a-bbb2-d875552ba38f" containerName="nova-metadata-log" Nov 23 16:14:44 crc kubenswrapper[5050]: E1123 16:14:44.722669 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a73e0803-4e30-45bd-9006-d229e8108430" containerName="nova-manage" Nov 23 16:14:44 crc kubenswrapper[5050]: I1123 16:14:44.722681 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="a73e0803-4e30-45bd-9006-d229e8108430" containerName="nova-manage" Nov 23 16:14:44 crc kubenswrapper[5050]: E1123 16:14:44.722710 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c2cb92ab-819c-437a-bbb2-d875552ba38f" containerName="nova-metadata-metadata" Nov 23 16:14:44 crc kubenswrapper[5050]: I1123 16:14:44.722720 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="c2cb92ab-819c-437a-bbb2-d875552ba38f" containerName="nova-metadata-metadata" Nov 23 16:14:44 crc kubenswrapper[5050]: E1123 16:14:44.722753 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1e49c51f-a38c-410a-98c1-39d92784d4eb" containerName="nova-api-log" Nov 23 16:14:44 crc kubenswrapper[5050]: I1123 16:14:44.722764 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="1e49c51f-a38c-410a-98c1-39d92784d4eb" containerName="nova-api-log" Nov 23 16:14:44 crc kubenswrapper[5050]: E1123 16:14:44.722784 5050 
cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1e49c51f-a38c-410a-98c1-39d92784d4eb" containerName="nova-api-api" Nov 23 16:14:44 crc kubenswrapper[5050]: I1123 16:14:44.722793 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="1e49c51f-a38c-410a-98c1-39d92784d4eb" containerName="nova-api-api" Nov 23 16:14:44 crc kubenswrapper[5050]: I1123 16:14:44.723138 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="c2cb92ab-819c-437a-bbb2-d875552ba38f" containerName="nova-metadata-metadata" Nov 23 16:14:44 crc kubenswrapper[5050]: I1123 16:14:44.723153 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="a73e0803-4e30-45bd-9006-d229e8108430" containerName="nova-manage" Nov 23 16:14:44 crc kubenswrapper[5050]: I1123 16:14:44.723164 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="c2cb92ab-819c-437a-bbb2-d875552ba38f" containerName="nova-metadata-log" Nov 23 16:14:44 crc kubenswrapper[5050]: I1123 16:14:44.723185 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="1e49c51f-a38c-410a-98c1-39d92784d4eb" containerName="nova-api-api" Nov 23 16:14:44 crc kubenswrapper[5050]: I1123 16:14:44.723198 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="1e49c51f-a38c-410a-98c1-39d92784d4eb" containerName="nova-api-log" Nov 23 16:14:44 crc kubenswrapper[5050]: I1123 16:14:44.727508 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 23 16:14:44 crc kubenswrapper[5050]: I1123 16:14:44.732791 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 23 16:14:44 crc kubenswrapper[5050]: I1123 16:14:44.739780 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 23 16:14:44 crc kubenswrapper[5050]: I1123 16:14:44.750825 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 23 16:14:44 crc kubenswrapper[5050]: I1123 16:14:44.761576 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 23 16:14:44 crc kubenswrapper[5050]: I1123 16:14:44.771574 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 23 16:14:44 crc kubenswrapper[5050]: I1123 16:14:44.773749 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 23 16:14:44 crc kubenswrapper[5050]: I1123 16:14:44.776938 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 23 16:14:44 crc kubenswrapper[5050]: I1123 16:14:44.781704 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 23 16:14:44 crc kubenswrapper[5050]: I1123 16:14:44.914836 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b43a331c-cdd6-41e1-b962-ce6709d1d200-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"b43a331c-cdd6-41e1-b962-ce6709d1d200\") " pod="openstack/nova-api-0" Nov 23 16:14:44 crc kubenswrapper[5050]: I1123 16:14:44.915312 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5954b03a-534a-435b-9106-6be013bcecb1-config-data\") pod \"nova-metadata-0\" (UID: \"5954b03a-534a-435b-9106-6be013bcecb1\") " pod="openstack/nova-metadata-0" Nov 23 16:14:44 crc kubenswrapper[5050]: I1123 16:14:44.915491 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b43a331c-cdd6-41e1-b962-ce6709d1d200-config-data\") pod \"nova-api-0\" (UID: \"b43a331c-cdd6-41e1-b962-ce6709d1d200\") " pod="openstack/nova-api-0" Nov 23 16:14:44 crc kubenswrapper[5050]: I1123 16:14:44.915639 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cnjnv\" (UniqueName: \"kubernetes.io/projected/b43a331c-cdd6-41e1-b962-ce6709d1d200-kube-api-access-cnjnv\") pod \"nova-api-0\" (UID: \"b43a331c-cdd6-41e1-b962-ce6709d1d200\") " pod="openstack/nova-api-0" Nov 23 16:14:44 crc kubenswrapper[5050]: I1123 16:14:44.915703 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-brkct\" (UniqueName: \"kubernetes.io/projected/5954b03a-534a-435b-9106-6be013bcecb1-kube-api-access-brkct\") pod \"nova-metadata-0\" (UID: \"5954b03a-534a-435b-9106-6be013bcecb1\") " pod="openstack/nova-metadata-0" Nov 23 16:14:44 crc kubenswrapper[5050]: I1123 16:14:44.915812 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5954b03a-534a-435b-9106-6be013bcecb1-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"5954b03a-534a-435b-9106-6be013bcecb1\") " pod="openstack/nova-metadata-0" Nov 23 16:14:44 crc kubenswrapper[5050]: I1123 16:14:44.916055 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b43a331c-cdd6-41e1-b962-ce6709d1d200-logs\") pod \"nova-api-0\" (UID: \"b43a331c-cdd6-41e1-b962-ce6709d1d200\") " pod="openstack/nova-api-0" Nov 23 16:14:44 crc kubenswrapper[5050]: I1123 16:14:44.916117 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5954b03a-534a-435b-9106-6be013bcecb1-logs\") pod \"nova-metadata-0\" (UID: \"5954b03a-534a-435b-9106-6be013bcecb1\") " pod="openstack/nova-metadata-0" Nov 23 16:14:45 crc kubenswrapper[5050]: I1123 16:14:45.019420 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/5954b03a-534a-435b-9106-6be013bcecb1-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"5954b03a-534a-435b-9106-6be013bcecb1\") " pod="openstack/nova-metadata-0" Nov 23 16:14:45 crc kubenswrapper[5050]: I1123 16:14:45.019650 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b43a331c-cdd6-41e1-b962-ce6709d1d200-logs\") pod \"nova-api-0\" (UID: \"b43a331c-cdd6-41e1-b962-ce6709d1d200\") " pod="openstack/nova-api-0" Nov 23 16:14:45 crc kubenswrapper[5050]: I1123 16:14:45.019843 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5954b03a-534a-435b-9106-6be013bcecb1-logs\") pod \"nova-metadata-0\" (UID: \"5954b03a-534a-435b-9106-6be013bcecb1\") " pod="openstack/nova-metadata-0" Nov 23 16:14:45 crc kubenswrapper[5050]: I1123 16:14:45.019961 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b43a331c-cdd6-41e1-b962-ce6709d1d200-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"b43a331c-cdd6-41e1-b962-ce6709d1d200\") " pod="openstack/nova-api-0" Nov 23 16:14:45 crc kubenswrapper[5050]: I1123 16:14:45.020145 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5954b03a-534a-435b-9106-6be013bcecb1-config-data\") pod \"nova-metadata-0\" (UID: \"5954b03a-534a-435b-9106-6be013bcecb1\") " pod="openstack/nova-metadata-0" Nov 23 16:14:45 crc kubenswrapper[5050]: I1123 16:14:45.020329 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b43a331c-cdd6-41e1-b962-ce6709d1d200-config-data\") pod \"nova-api-0\" (UID: \"b43a331c-cdd6-41e1-b962-ce6709d1d200\") " pod="openstack/nova-api-0" Nov 23 16:14:45 crc kubenswrapper[5050]: I1123 16:14:45.020510 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cnjnv\" (UniqueName: \"kubernetes.io/projected/b43a331c-cdd6-41e1-b962-ce6709d1d200-kube-api-access-cnjnv\") pod \"nova-api-0\" (UID: \"b43a331c-cdd6-41e1-b962-ce6709d1d200\") " pod="openstack/nova-api-0" Nov 23 16:14:45 crc kubenswrapper[5050]: I1123 16:14:45.020617 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-brkct\" (UniqueName: \"kubernetes.io/projected/5954b03a-534a-435b-9106-6be013bcecb1-kube-api-access-brkct\") pod \"nova-metadata-0\" (UID: \"5954b03a-534a-435b-9106-6be013bcecb1\") " pod="openstack/nova-metadata-0" Nov 23 16:14:45 crc kubenswrapper[5050]: I1123 16:14:45.021206 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5954b03a-534a-435b-9106-6be013bcecb1-logs\") pod \"nova-metadata-0\" (UID: \"5954b03a-534a-435b-9106-6be013bcecb1\") " pod="openstack/nova-metadata-0" Nov 23 16:14:45 crc kubenswrapper[5050]: I1123 16:14:45.021552 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b43a331c-cdd6-41e1-b962-ce6709d1d200-logs\") pod \"nova-api-0\" (UID: \"b43a331c-cdd6-41e1-b962-ce6709d1d200\") " pod="openstack/nova-api-0" Nov 23 16:14:45 crc kubenswrapper[5050]: I1123 16:14:45.024753 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/5954b03a-534a-435b-9106-6be013bcecb1-config-data\") pod \"nova-metadata-0\" (UID: \"5954b03a-534a-435b-9106-6be013bcecb1\") " pod="openstack/nova-metadata-0" Nov 23 16:14:45 crc kubenswrapper[5050]: I1123 16:14:45.025149 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b43a331c-cdd6-41e1-b962-ce6709d1d200-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"b43a331c-cdd6-41e1-b962-ce6709d1d200\") " pod="openstack/nova-api-0" Nov 23 16:14:45 crc kubenswrapper[5050]: I1123 16:14:45.025499 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5954b03a-534a-435b-9106-6be013bcecb1-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"5954b03a-534a-435b-9106-6be013bcecb1\") " pod="openstack/nova-metadata-0" Nov 23 16:14:45 crc kubenswrapper[5050]: I1123 16:14:45.030389 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b43a331c-cdd6-41e1-b962-ce6709d1d200-config-data\") pod \"nova-api-0\" (UID: \"b43a331c-cdd6-41e1-b962-ce6709d1d200\") " pod="openstack/nova-api-0" Nov 23 16:14:45 crc kubenswrapper[5050]: I1123 16:14:45.041586 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cnjnv\" (UniqueName: \"kubernetes.io/projected/b43a331c-cdd6-41e1-b962-ce6709d1d200-kube-api-access-cnjnv\") pod \"nova-api-0\" (UID: \"b43a331c-cdd6-41e1-b962-ce6709d1d200\") " pod="openstack/nova-api-0" Nov 23 16:14:45 crc kubenswrapper[5050]: I1123 16:14:45.044007 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-brkct\" (UniqueName: \"kubernetes.io/projected/5954b03a-534a-435b-9106-6be013bcecb1-kube-api-access-brkct\") pod \"nova-metadata-0\" (UID: \"5954b03a-534a-435b-9106-6be013bcecb1\") " pod="openstack/nova-metadata-0" Nov 23 16:14:45 crc kubenswrapper[5050]: I1123 16:14:45.071682 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 23 16:14:45 crc kubenswrapper[5050]: I1123 16:14:45.103549 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 23 16:14:45 crc kubenswrapper[5050]: I1123 16:14:45.571850 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1e49c51f-a38c-410a-98c1-39d92784d4eb" path="/var/lib/kubelet/pods/1e49c51f-a38c-410a-98c1-39d92784d4eb/volumes" Nov 23 16:14:45 crc kubenswrapper[5050]: I1123 16:14:45.573422 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c2cb92ab-819c-437a-bbb2-d875552ba38f" path="/var/lib/kubelet/pods/c2cb92ab-819c-437a-bbb2-d875552ba38f/volumes" Nov 23 16:14:45 crc kubenswrapper[5050]: I1123 16:14:45.618468 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 23 16:14:45 crc kubenswrapper[5050]: W1123 16:14:45.626662 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5954b03a_534a_435b_9106_6be013bcecb1.slice/crio-fbb088b25078e73453fa6dcb49f54ba18565a6b4724c918713c41effe5074752 WatchSource:0}: Error finding container fbb088b25078e73453fa6dcb49f54ba18565a6b4724c918713c41effe5074752: Status 404 returned error can't find the container with id fbb088b25078e73453fa6dcb49f54ba18565a6b4724c918713c41effe5074752 Nov 23 16:14:45 crc kubenswrapper[5050]: I1123 16:14:45.666699 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 23 16:14:45 crc kubenswrapper[5050]: W1123 16:14:45.675988 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb43a331c_cdd6_41e1_b962_ce6709d1d200.slice/crio-572e23ba368571fe4e94de8c03b9fbbef2b79c6bde612d9c523c56b9ebf03a45 WatchSource:0}: Error finding container 572e23ba368571fe4e94de8c03b9fbbef2b79c6bde612d9c523c56b9ebf03a45: Status 404 returned error can't find the container with id 572e23ba368571fe4e94de8c03b9fbbef2b79c6bde612d9c523c56b9ebf03a45 Nov 23 16:14:45 crc kubenswrapper[5050]: I1123 16:14:45.840457 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 23 16:14:45 crc kubenswrapper[5050]: I1123 16:14:45.944353 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ad227747-4a44-4fc2-be9e-64f911bdd923-config-data\") pod \"ad227747-4a44-4fc2-be9e-64f911bdd923\" (UID: \"ad227747-4a44-4fc2-be9e-64f911bdd923\") " Nov 23 16:14:45 crc kubenswrapper[5050]: I1123 16:14:45.944476 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gshkj\" (UniqueName: \"kubernetes.io/projected/ad227747-4a44-4fc2-be9e-64f911bdd923-kube-api-access-gshkj\") pod \"ad227747-4a44-4fc2-be9e-64f911bdd923\" (UID: \"ad227747-4a44-4fc2-be9e-64f911bdd923\") " Nov 23 16:14:45 crc kubenswrapper[5050]: I1123 16:14:45.944731 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ad227747-4a44-4fc2-be9e-64f911bdd923-combined-ca-bundle\") pod \"ad227747-4a44-4fc2-be9e-64f911bdd923\" (UID: \"ad227747-4a44-4fc2-be9e-64f911bdd923\") " Nov 23 16:14:45 crc kubenswrapper[5050]: I1123 16:14:45.949472 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ad227747-4a44-4fc2-be9e-64f911bdd923-kube-api-access-gshkj" (OuterVolumeSpecName: "kube-api-access-gshkj") pod "ad227747-4a44-4fc2-be9e-64f911bdd923" (UID: "ad227747-4a44-4fc2-be9e-64f911bdd923"). 
InnerVolumeSpecName "kube-api-access-gshkj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 16:14:45 crc kubenswrapper[5050]: I1123 16:14:45.974706 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ad227747-4a44-4fc2-be9e-64f911bdd923-config-data" (OuterVolumeSpecName: "config-data") pod "ad227747-4a44-4fc2-be9e-64f911bdd923" (UID: "ad227747-4a44-4fc2-be9e-64f911bdd923"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:14:45 crc kubenswrapper[5050]: I1123 16:14:45.987728 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ad227747-4a44-4fc2-be9e-64f911bdd923-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ad227747-4a44-4fc2-be9e-64f911bdd923" (UID: "ad227747-4a44-4fc2-be9e-64f911bdd923"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:14:46 crc kubenswrapper[5050]: I1123 16:14:46.048901 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gshkj\" (UniqueName: \"kubernetes.io/projected/ad227747-4a44-4fc2-be9e-64f911bdd923-kube-api-access-gshkj\") on node \"crc\" DevicePath \"\"" Nov 23 16:14:46 crc kubenswrapper[5050]: I1123 16:14:46.049028 5050 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ad227747-4a44-4fc2-be9e-64f911bdd923-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 16:14:46 crc kubenswrapper[5050]: I1123 16:14:46.049042 5050 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ad227747-4a44-4fc2-be9e-64f911bdd923-config-data\") on node \"crc\" DevicePath \"\"" Nov 23 16:14:46 crc kubenswrapper[5050]: I1123 16:14:46.339484 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"b43a331c-cdd6-41e1-b962-ce6709d1d200","Type":"ContainerStarted","Data":"c8d373669db01fc469a20d7444ad7a4a2c3381438472cad0965a20c8cd5d7ce2"} Nov 23 16:14:46 crc kubenswrapper[5050]: I1123 16:14:46.340051 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"b43a331c-cdd6-41e1-b962-ce6709d1d200","Type":"ContainerStarted","Data":"30e11a93d6fdc77b1c2a02e908870e7098aaf58f6b99802562946afc4bfd9c26"} Nov 23 16:14:46 crc kubenswrapper[5050]: I1123 16:14:46.340138 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"b43a331c-cdd6-41e1-b962-ce6709d1d200","Type":"ContainerStarted","Data":"572e23ba368571fe4e94de8c03b9fbbef2b79c6bde612d9c523c56b9ebf03a45"} Nov 23 16:14:46 crc kubenswrapper[5050]: I1123 16:14:46.348615 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"5954b03a-534a-435b-9106-6be013bcecb1","Type":"ContainerStarted","Data":"5eb68b9a5c49d7f12e9db73e6ee87d20414540759fb2229375f7093e6a977ce6"} Nov 23 16:14:46 crc kubenswrapper[5050]: I1123 16:14:46.348698 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"5954b03a-534a-435b-9106-6be013bcecb1","Type":"ContainerStarted","Data":"53b3986dc513c9d1d812b25c41e5a8c50ac02790e4aa17daeeafab7617edf16d"} Nov 23 16:14:46 crc kubenswrapper[5050]: I1123 16:14:46.348714 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" 
event={"ID":"5954b03a-534a-435b-9106-6be013bcecb1","Type":"ContainerStarted","Data":"fbb088b25078e73453fa6dcb49f54ba18565a6b4724c918713c41effe5074752"} Nov 23 16:14:46 crc kubenswrapper[5050]: I1123 16:14:46.352747 5050 generic.go:334] "Generic (PLEG): container finished" podID="ad227747-4a44-4fc2-be9e-64f911bdd923" containerID="c6bb79375086dd527994f11bb4946119ce1e75f49d786a78d088d1fc09d0bf0b" exitCode=0 Nov 23 16:14:46 crc kubenswrapper[5050]: I1123 16:14:46.352818 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"ad227747-4a44-4fc2-be9e-64f911bdd923","Type":"ContainerDied","Data":"c6bb79375086dd527994f11bb4946119ce1e75f49d786a78d088d1fc09d0bf0b"} Nov 23 16:14:46 crc kubenswrapper[5050]: I1123 16:14:46.352857 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"ad227747-4a44-4fc2-be9e-64f911bdd923","Type":"ContainerDied","Data":"db6a6cfd6f5dbb494b99ec28d0eef4d8c69c0df418eb4e981e35b493f3bfa4cf"} Nov 23 16:14:46 crc kubenswrapper[5050]: I1123 16:14:46.352882 5050 scope.go:117] "RemoveContainer" containerID="c6bb79375086dd527994f11bb4946119ce1e75f49d786a78d088d1fc09d0bf0b" Nov 23 16:14:46 crc kubenswrapper[5050]: I1123 16:14:46.353066 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 23 16:14:46 crc kubenswrapper[5050]: I1123 16:14:46.377577 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.377553736 podStartE2EDuration="2.377553736s" podCreationTimestamp="2025-11-23 16:14:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 16:14:46.364212769 +0000 UTC m=+5581.531209254" watchObservedRunningTime="2025-11-23 16:14:46.377553736 +0000 UTC m=+5581.544550221" Nov 23 16:14:46 crc kubenswrapper[5050]: I1123 16:14:46.389321 5050 scope.go:117] "RemoveContainer" containerID="c6bb79375086dd527994f11bb4946119ce1e75f49d786a78d088d1fc09d0bf0b" Nov 23 16:14:46 crc kubenswrapper[5050]: E1123 16:14:46.390657 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c6bb79375086dd527994f11bb4946119ce1e75f49d786a78d088d1fc09d0bf0b\": container with ID starting with c6bb79375086dd527994f11bb4946119ce1e75f49d786a78d088d1fc09d0bf0b not found: ID does not exist" containerID="c6bb79375086dd527994f11bb4946119ce1e75f49d786a78d088d1fc09d0bf0b" Nov 23 16:14:46 crc kubenswrapper[5050]: I1123 16:14:46.390733 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c6bb79375086dd527994f11bb4946119ce1e75f49d786a78d088d1fc09d0bf0b"} err="failed to get container status \"c6bb79375086dd527994f11bb4946119ce1e75f49d786a78d088d1fc09d0bf0b\": rpc error: code = NotFound desc = could not find container \"c6bb79375086dd527994f11bb4946119ce1e75f49d786a78d088d1fc09d0bf0b\": container with ID starting with c6bb79375086dd527994f11bb4946119ce1e75f49d786a78d088d1fc09d0bf0b not found: ID does not exist" Nov 23 16:14:46 crc kubenswrapper[5050]: I1123 16:14:46.415026 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.414994783 podStartE2EDuration="2.414994783s" podCreationTimestamp="2025-11-23 16:14:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" 
observedRunningTime="2025-11-23 16:14:46.390517172 +0000 UTC m=+5581.557513667" watchObservedRunningTime="2025-11-23 16:14:46.414994783 +0000 UTC m=+5581.581991268" Nov 23 16:14:46 crc kubenswrapper[5050]: I1123 16:14:46.434539 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 23 16:14:46 crc kubenswrapper[5050]: I1123 16:14:46.469234 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Nov 23 16:14:46 crc kubenswrapper[5050]: I1123 16:14:46.481213 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 23 16:14:46 crc kubenswrapper[5050]: E1123 16:14:46.481887 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ad227747-4a44-4fc2-be9e-64f911bdd923" containerName="nova-scheduler-scheduler" Nov 23 16:14:46 crc kubenswrapper[5050]: I1123 16:14:46.481918 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="ad227747-4a44-4fc2-be9e-64f911bdd923" containerName="nova-scheduler-scheduler" Nov 23 16:14:46 crc kubenswrapper[5050]: I1123 16:14:46.482138 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="ad227747-4a44-4fc2-be9e-64f911bdd923" containerName="nova-scheduler-scheduler" Nov 23 16:14:46 crc kubenswrapper[5050]: I1123 16:14:46.482912 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 23 16:14:46 crc kubenswrapper[5050]: I1123 16:14:46.485133 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 23 16:14:46 crc kubenswrapper[5050]: I1123 16:14:46.496196 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 23 16:14:46 crc kubenswrapper[5050]: I1123 16:14:46.559462 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/18561cc9-8520-4621-a8c5-4efd4196a100-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"18561cc9-8520-4621-a8c5-4efd4196a100\") " pod="openstack/nova-scheduler-0" Nov 23 16:14:46 crc kubenswrapper[5050]: I1123 16:14:46.559549 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/18561cc9-8520-4621-a8c5-4efd4196a100-config-data\") pod \"nova-scheduler-0\" (UID: \"18561cc9-8520-4621-a8c5-4efd4196a100\") " pod="openstack/nova-scheduler-0" Nov 23 16:14:46 crc kubenswrapper[5050]: I1123 16:14:46.559577 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tbqkf\" (UniqueName: \"kubernetes.io/projected/18561cc9-8520-4621-a8c5-4efd4196a100-kube-api-access-tbqkf\") pod \"nova-scheduler-0\" (UID: \"18561cc9-8520-4621-a8c5-4efd4196a100\") " pod="openstack/nova-scheduler-0" Nov 23 16:14:46 crc kubenswrapper[5050]: I1123 16:14:46.662106 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/18561cc9-8520-4621-a8c5-4efd4196a100-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"18561cc9-8520-4621-a8c5-4efd4196a100\") " pod="openstack/nova-scheduler-0" Nov 23 16:14:46 crc kubenswrapper[5050]: I1123 16:14:46.662243 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/18561cc9-8520-4621-a8c5-4efd4196a100-config-data\") pod \"nova-scheduler-0\" (UID: 
\"18561cc9-8520-4621-a8c5-4efd4196a100\") " pod="openstack/nova-scheduler-0" Nov 23 16:14:46 crc kubenswrapper[5050]: I1123 16:14:46.662320 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tbqkf\" (UniqueName: \"kubernetes.io/projected/18561cc9-8520-4621-a8c5-4efd4196a100-kube-api-access-tbqkf\") pod \"nova-scheduler-0\" (UID: \"18561cc9-8520-4621-a8c5-4efd4196a100\") " pod="openstack/nova-scheduler-0" Nov 23 16:14:46 crc kubenswrapper[5050]: I1123 16:14:46.668849 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/18561cc9-8520-4621-a8c5-4efd4196a100-config-data\") pod \"nova-scheduler-0\" (UID: \"18561cc9-8520-4621-a8c5-4efd4196a100\") " pod="openstack/nova-scheduler-0" Nov 23 16:14:46 crc kubenswrapper[5050]: I1123 16:14:46.669701 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/18561cc9-8520-4621-a8c5-4efd4196a100-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"18561cc9-8520-4621-a8c5-4efd4196a100\") " pod="openstack/nova-scheduler-0" Nov 23 16:14:46 crc kubenswrapper[5050]: I1123 16:14:46.682575 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tbqkf\" (UniqueName: \"kubernetes.io/projected/18561cc9-8520-4621-a8c5-4efd4196a100-kube-api-access-tbqkf\") pod \"nova-scheduler-0\" (UID: \"18561cc9-8520-4621-a8c5-4efd4196a100\") " pod="openstack/nova-scheduler-0" Nov 23 16:14:46 crc kubenswrapper[5050]: I1123 16:14:46.804246 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 23 16:14:47 crc kubenswrapper[5050]: I1123 16:14:47.116940 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 23 16:14:47 crc kubenswrapper[5050]: I1123 16:14:47.368107 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"18561cc9-8520-4621-a8c5-4efd4196a100","Type":"ContainerStarted","Data":"8b23181638ed02d1f1292a69ecd2c43ef106cacf9c533b6269a8579572a11477"} Nov 23 16:14:47 crc kubenswrapper[5050]: I1123 16:14:47.368151 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"18561cc9-8520-4621-a8c5-4efd4196a100","Type":"ContainerStarted","Data":"a13026cd01b98ad59f649726bb7609800d6af3fa3f925e5054cc8407c6ba44c9"} Nov 23 16:14:47 crc kubenswrapper[5050]: I1123 16:14:47.571254 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ad227747-4a44-4fc2-be9e-64f911bdd923" path="/var/lib/kubelet/pods/ad227747-4a44-4fc2-be9e-64f911bdd923/volumes" Nov 23 16:14:50 crc kubenswrapper[5050]: I1123 16:14:50.073528 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 23 16:14:50 crc kubenswrapper[5050]: I1123 16:14:50.075877 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 23 16:14:51 crc kubenswrapper[5050]: I1123 16:14:51.804564 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Nov 23 16:14:55 crc kubenswrapper[5050]: I1123 16:14:55.073074 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 23 16:14:55 crc kubenswrapper[5050]: I1123 16:14:55.073622 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" 
Nov 23 16:14:55 crc kubenswrapper[5050]: I1123 16:14:55.105667 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0"
Nov 23 16:14:55 crc kubenswrapper[5050]: I1123 16:14:55.105774 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0"
Nov 23 16:14:56 crc kubenswrapper[5050]: I1123 16:14:56.114906 5050 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="5954b03a-534a-435b-9106-6be013bcecb1" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"http://10.217.1.70:8775/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Nov 23 16:14:56 crc kubenswrapper[5050]: I1123 16:14:56.238069 5050 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="b43a331c-cdd6-41e1-b962-ce6709d1d200" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.1.71:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Nov 23 16:14:56 crc kubenswrapper[5050]: I1123 16:14:56.238096 5050 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="5954b03a-534a-435b-9106-6be013bcecb1" containerName="nova-metadata-log" probeResult="failure" output="Get \"http://10.217.1.70:8775/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Nov 23 16:14:56 crc kubenswrapper[5050]: I1123 16:14:56.238622 5050 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="b43a331c-cdd6-41e1-b962-ce6709d1d200" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.1.71:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Nov 23 16:14:56 crc kubenswrapper[5050]: I1123 16:14:56.804902 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0"
Nov 23 16:14:56 crc kubenswrapper[5050]: I1123 16:14:56.835597 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0"
Nov 23 16:14:56 crc kubenswrapper[5050]: I1123 16:14:56.865482 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=10.86545947 podStartE2EDuration="10.86545947s" podCreationTimestamp="2025-11-23 16:14:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 16:14:47.39332736 +0000 UTC m=+5582.560323845" watchObservedRunningTime="2025-11-23 16:14:56.86545947 +0000 UTC m=+5592.032455955"
Nov 23 16:14:57 crc kubenswrapper[5050]: I1123 16:14:57.569667 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0"
Nov 23 16:14:59 crc kubenswrapper[5050]: I1123 16:14:59.224688 5050 patch_prober.go:28] interesting pod/machine-config-daemon-hlrlq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 23 16:14:59 crc kubenswrapper[5050]: I1123 16:14:59.225231 5050 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
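[Annotation] Here the machine-config-daemon liveness probe fails (connection refused on 127.0.0.1:8798), so the kubelet kills the container with its termination grace period (gracePeriod=600) and then throttles the restart: "back-off 5m0s restarting failed container" in the lines that follow means the pod has already reached the cap of the kubelet's crash-loop back-off. A sketch of that schedule, assuming the commonly cited kubelet behaviour of a ~10s initial delay that doubles per failure up to a 5m ceiling (assumed constants, not read from this log):

    package main

    import (
        "fmt"
        "time"
    )

    // restartDelays models a doubling back-off capped at five minutes,
    // the pattern behind the repeated CrashLoopBackOff errors below.
    func restartDelays(failures int) []time.Duration {
        const (
            initial = 10 * time.Second // assumed kubelet base delay
            max     = 5 * time.Minute  // matches "back-off 5m0s" in the log
        )
        delays := make([]time.Duration, 0, failures)
        d := initial
        for i := 0; i < failures; i++ {
            delays = append(delays, d)
            if d *= 2; d > max {
                d = max
            }
        }
        return delays
    }

    func main() {
        // After roughly six failures the delay saturates at the 5m cap.
        fmt.Println(restartDelays(7)) // [10s 20s 40s 1m20s 2m40s 5m0s 5m0s]
    }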
\"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 23 16:14:59 crc kubenswrapper[5050]: I1123 16:14:59.225307 5050 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" Nov 23 16:14:59 crc kubenswrapper[5050]: I1123 16:14:59.226800 5050 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"131b4bb8956deb8c3bd7bd951e055fca3b66c01a0141c3f85cb0b1152065de00"} pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 23 16:14:59 crc kubenswrapper[5050]: I1123 16:14:59.226913 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" containerID="cri-o://131b4bb8956deb8c3bd7bd951e055fca3b66c01a0141c3f85cb0b1152065de00" gracePeriod=600 Nov 23 16:14:59 crc kubenswrapper[5050]: E1123 16:14:59.365774 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 16:14:59 crc kubenswrapper[5050]: I1123 16:14:59.552578 5050 generic.go:334] "Generic (PLEG): container finished" podID="1d998909-9470-47ef-87e8-b34f0473682f" containerID="131b4bb8956deb8c3bd7bd951e055fca3b66c01a0141c3f85cb0b1152065de00" exitCode=0 Nov 23 16:14:59 crc kubenswrapper[5050]: I1123 16:14:59.562586 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" event={"ID":"1d998909-9470-47ef-87e8-b34f0473682f","Type":"ContainerDied","Data":"131b4bb8956deb8c3bd7bd951e055fca3b66c01a0141c3f85cb0b1152065de00"} Nov 23 16:14:59 crc kubenswrapper[5050]: I1123 16:14:59.562659 5050 scope.go:117] "RemoveContainer" containerID="b87f9772414b4630105844191caea96d4b6e03191eb9f84073781caf3da21f1e" Nov 23 16:14:59 crc kubenswrapper[5050]: I1123 16:14:59.563716 5050 scope.go:117] "RemoveContainer" containerID="131b4bb8956deb8c3bd7bd951e055fca3b66c01a0141c3f85cb0b1152065de00" Nov 23 16:14:59 crc kubenswrapper[5050]: E1123 16:14:59.564154 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 16:15:00 crc kubenswrapper[5050]: I1123 16:15:00.152432 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29398575-jcbhs"] Nov 23 16:15:00 crc kubenswrapper[5050]: I1123 16:15:00.155241 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29398575-jcbhs" Nov 23 16:15:00 crc kubenswrapper[5050]: I1123 16:15:00.162897 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 23 16:15:00 crc kubenswrapper[5050]: I1123 16:15:00.165126 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 23 16:15:00 crc kubenswrapper[5050]: I1123 16:15:00.171291 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29398575-jcbhs"] Nov 23 16:15:00 crc kubenswrapper[5050]: I1123 16:15:00.288520 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/6f91221e-c49b-4426-8db5-94a008eb456d-secret-volume\") pod \"collect-profiles-29398575-jcbhs\" (UID: \"6f91221e-c49b-4426-8db5-94a008eb456d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29398575-jcbhs" Nov 23 16:15:00 crc kubenswrapper[5050]: I1123 16:15:00.288743 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8hkwp\" (UniqueName: \"kubernetes.io/projected/6f91221e-c49b-4426-8db5-94a008eb456d-kube-api-access-8hkwp\") pod \"collect-profiles-29398575-jcbhs\" (UID: \"6f91221e-c49b-4426-8db5-94a008eb456d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29398575-jcbhs" Nov 23 16:15:00 crc kubenswrapper[5050]: I1123 16:15:00.288827 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/6f91221e-c49b-4426-8db5-94a008eb456d-config-volume\") pod \"collect-profiles-29398575-jcbhs\" (UID: \"6f91221e-c49b-4426-8db5-94a008eb456d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29398575-jcbhs" Nov 23 16:15:00 crc kubenswrapper[5050]: I1123 16:15:00.390640 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8hkwp\" (UniqueName: \"kubernetes.io/projected/6f91221e-c49b-4426-8db5-94a008eb456d-kube-api-access-8hkwp\") pod \"collect-profiles-29398575-jcbhs\" (UID: \"6f91221e-c49b-4426-8db5-94a008eb456d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29398575-jcbhs" Nov 23 16:15:00 crc kubenswrapper[5050]: I1123 16:15:00.390945 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/6f91221e-c49b-4426-8db5-94a008eb456d-config-volume\") pod \"collect-profiles-29398575-jcbhs\" (UID: \"6f91221e-c49b-4426-8db5-94a008eb456d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29398575-jcbhs" Nov 23 16:15:00 crc kubenswrapper[5050]: I1123 16:15:00.391027 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/6f91221e-c49b-4426-8db5-94a008eb456d-secret-volume\") pod \"collect-profiles-29398575-jcbhs\" (UID: \"6f91221e-c49b-4426-8db5-94a008eb456d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29398575-jcbhs" Nov 23 16:15:00 crc kubenswrapper[5050]: I1123 16:15:00.393507 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/6f91221e-c49b-4426-8db5-94a008eb456d-config-volume\") pod 
\"collect-profiles-29398575-jcbhs\" (UID: \"6f91221e-c49b-4426-8db5-94a008eb456d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29398575-jcbhs" Nov 23 16:15:00 crc kubenswrapper[5050]: I1123 16:15:00.404927 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/6f91221e-c49b-4426-8db5-94a008eb456d-secret-volume\") pod \"collect-profiles-29398575-jcbhs\" (UID: \"6f91221e-c49b-4426-8db5-94a008eb456d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29398575-jcbhs" Nov 23 16:15:00 crc kubenswrapper[5050]: I1123 16:15:00.423668 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8hkwp\" (UniqueName: \"kubernetes.io/projected/6f91221e-c49b-4426-8db5-94a008eb456d-kube-api-access-8hkwp\") pod \"collect-profiles-29398575-jcbhs\" (UID: \"6f91221e-c49b-4426-8db5-94a008eb456d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29398575-jcbhs" Nov 23 16:15:00 crc kubenswrapper[5050]: I1123 16:15:00.476840 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29398575-jcbhs" Nov 23 16:15:00 crc kubenswrapper[5050]: I1123 16:15:00.995206 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29398575-jcbhs"] Nov 23 16:15:01 crc kubenswrapper[5050]: W1123 16:15:01.001138 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6f91221e_c49b_4426_8db5_94a008eb456d.slice/crio-6ee050a65241cafd185680171a271505efd09800ce8a6fb22457039f8cfc7582 WatchSource:0}: Error finding container 6ee050a65241cafd185680171a271505efd09800ce8a6fb22457039f8cfc7582: Status 404 returned error can't find the container with id 6ee050a65241cafd185680171a271505efd09800ce8a6fb22457039f8cfc7582 Nov 23 16:15:01 crc kubenswrapper[5050]: I1123 16:15:01.588800 5050 generic.go:334] "Generic (PLEG): container finished" podID="6f91221e-c49b-4426-8db5-94a008eb456d" containerID="82debed6bd07622fc11a45346590cf2ab8a1142f6542b5ef408f9b70aa6d77bc" exitCode=0 Nov 23 16:15:01 crc kubenswrapper[5050]: I1123 16:15:01.588875 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29398575-jcbhs" event={"ID":"6f91221e-c49b-4426-8db5-94a008eb456d","Type":"ContainerDied","Data":"82debed6bd07622fc11a45346590cf2ab8a1142f6542b5ef408f9b70aa6d77bc"} Nov 23 16:15:01 crc kubenswrapper[5050]: I1123 16:15:01.590326 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29398575-jcbhs" event={"ID":"6f91221e-c49b-4426-8db5-94a008eb456d","Type":"ContainerStarted","Data":"6ee050a65241cafd185680171a271505efd09800ce8a6fb22457039f8cfc7582"} Nov 23 16:15:03 crc kubenswrapper[5050]: I1123 16:15:03.025992 5050 util.go:48] "No ready sandbox for pod can be found. 
Nov 23 16:15:03 crc kubenswrapper[5050]: I1123 16:15:03.203063 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8hkwp\" (UniqueName: \"kubernetes.io/projected/6f91221e-c49b-4426-8db5-94a008eb456d-kube-api-access-8hkwp\") pod \"6f91221e-c49b-4426-8db5-94a008eb456d\" (UID: \"6f91221e-c49b-4426-8db5-94a008eb456d\") "
Nov 23 16:15:03 crc kubenswrapper[5050]: I1123 16:15:03.203123 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/6f91221e-c49b-4426-8db5-94a008eb456d-secret-volume\") pod \"6f91221e-c49b-4426-8db5-94a008eb456d\" (UID: \"6f91221e-c49b-4426-8db5-94a008eb456d\") "
Nov 23 16:15:03 crc kubenswrapper[5050]: I1123 16:15:03.203335 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/6f91221e-c49b-4426-8db5-94a008eb456d-config-volume\") pod \"6f91221e-c49b-4426-8db5-94a008eb456d\" (UID: \"6f91221e-c49b-4426-8db5-94a008eb456d\") "
Nov 23 16:15:03 crc kubenswrapper[5050]: I1123 16:15:03.204411 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6f91221e-c49b-4426-8db5-94a008eb456d-config-volume" (OuterVolumeSpecName: "config-volume") pod "6f91221e-c49b-4426-8db5-94a008eb456d" (UID: "6f91221e-c49b-4426-8db5-94a008eb456d"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 23 16:15:03 crc kubenswrapper[5050]: I1123 16:15:03.211188 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6f91221e-c49b-4426-8db5-94a008eb456d-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "6f91221e-c49b-4426-8db5-94a008eb456d" (UID: "6f91221e-c49b-4426-8db5-94a008eb456d"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 23 16:15:03 crc kubenswrapper[5050]: I1123 16:15:03.214347 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6f91221e-c49b-4426-8db5-94a008eb456d-kube-api-access-8hkwp" (OuterVolumeSpecName: "kube-api-access-8hkwp") pod "6f91221e-c49b-4426-8db5-94a008eb456d" (UID: "6f91221e-c49b-4426-8db5-94a008eb456d"). InnerVolumeSpecName "kube-api-access-8hkwp". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 23 16:15:03 crc kubenswrapper[5050]: I1123 16:15:03.306075 5050 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/6f91221e-c49b-4426-8db5-94a008eb456d-config-volume\") on node \"crc\" DevicePath \"\""
Nov 23 16:15:03 crc kubenswrapper[5050]: I1123 16:15:03.306139 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8hkwp\" (UniqueName: \"kubernetes.io/projected/6f91221e-c49b-4426-8db5-94a008eb456d-kube-api-access-8hkwp\") on node \"crc\" DevicePath \"\""
Nov 23 16:15:03 crc kubenswrapper[5050]: I1123 16:15:03.306161 5050 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/6f91221e-c49b-4426-8db5-94a008eb456d-secret-volume\") on node \"crc\" DevicePath \"\""
Nov 23 16:15:03 crc kubenswrapper[5050]: I1123 16:15:03.617520 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29398575-jcbhs" event={"ID":"6f91221e-c49b-4426-8db5-94a008eb456d","Type":"ContainerDied","Data":"6ee050a65241cafd185680171a271505efd09800ce8a6fb22457039f8cfc7582"}
Nov 23 16:15:03 crc kubenswrapper[5050]: I1123 16:15:03.617575 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6ee050a65241cafd185680171a271505efd09800ce8a6fb22457039f8cfc7582"
Nov 23 16:15:03 crc kubenswrapper[5050]: I1123 16:15:03.617624 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29398575-jcbhs"
Nov 23 16:15:04 crc kubenswrapper[5050]: I1123 16:15:04.146253 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29398530-vhkcp"]
Nov 23 16:15:04 crc kubenswrapper[5050]: I1123 16:15:04.159249 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29398530-vhkcp"]
Nov 23 16:15:05 crc kubenswrapper[5050]: I1123 16:15:05.076134 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0"
Nov 23 16:15:05 crc kubenswrapper[5050]: I1123 16:15:05.076275 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0"
Nov 23 16:15:05 crc kubenswrapper[5050]: I1123 16:15:05.079849 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0"
Nov 23 16:15:05 crc kubenswrapper[5050]: I1123 16:15:05.083185 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0"
Nov 23 16:15:05 crc kubenswrapper[5050]: I1123 16:15:05.119080 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0"
Nov 23 16:15:05 crc kubenswrapper[5050]: I1123 16:15:05.119677 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0"
Nov 23 16:15:05 crc kubenswrapper[5050]: I1123 16:15:05.119753 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0"
Nov 23 16:15:05 crc kubenswrapper[5050]: I1123 16:15:05.129721 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0"
Nov 23 16:15:05 crc kubenswrapper[5050]: I1123 16:15:05.563879 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="76cd80a6-c49c-4663-8483-3c144a527de9" path="/var/lib/kubelet/pods/76cd80a6-c49c-4663-8483-3c144a527de9/volumes"
Nov 23 16:15:05 crc kubenswrapper[5050]: I1123 16:15:05.644411 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0"
Nov 23 16:15:05 crc kubenswrapper[5050]: I1123 16:15:05.660599 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0"
Nov 23 16:15:05 crc kubenswrapper[5050]: I1123 16:15:05.899288 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-c57cbf6dc-hspdk"]
Nov 23 16:15:05 crc kubenswrapper[5050]: E1123 16:15:05.899826 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6f91221e-c49b-4426-8db5-94a008eb456d" containerName="collect-profiles"
Nov 23 16:15:05 crc kubenswrapper[5050]: I1123 16:15:05.899845 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="6f91221e-c49b-4426-8db5-94a008eb456d" containerName="collect-profiles"
Nov 23 16:15:05 crc kubenswrapper[5050]: I1123 16:15:05.900053 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="6f91221e-c49b-4426-8db5-94a008eb456d" containerName="collect-profiles"
Nov 23 16:15:05 crc kubenswrapper[5050]: I1123 16:15:05.901190 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-c57cbf6dc-hspdk"
Nov 23 16:15:05 crc kubenswrapper[5050]: I1123 16:15:05.924612 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-c57cbf6dc-hspdk"]
Nov 23 16:15:05 crc kubenswrapper[5050]: I1123 16:15:05.983084 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/10fbcf8c-4e29-4982-adf6-5d9240dc4293-ovsdbserver-nb\") pod \"dnsmasq-dns-c57cbf6dc-hspdk\" (UID: \"10fbcf8c-4e29-4982-adf6-5d9240dc4293\") " pod="openstack/dnsmasq-dns-c57cbf6dc-hspdk"
Nov 23 16:15:05 crc kubenswrapper[5050]: I1123 16:15:05.983164 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/10fbcf8c-4e29-4982-adf6-5d9240dc4293-ovsdbserver-sb\") pod \"dnsmasq-dns-c57cbf6dc-hspdk\" (UID: \"10fbcf8c-4e29-4982-adf6-5d9240dc4293\") " pod="openstack/dnsmasq-dns-c57cbf6dc-hspdk"
Nov 23 16:15:05 crc kubenswrapper[5050]: I1123 16:15:05.983286 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/10fbcf8c-4e29-4982-adf6-5d9240dc4293-config\") pod \"dnsmasq-dns-c57cbf6dc-hspdk\" (UID: \"10fbcf8c-4e29-4982-adf6-5d9240dc4293\") " pod="openstack/dnsmasq-dns-c57cbf6dc-hspdk"
Nov 23 16:15:05 crc kubenswrapper[5050]: I1123 16:15:05.983685 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p7l8n\" (UniqueName: \"kubernetes.io/projected/10fbcf8c-4e29-4982-adf6-5d9240dc4293-kube-api-access-p7l8n\") pod \"dnsmasq-dns-c57cbf6dc-hspdk\" (UID: \"10fbcf8c-4e29-4982-adf6-5d9240dc4293\") " pod="openstack/dnsmasq-dns-c57cbf6dc-hspdk"
Nov 23 16:15:05 crc kubenswrapper[5050]: I1123 16:15:05.983805 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/10fbcf8c-4e29-4982-adf6-5d9240dc4293-dns-svc\") pod \"dnsmasq-dns-c57cbf6dc-hspdk\" (UID: \"10fbcf8c-4e29-4982-adf6-5d9240dc4293\") " pod="openstack/dnsmasq-dns-c57cbf6dc-hspdk"
Nov 23 16:15:06 crc kubenswrapper[5050]: I1123 16:15:06.086716 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/10fbcf8c-4e29-4982-adf6-5d9240dc4293-config\") pod \"dnsmasq-dns-c57cbf6dc-hspdk\" (UID: \"10fbcf8c-4e29-4982-adf6-5d9240dc4293\") " pod="openstack/dnsmasq-dns-c57cbf6dc-hspdk"
Nov 23 16:15:06 crc kubenswrapper[5050]: I1123 16:15:06.086891 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p7l8n\" (UniqueName: \"kubernetes.io/projected/10fbcf8c-4e29-4982-adf6-5d9240dc4293-kube-api-access-p7l8n\") pod \"dnsmasq-dns-c57cbf6dc-hspdk\" (UID: \"10fbcf8c-4e29-4982-adf6-5d9240dc4293\") " pod="openstack/dnsmasq-dns-c57cbf6dc-hspdk"
Nov 23 16:15:06 crc kubenswrapper[5050]: I1123 16:15:06.086938 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/10fbcf8c-4e29-4982-adf6-5d9240dc4293-dns-svc\") pod \"dnsmasq-dns-c57cbf6dc-hspdk\" (UID: \"10fbcf8c-4e29-4982-adf6-5d9240dc4293\") " pod="openstack/dnsmasq-dns-c57cbf6dc-hspdk"
Nov 23 16:15:06 crc kubenswrapper[5050]: I1123 16:15:06.087013 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/10fbcf8c-4e29-4982-adf6-5d9240dc4293-ovsdbserver-nb\") pod \"dnsmasq-dns-c57cbf6dc-hspdk\" (UID: \"10fbcf8c-4e29-4982-adf6-5d9240dc4293\") " pod="openstack/dnsmasq-dns-c57cbf6dc-hspdk"
Nov 23 16:15:06 crc kubenswrapper[5050]: I1123 16:15:06.087041 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/10fbcf8c-4e29-4982-adf6-5d9240dc4293-ovsdbserver-sb\") pod \"dnsmasq-dns-c57cbf6dc-hspdk\" (UID: \"10fbcf8c-4e29-4982-adf6-5d9240dc4293\") " pod="openstack/dnsmasq-dns-c57cbf6dc-hspdk"
Nov 23 16:15:06 crc kubenswrapper[5050]: I1123 16:15:06.088204 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/10fbcf8c-4e29-4982-adf6-5d9240dc4293-ovsdbserver-sb\") pod \"dnsmasq-dns-c57cbf6dc-hspdk\" (UID: \"10fbcf8c-4e29-4982-adf6-5d9240dc4293\") " pod="openstack/dnsmasq-dns-c57cbf6dc-hspdk"
Nov 23 16:15:06 crc kubenswrapper[5050]: I1123 16:15:06.089289 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/10fbcf8c-4e29-4982-adf6-5d9240dc4293-dns-svc\") pod \"dnsmasq-dns-c57cbf6dc-hspdk\" (UID: \"10fbcf8c-4e29-4982-adf6-5d9240dc4293\") " pod="openstack/dnsmasq-dns-c57cbf6dc-hspdk"
Nov 23 16:15:06 crc kubenswrapper[5050]: I1123 16:15:06.089423 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/10fbcf8c-4e29-4982-adf6-5d9240dc4293-ovsdbserver-nb\") pod \"dnsmasq-dns-c57cbf6dc-hspdk\" (UID: \"10fbcf8c-4e29-4982-adf6-5d9240dc4293\") " pod="openstack/dnsmasq-dns-c57cbf6dc-hspdk"
Nov 23 16:15:06 crc kubenswrapper[5050]: I1123 16:15:06.089845 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/10fbcf8c-4e29-4982-adf6-5d9240dc4293-config\") pod \"dnsmasq-dns-c57cbf6dc-hspdk\" (UID: \"10fbcf8c-4e29-4982-adf6-5d9240dc4293\") " pod="openstack/dnsmasq-dns-c57cbf6dc-hspdk"
Nov 23 16:15:06 crc kubenswrapper[5050]: I1123 16:15:06.110713 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p7l8n\" (UniqueName: \"kubernetes.io/projected/10fbcf8c-4e29-4982-adf6-5d9240dc4293-kube-api-access-p7l8n\") pod \"dnsmasq-dns-c57cbf6dc-hspdk\" (UID: \"10fbcf8c-4e29-4982-adf6-5d9240dc4293\") " pod="openstack/dnsmasq-dns-c57cbf6dc-hspdk"
(UniqueName: \"kubernetes.io/projected/10fbcf8c-4e29-4982-adf6-5d9240dc4293-kube-api-access-p7l8n\") pod \"dnsmasq-dns-c57cbf6dc-hspdk\" (UID: \"10fbcf8c-4e29-4982-adf6-5d9240dc4293\") " pod="openstack/dnsmasq-dns-c57cbf6dc-hspdk" Nov 23 16:15:06 crc kubenswrapper[5050]: I1123 16:15:06.225101 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-c57cbf6dc-hspdk" Nov 23 16:15:06 crc kubenswrapper[5050]: I1123 16:15:06.603730 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-c57cbf6dc-hspdk"] Nov 23 16:15:06 crc kubenswrapper[5050]: I1123 16:15:06.661726 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-c57cbf6dc-hspdk" event={"ID":"10fbcf8c-4e29-4982-adf6-5d9240dc4293","Type":"ContainerStarted","Data":"58c9db494fac2369e87feb73c52f5d5ba7f69b383a64307cb6d475faed9ea418"} Nov 23 16:15:07 crc kubenswrapper[5050]: I1123 16:15:07.567407 5050 scope.go:117] "RemoveContainer" containerID="7169d7655d76cd53ed06613f891f4123bd166ef3ddb806df5a4673f855d62369" Nov 23 16:15:07 crc kubenswrapper[5050]: I1123 16:15:07.674116 5050 generic.go:334] "Generic (PLEG): container finished" podID="10fbcf8c-4e29-4982-adf6-5d9240dc4293" containerID="5a89951850408cd8024ff1744f69ceab8edac64952afa6e849cfa98fb783e273" exitCode=0 Nov 23 16:15:07 crc kubenswrapper[5050]: I1123 16:15:07.674202 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-c57cbf6dc-hspdk" event={"ID":"10fbcf8c-4e29-4982-adf6-5d9240dc4293","Type":"ContainerDied","Data":"5a89951850408cd8024ff1744f69ceab8edac64952afa6e849cfa98fb783e273"} Nov 23 16:15:08 crc kubenswrapper[5050]: I1123 16:15:08.692226 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-c57cbf6dc-hspdk" event={"ID":"10fbcf8c-4e29-4982-adf6-5d9240dc4293","Type":"ContainerStarted","Data":"35cdde4c34663833cf2bdc0774d1abc0d1adfa4ab8d562168e173aaf0697119d"} Nov 23 16:15:08 crc kubenswrapper[5050]: I1123 16:15:08.692766 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-c57cbf6dc-hspdk" Nov 23 16:15:08 crc kubenswrapper[5050]: I1123 16:15:08.722000 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-c57cbf6dc-hspdk" podStartSLOduration=3.721967159 podStartE2EDuration="3.721967159s" podCreationTimestamp="2025-11-23 16:15:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 16:15:08.71670288 +0000 UTC m=+5603.883699365" watchObservedRunningTime="2025-11-23 16:15:08.721967159 +0000 UTC m=+5603.888963654" Nov 23 16:15:13 crc kubenswrapper[5050]: I1123 16:15:13.549134 5050 scope.go:117] "RemoveContainer" containerID="131b4bb8956deb8c3bd7bd951e055fca3b66c01a0141c3f85cb0b1152065de00" Nov 23 16:15:13 crc kubenswrapper[5050]: E1123 16:15:13.550106 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 16:15:16 crc kubenswrapper[5050]: I1123 16:15:16.228326 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-c57cbf6dc-hspdk" Nov 23 
Nov 23 16:15:16 crc kubenswrapper[5050]: I1123 16:15:16.325985 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7786bd4bc7-kqvfg"]
Nov 23 16:15:16 crc kubenswrapper[5050]: I1123 16:15:16.326677 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-7786bd4bc7-kqvfg" podUID="d01a8a6c-70b5-47ec-a11b-303c4cb986cd" containerName="dnsmasq-dns" containerID="cri-o://6cd0583fd5de83374a868ba75fe75fbfe9a69bf8ab41e6a6bf694bf3c2227515" gracePeriod=10
Nov 23 16:15:16 crc kubenswrapper[5050]: I1123 16:15:16.823797 5050 generic.go:334] "Generic (PLEG): container finished" podID="d01a8a6c-70b5-47ec-a11b-303c4cb986cd" containerID="6cd0583fd5de83374a868ba75fe75fbfe9a69bf8ab41e6a6bf694bf3c2227515" exitCode=0
Nov 23 16:15:16 crc kubenswrapper[5050]: I1123 16:15:16.823856 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7786bd4bc7-kqvfg" event={"ID":"d01a8a6c-70b5-47ec-a11b-303c4cb986cd","Type":"ContainerDied","Data":"6cd0583fd5de83374a868ba75fe75fbfe9a69bf8ab41e6a6bf694bf3c2227515"}
Nov 23 16:15:16 crc kubenswrapper[5050]: I1123 16:15:16.924662 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7786bd4bc7-kqvfg"
Nov 23 16:15:17 crc kubenswrapper[5050]: I1123 16:15:17.038459 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d01a8a6c-70b5-47ec-a11b-303c4cb986cd-config\") pod \"d01a8a6c-70b5-47ec-a11b-303c4cb986cd\" (UID: \"d01a8a6c-70b5-47ec-a11b-303c4cb986cd\") "
Nov 23 16:15:17 crc kubenswrapper[5050]: I1123 16:15:17.038562 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4xzhz\" (UniqueName: \"kubernetes.io/projected/d01a8a6c-70b5-47ec-a11b-303c4cb986cd-kube-api-access-4xzhz\") pod \"d01a8a6c-70b5-47ec-a11b-303c4cb986cd\" (UID: \"d01a8a6c-70b5-47ec-a11b-303c4cb986cd\") "
Nov 23 16:15:17 crc kubenswrapper[5050]: I1123 16:15:17.038640 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d01a8a6c-70b5-47ec-a11b-303c4cb986cd-ovsdbserver-sb\") pod \"d01a8a6c-70b5-47ec-a11b-303c4cb986cd\" (UID: \"d01a8a6c-70b5-47ec-a11b-303c4cb986cd\") "
Nov 23 16:15:17 crc kubenswrapper[5050]: I1123 16:15:17.038765 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d01a8a6c-70b5-47ec-a11b-303c4cb986cd-dns-svc\") pod \"d01a8a6c-70b5-47ec-a11b-303c4cb986cd\" (UID: \"d01a8a6c-70b5-47ec-a11b-303c4cb986cd\") "
Nov 23 16:15:17 crc kubenswrapper[5050]: I1123 16:15:17.038796 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d01a8a6c-70b5-47ec-a11b-303c4cb986cd-ovsdbserver-nb\") pod \"d01a8a6c-70b5-47ec-a11b-303c4cb986cd\" (UID: \"d01a8a6c-70b5-47ec-a11b-303c4cb986cd\") "
Nov 23 16:15:17 crc kubenswrapper[5050]: I1123 16:15:17.049650 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d01a8a6c-70b5-47ec-a11b-303c4cb986cd-kube-api-access-4xzhz" (OuterVolumeSpecName: "kube-api-access-4xzhz") pod "d01a8a6c-70b5-47ec-a11b-303c4cb986cd" (UID: "d01a8a6c-70b5-47ec-a11b-303c4cb986cd"). InnerVolumeSpecName "kube-api-access-4xzhz". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 23 16:15:17 crc kubenswrapper[5050]: I1123 16:15:17.090388 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d01a8a6c-70b5-47ec-a11b-303c4cb986cd-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "d01a8a6c-70b5-47ec-a11b-303c4cb986cd" (UID: "d01a8a6c-70b5-47ec-a11b-303c4cb986cd"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 23 16:15:17 crc kubenswrapper[5050]: I1123 16:15:17.098641 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d01a8a6c-70b5-47ec-a11b-303c4cb986cd-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "d01a8a6c-70b5-47ec-a11b-303c4cb986cd" (UID: "d01a8a6c-70b5-47ec-a11b-303c4cb986cd"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 23 16:15:17 crc kubenswrapper[5050]: I1123 16:15:17.100305 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d01a8a6c-70b5-47ec-a11b-303c4cb986cd-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "d01a8a6c-70b5-47ec-a11b-303c4cb986cd" (UID: "d01a8a6c-70b5-47ec-a11b-303c4cb986cd"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 23 16:15:17 crc kubenswrapper[5050]: I1123 16:15:17.104069 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d01a8a6c-70b5-47ec-a11b-303c4cb986cd-config" (OuterVolumeSpecName: "config") pod "d01a8a6c-70b5-47ec-a11b-303c4cb986cd" (UID: "d01a8a6c-70b5-47ec-a11b-303c4cb986cd"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 23 16:15:17 crc kubenswrapper[5050]: I1123 16:15:17.142138 5050 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d01a8a6c-70b5-47ec-a11b-303c4cb986cd-config\") on node \"crc\" DevicePath \"\""
Nov 23 16:15:17 crc kubenswrapper[5050]: I1123 16:15:17.142220 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4xzhz\" (UniqueName: \"kubernetes.io/projected/d01a8a6c-70b5-47ec-a11b-303c4cb986cd-kube-api-access-4xzhz\") on node \"crc\" DevicePath \"\""
Nov 23 16:15:17 crc kubenswrapper[5050]: I1123 16:15:17.142242 5050 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d01a8a6c-70b5-47ec-a11b-303c4cb986cd-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Nov 23 16:15:17 crc kubenswrapper[5050]: I1123 16:15:17.142260 5050 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d01a8a6c-70b5-47ec-a11b-303c4cb986cd-dns-svc\") on node \"crc\" DevicePath \"\""
Nov 23 16:15:17 crc kubenswrapper[5050]: I1123 16:15:17.142277 5050 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d01a8a6c-70b5-47ec-a11b-303c4cb986cd-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Nov 23 16:15:17 crc kubenswrapper[5050]: I1123 16:15:17.850228 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7786bd4bc7-kqvfg" event={"ID":"d01a8a6c-70b5-47ec-a11b-303c4cb986cd","Type":"ContainerDied","Data":"4e9ad49505d451aef6ba6ce0a02e899969a8530f9dca11d229d495feb1656102"}
Nov 23 16:15:17 crc kubenswrapper[5050]: I1123 16:15:17.850328 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7786bd4bc7-kqvfg"
Nov 23 16:15:17 crc kubenswrapper[5050]: I1123 16:15:17.850344 5050 scope.go:117] "RemoveContainer" containerID="6cd0583fd5de83374a868ba75fe75fbfe9a69bf8ab41e6a6bf694bf3c2227515"
Nov 23 16:15:17 crc kubenswrapper[5050]: I1123 16:15:17.887715 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7786bd4bc7-kqvfg"]
Nov 23 16:15:17 crc kubenswrapper[5050]: I1123 16:15:17.898649 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7786bd4bc7-kqvfg"]
Nov 23 16:15:17 crc kubenswrapper[5050]: I1123 16:15:17.900087 5050 scope.go:117] "RemoveContainer" containerID="e2a40ca89884aa8b907acd55d8f67046a5abdb05207b259410bbd46bfa258ce3"
Nov 23 16:15:19 crc kubenswrapper[5050]: I1123 16:15:19.271070 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-create-6fn7f"]
Nov 23 16:15:19 crc kubenswrapper[5050]: E1123 16:15:19.271709 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d01a8a6c-70b5-47ec-a11b-303c4cb986cd" containerName="init"
Nov 23 16:15:19 crc kubenswrapper[5050]: I1123 16:15:19.271729 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="d01a8a6c-70b5-47ec-a11b-303c4cb986cd" containerName="init"
Nov 23 16:15:19 crc kubenswrapper[5050]: E1123 16:15:19.271768 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d01a8a6c-70b5-47ec-a11b-303c4cb986cd" containerName="dnsmasq-dns"
Nov 23 16:15:19 crc kubenswrapper[5050]: I1123 16:15:19.271778 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="d01a8a6c-70b5-47ec-a11b-303c4cb986cd" containerName="dnsmasq-dns"
Nov 23 16:15:19 crc kubenswrapper[5050]: I1123 16:15:19.272017 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="d01a8a6c-70b5-47ec-a11b-303c4cb986cd" containerName="dnsmasq-dns"
Nov 23 16:15:19 crc kubenswrapper[5050]: I1123 16:15:19.272964 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-6fn7f"
Nov 23 16:15:19 crc kubenswrapper[5050]: I1123 16:15:19.281305 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-6fn7f"]
Nov 23 16:15:19 crc kubenswrapper[5050]: I1123 16:15:19.380290 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-8046-account-create-hkqfk"]
Nov 23 16:15:19 crc kubenswrapper[5050]: I1123 16:15:19.381679 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-8046-account-create-hkqfk"
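[Annotation] cinder-db-create-6fn7f and cinder-8046-account-create-hkqfk are run-once database-setup pods: ADDed by the API, a single container runs to exitCode=0, then the pod is torn down, after which the CPU and memory managers purge their stale per-container state (the RemoveStaleState lines). A sketch of the kind of one-shot batch Job that would produce such a pod -- an assumption, since the owning object is not visible in this log; the container name is the one the later RemoveStaleState lines report:

    package main

    import (
        "fmt"

        batchv1 "k8s.io/api/batch/v1"
        corev1 "k8s.io/api/core/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    )

    // dbCreateJob sketches a run-once Job: RestartPolicyNever means the pod
    // simply completes, matching the exitCode=0 lifecycle seen here.
    func dbCreateJob() *batchv1.Job {
        backoff := int32(6) // hypothetical retry budget
        return &batchv1.Job{
            ObjectMeta: metav1.ObjectMeta{GenerateName: "cinder-db-create-"},
            Spec: batchv1.JobSpec{
                BackoffLimit: &backoff,
                Template: corev1.PodTemplateSpec{
                    Spec: corev1.PodSpec{
                        RestartPolicy: corev1.RestartPolicyNever,
                        Containers: []corev1.Container{{
                            Name:  "mariadb-database-create",        // name from the RemoveStaleState lines below
                            Image: "example/mariadb-client:latest", // placeholder image
                        }},
                    },
                },
            },
        }
    }

    func main() {
        fmt.Println(dbCreateJob().Spec.Template.Spec.Containers[0].Name)
    }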
Nov 23 16:15:19 crc kubenswrapper[5050]: I1123 16:15:19.384941 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-db-secret"
Nov 23 16:15:19 crc kubenswrapper[5050]: I1123 16:15:19.393678 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-8046-account-create-hkqfk"]
Nov 23 16:15:19 crc kubenswrapper[5050]: I1123 16:15:19.420818 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dk58j\" (UniqueName: \"kubernetes.io/projected/50036d79-f35b-430c-803b-f41a189aef45-kube-api-access-dk58j\") pod \"cinder-db-create-6fn7f\" (UID: \"50036d79-f35b-430c-803b-f41a189aef45\") " pod="openstack/cinder-db-create-6fn7f"
Nov 23 16:15:19 crc kubenswrapper[5050]: I1123 16:15:19.420900 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/50036d79-f35b-430c-803b-f41a189aef45-operator-scripts\") pod \"cinder-db-create-6fn7f\" (UID: \"50036d79-f35b-430c-803b-f41a189aef45\") " pod="openstack/cinder-db-create-6fn7f"
Nov 23 16:15:19 crc kubenswrapper[5050]: I1123 16:15:19.523266 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k6n4d\" (UniqueName: \"kubernetes.io/projected/1fa8aef4-fb4b-4655-b03e-6707ee37bd48-kube-api-access-k6n4d\") pod \"cinder-8046-account-create-hkqfk\" (UID: \"1fa8aef4-fb4b-4655-b03e-6707ee37bd48\") " pod="openstack/cinder-8046-account-create-hkqfk"
Nov 23 16:15:19 crc kubenswrapper[5050]: I1123 16:15:19.523376 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/50036d79-f35b-430c-803b-f41a189aef45-operator-scripts\") pod \"cinder-db-create-6fn7f\" (UID: \"50036d79-f35b-430c-803b-f41a189aef45\") " pod="openstack/cinder-db-create-6fn7f"
Nov 23 16:15:19 crc kubenswrapper[5050]: I1123 16:15:19.523533 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1fa8aef4-fb4b-4655-b03e-6707ee37bd48-operator-scripts\") pod \"cinder-8046-account-create-hkqfk\" (UID: \"1fa8aef4-fb4b-4655-b03e-6707ee37bd48\") " pod="openstack/cinder-8046-account-create-hkqfk"
Nov 23 16:15:19 crc kubenswrapper[5050]: I1123 16:15:19.523627 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dk58j\" (UniqueName: \"kubernetes.io/projected/50036d79-f35b-430c-803b-f41a189aef45-kube-api-access-dk58j\") pod \"cinder-db-create-6fn7f\" (UID: \"50036d79-f35b-430c-803b-f41a189aef45\") " pod="openstack/cinder-db-create-6fn7f"
Nov 23 16:15:19 crc kubenswrapper[5050]: I1123 16:15:19.524399 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/50036d79-f35b-430c-803b-f41a189aef45-operator-scripts\") pod \"cinder-db-create-6fn7f\" (UID: \"50036d79-f35b-430c-803b-f41a189aef45\") " pod="openstack/cinder-db-create-6fn7f"
Nov 23 16:15:19 crc kubenswrapper[5050]: I1123 16:15:19.560858 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dk58j\" (UniqueName: \"kubernetes.io/projected/50036d79-f35b-430c-803b-f41a189aef45-kube-api-access-dk58j\") pod \"cinder-db-create-6fn7f\" (UID: \"50036d79-f35b-430c-803b-f41a189aef45\") " pod="openstack/cinder-db-create-6fn7f"
Nov 23 16:15:19 crc kubenswrapper[5050]: I1123 16:15:19.565522 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d01a8a6c-70b5-47ec-a11b-303c4cb986cd" path="/var/lib/kubelet/pods/d01a8a6c-70b5-47ec-a11b-303c4cb986cd/volumes"
Nov 23 16:15:19 crc kubenswrapper[5050]: I1123 16:15:19.626000 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1fa8aef4-fb4b-4655-b03e-6707ee37bd48-operator-scripts\") pod \"cinder-8046-account-create-hkqfk\" (UID: \"1fa8aef4-fb4b-4655-b03e-6707ee37bd48\") " pod="openstack/cinder-8046-account-create-hkqfk"
Nov 23 16:15:19 crc kubenswrapper[5050]: I1123 16:15:19.626496 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k6n4d\" (UniqueName: \"kubernetes.io/projected/1fa8aef4-fb4b-4655-b03e-6707ee37bd48-kube-api-access-k6n4d\") pod \"cinder-8046-account-create-hkqfk\" (UID: \"1fa8aef4-fb4b-4655-b03e-6707ee37bd48\") " pod="openstack/cinder-8046-account-create-hkqfk"
Nov 23 16:15:19 crc kubenswrapper[5050]: I1123 16:15:19.626816 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-6fn7f"
Nov 23 16:15:19 crc kubenswrapper[5050]: I1123 16:15:19.627960 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1fa8aef4-fb4b-4655-b03e-6707ee37bd48-operator-scripts\") pod \"cinder-8046-account-create-hkqfk\" (UID: \"1fa8aef4-fb4b-4655-b03e-6707ee37bd48\") " pod="openstack/cinder-8046-account-create-hkqfk"
Nov 23 16:15:19 crc kubenswrapper[5050]: I1123 16:15:19.658296 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k6n4d\" (UniqueName: \"kubernetes.io/projected/1fa8aef4-fb4b-4655-b03e-6707ee37bd48-kube-api-access-k6n4d\") pod \"cinder-8046-account-create-hkqfk\" (UID: \"1fa8aef4-fb4b-4655-b03e-6707ee37bd48\") " pod="openstack/cinder-8046-account-create-hkqfk"
Nov 23 16:15:19 crc kubenswrapper[5050]: I1123 16:15:19.701177 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-8046-account-create-hkqfk"
Nov 23 16:15:20 crc kubenswrapper[5050]: I1123 16:15:20.142368 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-6fn7f"]
Nov 23 16:15:20 crc kubenswrapper[5050]: W1123 16:15:20.147746 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod50036d79_f35b_430c_803b_f41a189aef45.slice/crio-584000bd7458f907f5ef1853a56d621faff2e633c99e20efce327ce4ec1c797a WatchSource:0}: Error finding container 584000bd7458f907f5ef1853a56d621faff2e633c99e20efce327ce4ec1c797a: Status 404 returned error can't find the container with id 584000bd7458f907f5ef1853a56d621faff2e633c99e20efce327ce4ec1c797a
Nov 23 16:15:20 crc kubenswrapper[5050]: W1123 16:15:20.445251 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1fa8aef4_fb4b_4655_b03e_6707ee37bd48.slice/crio-09916a47237087351ecff9d0ebb9257593bf2cea01b5466b97f0f3e6fde4b81c WatchSource:0}: Error finding container 09916a47237087351ecff9d0ebb9257593bf2cea01b5466b97f0f3e6fde4b81c: Status 404 returned error can't find the container with id 09916a47237087351ecff9d0ebb9257593bf2cea01b5466b97f0f3e6fde4b81c
Nov 23 16:15:20 crc kubenswrapper[5050]: I1123 16:15:20.446741 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-8046-account-create-hkqfk"]
Nov 23 16:15:20 crc kubenswrapper[5050]: I1123 16:15:20.902374 5050 generic.go:334] "Generic (PLEG): container finished" podID="1fa8aef4-fb4b-4655-b03e-6707ee37bd48" containerID="eae5b93fb507e149054a38ea712d10d2b101134a7996786579ad0798b07d71d2" exitCode=0
Nov 23 16:15:20 crc kubenswrapper[5050]: I1123 16:15:20.903479 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-8046-account-create-hkqfk" event={"ID":"1fa8aef4-fb4b-4655-b03e-6707ee37bd48","Type":"ContainerDied","Data":"eae5b93fb507e149054a38ea712d10d2b101134a7996786579ad0798b07d71d2"}
Nov 23 16:15:20 crc kubenswrapper[5050]: I1123 16:15:20.903615 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-8046-account-create-hkqfk" event={"ID":"1fa8aef4-fb4b-4655-b03e-6707ee37bd48","Type":"ContainerStarted","Data":"09916a47237087351ecff9d0ebb9257593bf2cea01b5466b97f0f3e6fde4b81c"}
Nov 23 16:15:20 crc kubenswrapper[5050]: I1123 16:15:20.908398 5050 generic.go:334] "Generic (PLEG): container finished" podID="50036d79-f35b-430c-803b-f41a189aef45" containerID="7f5573ffd97138d8eb1f60674b7d15cedd559f14cca884b678f03d8db9ffddd9" exitCode=0
Nov 23 16:15:20 crc kubenswrapper[5050]: I1123 16:15:20.908502 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-6fn7f" event={"ID":"50036d79-f35b-430c-803b-f41a189aef45","Type":"ContainerDied","Data":"7f5573ffd97138d8eb1f60674b7d15cedd559f14cca884b678f03d8db9ffddd9"}
Nov 23 16:15:20 crc kubenswrapper[5050]: I1123 16:15:20.908553 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-6fn7f" event={"ID":"50036d79-f35b-430c-803b-f41a189aef45","Type":"ContainerStarted","Data":"584000bd7458f907f5ef1853a56d621faff2e633c99e20efce327ce4ec1c797a"}
Nov 23 16:15:22 crc kubenswrapper[5050]: I1123 16:15:22.420950 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-6fn7f"
Nov 23 16:15:22 crc kubenswrapper[5050]: I1123 16:15:22.428165 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-8046-account-create-hkqfk"
Nov 23 16:15:22 crc kubenswrapper[5050]: I1123 16:15:22.508284 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1fa8aef4-fb4b-4655-b03e-6707ee37bd48-operator-scripts\") pod \"1fa8aef4-fb4b-4655-b03e-6707ee37bd48\" (UID: \"1fa8aef4-fb4b-4655-b03e-6707ee37bd48\") "
Nov 23 16:15:22 crc kubenswrapper[5050]: I1123 16:15:22.508733 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/50036d79-f35b-430c-803b-f41a189aef45-operator-scripts\") pod \"50036d79-f35b-430c-803b-f41a189aef45\" (UID: \"50036d79-f35b-430c-803b-f41a189aef45\") "
Nov 23 16:15:22 crc kubenswrapper[5050]: I1123 16:15:22.508814 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k6n4d\" (UniqueName: \"kubernetes.io/projected/1fa8aef4-fb4b-4655-b03e-6707ee37bd48-kube-api-access-k6n4d\") pod \"1fa8aef4-fb4b-4655-b03e-6707ee37bd48\" (UID: \"1fa8aef4-fb4b-4655-b03e-6707ee37bd48\") "
Nov 23 16:15:22 crc kubenswrapper[5050]: I1123 16:15:22.508960 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dk58j\" (UniqueName: \"kubernetes.io/projected/50036d79-f35b-430c-803b-f41a189aef45-kube-api-access-dk58j\") pod \"50036d79-f35b-430c-803b-f41a189aef45\" (UID: \"50036d79-f35b-430c-803b-f41a189aef45\") "
Nov 23 16:15:22 crc kubenswrapper[5050]: I1123 16:15:22.510126 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/50036d79-f35b-430c-803b-f41a189aef45-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "50036d79-f35b-430c-803b-f41a189aef45" (UID: "50036d79-f35b-430c-803b-f41a189aef45"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 23 16:15:22 crc kubenswrapper[5050]: I1123 16:15:22.510125 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1fa8aef4-fb4b-4655-b03e-6707ee37bd48-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "1fa8aef4-fb4b-4655-b03e-6707ee37bd48" (UID: "1fa8aef4-fb4b-4655-b03e-6707ee37bd48"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 23 16:15:22 crc kubenswrapper[5050]: I1123 16:15:22.516010 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/50036d79-f35b-430c-803b-f41a189aef45-kube-api-access-dk58j" (OuterVolumeSpecName: "kube-api-access-dk58j") pod "50036d79-f35b-430c-803b-f41a189aef45" (UID: "50036d79-f35b-430c-803b-f41a189aef45"). InnerVolumeSpecName "kube-api-access-dk58j". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 23 16:15:22 crc kubenswrapper[5050]: I1123 16:15:22.517488 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1fa8aef4-fb4b-4655-b03e-6707ee37bd48-kube-api-access-k6n4d" (OuterVolumeSpecName: "kube-api-access-k6n4d") pod "1fa8aef4-fb4b-4655-b03e-6707ee37bd48" (UID: "1fa8aef4-fb4b-4655-b03e-6707ee37bd48"). InnerVolumeSpecName "kube-api-access-k6n4d". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 23 16:15:22 crc kubenswrapper[5050]: I1123 16:15:22.612146 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k6n4d\" (UniqueName: \"kubernetes.io/projected/1fa8aef4-fb4b-4655-b03e-6707ee37bd48-kube-api-access-k6n4d\") on node \"crc\" DevicePath \"\""
Nov 23 16:15:22 crc kubenswrapper[5050]: I1123 16:15:22.612232 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dk58j\" (UniqueName: \"kubernetes.io/projected/50036d79-f35b-430c-803b-f41a189aef45-kube-api-access-dk58j\") on node \"crc\" DevicePath \"\""
Nov 23 16:15:22 crc kubenswrapper[5050]: I1123 16:15:22.612255 5050 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1fa8aef4-fb4b-4655-b03e-6707ee37bd48-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 23 16:15:22 crc kubenswrapper[5050]: I1123 16:15:22.612273 5050 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/50036d79-f35b-430c-803b-f41a189aef45-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 23 16:15:22 crc kubenswrapper[5050]: I1123 16:15:22.939518 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-6fn7f" event={"ID":"50036d79-f35b-430c-803b-f41a189aef45","Type":"ContainerDied","Data":"584000bd7458f907f5ef1853a56d621faff2e633c99e20efce327ce4ec1c797a"}
Nov 23 16:15:22 crc kubenswrapper[5050]: I1123 16:15:22.939588 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="584000bd7458f907f5ef1853a56d621faff2e633c99e20efce327ce4ec1c797a"
Nov 23 16:15:22 crc kubenswrapper[5050]: I1123 16:15:22.939584 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-6fn7f"
Nov 23 16:15:22 crc kubenswrapper[5050]: I1123 16:15:22.943059 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-8046-account-create-hkqfk" event={"ID":"1fa8aef4-fb4b-4655-b03e-6707ee37bd48","Type":"ContainerDied","Data":"09916a47237087351ecff9d0ebb9257593bf2cea01b5466b97f0f3e6fde4b81c"}
Nov 23 16:15:22 crc kubenswrapper[5050]: I1123 16:15:22.943126 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-8046-account-create-hkqfk"
Need to start a new one" pod="openstack/cinder-8046-account-create-hkqfk" Nov 23 16:15:22 crc kubenswrapper[5050]: I1123 16:15:22.943138 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="09916a47237087351ecff9d0ebb9257593bf2cea01b5466b97f0f3e6fde4b81c" Nov 23 16:15:24 crc kubenswrapper[5050]: I1123 16:15:24.655569 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-sync-nvm9x"] Nov 23 16:15:24 crc kubenswrapper[5050]: E1123 16:15:24.656370 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="50036d79-f35b-430c-803b-f41a189aef45" containerName="mariadb-database-create" Nov 23 16:15:24 crc kubenswrapper[5050]: I1123 16:15:24.656388 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="50036d79-f35b-430c-803b-f41a189aef45" containerName="mariadb-database-create" Nov 23 16:15:24 crc kubenswrapper[5050]: E1123 16:15:24.656407 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1fa8aef4-fb4b-4655-b03e-6707ee37bd48" containerName="mariadb-account-create" Nov 23 16:15:24 crc kubenswrapper[5050]: I1123 16:15:24.656417 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="1fa8aef4-fb4b-4655-b03e-6707ee37bd48" containerName="mariadb-account-create" Nov 23 16:15:24 crc kubenswrapper[5050]: I1123 16:15:24.656678 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="50036d79-f35b-430c-803b-f41a189aef45" containerName="mariadb-database-create" Nov 23 16:15:24 crc kubenswrapper[5050]: I1123 16:15:24.656699 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="1fa8aef4-fb4b-4655-b03e-6707ee37bd48" containerName="mariadb-account-create" Nov 23 16:15:24 crc kubenswrapper[5050]: I1123 16:15:24.657464 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-nvm9x" Nov 23 16:15:24 crc kubenswrapper[5050]: I1123 16:15:24.662244 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Nov 23 16:15:24 crc kubenswrapper[5050]: I1123 16:15:24.662298 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Nov 23 16:15:24 crc kubenswrapper[5050]: I1123 16:15:24.665849 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-4vjnp" Nov 23 16:15:24 crc kubenswrapper[5050]: I1123 16:15:24.692678 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-nvm9x"] Nov 23 16:15:24 crc kubenswrapper[5050]: I1123 16:15:24.765366 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rb8lm\" (UniqueName: \"kubernetes.io/projected/f35435e8-7b27-4b47-83e1-cb469f7bd7b0-kube-api-access-rb8lm\") pod \"cinder-db-sync-nvm9x\" (UID: \"f35435e8-7b27-4b47-83e1-cb469f7bd7b0\") " pod="openstack/cinder-db-sync-nvm9x" Nov 23 16:15:24 crc kubenswrapper[5050]: I1123 16:15:24.765559 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f35435e8-7b27-4b47-83e1-cb469f7bd7b0-scripts\") pod \"cinder-db-sync-nvm9x\" (UID: \"f35435e8-7b27-4b47-83e1-cb469f7bd7b0\") " pod="openstack/cinder-db-sync-nvm9x" Nov 23 16:15:24 crc kubenswrapper[5050]: I1123 16:15:24.765845 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f35435e8-7b27-4b47-83e1-cb469f7bd7b0-config-data\") pod \"cinder-db-sync-nvm9x\" (UID: \"f35435e8-7b27-4b47-83e1-cb469f7bd7b0\") " pod="openstack/cinder-db-sync-nvm9x" Nov 23 16:15:24 crc kubenswrapper[5050]: I1123 16:15:24.766286 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f35435e8-7b27-4b47-83e1-cb469f7bd7b0-combined-ca-bundle\") pod \"cinder-db-sync-nvm9x\" (UID: \"f35435e8-7b27-4b47-83e1-cb469f7bd7b0\") " pod="openstack/cinder-db-sync-nvm9x" Nov 23 16:15:24 crc kubenswrapper[5050]: I1123 16:15:24.766439 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/f35435e8-7b27-4b47-83e1-cb469f7bd7b0-etc-machine-id\") pod \"cinder-db-sync-nvm9x\" (UID: \"f35435e8-7b27-4b47-83e1-cb469f7bd7b0\") " pod="openstack/cinder-db-sync-nvm9x" Nov 23 16:15:24 crc kubenswrapper[5050]: I1123 16:15:24.766759 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/f35435e8-7b27-4b47-83e1-cb469f7bd7b0-db-sync-config-data\") pod \"cinder-db-sync-nvm9x\" (UID: \"f35435e8-7b27-4b47-83e1-cb469f7bd7b0\") " pod="openstack/cinder-db-sync-nvm9x" Nov 23 16:15:24 crc kubenswrapper[5050]: I1123 16:15:24.868718 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f35435e8-7b27-4b47-83e1-cb469f7bd7b0-config-data\") pod \"cinder-db-sync-nvm9x\" (UID: \"f35435e8-7b27-4b47-83e1-cb469f7bd7b0\") " pod="openstack/cinder-db-sync-nvm9x" Nov 23 16:15:24 crc kubenswrapper[5050]: I1123 16:15:24.868833 5050 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f35435e8-7b27-4b47-83e1-cb469f7bd7b0-combined-ca-bundle\") pod \"cinder-db-sync-nvm9x\" (UID: \"f35435e8-7b27-4b47-83e1-cb469f7bd7b0\") " pod="openstack/cinder-db-sync-nvm9x" Nov 23 16:15:24 crc kubenswrapper[5050]: I1123 16:15:24.868917 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/f35435e8-7b27-4b47-83e1-cb469f7bd7b0-etc-machine-id\") pod \"cinder-db-sync-nvm9x\" (UID: \"f35435e8-7b27-4b47-83e1-cb469f7bd7b0\") " pod="openstack/cinder-db-sync-nvm9x" Nov 23 16:15:24 crc kubenswrapper[5050]: I1123 16:15:24.869021 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/f35435e8-7b27-4b47-83e1-cb469f7bd7b0-etc-machine-id\") pod \"cinder-db-sync-nvm9x\" (UID: \"f35435e8-7b27-4b47-83e1-cb469f7bd7b0\") " pod="openstack/cinder-db-sync-nvm9x" Nov 23 16:15:24 crc kubenswrapper[5050]: I1123 16:15:24.869512 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/f35435e8-7b27-4b47-83e1-cb469f7bd7b0-db-sync-config-data\") pod \"cinder-db-sync-nvm9x\" (UID: \"f35435e8-7b27-4b47-83e1-cb469f7bd7b0\") " pod="openstack/cinder-db-sync-nvm9x" Nov 23 16:15:24 crc kubenswrapper[5050]: I1123 16:15:24.870082 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rb8lm\" (UniqueName: \"kubernetes.io/projected/f35435e8-7b27-4b47-83e1-cb469f7bd7b0-kube-api-access-rb8lm\") pod \"cinder-db-sync-nvm9x\" (UID: \"f35435e8-7b27-4b47-83e1-cb469f7bd7b0\") " pod="openstack/cinder-db-sync-nvm9x" Nov 23 16:15:24 crc kubenswrapper[5050]: I1123 16:15:24.870119 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f35435e8-7b27-4b47-83e1-cb469f7bd7b0-scripts\") pod \"cinder-db-sync-nvm9x\" (UID: \"f35435e8-7b27-4b47-83e1-cb469f7bd7b0\") " pod="openstack/cinder-db-sync-nvm9x" Nov 23 16:15:24 crc kubenswrapper[5050]: I1123 16:15:24.875698 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f35435e8-7b27-4b47-83e1-cb469f7bd7b0-scripts\") pod \"cinder-db-sync-nvm9x\" (UID: \"f35435e8-7b27-4b47-83e1-cb469f7bd7b0\") " pod="openstack/cinder-db-sync-nvm9x" Nov 23 16:15:24 crc kubenswrapper[5050]: I1123 16:15:24.876863 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/f35435e8-7b27-4b47-83e1-cb469f7bd7b0-db-sync-config-data\") pod \"cinder-db-sync-nvm9x\" (UID: \"f35435e8-7b27-4b47-83e1-cb469f7bd7b0\") " pod="openstack/cinder-db-sync-nvm9x" Nov 23 16:15:24 crc kubenswrapper[5050]: I1123 16:15:24.885329 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f35435e8-7b27-4b47-83e1-cb469f7bd7b0-config-data\") pod \"cinder-db-sync-nvm9x\" (UID: \"f35435e8-7b27-4b47-83e1-cb469f7bd7b0\") " pod="openstack/cinder-db-sync-nvm9x" Nov 23 16:15:24 crc kubenswrapper[5050]: I1123 16:15:24.893708 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f35435e8-7b27-4b47-83e1-cb469f7bd7b0-combined-ca-bundle\") pod \"cinder-db-sync-nvm9x\" (UID: \"f35435e8-7b27-4b47-83e1-cb469f7bd7b0\") " 
pod="openstack/cinder-db-sync-nvm9x" Nov 23 16:15:24 crc kubenswrapper[5050]: I1123 16:15:24.906882 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rb8lm\" (UniqueName: \"kubernetes.io/projected/f35435e8-7b27-4b47-83e1-cb469f7bd7b0-kube-api-access-rb8lm\") pod \"cinder-db-sync-nvm9x\" (UID: \"f35435e8-7b27-4b47-83e1-cb469f7bd7b0\") " pod="openstack/cinder-db-sync-nvm9x" Nov 23 16:15:24 crc kubenswrapper[5050]: I1123 16:15:24.991336 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-nvm9x" Nov 23 16:15:25 crc kubenswrapper[5050]: I1123 16:15:25.328376 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-nvm9x"] Nov 23 16:15:25 crc kubenswrapper[5050]: W1123 16:15:25.330693 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf35435e8_7b27_4b47_83e1_cb469f7bd7b0.slice/crio-6afd950a35018d7cb9331e2f86f927d33da1864838d9f1495ec4ab4a496be91d WatchSource:0}: Error finding container 6afd950a35018d7cb9331e2f86f927d33da1864838d9f1495ec4ab4a496be91d: Status 404 returned error can't find the container with id 6afd950a35018d7cb9331e2f86f927d33da1864838d9f1495ec4ab4a496be91d Nov 23 16:15:25 crc kubenswrapper[5050]: I1123 16:15:25.978949 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-nvm9x" event={"ID":"f35435e8-7b27-4b47-83e1-cb469f7bd7b0","Type":"ContainerStarted","Data":"6afd950a35018d7cb9331e2f86f927d33da1864838d9f1495ec4ab4a496be91d"} Nov 23 16:15:26 crc kubenswrapper[5050]: I1123 16:15:26.997965 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-nvm9x" event={"ID":"f35435e8-7b27-4b47-83e1-cb469f7bd7b0","Type":"ContainerStarted","Data":"f3997e49e5ca1823e5a10cfd87f46f57645ed93ae81ab4e356df1d9c093d5206"} Nov 23 16:15:27 crc kubenswrapper[5050]: I1123 16:15:27.026319 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-sync-nvm9x" podStartSLOduration=3.026298229 podStartE2EDuration="3.026298229s" podCreationTimestamp="2025-11-23 16:15:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 16:15:27.025007563 +0000 UTC m=+5622.192004048" watchObservedRunningTime="2025-11-23 16:15:27.026298229 +0000 UTC m=+5622.193294714" Nov 23 16:15:28 crc kubenswrapper[5050]: I1123 16:15:28.549616 5050 scope.go:117] "RemoveContainer" containerID="131b4bb8956deb8c3bd7bd951e055fca3b66c01a0141c3f85cb0b1152065de00" Nov 23 16:15:28 crc kubenswrapper[5050]: E1123 16:15:28.549956 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 16:15:29 crc kubenswrapper[5050]: I1123 16:15:29.024992 5050 generic.go:334] "Generic (PLEG): container finished" podID="f35435e8-7b27-4b47-83e1-cb469f7bd7b0" containerID="f3997e49e5ca1823e5a10cfd87f46f57645ed93ae81ab4e356df1d9c093d5206" exitCode=0 Nov 23 16:15:29 crc kubenswrapper[5050]: I1123 16:15:29.025085 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-nvm9x" 
event={"ID":"f35435e8-7b27-4b47-83e1-cb469f7bd7b0","Type":"ContainerDied","Data":"f3997e49e5ca1823e5a10cfd87f46f57645ed93ae81ab4e356df1d9c093d5206"} Nov 23 16:15:30 crc kubenswrapper[5050]: I1123 16:15:30.518929 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-nvm9x" Nov 23 16:15:30 crc kubenswrapper[5050]: I1123 16:15:30.616759 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f35435e8-7b27-4b47-83e1-cb469f7bd7b0-config-data\") pod \"f35435e8-7b27-4b47-83e1-cb469f7bd7b0\" (UID: \"f35435e8-7b27-4b47-83e1-cb469f7bd7b0\") " Nov 23 16:15:30 crc kubenswrapper[5050]: I1123 16:15:30.616816 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f35435e8-7b27-4b47-83e1-cb469f7bd7b0-scripts\") pod \"f35435e8-7b27-4b47-83e1-cb469f7bd7b0\" (UID: \"f35435e8-7b27-4b47-83e1-cb469f7bd7b0\") " Nov 23 16:15:30 crc kubenswrapper[5050]: I1123 16:15:30.616882 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/f35435e8-7b27-4b47-83e1-cb469f7bd7b0-db-sync-config-data\") pod \"f35435e8-7b27-4b47-83e1-cb469f7bd7b0\" (UID: \"f35435e8-7b27-4b47-83e1-cb469f7bd7b0\") " Nov 23 16:15:30 crc kubenswrapper[5050]: I1123 16:15:30.616900 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/f35435e8-7b27-4b47-83e1-cb469f7bd7b0-etc-machine-id\") pod \"f35435e8-7b27-4b47-83e1-cb469f7bd7b0\" (UID: \"f35435e8-7b27-4b47-83e1-cb469f7bd7b0\") " Nov 23 16:15:30 crc kubenswrapper[5050]: I1123 16:15:30.616922 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rb8lm\" (UniqueName: \"kubernetes.io/projected/f35435e8-7b27-4b47-83e1-cb469f7bd7b0-kube-api-access-rb8lm\") pod \"f35435e8-7b27-4b47-83e1-cb469f7bd7b0\" (UID: \"f35435e8-7b27-4b47-83e1-cb469f7bd7b0\") " Nov 23 16:15:30 crc kubenswrapper[5050]: I1123 16:15:30.616995 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f35435e8-7b27-4b47-83e1-cb469f7bd7b0-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "f35435e8-7b27-4b47-83e1-cb469f7bd7b0" (UID: "f35435e8-7b27-4b47-83e1-cb469f7bd7b0"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 23 16:15:30 crc kubenswrapper[5050]: I1123 16:15:30.617696 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f35435e8-7b27-4b47-83e1-cb469f7bd7b0-combined-ca-bundle\") pod \"f35435e8-7b27-4b47-83e1-cb469f7bd7b0\" (UID: \"f35435e8-7b27-4b47-83e1-cb469f7bd7b0\") " Nov 23 16:15:30 crc kubenswrapper[5050]: I1123 16:15:30.618482 5050 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/f35435e8-7b27-4b47-83e1-cb469f7bd7b0-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 23 16:15:30 crc kubenswrapper[5050]: I1123 16:15:30.624362 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f35435e8-7b27-4b47-83e1-cb469f7bd7b0-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "f35435e8-7b27-4b47-83e1-cb469f7bd7b0" (UID: "f35435e8-7b27-4b47-83e1-cb469f7bd7b0"). 
InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:15:30 crc kubenswrapper[5050]: I1123 16:15:30.625017 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f35435e8-7b27-4b47-83e1-cb469f7bd7b0-kube-api-access-rb8lm" (OuterVolumeSpecName: "kube-api-access-rb8lm") pod "f35435e8-7b27-4b47-83e1-cb469f7bd7b0" (UID: "f35435e8-7b27-4b47-83e1-cb469f7bd7b0"). InnerVolumeSpecName "kube-api-access-rb8lm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 16:15:30 crc kubenswrapper[5050]: I1123 16:15:30.627690 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f35435e8-7b27-4b47-83e1-cb469f7bd7b0-scripts" (OuterVolumeSpecName: "scripts") pod "f35435e8-7b27-4b47-83e1-cb469f7bd7b0" (UID: "f35435e8-7b27-4b47-83e1-cb469f7bd7b0"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:15:30 crc kubenswrapper[5050]: I1123 16:15:30.659828 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f35435e8-7b27-4b47-83e1-cb469f7bd7b0-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f35435e8-7b27-4b47-83e1-cb469f7bd7b0" (UID: "f35435e8-7b27-4b47-83e1-cb469f7bd7b0"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:15:30 crc kubenswrapper[5050]: I1123 16:15:30.681391 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f35435e8-7b27-4b47-83e1-cb469f7bd7b0-config-data" (OuterVolumeSpecName: "config-data") pod "f35435e8-7b27-4b47-83e1-cb469f7bd7b0" (UID: "f35435e8-7b27-4b47-83e1-cb469f7bd7b0"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:15:30 crc kubenswrapper[5050]: I1123 16:15:30.720369 5050 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f35435e8-7b27-4b47-83e1-cb469f7bd7b0-config-data\") on node \"crc\" DevicePath \"\"" Nov 23 16:15:30 crc kubenswrapper[5050]: I1123 16:15:30.720414 5050 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f35435e8-7b27-4b47-83e1-cb469f7bd7b0-scripts\") on node \"crc\" DevicePath \"\"" Nov 23 16:15:30 crc kubenswrapper[5050]: I1123 16:15:30.720427 5050 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/f35435e8-7b27-4b47-83e1-cb469f7bd7b0-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 23 16:15:30 crc kubenswrapper[5050]: I1123 16:15:30.720457 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rb8lm\" (UniqueName: \"kubernetes.io/projected/f35435e8-7b27-4b47-83e1-cb469f7bd7b0-kube-api-access-rb8lm\") on node \"crc\" DevicePath \"\"" Nov 23 16:15:30 crc kubenswrapper[5050]: I1123 16:15:30.720471 5050 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f35435e8-7b27-4b47-83e1-cb469f7bd7b0-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 16:15:31 crc kubenswrapper[5050]: I1123 16:15:31.059266 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-nvm9x" event={"ID":"f35435e8-7b27-4b47-83e1-cb469f7bd7b0","Type":"ContainerDied","Data":"6afd950a35018d7cb9331e2f86f927d33da1864838d9f1495ec4ab4a496be91d"} Nov 23 16:15:31 crc kubenswrapper[5050]: I1123 16:15:31.059360 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6afd950a35018d7cb9331e2f86f927d33da1864838d9f1495ec4ab4a496be91d" Nov 23 16:15:31 crc kubenswrapper[5050]: I1123 16:15:31.059482 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-nvm9x" Nov 23 16:15:31 crc kubenswrapper[5050]: I1123 16:15:31.502614 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-95c9fdf89-5ptdz"] Nov 23 16:15:31 crc kubenswrapper[5050]: E1123 16:15:31.503164 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f35435e8-7b27-4b47-83e1-cb469f7bd7b0" containerName="cinder-db-sync" Nov 23 16:15:31 crc kubenswrapper[5050]: I1123 16:15:31.503178 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="f35435e8-7b27-4b47-83e1-cb469f7bd7b0" containerName="cinder-db-sync" Nov 23 16:15:31 crc kubenswrapper[5050]: I1123 16:15:31.503403 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="f35435e8-7b27-4b47-83e1-cb469f7bd7b0" containerName="cinder-db-sync" Nov 23 16:15:31 crc kubenswrapper[5050]: I1123 16:15:31.504725 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-95c9fdf89-5ptdz" Nov 23 16:15:31 crc kubenswrapper[5050]: I1123 16:15:31.530791 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-95c9fdf89-5ptdz"] Nov 23 16:15:31 crc kubenswrapper[5050]: I1123 16:15:31.645095 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bca35152-4426-4f06-8268-ad2f82241949-dns-svc\") pod \"dnsmasq-dns-95c9fdf89-5ptdz\" (UID: \"bca35152-4426-4f06-8268-ad2f82241949\") " pod="openstack/dnsmasq-dns-95c9fdf89-5ptdz" Nov 23 16:15:31 crc kubenswrapper[5050]: I1123 16:15:31.645142 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bca35152-4426-4f06-8268-ad2f82241949-ovsdbserver-sb\") pod \"dnsmasq-dns-95c9fdf89-5ptdz\" (UID: \"bca35152-4426-4f06-8268-ad2f82241949\") " pod="openstack/dnsmasq-dns-95c9fdf89-5ptdz" Nov 23 16:15:31 crc kubenswrapper[5050]: I1123 16:15:31.645166 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bca35152-4426-4f06-8268-ad2f82241949-config\") pod \"dnsmasq-dns-95c9fdf89-5ptdz\" (UID: \"bca35152-4426-4f06-8268-ad2f82241949\") " pod="openstack/dnsmasq-dns-95c9fdf89-5ptdz" Nov 23 16:15:31 crc kubenswrapper[5050]: I1123 16:15:31.645231 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-228j8\" (UniqueName: \"kubernetes.io/projected/bca35152-4426-4f06-8268-ad2f82241949-kube-api-access-228j8\") pod \"dnsmasq-dns-95c9fdf89-5ptdz\" (UID: \"bca35152-4426-4f06-8268-ad2f82241949\") " pod="openstack/dnsmasq-dns-95c9fdf89-5ptdz" Nov 23 16:15:31 crc kubenswrapper[5050]: I1123 16:15:31.645255 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bca35152-4426-4f06-8268-ad2f82241949-ovsdbserver-nb\") pod \"dnsmasq-dns-95c9fdf89-5ptdz\" (UID: \"bca35152-4426-4f06-8268-ad2f82241949\") " pod="openstack/dnsmasq-dns-95c9fdf89-5ptdz" Nov 23 16:15:31 crc kubenswrapper[5050]: I1123 16:15:31.656148 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Nov 23 16:15:31 crc kubenswrapper[5050]: I1123 16:15:31.657774 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Nov 23 16:15:31 crc kubenswrapper[5050]: I1123 16:15:31.671876 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 23 16:15:31 crc kubenswrapper[5050]: I1123 16:15:31.711854 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-4vjnp" Nov 23 16:15:31 crc kubenswrapper[5050]: I1123 16:15:31.711906 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Nov 23 16:15:31 crc kubenswrapper[5050]: I1123 16:15:31.712089 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Nov 23 16:15:31 crc kubenswrapper[5050]: I1123 16:15:31.712729 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Nov 23 16:15:31 crc kubenswrapper[5050]: I1123 16:15:31.758485 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8a75ff78-417c-4487-9b35-c9dbae5dcca6-config-data-custom\") pod \"cinder-api-0\" (UID: \"8a75ff78-417c-4487-9b35-c9dbae5dcca6\") " pod="openstack/cinder-api-0" Nov 23 16:15:31 crc kubenswrapper[5050]: I1123 16:15:31.758662 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8a75ff78-417c-4487-9b35-c9dbae5dcca6-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"8a75ff78-417c-4487-9b35-c9dbae5dcca6\") " pod="openstack/cinder-api-0" Nov 23 16:15:31 crc kubenswrapper[5050]: I1123 16:15:31.758740 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8a75ff78-417c-4487-9b35-c9dbae5dcca6-scripts\") pod \"cinder-api-0\" (UID: \"8a75ff78-417c-4487-9b35-c9dbae5dcca6\") " pod="openstack/cinder-api-0" Nov 23 16:15:31 crc kubenswrapper[5050]: I1123 16:15:31.758769 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8a75ff78-417c-4487-9b35-c9dbae5dcca6-config-data\") pod \"cinder-api-0\" (UID: \"8a75ff78-417c-4487-9b35-c9dbae5dcca6\") " pod="openstack/cinder-api-0" Nov 23 16:15:31 crc kubenswrapper[5050]: I1123 16:15:31.758845 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bca35152-4426-4f06-8268-ad2f82241949-dns-svc\") pod \"dnsmasq-dns-95c9fdf89-5ptdz\" (UID: \"bca35152-4426-4f06-8268-ad2f82241949\") " pod="openstack/dnsmasq-dns-95c9fdf89-5ptdz" Nov 23 16:15:31 crc kubenswrapper[5050]: I1123 16:15:31.758878 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8a75ff78-417c-4487-9b35-c9dbae5dcca6-logs\") pod \"cinder-api-0\" (UID: \"8a75ff78-417c-4487-9b35-c9dbae5dcca6\") " pod="openstack/cinder-api-0" Nov 23 16:15:31 crc kubenswrapper[5050]: I1123 16:15:31.758900 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bca35152-4426-4f06-8268-ad2f82241949-ovsdbserver-sb\") pod \"dnsmasq-dns-95c9fdf89-5ptdz\" (UID: \"bca35152-4426-4f06-8268-ad2f82241949\") " pod="openstack/dnsmasq-dns-95c9fdf89-5ptdz" Nov 23 16:15:31 crc kubenswrapper[5050]: I1123 16:15:31.758970 5050 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bca35152-4426-4f06-8268-ad2f82241949-config\") pod \"dnsmasq-dns-95c9fdf89-5ptdz\" (UID: \"bca35152-4426-4f06-8268-ad2f82241949\") " pod="openstack/dnsmasq-dns-95c9fdf89-5ptdz" Nov 23 16:15:31 crc kubenswrapper[5050]: I1123 16:15:31.759255 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/8a75ff78-417c-4487-9b35-c9dbae5dcca6-etc-machine-id\") pod \"cinder-api-0\" (UID: \"8a75ff78-417c-4487-9b35-c9dbae5dcca6\") " pod="openstack/cinder-api-0" Nov 23 16:15:31 crc kubenswrapper[5050]: I1123 16:15:31.759361 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-228j8\" (UniqueName: \"kubernetes.io/projected/bca35152-4426-4f06-8268-ad2f82241949-kube-api-access-228j8\") pod \"dnsmasq-dns-95c9fdf89-5ptdz\" (UID: \"bca35152-4426-4f06-8268-ad2f82241949\") " pod="openstack/dnsmasq-dns-95c9fdf89-5ptdz" Nov 23 16:15:31 crc kubenswrapper[5050]: I1123 16:15:31.759393 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bca35152-4426-4f06-8268-ad2f82241949-ovsdbserver-nb\") pod \"dnsmasq-dns-95c9fdf89-5ptdz\" (UID: \"bca35152-4426-4f06-8268-ad2f82241949\") " pod="openstack/dnsmasq-dns-95c9fdf89-5ptdz" Nov 23 16:15:31 crc kubenswrapper[5050]: I1123 16:15:31.759435 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b8nh7\" (UniqueName: \"kubernetes.io/projected/8a75ff78-417c-4487-9b35-c9dbae5dcca6-kube-api-access-b8nh7\") pod \"cinder-api-0\" (UID: \"8a75ff78-417c-4487-9b35-c9dbae5dcca6\") " pod="openstack/cinder-api-0" Nov 23 16:15:31 crc kubenswrapper[5050]: I1123 16:15:31.760494 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bca35152-4426-4f06-8268-ad2f82241949-dns-svc\") pod \"dnsmasq-dns-95c9fdf89-5ptdz\" (UID: \"bca35152-4426-4f06-8268-ad2f82241949\") " pod="openstack/dnsmasq-dns-95c9fdf89-5ptdz" Nov 23 16:15:31 crc kubenswrapper[5050]: I1123 16:15:31.760695 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bca35152-4426-4f06-8268-ad2f82241949-ovsdbserver-sb\") pod \"dnsmasq-dns-95c9fdf89-5ptdz\" (UID: \"bca35152-4426-4f06-8268-ad2f82241949\") " pod="openstack/dnsmasq-dns-95c9fdf89-5ptdz" Nov 23 16:15:31 crc kubenswrapper[5050]: I1123 16:15:31.761085 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bca35152-4426-4f06-8268-ad2f82241949-ovsdbserver-nb\") pod \"dnsmasq-dns-95c9fdf89-5ptdz\" (UID: \"bca35152-4426-4f06-8268-ad2f82241949\") " pod="openstack/dnsmasq-dns-95c9fdf89-5ptdz" Nov 23 16:15:31 crc kubenswrapper[5050]: I1123 16:15:31.763264 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bca35152-4426-4f06-8268-ad2f82241949-config\") pod \"dnsmasq-dns-95c9fdf89-5ptdz\" (UID: \"bca35152-4426-4f06-8268-ad2f82241949\") " pod="openstack/dnsmasq-dns-95c9fdf89-5ptdz" Nov 23 16:15:31 crc kubenswrapper[5050]: I1123 16:15:31.784532 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-228j8\" (UniqueName: 
\"kubernetes.io/projected/bca35152-4426-4f06-8268-ad2f82241949-kube-api-access-228j8\") pod \"dnsmasq-dns-95c9fdf89-5ptdz\" (UID: \"bca35152-4426-4f06-8268-ad2f82241949\") " pod="openstack/dnsmasq-dns-95c9fdf89-5ptdz" Nov 23 16:15:31 crc kubenswrapper[5050]: I1123 16:15:31.840985 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-95c9fdf89-5ptdz" Nov 23 16:15:31 crc kubenswrapper[5050]: I1123 16:15:31.861561 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/8a75ff78-417c-4487-9b35-c9dbae5dcca6-etc-machine-id\") pod \"cinder-api-0\" (UID: \"8a75ff78-417c-4487-9b35-c9dbae5dcca6\") " pod="openstack/cinder-api-0" Nov 23 16:15:31 crc kubenswrapper[5050]: I1123 16:15:31.861673 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b8nh7\" (UniqueName: \"kubernetes.io/projected/8a75ff78-417c-4487-9b35-c9dbae5dcca6-kube-api-access-b8nh7\") pod \"cinder-api-0\" (UID: \"8a75ff78-417c-4487-9b35-c9dbae5dcca6\") " pod="openstack/cinder-api-0" Nov 23 16:15:31 crc kubenswrapper[5050]: I1123 16:15:31.861721 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8a75ff78-417c-4487-9b35-c9dbae5dcca6-config-data-custom\") pod \"cinder-api-0\" (UID: \"8a75ff78-417c-4487-9b35-c9dbae5dcca6\") " pod="openstack/cinder-api-0" Nov 23 16:15:31 crc kubenswrapper[5050]: I1123 16:15:31.861762 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8a75ff78-417c-4487-9b35-c9dbae5dcca6-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"8a75ff78-417c-4487-9b35-c9dbae5dcca6\") " pod="openstack/cinder-api-0" Nov 23 16:15:31 crc kubenswrapper[5050]: I1123 16:15:31.861787 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8a75ff78-417c-4487-9b35-c9dbae5dcca6-scripts\") pod \"cinder-api-0\" (UID: \"8a75ff78-417c-4487-9b35-c9dbae5dcca6\") " pod="openstack/cinder-api-0" Nov 23 16:15:31 crc kubenswrapper[5050]: I1123 16:15:31.861810 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8a75ff78-417c-4487-9b35-c9dbae5dcca6-config-data\") pod \"cinder-api-0\" (UID: \"8a75ff78-417c-4487-9b35-c9dbae5dcca6\") " pod="openstack/cinder-api-0" Nov 23 16:15:31 crc kubenswrapper[5050]: I1123 16:15:31.861835 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8a75ff78-417c-4487-9b35-c9dbae5dcca6-logs\") pod \"cinder-api-0\" (UID: \"8a75ff78-417c-4487-9b35-c9dbae5dcca6\") " pod="openstack/cinder-api-0" Nov 23 16:15:31 crc kubenswrapper[5050]: I1123 16:15:31.862263 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8a75ff78-417c-4487-9b35-c9dbae5dcca6-logs\") pod \"cinder-api-0\" (UID: \"8a75ff78-417c-4487-9b35-c9dbae5dcca6\") " pod="openstack/cinder-api-0" Nov 23 16:15:31 crc kubenswrapper[5050]: I1123 16:15:31.863551 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/8a75ff78-417c-4487-9b35-c9dbae5dcca6-etc-machine-id\") pod \"cinder-api-0\" (UID: \"8a75ff78-417c-4487-9b35-c9dbae5dcca6\") " 
pod="openstack/cinder-api-0" Nov 23 16:15:31 crc kubenswrapper[5050]: I1123 16:15:31.877438 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8a75ff78-417c-4487-9b35-c9dbae5dcca6-config-data-custom\") pod \"cinder-api-0\" (UID: \"8a75ff78-417c-4487-9b35-c9dbae5dcca6\") " pod="openstack/cinder-api-0" Nov 23 16:15:31 crc kubenswrapper[5050]: I1123 16:15:31.877872 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8a75ff78-417c-4487-9b35-c9dbae5dcca6-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"8a75ff78-417c-4487-9b35-c9dbae5dcca6\") " pod="openstack/cinder-api-0" Nov 23 16:15:31 crc kubenswrapper[5050]: I1123 16:15:31.879012 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8a75ff78-417c-4487-9b35-c9dbae5dcca6-config-data\") pod \"cinder-api-0\" (UID: \"8a75ff78-417c-4487-9b35-c9dbae5dcca6\") " pod="openstack/cinder-api-0" Nov 23 16:15:31 crc kubenswrapper[5050]: I1123 16:15:31.883783 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8a75ff78-417c-4487-9b35-c9dbae5dcca6-scripts\") pod \"cinder-api-0\" (UID: \"8a75ff78-417c-4487-9b35-c9dbae5dcca6\") " pod="openstack/cinder-api-0" Nov 23 16:15:31 crc kubenswrapper[5050]: I1123 16:15:31.889025 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b8nh7\" (UniqueName: \"kubernetes.io/projected/8a75ff78-417c-4487-9b35-c9dbae5dcca6-kube-api-access-b8nh7\") pod \"cinder-api-0\" (UID: \"8a75ff78-417c-4487-9b35-c9dbae5dcca6\") " pod="openstack/cinder-api-0" Nov 23 16:15:32 crc kubenswrapper[5050]: I1123 16:15:32.045108 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Nov 23 16:15:32 crc kubenswrapper[5050]: I1123 16:15:32.413365 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-95c9fdf89-5ptdz"] Nov 23 16:15:32 crc kubenswrapper[5050]: I1123 16:15:32.617971 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 23 16:15:33 crc kubenswrapper[5050]: I1123 16:15:33.083751 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"8a75ff78-417c-4487-9b35-c9dbae5dcca6","Type":"ContainerStarted","Data":"c5fe240b8f72ea1e3996bf54c8de480ca3019b37402e0d9a7ee205c902475f20"} Nov 23 16:15:33 crc kubenswrapper[5050]: I1123 16:15:33.088017 5050 generic.go:334] "Generic (PLEG): container finished" podID="bca35152-4426-4f06-8268-ad2f82241949" containerID="154251a98360974e93141cbc8f1fbfd995d7e086316dbb6417ba7a86069fb28d" exitCode=0 Nov 23 16:15:33 crc kubenswrapper[5050]: I1123 16:15:33.088088 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-95c9fdf89-5ptdz" event={"ID":"bca35152-4426-4f06-8268-ad2f82241949","Type":"ContainerDied","Data":"154251a98360974e93141cbc8f1fbfd995d7e086316dbb6417ba7a86069fb28d"} Nov 23 16:15:33 crc kubenswrapper[5050]: I1123 16:15:33.088125 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-95c9fdf89-5ptdz" event={"ID":"bca35152-4426-4f06-8268-ad2f82241949","Type":"ContainerStarted","Data":"b932666f1e8a763012b55798f43dbf28d215814218fcaf5feb8cc830b31270af"} Nov 23 16:15:34 crc kubenswrapper[5050]: I1123 16:15:34.111905 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"8a75ff78-417c-4487-9b35-c9dbae5dcca6","Type":"ContainerStarted","Data":"6e8f1df424a95618ab5da6c8694c5e37743478a264f86c301cf216d0ac7756ec"} Nov 23 16:15:34 crc kubenswrapper[5050]: I1123 16:15:34.118671 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-95c9fdf89-5ptdz" event={"ID":"bca35152-4426-4f06-8268-ad2f82241949","Type":"ContainerStarted","Data":"4cf53a2831965d9d530323389471986b18ffdb18110228100065f2d45a3b4123"} Nov 23 16:15:34 crc kubenswrapper[5050]: I1123 16:15:34.119652 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-95c9fdf89-5ptdz" Nov 23 16:15:34 crc kubenswrapper[5050]: I1123 16:15:34.145705 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-95c9fdf89-5ptdz" podStartSLOduration=3.145680709 podStartE2EDuration="3.145680709s" podCreationTimestamp="2025-11-23 16:15:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 16:15:34.143906139 +0000 UTC m=+5629.310902644" watchObservedRunningTime="2025-11-23 16:15:34.145680709 +0000 UTC m=+5629.312677204" Nov 23 16:15:34 crc kubenswrapper[5050]: I1123 16:15:34.502879 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 23 16:15:34 crc kubenswrapper[5050]: I1123 16:15:34.503225 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="18561cc9-8520-4621-a8c5-4efd4196a100" containerName="nova-scheduler-scheduler" containerID="cri-o://8b23181638ed02d1f1292a69ecd2c43ef106cacf9c533b6269a8579572a11477" gracePeriod=30 Nov 23 16:15:34 crc kubenswrapper[5050]: I1123 16:15:34.524756 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openstack/nova-cell1-novncproxy-0"] Nov 23 16:15:34 crc kubenswrapper[5050]: I1123 16:15:34.525067 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-novncproxy-0" podUID="b67f9b82-2a40-46a6-af4f-1439ba842a82" containerName="nova-cell1-novncproxy-novncproxy" containerID="cri-o://4a2b66faf2c54e742a5a9153f39b99cc972ff08e36dec093cf99963511f79456" gracePeriod=30 Nov 23 16:15:34 crc kubenswrapper[5050]: I1123 16:15:34.542928 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 23 16:15:34 crc kubenswrapper[5050]: I1123 16:15:34.543288 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell0-conductor-0" podUID="bddd9398-a4d1-45ef-b671-8d370afc13a3" containerName="nova-cell0-conductor-conductor" containerID="cri-o://ae548bb41956ddfb6a2967b0fae62592d8080c28444f96a3eb8e2bfe0a16faa0" gracePeriod=30 Nov 23 16:15:34 crc kubenswrapper[5050]: I1123 16:15:34.562265 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 23 16:15:34 crc kubenswrapper[5050]: I1123 16:15:34.562616 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="5954b03a-534a-435b-9106-6be013bcecb1" containerName="nova-metadata-log" containerID="cri-o://53b3986dc513c9d1d812b25c41e5a8c50ac02790e4aa17daeeafab7617edf16d" gracePeriod=30 Nov 23 16:15:34 crc kubenswrapper[5050]: I1123 16:15:34.562822 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="5954b03a-534a-435b-9106-6be013bcecb1" containerName="nova-metadata-metadata" containerID="cri-o://5eb68b9a5c49d7f12e9db73e6ee87d20414540759fb2229375f7093e6a977ce6" gracePeriod=30 Nov 23 16:15:34 crc kubenswrapper[5050]: I1123 16:15:34.580414 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 23 16:15:34 crc kubenswrapper[5050]: I1123 16:15:34.580770 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="b43a331c-cdd6-41e1-b962-ce6709d1d200" containerName="nova-api-log" containerID="cri-o://30e11a93d6fdc77b1c2a02e908870e7098aaf58f6b99802562946afc4bfd9c26" gracePeriod=30 Nov 23 16:15:34 crc kubenswrapper[5050]: I1123 16:15:34.581638 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="b43a331c-cdd6-41e1-b962-ce6709d1d200" containerName="nova-api-api" containerID="cri-o://c8d373669db01fc469a20d7444ad7a4a2c3381438472cad0965a20c8cd5d7ce2" gracePeriod=30 Nov 23 16:15:35 crc kubenswrapper[5050]: I1123 16:15:35.139857 5050 generic.go:334] "Generic (PLEG): container finished" podID="b43a331c-cdd6-41e1-b962-ce6709d1d200" containerID="30e11a93d6fdc77b1c2a02e908870e7098aaf58f6b99802562946afc4bfd9c26" exitCode=143 Nov 23 16:15:35 crc kubenswrapper[5050]: I1123 16:15:35.140328 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"b43a331c-cdd6-41e1-b962-ce6709d1d200","Type":"ContainerDied","Data":"30e11a93d6fdc77b1c2a02e908870e7098aaf58f6b99802562946afc4bfd9c26"} Nov 23 16:15:35 crc kubenswrapper[5050]: I1123 16:15:35.145303 5050 generic.go:334] "Generic (PLEG): container finished" podID="b67f9b82-2a40-46a6-af4f-1439ba842a82" containerID="4a2b66faf2c54e742a5a9153f39b99cc972ff08e36dec093cf99963511f79456" exitCode=0 Nov 23 16:15:35 crc kubenswrapper[5050]: I1123 16:15:35.145521 5050 kubelet.go:2453] "SyncLoop (PLEG): event 
for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"b67f9b82-2a40-46a6-af4f-1439ba842a82","Type":"ContainerDied","Data":"4a2b66faf2c54e742a5a9153f39b99cc972ff08e36dec093cf99963511f79456"} Nov 23 16:15:35 crc kubenswrapper[5050]: I1123 16:15:35.149495 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"8a75ff78-417c-4487-9b35-c9dbae5dcca6","Type":"ContainerStarted","Data":"b0a894b01da730960d9113a2ac70f24be262176d6d594e532be7ac3dc95f7c77"} Nov 23 16:15:35 crc kubenswrapper[5050]: I1123 16:15:35.149682 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Nov 23 16:15:35 crc kubenswrapper[5050]: I1123 16:15:35.164292 5050 generic.go:334] "Generic (PLEG): container finished" podID="5954b03a-534a-435b-9106-6be013bcecb1" containerID="53b3986dc513c9d1d812b25c41e5a8c50ac02790e4aa17daeeafab7617edf16d" exitCode=143 Nov 23 16:15:35 crc kubenswrapper[5050]: I1123 16:15:35.164304 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"5954b03a-534a-435b-9106-6be013bcecb1","Type":"ContainerDied","Data":"53b3986dc513c9d1d812b25c41e5a8c50ac02790e4aa17daeeafab7617edf16d"} Nov 23 16:15:35 crc kubenswrapper[5050]: I1123 16:15:35.179789 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=4.179761131 podStartE2EDuration="4.179761131s" podCreationTimestamp="2025-11-23 16:15:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 16:15:35.17829559 +0000 UTC m=+5630.345292075" watchObservedRunningTime="2025-11-23 16:15:35.179761131 +0000 UTC m=+5630.346757616" Nov 23 16:15:35 crc kubenswrapper[5050]: I1123 16:15:35.486345 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 23 16:15:35 crc kubenswrapper[5050]: I1123 16:15:35.546177 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dx6zz\" (UniqueName: \"kubernetes.io/projected/b67f9b82-2a40-46a6-af4f-1439ba842a82-kube-api-access-dx6zz\") pod \"b67f9b82-2a40-46a6-af4f-1439ba842a82\" (UID: \"b67f9b82-2a40-46a6-af4f-1439ba842a82\") " Nov 23 16:15:35 crc kubenswrapper[5050]: I1123 16:15:35.546755 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b67f9b82-2a40-46a6-af4f-1439ba842a82-combined-ca-bundle\") pod \"b67f9b82-2a40-46a6-af4f-1439ba842a82\" (UID: \"b67f9b82-2a40-46a6-af4f-1439ba842a82\") " Nov 23 16:15:35 crc kubenswrapper[5050]: I1123 16:15:35.546839 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b67f9b82-2a40-46a6-af4f-1439ba842a82-config-data\") pod \"b67f9b82-2a40-46a6-af4f-1439ba842a82\" (UID: \"b67f9b82-2a40-46a6-af4f-1439ba842a82\") " Nov 23 16:15:35 crc kubenswrapper[5050]: I1123 16:15:35.577477 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b67f9b82-2a40-46a6-af4f-1439ba842a82-kube-api-access-dx6zz" (OuterVolumeSpecName: "kube-api-access-dx6zz") pod "b67f9b82-2a40-46a6-af4f-1439ba842a82" (UID: "b67f9b82-2a40-46a6-af4f-1439ba842a82"). InnerVolumeSpecName "kube-api-access-dx6zz". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 16:15:35 crc kubenswrapper[5050]: I1123 16:15:35.604319 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b67f9b82-2a40-46a6-af4f-1439ba842a82-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b67f9b82-2a40-46a6-af4f-1439ba842a82" (UID: "b67f9b82-2a40-46a6-af4f-1439ba842a82"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:15:35 crc kubenswrapper[5050]: I1123 16:15:35.604384 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b67f9b82-2a40-46a6-af4f-1439ba842a82-config-data" (OuterVolumeSpecName: "config-data") pod "b67f9b82-2a40-46a6-af4f-1439ba842a82" (UID: "b67f9b82-2a40-46a6-af4f-1439ba842a82"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:15:35 crc kubenswrapper[5050]: I1123 16:15:35.650605 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dx6zz\" (UniqueName: \"kubernetes.io/projected/b67f9b82-2a40-46a6-af4f-1439ba842a82-kube-api-access-dx6zz\") on node \"crc\" DevicePath \"\"" Nov 23 16:15:35 crc kubenswrapper[5050]: I1123 16:15:35.650638 5050 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b67f9b82-2a40-46a6-af4f-1439ba842a82-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 16:15:35 crc kubenswrapper[5050]: I1123 16:15:35.650653 5050 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b67f9b82-2a40-46a6-af4f-1439ba842a82-config-data\") on node \"crc\" DevicePath \"\"" Nov 23 16:15:35 crc kubenswrapper[5050]: I1123 16:15:35.874123 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 23 16:15:35 crc kubenswrapper[5050]: I1123 16:15:35.958112 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tbqkf\" (UniqueName: \"kubernetes.io/projected/18561cc9-8520-4621-a8c5-4efd4196a100-kube-api-access-tbqkf\") pod \"18561cc9-8520-4621-a8c5-4efd4196a100\" (UID: \"18561cc9-8520-4621-a8c5-4efd4196a100\") " Nov 23 16:15:35 crc kubenswrapper[5050]: I1123 16:15:35.958174 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/18561cc9-8520-4621-a8c5-4efd4196a100-config-data\") pod \"18561cc9-8520-4621-a8c5-4efd4196a100\" (UID: \"18561cc9-8520-4621-a8c5-4efd4196a100\") " Nov 23 16:15:35 crc kubenswrapper[5050]: I1123 16:15:35.958204 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/18561cc9-8520-4621-a8c5-4efd4196a100-combined-ca-bundle\") pod \"18561cc9-8520-4621-a8c5-4efd4196a100\" (UID: \"18561cc9-8520-4621-a8c5-4efd4196a100\") " Nov 23 16:15:35 crc kubenswrapper[5050]: I1123 16:15:35.962136 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/18561cc9-8520-4621-a8c5-4efd4196a100-kube-api-access-tbqkf" (OuterVolumeSpecName: "kube-api-access-tbqkf") pod "18561cc9-8520-4621-a8c5-4efd4196a100" (UID: "18561cc9-8520-4621-a8c5-4efd4196a100"). InnerVolumeSpecName "kube-api-access-tbqkf". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 16:15:35 crc kubenswrapper[5050]: I1123 16:15:35.984906 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/18561cc9-8520-4621-a8c5-4efd4196a100-config-data" (OuterVolumeSpecName: "config-data") pod "18561cc9-8520-4621-a8c5-4efd4196a100" (UID: "18561cc9-8520-4621-a8c5-4efd4196a100"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:15:35 crc kubenswrapper[5050]: I1123 16:15:35.987069 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/18561cc9-8520-4621-a8c5-4efd4196a100-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "18561cc9-8520-4621-a8c5-4efd4196a100" (UID: "18561cc9-8520-4621-a8c5-4efd4196a100"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:15:36 crc kubenswrapper[5050]: I1123 16:15:36.060457 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tbqkf\" (UniqueName: \"kubernetes.io/projected/18561cc9-8520-4621-a8c5-4efd4196a100-kube-api-access-tbqkf\") on node \"crc\" DevicePath \"\"" Nov 23 16:15:36 crc kubenswrapper[5050]: I1123 16:15:36.060506 5050 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/18561cc9-8520-4621-a8c5-4efd4196a100-config-data\") on node \"crc\" DevicePath \"\"" Nov 23 16:15:36 crc kubenswrapper[5050]: I1123 16:15:36.060517 5050 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/18561cc9-8520-4621-a8c5-4efd4196a100-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 16:15:36 crc kubenswrapper[5050]: I1123 16:15:36.189420 5050 generic.go:334] "Generic (PLEG): container finished" podID="18561cc9-8520-4621-a8c5-4efd4196a100" containerID="8b23181638ed02d1f1292a69ecd2c43ef106cacf9c533b6269a8579572a11477" exitCode=0 Nov 23 16:15:36 crc kubenswrapper[5050]: I1123 16:15:36.189747 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 23 16:15:36 crc kubenswrapper[5050]: I1123 16:15:36.191302 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"18561cc9-8520-4621-a8c5-4efd4196a100","Type":"ContainerDied","Data":"8b23181638ed02d1f1292a69ecd2c43ef106cacf9c533b6269a8579572a11477"} Nov 23 16:15:36 crc kubenswrapper[5050]: I1123 16:15:36.191398 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"18561cc9-8520-4621-a8c5-4efd4196a100","Type":"ContainerDied","Data":"a13026cd01b98ad59f649726bb7609800d6af3fa3f925e5054cc8407c6ba44c9"} Nov 23 16:15:36 crc kubenswrapper[5050]: I1123 16:15:36.191463 5050 scope.go:117] "RemoveContainer" containerID="8b23181638ed02d1f1292a69ecd2c43ef106cacf9c533b6269a8579572a11477" Nov 23 16:15:36 crc kubenswrapper[5050]: I1123 16:15:36.244004 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0"
Nov 23 16:15:36 crc kubenswrapper[5050]: I1123 16:15:36.244082 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"b67f9b82-2a40-46a6-af4f-1439ba842a82","Type":"ContainerDied","Data":"dc4dcc025151a7e51a2cf78eb1886c71d14779f1d7da3c7e65d9b8d09102dc4a"}
Nov 23 16:15:36 crc kubenswrapper[5050]: I1123 16:15:36.262867 5050 scope.go:117] "RemoveContainer" containerID="8b23181638ed02d1f1292a69ecd2c43ef106cacf9c533b6269a8579572a11477"
Nov 23 16:15:36 crc kubenswrapper[5050]: E1123 16:15:36.266612 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8b23181638ed02d1f1292a69ecd2c43ef106cacf9c533b6269a8579572a11477\": container with ID starting with 8b23181638ed02d1f1292a69ecd2c43ef106cacf9c533b6269a8579572a11477 not found: ID does not exist" containerID="8b23181638ed02d1f1292a69ecd2c43ef106cacf9c533b6269a8579572a11477"
Nov 23 16:15:36 crc kubenswrapper[5050]: I1123 16:15:36.266675 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8b23181638ed02d1f1292a69ecd2c43ef106cacf9c533b6269a8579572a11477"} err="failed to get container status \"8b23181638ed02d1f1292a69ecd2c43ef106cacf9c533b6269a8579572a11477\": rpc error: code = NotFound desc = could not find container \"8b23181638ed02d1f1292a69ecd2c43ef106cacf9c533b6269a8579572a11477\": container with ID starting with 8b23181638ed02d1f1292a69ecd2c43ef106cacf9c533b6269a8579572a11477 not found: ID does not exist"
Nov 23 16:15:36 crc kubenswrapper[5050]: I1123 16:15:36.266719 5050 scope.go:117] "RemoveContainer" containerID="4a2b66faf2c54e742a5a9153f39b99cc972ff08e36dec093cf99963511f79456"
Nov 23 16:15:36 crc kubenswrapper[5050]: I1123 16:15:36.291981 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"]
Nov 23 16:15:36 crc kubenswrapper[5050]: I1123 16:15:36.307814 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"]
Nov 23 16:15:36 crc kubenswrapper[5050]: I1123 16:15:36.330110 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Nov 23 16:15:36 crc kubenswrapper[5050]: I1123 16:15:36.350842 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"]
Nov 23 16:15:36 crc kubenswrapper[5050]: E1123 16:15:36.352826 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="18561cc9-8520-4621-a8c5-4efd4196a100" containerName="nova-scheduler-scheduler"
Nov 23 16:15:36 crc kubenswrapper[5050]: I1123 16:15:36.352964 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="18561cc9-8520-4621-a8c5-4efd4196a100" containerName="nova-scheduler-scheduler"
Nov 23 16:15:36 crc kubenswrapper[5050]: E1123 16:15:36.353038 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b67f9b82-2a40-46a6-af4f-1439ba842a82" containerName="nova-cell1-novncproxy-novncproxy"
Nov 23 16:15:36 crc kubenswrapper[5050]: I1123 16:15:36.353098 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="b67f9b82-2a40-46a6-af4f-1439ba842a82" containerName="nova-cell1-novncproxy-novncproxy"
Nov 23 16:15:36 crc kubenswrapper[5050]: I1123 16:15:36.353393 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="18561cc9-8520-4621-a8c5-4efd4196a100" containerName="nova-scheduler-scheduler"
Nov 23 16:15:36 crc kubenswrapper[5050]: I1123 16:15:36.353479 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="b67f9b82-2a40-46a6-af4f-1439ba842a82" containerName="nova-cell1-novncproxy-novncproxy"
Nov 23 16:15:36 crc kubenswrapper[5050]: I1123 16:15:36.354482 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Nov 23 16:15:36 crc kubenswrapper[5050]: I1123 16:15:36.357638 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data"
Nov 23 16:15:36 crc kubenswrapper[5050]: I1123 16:15:36.380320 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Nov 23 16:15:36 crc kubenswrapper[5050]: I1123 16:15:36.401527 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"]
Nov 23 16:15:36 crc kubenswrapper[5050]: I1123 16:15:36.410815 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Nov 23 16:15:36 crc kubenswrapper[5050]: I1123 16:15:36.413506 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0"
Nov 23 16:15:36 crc kubenswrapper[5050]: I1123 16:15:36.421937 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data"
Nov 23 16:15:36 crc kubenswrapper[5050]: I1123 16:15:36.429631 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Nov 23 16:15:36 crc kubenswrapper[5050]: I1123 16:15:36.472817 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l4xkr\" (UniqueName: \"kubernetes.io/projected/af6314fe-5ec9-4fbe-a1fb-dc2c4adfa1fa-kube-api-access-l4xkr\") pod \"nova-cell1-novncproxy-0\" (UID: \"af6314fe-5ec9-4fbe-a1fb-dc2c4adfa1fa\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 23 16:15:36 crc kubenswrapper[5050]: I1123 16:15:36.473119 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/af6314fe-5ec9-4fbe-a1fb-dc2c4adfa1fa-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"af6314fe-5ec9-4fbe-a1fb-dc2c4adfa1fa\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 23 16:15:36 crc kubenswrapper[5050]: I1123 16:15:36.473294 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4n5vd\" (UniqueName: \"kubernetes.io/projected/ddc00b7c-3525-45d6-863f-20e9e26ff02f-kube-api-access-4n5vd\") pod \"nova-scheduler-0\" (UID: \"ddc00b7c-3525-45d6-863f-20e9e26ff02f\") " pod="openstack/nova-scheduler-0"
Nov 23 16:15:36 crc kubenswrapper[5050]: I1123 16:15:36.473420 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ddc00b7c-3525-45d6-863f-20e9e26ff02f-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"ddc00b7c-3525-45d6-863f-20e9e26ff02f\") " pod="openstack/nova-scheduler-0"
Nov 23 16:15:36 crc kubenswrapper[5050]: I1123 16:15:36.473677 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ddc00b7c-3525-45d6-863f-20e9e26ff02f-config-data\") pod \"nova-scheduler-0\" (UID: \"ddc00b7c-3525-45d6-863f-20e9e26ff02f\") " pod="openstack/nova-scheduler-0"
Nov 23 16:15:36 crc kubenswrapper[5050]: I1123 16:15:36.473787 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/af6314fe-5ec9-4fbe-a1fb-dc2c4adfa1fa-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"af6314fe-5ec9-4fbe-a1fb-dc2c4adfa1fa\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 23 16:15:36 crc kubenswrapper[5050]: I1123 16:15:36.575543 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/af6314fe-5ec9-4fbe-a1fb-dc2c4adfa1fa-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"af6314fe-5ec9-4fbe-a1fb-dc2c4adfa1fa\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 23 16:15:36 crc kubenswrapper[5050]: I1123 16:15:36.575669 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l4xkr\" (UniqueName: \"kubernetes.io/projected/af6314fe-5ec9-4fbe-a1fb-dc2c4adfa1fa-kube-api-access-l4xkr\") pod \"nova-cell1-novncproxy-0\" (UID: \"af6314fe-5ec9-4fbe-a1fb-dc2c4adfa1fa\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 23 16:15:36 crc kubenswrapper[5050]: I1123 16:15:36.575756 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/af6314fe-5ec9-4fbe-a1fb-dc2c4adfa1fa-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"af6314fe-5ec9-4fbe-a1fb-dc2c4adfa1fa\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 23 16:15:36 crc kubenswrapper[5050]: I1123 16:15:36.575800 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4n5vd\" (UniqueName: \"kubernetes.io/projected/ddc00b7c-3525-45d6-863f-20e9e26ff02f-kube-api-access-4n5vd\") pod \"nova-scheduler-0\" (UID: \"ddc00b7c-3525-45d6-863f-20e9e26ff02f\") " pod="openstack/nova-scheduler-0"
Nov 23 16:15:36 crc kubenswrapper[5050]: I1123 16:15:36.575822 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ddc00b7c-3525-45d6-863f-20e9e26ff02f-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"ddc00b7c-3525-45d6-863f-20e9e26ff02f\") " pod="openstack/nova-scheduler-0"
Nov 23 16:15:36 crc kubenswrapper[5050]: I1123 16:15:36.575850 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ddc00b7c-3525-45d6-863f-20e9e26ff02f-config-data\") pod \"nova-scheduler-0\" (UID: \"ddc00b7c-3525-45d6-863f-20e9e26ff02f\") " pod="openstack/nova-scheduler-0"
Nov 23 16:15:36 crc kubenswrapper[5050]: I1123 16:15:36.580638 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ddc00b7c-3525-45d6-863f-20e9e26ff02f-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"ddc00b7c-3525-45d6-863f-20e9e26ff02f\") " pod="openstack/nova-scheduler-0"
Nov 23 16:15:36 crc kubenswrapper[5050]: I1123 16:15:36.581825 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/af6314fe-5ec9-4fbe-a1fb-dc2c4adfa1fa-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"af6314fe-5ec9-4fbe-a1fb-dc2c4adfa1fa\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 23 16:15:36 crc kubenswrapper[5050]: I1123 16:15:36.582493 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/af6314fe-5ec9-4fbe-a1fb-dc2c4adfa1fa-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"af6314fe-5ec9-4fbe-a1fb-dc2c4adfa1fa\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 23 16:15:36 crc kubenswrapper[5050]: I1123 16:15:36.583883 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ddc00b7c-3525-45d6-863f-20e9e26ff02f-config-data\") pod \"nova-scheduler-0\" (UID: \"ddc00b7c-3525-45d6-863f-20e9e26ff02f\") " pod="openstack/nova-scheduler-0"
Nov 23 16:15:36 crc kubenswrapper[5050]: I1123 16:15:36.595647 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4n5vd\" (UniqueName: \"kubernetes.io/projected/ddc00b7c-3525-45d6-863f-20e9e26ff02f-kube-api-access-4n5vd\") pod \"nova-scheduler-0\" (UID: \"ddc00b7c-3525-45d6-863f-20e9e26ff02f\") " pod="openstack/nova-scheduler-0"
Nov 23 16:15:36 crc kubenswrapper[5050]: I1123 16:15:36.600404 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l4xkr\" (UniqueName: \"kubernetes.io/projected/af6314fe-5ec9-4fbe-a1fb-dc2c4adfa1fa-kube-api-access-l4xkr\") pod \"nova-cell1-novncproxy-0\" (UID: \"af6314fe-5ec9-4fbe-a1fb-dc2c4adfa1fa\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 23 16:15:36 crc kubenswrapper[5050]: I1123 16:15:36.707923 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Nov 23 16:15:36 crc kubenswrapper[5050]: I1123 16:15:36.782502 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0"
Nov 23 16:15:37 crc kubenswrapper[5050]: I1123 16:15:37.110763 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Nov 23 16:15:37 crc kubenswrapper[5050]: W1123 16:15:37.155710 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podaf6314fe_5ec9_4fbe_a1fb_dc2c4adfa1fa.slice/crio-3fd8cac5e9bc6a0fbcedbb29fa83af173fc78b7866cffca257980daa4946297e WatchSource:0}: Error finding container 3fd8cac5e9bc6a0fbcedbb29fa83af173fc78b7866cffca257980daa4946297e: Status 404 returned error can't find the container with id 3fd8cac5e9bc6a0fbcedbb29fa83af173fc78b7866cffca257980daa4946297e
Nov 23 16:15:37 crc kubenswrapper[5050]: W1123 16:15:37.201327 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podddc00b7c_3525_45d6_863f_20e9e26ff02f.slice/crio-20e00326165ce74029db632578475beab87e75703a79c2b8956f30427ba044b9 WatchSource:0}: Error finding container 20e00326165ce74029db632578475beab87e75703a79c2b8956f30427ba044b9: Status 404 returned error can't find the container with id 20e00326165ce74029db632578475beab87e75703a79c2b8956f30427ba044b9
Nov 23 16:15:37 crc kubenswrapper[5050]: I1123 16:15:37.206359 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"]
Nov 23 16:15:37 crc kubenswrapper[5050]: I1123 16:15:37.256065 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"af6314fe-5ec9-4fbe-a1fb-dc2c4adfa1fa","Type":"ContainerStarted","Data":"3fd8cac5e9bc6a0fbcedbb29fa83af173fc78b7866cffca257980daa4946297e"}
Nov 23 16:15:37 crc kubenswrapper[5050]: I1123 16:15:37.257793 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"ddc00b7c-3525-45d6-863f-20e9e26ff02f","Type":"ContainerStarted","Data":"20e00326165ce74029db632578475beab87e75703a79c2b8956f30427ba044b9"}
event={"ID":"ddc00b7c-3525-45d6-863f-20e9e26ff02f","Type":"ContainerStarted","Data":"20e00326165ce74029db632578475beab87e75703a79c2b8956f30427ba044b9"} Nov 23 16:15:37 crc kubenswrapper[5050]: I1123 16:15:37.260162 5050 generic.go:334] "Generic (PLEG): container finished" podID="bddd9398-a4d1-45ef-b671-8d370afc13a3" containerID="ae548bb41956ddfb6a2967b0fae62592d8080c28444f96a3eb8e2bfe0a16faa0" exitCode=0 Nov 23 16:15:37 crc kubenswrapper[5050]: I1123 16:15:37.260285 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"bddd9398-a4d1-45ef-b671-8d370afc13a3","Type":"ContainerDied","Data":"ae548bb41956ddfb6a2967b0fae62592d8080c28444f96a3eb8e2bfe0a16faa0"} Nov 23 16:15:37 crc kubenswrapper[5050]: I1123 16:15:37.479153 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 23 16:15:37 crc kubenswrapper[5050]: I1123 16:15:37.563599 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="18561cc9-8520-4621-a8c5-4efd4196a100" path="/var/lib/kubelet/pods/18561cc9-8520-4621-a8c5-4efd4196a100/volumes" Nov 23 16:15:37 crc kubenswrapper[5050]: I1123 16:15:37.564429 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b67f9b82-2a40-46a6-af4f-1439ba842a82" path="/var/lib/kubelet/pods/b67f9b82-2a40-46a6-af4f-1439ba842a82/volumes" Nov 23 16:15:37 crc kubenswrapper[5050]: I1123 16:15:37.603882 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bddd9398-a4d1-45ef-b671-8d370afc13a3-combined-ca-bundle\") pod \"bddd9398-a4d1-45ef-b671-8d370afc13a3\" (UID: \"bddd9398-a4d1-45ef-b671-8d370afc13a3\") " Nov 23 16:15:37 crc kubenswrapper[5050]: I1123 16:15:37.604013 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bddd9398-a4d1-45ef-b671-8d370afc13a3-config-data\") pod \"bddd9398-a4d1-45ef-b671-8d370afc13a3\" (UID: \"bddd9398-a4d1-45ef-b671-8d370afc13a3\") " Nov 23 16:15:37 crc kubenswrapper[5050]: I1123 16:15:37.604251 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qnpdx\" (UniqueName: \"kubernetes.io/projected/bddd9398-a4d1-45ef-b671-8d370afc13a3-kube-api-access-qnpdx\") pod \"bddd9398-a4d1-45ef-b671-8d370afc13a3\" (UID: \"bddd9398-a4d1-45ef-b671-8d370afc13a3\") " Nov 23 16:15:37 crc kubenswrapper[5050]: I1123 16:15:37.610058 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bddd9398-a4d1-45ef-b671-8d370afc13a3-kube-api-access-qnpdx" (OuterVolumeSpecName: "kube-api-access-qnpdx") pod "bddd9398-a4d1-45ef-b671-8d370afc13a3" (UID: "bddd9398-a4d1-45ef-b671-8d370afc13a3"). InnerVolumeSpecName "kube-api-access-qnpdx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 16:15:37 crc kubenswrapper[5050]: I1123 16:15:37.632502 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bddd9398-a4d1-45ef-b671-8d370afc13a3-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "bddd9398-a4d1-45ef-b671-8d370afc13a3" (UID: "bddd9398-a4d1-45ef-b671-8d370afc13a3"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:15:37 crc kubenswrapper[5050]: I1123 16:15:37.634182 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bddd9398-a4d1-45ef-b671-8d370afc13a3-config-data" (OuterVolumeSpecName: "config-data") pod "bddd9398-a4d1-45ef-b671-8d370afc13a3" (UID: "bddd9398-a4d1-45ef-b671-8d370afc13a3"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:15:37 crc kubenswrapper[5050]: I1123 16:15:37.706896 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qnpdx\" (UniqueName: \"kubernetes.io/projected/bddd9398-a4d1-45ef-b671-8d370afc13a3-kube-api-access-qnpdx\") on node \"crc\" DevicePath \"\"" Nov 23 16:15:37 crc kubenswrapper[5050]: I1123 16:15:37.706961 5050 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bddd9398-a4d1-45ef-b671-8d370afc13a3-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 16:15:37 crc kubenswrapper[5050]: I1123 16:15:37.706976 5050 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bddd9398-a4d1-45ef-b671-8d370afc13a3-config-data\") on node \"crc\" DevicePath \"\"" Nov 23 16:15:37 crc kubenswrapper[5050]: I1123 16:15:37.762982 5050 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="5954b03a-534a-435b-9106-6be013bcecb1" containerName="nova-metadata-log" probeResult="failure" output="Get \"http://10.217.1.70:8775/\": read tcp 10.217.0.2:49718->10.217.1.70:8775: read: connection reset by peer" Nov 23 16:15:37 crc kubenswrapper[5050]: I1123 16:15:37.762987 5050 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="5954b03a-534a-435b-9106-6be013bcecb1" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"http://10.217.1.70:8775/\": read tcp 10.217.0.2:49734->10.217.1.70:8775: read: connection reset by peer" Nov 23 16:15:37 crc kubenswrapper[5050]: I1123 16:15:37.806987 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 23 16:15:37 crc kubenswrapper[5050]: I1123 16:15:37.807808 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-conductor-0" podUID="b8269f32-ce74-4977-aa1b-e7324d6d9935" containerName="nova-cell1-conductor-conductor" containerID="cri-o://9ab652d7f87e93ebb30e4dff16ee739ee0e999fbf81494e2affcaf7c5b2080a0" gracePeriod=30 Nov 23 16:15:38 crc kubenswrapper[5050]: I1123 16:15:38.178679 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 23 16:15:38 crc kubenswrapper[5050]: I1123 16:15:38.216197 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-brkct\" (UniqueName: \"kubernetes.io/projected/5954b03a-534a-435b-9106-6be013bcecb1-kube-api-access-brkct\") pod \"5954b03a-534a-435b-9106-6be013bcecb1\" (UID: \"5954b03a-534a-435b-9106-6be013bcecb1\") " Nov 23 16:15:38 crc kubenswrapper[5050]: I1123 16:15:38.216352 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5954b03a-534a-435b-9106-6be013bcecb1-config-data\") pod \"5954b03a-534a-435b-9106-6be013bcecb1\" (UID: \"5954b03a-534a-435b-9106-6be013bcecb1\") " Nov 23 16:15:38 crc kubenswrapper[5050]: I1123 16:15:38.216555 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5954b03a-534a-435b-9106-6be013bcecb1-logs\") pod \"5954b03a-534a-435b-9106-6be013bcecb1\" (UID: \"5954b03a-534a-435b-9106-6be013bcecb1\") " Nov 23 16:15:38 crc kubenswrapper[5050]: I1123 16:15:38.216634 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5954b03a-534a-435b-9106-6be013bcecb1-combined-ca-bundle\") pod \"5954b03a-534a-435b-9106-6be013bcecb1\" (UID: \"5954b03a-534a-435b-9106-6be013bcecb1\") " Nov 23 16:15:38 crc kubenswrapper[5050]: I1123 16:15:38.220803 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5954b03a-534a-435b-9106-6be013bcecb1-logs" (OuterVolumeSpecName: "logs") pod "5954b03a-534a-435b-9106-6be013bcecb1" (UID: "5954b03a-534a-435b-9106-6be013bcecb1"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 16:15:38 crc kubenswrapper[5050]: I1123 16:15:38.252211 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5954b03a-534a-435b-9106-6be013bcecb1-kube-api-access-brkct" (OuterVolumeSpecName: "kube-api-access-brkct") pod "5954b03a-534a-435b-9106-6be013bcecb1" (UID: "5954b03a-534a-435b-9106-6be013bcecb1"). InnerVolumeSpecName "kube-api-access-brkct". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 16:15:38 crc kubenswrapper[5050]: I1123 16:15:38.267610 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5954b03a-534a-435b-9106-6be013bcecb1-config-data" (OuterVolumeSpecName: "config-data") pod "5954b03a-534a-435b-9106-6be013bcecb1" (UID: "5954b03a-534a-435b-9106-6be013bcecb1"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:15:38 crc kubenswrapper[5050]: I1123 16:15:38.285821 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5954b03a-534a-435b-9106-6be013bcecb1-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5954b03a-534a-435b-9106-6be013bcecb1" (UID: "5954b03a-534a-435b-9106-6be013bcecb1"). InnerVolumeSpecName "combined-ca-bundle". 
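The readiness flaps above (nova-metadata-0 failing both its probes with "connection reset by peer" while its replacement is prepared) are easier to spot when tallied. An illustrative counter, keyed on the prober.go:107 fields seen in this log; the regexp mirrors this capture's format and is not a kubelet API:

package main

import (
	"bufio"
	"fmt"
	"os"
	"regexp"
)

// Matches the "Probe failed" entries emitted by prober.go:107 above.
var probeFailed = regexp.MustCompile(`"Probe failed" probeType="(\w+)" pod="([^"]+)".*containerName="([^"]+)"`)

func main() {
	counts := map[string]int{}
	sc := bufio.NewScanner(os.Stdin)
	sc.Buffer(make([]byte, 1024*1024), 1024*1024) // journal entries can be long
	for sc.Scan() {
		if m := probeFailed.FindStringSubmatch(sc.Text()); m != nil {
			counts[m[1]+" "+m[2]+"/"+m[3]]++
		}
	}
	for key, n := range counts {
		fmt.Printf("%4d  %s\n", n, key)
	}
}

On this section it would report two Readiness failures for openstack/nova-metadata-0, one per container, both from the same scrape window.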
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:15:38 crc kubenswrapper[5050]: I1123 16:15:38.304481 5050 generic.go:334] "Generic (PLEG): container finished" podID="5954b03a-534a-435b-9106-6be013bcecb1" containerID="5eb68b9a5c49d7f12e9db73e6ee87d20414540759fb2229375f7093e6a977ce6" exitCode=0 Nov 23 16:15:38 crc kubenswrapper[5050]: I1123 16:15:38.304554 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"5954b03a-534a-435b-9106-6be013bcecb1","Type":"ContainerDied","Data":"5eb68b9a5c49d7f12e9db73e6ee87d20414540759fb2229375f7093e6a977ce6"} Nov 23 16:15:38 crc kubenswrapper[5050]: I1123 16:15:38.304585 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"5954b03a-534a-435b-9106-6be013bcecb1","Type":"ContainerDied","Data":"fbb088b25078e73453fa6dcb49f54ba18565a6b4724c918713c41effe5074752"} Nov 23 16:15:38 crc kubenswrapper[5050]: I1123 16:15:38.304604 5050 scope.go:117] "RemoveContainer" containerID="5eb68b9a5c49d7f12e9db73e6ee87d20414540759fb2229375f7093e6a977ce6" Nov 23 16:15:38 crc kubenswrapper[5050]: I1123 16:15:38.304734 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 23 16:15:38 crc kubenswrapper[5050]: I1123 16:15:38.318988 5050 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5954b03a-534a-435b-9106-6be013bcecb1-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 16:15:38 crc kubenswrapper[5050]: I1123 16:15:38.319021 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-brkct\" (UniqueName: \"kubernetes.io/projected/5954b03a-534a-435b-9106-6be013bcecb1-kube-api-access-brkct\") on node \"crc\" DevicePath \"\"" Nov 23 16:15:38 crc kubenswrapper[5050]: I1123 16:15:38.319034 5050 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5954b03a-534a-435b-9106-6be013bcecb1-config-data\") on node \"crc\" DevicePath \"\"" Nov 23 16:15:38 crc kubenswrapper[5050]: I1123 16:15:38.319043 5050 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5954b03a-534a-435b-9106-6be013bcecb1-logs\") on node \"crc\" DevicePath \"\"" Nov 23 16:15:38 crc kubenswrapper[5050]: I1123 16:15:38.326808 5050 generic.go:334] "Generic (PLEG): container finished" podID="b43a331c-cdd6-41e1-b962-ce6709d1d200" containerID="c8d373669db01fc469a20d7444ad7a4a2c3381438472cad0965a20c8cd5d7ce2" exitCode=0 Nov 23 16:15:38 crc kubenswrapper[5050]: I1123 16:15:38.326986 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"b43a331c-cdd6-41e1-b962-ce6709d1d200","Type":"ContainerDied","Data":"c8d373669db01fc469a20d7444ad7a4a2c3381438472cad0965a20c8cd5d7ce2"} Nov 23 16:15:38 crc kubenswrapper[5050]: I1123 16:15:38.336812 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"af6314fe-5ec9-4fbe-a1fb-dc2c4adfa1fa","Type":"ContainerStarted","Data":"e2acff8d9c1ad33653c88be584a950d2b5974f0ba6a513bc83e83c4040a2f613"} Nov 23 16:15:38 crc kubenswrapper[5050]: I1123 16:15:38.352238 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"ddc00b7c-3525-45d6-863f-20e9e26ff02f","Type":"ContainerStarted","Data":"4ede08fe48e6b2bbb659c27fe3fc3562174453c3e85d985533d4a88c6f997545"} Nov 23 16:15:38 crc kubenswrapper[5050]: I1123 16:15:38.363869 
5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"bddd9398-a4d1-45ef-b671-8d370afc13a3","Type":"ContainerDied","Data":"fe5e0c9aa04021e00a2200be51525c47098f3c14fae93917ec67a41e49b32e50"} Nov 23 16:15:38 crc kubenswrapper[5050]: I1123 16:15:38.363958 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 23 16:15:38 crc kubenswrapper[5050]: I1123 16:15:38.377814 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.377789392 podStartE2EDuration="2.377789392s" podCreationTimestamp="2025-11-23 16:15:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 16:15:38.358100406 +0000 UTC m=+5633.525096891" watchObservedRunningTime="2025-11-23 16:15:38.377789392 +0000 UTC m=+5633.544785877" Nov 23 16:15:38 crc kubenswrapper[5050]: I1123 16:15:38.405845 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.405817453 podStartE2EDuration="2.405817453s" podCreationTimestamp="2025-11-23 16:15:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 16:15:38.383841502 +0000 UTC m=+5633.550837987" watchObservedRunningTime="2025-11-23 16:15:38.405817453 +0000 UTC m=+5633.572813928" Nov 23 16:15:38 crc kubenswrapper[5050]: I1123 16:15:38.435806 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 23 16:15:38 crc kubenswrapper[5050]: I1123 16:15:38.440509 5050 scope.go:117] "RemoveContainer" containerID="53b3986dc513c9d1d812b25c41e5a8c50ac02790e4aa17daeeafab7617edf16d" Nov 23 16:15:38 crc kubenswrapper[5050]: I1123 16:15:38.446837 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 23 16:15:38 crc kubenswrapper[5050]: I1123 16:15:38.455271 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 23 16:15:38 crc kubenswrapper[5050]: I1123 16:15:38.461503 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 23 16:15:38 crc kubenswrapper[5050]: E1123 16:15:38.462319 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5954b03a-534a-435b-9106-6be013bcecb1" containerName="nova-metadata-metadata" Nov 23 16:15:38 crc kubenswrapper[5050]: I1123 16:15:38.462345 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="5954b03a-534a-435b-9106-6be013bcecb1" containerName="nova-metadata-metadata" Nov 23 16:15:38 crc kubenswrapper[5050]: E1123 16:15:38.462374 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5954b03a-534a-435b-9106-6be013bcecb1" containerName="nova-metadata-log" Nov 23 16:15:38 crc kubenswrapper[5050]: I1123 16:15:38.462382 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="5954b03a-534a-435b-9106-6be013bcecb1" containerName="nova-metadata-log" Nov 23 16:15:38 crc kubenswrapper[5050]: E1123 16:15:38.462410 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bddd9398-a4d1-45ef-b671-8d370afc13a3" containerName="nova-cell0-conductor-conductor" Nov 23 16:15:38 crc kubenswrapper[5050]: I1123 16:15:38.462418 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="bddd9398-a4d1-45ef-b671-8d370afc13a3" 
containerName="nova-cell0-conductor-conductor" Nov 23 16:15:38 crc kubenswrapper[5050]: I1123 16:15:38.462667 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="5954b03a-534a-435b-9106-6be013bcecb1" containerName="nova-metadata-metadata" Nov 23 16:15:38 crc kubenswrapper[5050]: I1123 16:15:38.462684 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="5954b03a-534a-435b-9106-6be013bcecb1" containerName="nova-metadata-log" Nov 23 16:15:38 crc kubenswrapper[5050]: I1123 16:15:38.462702 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="bddd9398-a4d1-45ef-b671-8d370afc13a3" containerName="nova-cell0-conductor-conductor" Nov 23 16:15:38 crc kubenswrapper[5050]: I1123 16:15:38.464322 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 23 16:15:38 crc kubenswrapper[5050]: I1123 16:15:38.468390 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 23 16:15:38 crc kubenswrapper[5050]: I1123 16:15:38.492017 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 23 16:15:38 crc kubenswrapper[5050]: I1123 16:15:38.503580 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 23 16:15:38 crc kubenswrapper[5050]: I1123 16:15:38.512653 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 23 16:15:38 crc kubenswrapper[5050]: E1123 16:15:38.513212 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b43a331c-cdd6-41e1-b962-ce6709d1d200" containerName="nova-api-api" Nov 23 16:15:38 crc kubenswrapper[5050]: I1123 16:15:38.513234 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="b43a331c-cdd6-41e1-b962-ce6709d1d200" containerName="nova-api-api" Nov 23 16:15:38 crc kubenswrapper[5050]: E1123 16:15:38.513286 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b43a331c-cdd6-41e1-b962-ce6709d1d200" containerName="nova-api-log" Nov 23 16:15:38 crc kubenswrapper[5050]: I1123 16:15:38.513293 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="b43a331c-cdd6-41e1-b962-ce6709d1d200" containerName="nova-api-log" Nov 23 16:15:38 crc kubenswrapper[5050]: I1123 16:15:38.513531 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="b43a331c-cdd6-41e1-b962-ce6709d1d200" containerName="nova-api-api" Nov 23 16:15:38 crc kubenswrapper[5050]: I1123 16:15:38.513548 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="b43a331c-cdd6-41e1-b962-ce6709d1d200" containerName="nova-api-log" Nov 23 16:15:38 crc kubenswrapper[5050]: I1123 16:15:38.521725 5050 util.go:30] "No sandbox for pod can be found. 
Nov 23 16:15:38 crc kubenswrapper[5050]: I1123 16:15:38.522528 5050 scope.go:117] "RemoveContainer" containerID="5eb68b9a5c49d7f12e9db73e6ee87d20414540759fb2229375f7093e6a977ce6"
Nov 23 16:15:38 crc kubenswrapper[5050]: E1123 16:15:38.523983 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5eb68b9a5c49d7f12e9db73e6ee87d20414540759fb2229375f7093e6a977ce6\": container with ID starting with 5eb68b9a5c49d7f12e9db73e6ee87d20414540759fb2229375f7093e6a977ce6 not found: ID does not exist" containerID="5eb68b9a5c49d7f12e9db73e6ee87d20414540759fb2229375f7093e6a977ce6"
Nov 23 16:15:38 crc kubenswrapper[5050]: I1123 16:15:38.524041 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5eb68b9a5c49d7f12e9db73e6ee87d20414540759fb2229375f7093e6a977ce6"} err="failed to get container status \"5eb68b9a5c49d7f12e9db73e6ee87d20414540759fb2229375f7093e6a977ce6\": rpc error: code = NotFound desc = could not find container \"5eb68b9a5c49d7f12e9db73e6ee87d20414540759fb2229375f7093e6a977ce6\": container with ID starting with 5eb68b9a5c49d7f12e9db73e6ee87d20414540759fb2229375f7093e6a977ce6 not found: ID does not exist"
Nov 23 16:15:38 crc kubenswrapper[5050]: I1123 16:15:38.524065 5050 scope.go:117] "RemoveContainer" containerID="53b3986dc513c9d1d812b25c41e5a8c50ac02790e4aa17daeeafab7617edf16d"
Nov 23 16:15:38 crc kubenswrapper[5050]: E1123 16:15:38.524344 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"53b3986dc513c9d1d812b25c41e5a8c50ac02790e4aa17daeeafab7617edf16d\": container with ID starting with 53b3986dc513c9d1d812b25c41e5a8c50ac02790e4aa17daeeafab7617edf16d not found: ID does not exist" containerID="53b3986dc513c9d1d812b25c41e5a8c50ac02790e4aa17daeeafab7617edf16d"
Nov 23 16:15:38 crc kubenswrapper[5050]: I1123 16:15:38.524364 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"53b3986dc513c9d1d812b25c41e5a8c50ac02790e4aa17daeeafab7617edf16d"} err="failed to get container status \"53b3986dc513c9d1d812b25c41e5a8c50ac02790e4aa17daeeafab7617edf16d\": rpc error: code = NotFound desc = could not find container \"53b3986dc513c9d1d812b25c41e5a8c50ac02790e4aa17daeeafab7617edf16d\": container with ID starting with 53b3986dc513c9d1d812b25c41e5a8c50ac02790e4aa17daeeafab7617edf16d not found: ID does not exist"
Nov 23 16:15:38 crc kubenswrapper[5050]: I1123 16:15:38.524378 5050 scope.go:117] "RemoveContainer" containerID="ae548bb41956ddfb6a2967b0fae62592d8080c28444f96a3eb8e2bfe0a16faa0"
Nov 23 16:15:38 crc kubenswrapper[5050]: I1123 16:15:38.528325 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data"
Nov 23 16:15:38 crc kubenswrapper[5050]: I1123 16:15:38.546177 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/48851bd1-4829-4f8d-ae93-a6cb3266ce68-config-data\") pod \"nova-metadata-0\" (UID: \"48851bd1-4829-4f8d-ae93-a6cb3266ce68\") " pod="openstack/nova-metadata-0"
Nov 23 16:15:38 crc kubenswrapper[5050]: I1123 16:15:38.546289 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/48851bd1-4829-4f8d-ae93-a6cb3266ce68-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"48851bd1-4829-4f8d-ae93-a6cb3266ce68\") " pod="openstack/nova-metadata-0"
Nov 23 16:15:38 crc kubenswrapper[5050]: I1123 16:15:38.546397 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q9mkv\" (UniqueName: \"kubernetes.io/projected/48851bd1-4829-4f8d-ae93-a6cb3266ce68-kube-api-access-q9mkv\") pod \"nova-metadata-0\" (UID: \"48851bd1-4829-4f8d-ae93-a6cb3266ce68\") " pod="openstack/nova-metadata-0"
Nov 23 16:15:38 crc kubenswrapper[5050]: I1123 16:15:38.546619 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/48851bd1-4829-4f8d-ae93-a6cb3266ce68-logs\") pod \"nova-metadata-0\" (UID: \"48851bd1-4829-4f8d-ae93-a6cb3266ce68\") " pod="openstack/nova-metadata-0"
Nov 23 16:15:38 crc kubenswrapper[5050]: I1123 16:15:38.580777 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"]
Nov 23 16:15:38 crc kubenswrapper[5050]: I1123 16:15:38.595379 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"]
Nov 23 16:15:38 crc kubenswrapper[5050]: I1123 16:15:38.650209 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b43a331c-cdd6-41e1-b962-ce6709d1d200-logs\") pod \"b43a331c-cdd6-41e1-b962-ce6709d1d200\" (UID: \"b43a331c-cdd6-41e1-b962-ce6709d1d200\") "
Nov 23 16:15:38 crc kubenswrapper[5050]: I1123 16:15:38.650502 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b43a331c-cdd6-41e1-b962-ce6709d1d200-combined-ca-bundle\") pod \"b43a331c-cdd6-41e1-b962-ce6709d1d200\" (UID: \"b43a331c-cdd6-41e1-b962-ce6709d1d200\") "
Nov 23 16:15:38 crc kubenswrapper[5050]: I1123 16:15:38.650679 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cnjnv\" (UniqueName: \"kubernetes.io/projected/b43a331c-cdd6-41e1-b962-ce6709d1d200-kube-api-access-cnjnv\") pod \"b43a331c-cdd6-41e1-b962-ce6709d1d200\" (UID: \"b43a331c-cdd6-41e1-b962-ce6709d1d200\") "
Nov 23 16:15:38 crc kubenswrapper[5050]: I1123 16:15:38.650723 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b43a331c-cdd6-41e1-b962-ce6709d1d200-config-data\") pod \"b43a331c-cdd6-41e1-b962-ce6709d1d200\" (UID: \"b43a331c-cdd6-41e1-b962-ce6709d1d200\") "
Nov 23 16:15:38 crc kubenswrapper[5050]: I1123 16:15:38.651052 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q9mkv\" (UniqueName: \"kubernetes.io/projected/48851bd1-4829-4f8d-ae93-a6cb3266ce68-kube-api-access-q9mkv\") pod \"nova-metadata-0\" (UID: \"48851bd1-4829-4f8d-ae93-a6cb3266ce68\") " pod="openstack/nova-metadata-0"
Nov 23 16:15:38 crc kubenswrapper[5050]: I1123 16:15:38.651228 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8a3ba510-87a6-443f-b74c-ac6361726b1b-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"8a3ba510-87a6-443f-b74c-ac6361726b1b\") " pod="openstack/nova-cell0-conductor-0"
Nov 23 16:15:38 crc kubenswrapper[5050]: I1123 16:15:38.651288 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hlw4m\" (UniqueName: \"kubernetes.io/projected/8a3ba510-87a6-443f-b74c-ac6361726b1b-kube-api-access-hlw4m\") pod \"nova-cell0-conductor-0\" (UID: \"8a3ba510-87a6-443f-b74c-ac6361726b1b\") " pod="openstack/nova-cell0-conductor-0"
Nov 23 16:15:38 crc kubenswrapper[5050]: I1123 16:15:38.651342 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/48851bd1-4829-4f8d-ae93-a6cb3266ce68-logs\") pod \"nova-metadata-0\" (UID: \"48851bd1-4829-4f8d-ae93-a6cb3266ce68\") " pod="openstack/nova-metadata-0"
Nov 23 16:15:38 crc kubenswrapper[5050]: I1123 16:15:38.651368 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8a3ba510-87a6-443f-b74c-ac6361726b1b-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"8a3ba510-87a6-443f-b74c-ac6361726b1b\") " pod="openstack/nova-cell0-conductor-0"
Nov 23 16:15:38 crc kubenswrapper[5050]: I1123 16:15:38.651478 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/48851bd1-4829-4f8d-ae93-a6cb3266ce68-config-data\") pod \"nova-metadata-0\" (UID: \"48851bd1-4829-4f8d-ae93-a6cb3266ce68\") " pod="openstack/nova-metadata-0"
Nov 23 16:15:38 crc kubenswrapper[5050]: I1123 16:15:38.651536 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/48851bd1-4829-4f8d-ae93-a6cb3266ce68-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"48851bd1-4829-4f8d-ae93-a6cb3266ce68\") " pod="openstack/nova-metadata-0"
Nov 23 16:15:38 crc kubenswrapper[5050]: I1123 16:15:38.652828 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b43a331c-cdd6-41e1-b962-ce6709d1d200-logs" (OuterVolumeSpecName: "logs") pod "b43a331c-cdd6-41e1-b962-ce6709d1d200" (UID: "b43a331c-cdd6-41e1-b962-ce6709d1d200"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 23 16:15:38 crc kubenswrapper[5050]: I1123 16:15:38.657408 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/48851bd1-4829-4f8d-ae93-a6cb3266ce68-logs\") pod \"nova-metadata-0\" (UID: \"48851bd1-4829-4f8d-ae93-a6cb3266ce68\") " pod="openstack/nova-metadata-0"
Nov 23 16:15:38 crc kubenswrapper[5050]: I1123 16:15:38.661563 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b43a331c-cdd6-41e1-b962-ce6709d1d200-kube-api-access-cnjnv" (OuterVolumeSpecName: "kube-api-access-cnjnv") pod "b43a331c-cdd6-41e1-b962-ce6709d1d200" (UID: "b43a331c-cdd6-41e1-b962-ce6709d1d200"). InnerVolumeSpecName "kube-api-access-cnjnv". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 23 16:15:38 crc kubenswrapper[5050]: I1123 16:15:38.664200 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/48851bd1-4829-4f8d-ae93-a6cb3266ce68-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"48851bd1-4829-4f8d-ae93-a6cb3266ce68\") " pod="openstack/nova-metadata-0"
Nov 23 16:15:38 crc kubenswrapper[5050]: I1123 16:15:38.664645 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/48851bd1-4829-4f8d-ae93-a6cb3266ce68-config-data\") pod \"nova-metadata-0\" (UID: \"48851bd1-4829-4f8d-ae93-a6cb3266ce68\") " pod="openstack/nova-metadata-0"
Nov 23 16:15:38 crc kubenswrapper[5050]: I1123 16:15:38.680744 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q9mkv\" (UniqueName: \"kubernetes.io/projected/48851bd1-4829-4f8d-ae93-a6cb3266ce68-kube-api-access-q9mkv\") pod \"nova-metadata-0\" (UID: \"48851bd1-4829-4f8d-ae93-a6cb3266ce68\") " pod="openstack/nova-metadata-0"
Nov 23 16:15:38 crc kubenswrapper[5050]: I1123 16:15:38.688591 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b43a331c-cdd6-41e1-b962-ce6709d1d200-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b43a331c-cdd6-41e1-b962-ce6709d1d200" (UID: "b43a331c-cdd6-41e1-b962-ce6709d1d200"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 23 16:15:38 crc kubenswrapper[5050]: I1123 16:15:38.702053 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b43a331c-cdd6-41e1-b962-ce6709d1d200-config-data" (OuterVolumeSpecName: "config-data") pod "b43a331c-cdd6-41e1-b962-ce6709d1d200" (UID: "b43a331c-cdd6-41e1-b962-ce6709d1d200"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:15:38 crc kubenswrapper[5050]: I1123 16:15:38.753721 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8a3ba510-87a6-443f-b74c-ac6361726b1b-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"8a3ba510-87a6-443f-b74c-ac6361726b1b\") " pod="openstack/nova-cell0-conductor-0" Nov 23 16:15:38 crc kubenswrapper[5050]: I1123 16:15:38.753785 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hlw4m\" (UniqueName: \"kubernetes.io/projected/8a3ba510-87a6-443f-b74c-ac6361726b1b-kube-api-access-hlw4m\") pod \"nova-cell0-conductor-0\" (UID: \"8a3ba510-87a6-443f-b74c-ac6361726b1b\") " pod="openstack/nova-cell0-conductor-0" Nov 23 16:15:38 crc kubenswrapper[5050]: I1123 16:15:38.753826 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8a3ba510-87a6-443f-b74c-ac6361726b1b-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"8a3ba510-87a6-443f-b74c-ac6361726b1b\") " pod="openstack/nova-cell0-conductor-0" Nov 23 16:15:38 crc kubenswrapper[5050]: I1123 16:15:38.753891 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cnjnv\" (UniqueName: \"kubernetes.io/projected/b43a331c-cdd6-41e1-b962-ce6709d1d200-kube-api-access-cnjnv\") on node \"crc\" DevicePath \"\"" Nov 23 16:15:38 crc kubenswrapper[5050]: I1123 16:15:38.753904 5050 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b43a331c-cdd6-41e1-b962-ce6709d1d200-config-data\") on node \"crc\" DevicePath \"\"" Nov 23 16:15:38 crc kubenswrapper[5050]: I1123 16:15:38.753913 5050 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b43a331c-cdd6-41e1-b962-ce6709d1d200-logs\") on node \"crc\" DevicePath \"\"" Nov 23 16:15:38 crc kubenswrapper[5050]: I1123 16:15:38.753924 5050 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b43a331c-cdd6-41e1-b962-ce6709d1d200-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 16:15:38 crc kubenswrapper[5050]: I1123 16:15:38.760342 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8a3ba510-87a6-443f-b74c-ac6361726b1b-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"8a3ba510-87a6-443f-b74c-ac6361726b1b\") " pod="openstack/nova-cell0-conductor-0" Nov 23 16:15:38 crc kubenswrapper[5050]: I1123 16:15:38.762071 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8a3ba510-87a6-443f-b74c-ac6361726b1b-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"8a3ba510-87a6-443f-b74c-ac6361726b1b\") " pod="openstack/nova-cell0-conductor-0" Nov 23 16:15:38 crc kubenswrapper[5050]: I1123 16:15:38.772685 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hlw4m\" (UniqueName: \"kubernetes.io/projected/8a3ba510-87a6-443f-b74c-ac6361726b1b-kube-api-access-hlw4m\") pod \"nova-cell0-conductor-0\" (UID: \"8a3ba510-87a6-443f-b74c-ac6361726b1b\") " pod="openstack/nova-cell0-conductor-0" Nov 23 16:15:38 crc kubenswrapper[5050]: I1123 16:15:38.799108 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 23 16:15:38 crc kubenswrapper[5050]: I1123 16:15:38.868285 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 23 16:15:39 crc kubenswrapper[5050]: I1123 16:15:39.357498 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 23 16:15:39 crc kubenswrapper[5050]: I1123 16:15:39.382529 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"b43a331c-cdd6-41e1-b962-ce6709d1d200","Type":"ContainerDied","Data":"572e23ba368571fe4e94de8c03b9fbbef2b79c6bde612d9c523c56b9ebf03a45"} Nov 23 16:15:39 crc kubenswrapper[5050]: I1123 16:15:39.382596 5050 scope.go:117] "RemoveContainer" containerID="c8d373669db01fc469a20d7444ad7a4a2c3381438472cad0965a20c8cd5d7ce2" Nov 23 16:15:39 crc kubenswrapper[5050]: I1123 16:15:39.382736 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 23 16:15:39 crc kubenswrapper[5050]: I1123 16:15:39.392279 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"48851bd1-4829-4f8d-ae93-a6cb3266ce68","Type":"ContainerStarted","Data":"c3e7f6296d2a3d30c488de62908ccfe5a8c475977f5675533dfa4038c34f1449"} Nov 23 16:15:39 crc kubenswrapper[5050]: I1123 16:15:39.475600 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 23 16:15:39 crc kubenswrapper[5050]: I1123 16:15:39.477347 5050 scope.go:117] "RemoveContainer" containerID="30e11a93d6fdc77b1c2a02e908870e7098aaf58f6b99802562946afc4bfd9c26" Nov 23 16:15:39 crc kubenswrapper[5050]: I1123 16:15:39.502687 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 23 16:15:39 crc kubenswrapper[5050]: I1123 16:15:39.520280 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 23 16:15:39 crc kubenswrapper[5050]: I1123 16:15:39.541422 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 23 16:15:39 crc kubenswrapper[5050]: I1123 16:15:39.543071 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 23 16:15:39 crc kubenswrapper[5050]: I1123 16:15:39.545677 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 23 16:15:39 crc kubenswrapper[5050]: I1123 16:15:39.549434 5050 scope.go:117] "RemoveContainer" containerID="131b4bb8956deb8c3bd7bd951e055fca3b66c01a0141c3f85cb0b1152065de00" Nov 23 16:15:39 crc kubenswrapper[5050]: E1123 16:15:39.549933 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 16:15:39 crc kubenswrapper[5050]: I1123 16:15:39.575628 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5954b03a-534a-435b-9106-6be013bcecb1" path="/var/lib/kubelet/pods/5954b03a-534a-435b-9106-6be013bcecb1/volumes" Nov 23 16:15:39 crc kubenswrapper[5050]: I1123 16:15:39.576305 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b43a331c-cdd6-41e1-b962-ce6709d1d200" path="/var/lib/kubelet/pods/b43a331c-cdd6-41e1-b962-ce6709d1d200/volumes" Nov 23 16:15:39 crc kubenswrapper[5050]: I1123 16:15:39.576987 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bddd9398-a4d1-45ef-b671-8d370afc13a3" path="/var/lib/kubelet/pods/bddd9398-a4d1-45ef-b671-8d370afc13a3/volumes" Nov 23 16:15:39 crc kubenswrapper[5050]: I1123 16:15:39.578241 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 23 16:15:39 crc kubenswrapper[5050]: I1123 16:15:39.674909 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8098c44a-b797-4fba-8d0d-2138ca9b367a-logs\") pod \"nova-api-0\" (UID: \"8098c44a-b797-4fba-8d0d-2138ca9b367a\") " pod="openstack/nova-api-0" Nov 23 16:15:39 crc kubenswrapper[5050]: I1123 16:15:39.675478 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8098c44a-b797-4fba-8d0d-2138ca9b367a-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"8098c44a-b797-4fba-8d0d-2138ca9b367a\") " pod="openstack/nova-api-0" Nov 23 16:15:39 crc kubenswrapper[5050]: I1123 16:15:39.675630 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8098c44a-b797-4fba-8d0d-2138ca9b367a-config-data\") pod \"nova-api-0\" (UID: \"8098c44a-b797-4fba-8d0d-2138ca9b367a\") " pod="openstack/nova-api-0" Nov 23 16:15:39 crc kubenswrapper[5050]: I1123 16:15:39.675700 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hpjkr\" (UniqueName: \"kubernetes.io/projected/8098c44a-b797-4fba-8d0d-2138ca9b367a-kube-api-access-hpjkr\") pod \"nova-api-0\" (UID: \"8098c44a-b797-4fba-8d0d-2138ca9b367a\") " pod="openstack/nova-api-0" Nov 23 16:15:39 crc kubenswrapper[5050]: I1123 16:15:39.777837 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8098c44a-b797-4fba-8d0d-2138ca9b367a-logs\") pod \"nova-api-0\" (UID: 
\"8098c44a-b797-4fba-8d0d-2138ca9b367a\") " pod="openstack/nova-api-0" Nov 23 16:15:39 crc kubenswrapper[5050]: I1123 16:15:39.777941 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8098c44a-b797-4fba-8d0d-2138ca9b367a-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"8098c44a-b797-4fba-8d0d-2138ca9b367a\") " pod="openstack/nova-api-0" Nov 23 16:15:39 crc kubenswrapper[5050]: I1123 16:15:39.778750 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8098c44a-b797-4fba-8d0d-2138ca9b367a-config-data\") pod \"nova-api-0\" (UID: \"8098c44a-b797-4fba-8d0d-2138ca9b367a\") " pod="openstack/nova-api-0" Nov 23 16:15:39 crc kubenswrapper[5050]: I1123 16:15:39.778798 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hpjkr\" (UniqueName: \"kubernetes.io/projected/8098c44a-b797-4fba-8d0d-2138ca9b367a-kube-api-access-hpjkr\") pod \"nova-api-0\" (UID: \"8098c44a-b797-4fba-8d0d-2138ca9b367a\") " pod="openstack/nova-api-0" Nov 23 16:15:39 crc kubenswrapper[5050]: I1123 16:15:39.782114 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8098c44a-b797-4fba-8d0d-2138ca9b367a-logs\") pod \"nova-api-0\" (UID: \"8098c44a-b797-4fba-8d0d-2138ca9b367a\") " pod="openstack/nova-api-0" Nov 23 16:15:39 crc kubenswrapper[5050]: I1123 16:15:39.784969 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8098c44a-b797-4fba-8d0d-2138ca9b367a-config-data\") pod \"nova-api-0\" (UID: \"8098c44a-b797-4fba-8d0d-2138ca9b367a\") " pod="openstack/nova-api-0" Nov 23 16:15:39 crc kubenswrapper[5050]: I1123 16:15:39.785284 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8098c44a-b797-4fba-8d0d-2138ca9b367a-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"8098c44a-b797-4fba-8d0d-2138ca9b367a\") " pod="openstack/nova-api-0" Nov 23 16:15:39 crc kubenswrapper[5050]: I1123 16:15:39.800467 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hpjkr\" (UniqueName: \"kubernetes.io/projected/8098c44a-b797-4fba-8d0d-2138ca9b367a-kube-api-access-hpjkr\") pod \"nova-api-0\" (UID: \"8098c44a-b797-4fba-8d0d-2138ca9b367a\") " pod="openstack/nova-api-0" Nov 23 16:15:39 crc kubenswrapper[5050]: I1123 16:15:39.935383 5050 util.go:30] "No sandbox for pod can be found. 
Nov 23 16:15:40 crc kubenswrapper[5050]: I1123 16:15:40.411538 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"8a3ba510-87a6-443f-b74c-ac6361726b1b","Type":"ContainerStarted","Data":"1935d93f340a395c5187c9ed793c8e3f3a89c31b0d8f0424a4bc98b1b7156e82"}
Nov 23 16:15:40 crc kubenswrapper[5050]: I1123 16:15:40.412257 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"8a3ba510-87a6-443f-b74c-ac6361726b1b","Type":"ContainerStarted","Data":"a70583a4269e073dd033e8b75dc0adacec3dcab3c1ea87e532196798f633095c"}
Nov 23 16:15:40 crc kubenswrapper[5050]: I1123 16:15:40.412310 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell0-conductor-0"
Nov 23 16:15:40 crc kubenswrapper[5050]: I1123 16:15:40.415217 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"48851bd1-4829-4f8d-ae93-a6cb3266ce68","Type":"ContainerStarted","Data":"212e5898b8ac9c7400429c63b5c0b7573e62e87f8d170f83efe286d8cbd392fe"}
Nov 23 16:15:40 crc kubenswrapper[5050]: I1123 16:15:40.415285 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"48851bd1-4829-4f8d-ae93-a6cb3266ce68","Type":"ContainerStarted","Data":"dcc58da0d08a430d214f6e7965bf67d82bfee9eb6a89de2efe49a48485c28c40"}
Nov 23 16:15:40 crc kubenswrapper[5050]: I1123 16:15:40.451960 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-0" podStartSLOduration=2.451930715 podStartE2EDuration="2.451930715s" podCreationTimestamp="2025-11-23 16:15:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 16:15:40.443782295 +0000 UTC m=+5635.610778830" watchObservedRunningTime="2025-11-23 16:15:40.451930715 +0000 UTC m=+5635.618927240"
Nov 23 16:15:40 crc kubenswrapper[5050]: I1123 16:15:40.501912 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.501869275 podStartE2EDuration="2.501869275s" podCreationTimestamp="2025-11-23 16:15:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 16:15:40.48647426 +0000 UTC m=+5635.653470765" watchObservedRunningTime="2025-11-23 16:15:40.501869275 +0000 UTC m=+5635.668865810"
Nov 23 16:15:40 crc kubenswrapper[5050]: I1123 16:15:40.530332 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"]
Nov 23 16:15:41 crc kubenswrapper[5050]: E1123 16:15:41.361262 5050 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 9ab652d7f87e93ebb30e4dff16ee739ee0e999fbf81494e2affcaf7c5b2080a0 is running failed: container process not found" containerID="9ab652d7f87e93ebb30e4dff16ee739ee0e999fbf81494e2affcaf7c5b2080a0" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"]
Nov 23 16:15:41 crc kubenswrapper[5050]: E1123 16:15:41.363166 5050 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 9ab652d7f87e93ebb30e4dff16ee739ee0e999fbf81494e2affcaf7c5b2080a0 is running failed: container process not found" containerID="9ab652d7f87e93ebb30e4dff16ee739ee0e999fbf81494e2affcaf7c5b2080a0" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"]
Nov 23 16:15:41 crc kubenswrapper[5050]: E1123 16:15:41.363604 5050 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 9ab652d7f87e93ebb30e4dff16ee739ee0e999fbf81494e2affcaf7c5b2080a0 is running failed: container process not found" containerID="9ab652d7f87e93ebb30e4dff16ee739ee0e999fbf81494e2affcaf7c5b2080a0" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"]
Nov 23 16:15:41 crc kubenswrapper[5050]: E1123 16:15:41.363653 5050 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 9ab652d7f87e93ebb30e4dff16ee739ee0e999fbf81494e2affcaf7c5b2080a0 is running failed: container process not found" probeType="Readiness" pod="openstack/nova-cell1-conductor-0" podUID="b8269f32-ce74-4977-aa1b-e7324d6d9935" containerName="nova-cell1-conductor-conductor"
Nov 23 16:15:41 crc kubenswrapper[5050]: I1123 16:15:41.446261 5050 generic.go:334] "Generic (PLEG): container finished" podID="b8269f32-ce74-4977-aa1b-e7324d6d9935" containerID="9ab652d7f87e93ebb30e4dff16ee739ee0e999fbf81494e2affcaf7c5b2080a0" exitCode=0
Nov 23 16:15:41 crc kubenswrapper[5050]: I1123 16:15:41.446358 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0"
Nov 23 16:15:41 crc kubenswrapper[5050]: I1123 16:15:41.446354 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"b8269f32-ce74-4977-aa1b-e7324d6d9935","Type":"ContainerDied","Data":"9ab652d7f87e93ebb30e4dff16ee739ee0e999fbf81494e2affcaf7c5b2080a0"}
Nov 23 16:15:41 crc kubenswrapper[5050]: I1123 16:15:41.446432 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"b8269f32-ce74-4977-aa1b-e7324d6d9935","Type":"ContainerDied","Data":"b75dd840f0c346435d79616b1598b40d84b456e8a7976d025f3e9bd6602b1897"}
Nov 23 16:15:41 crc kubenswrapper[5050]: I1123 16:15:41.446499 5050 scope.go:117] "RemoveContainer" containerID="9ab652d7f87e93ebb30e4dff16ee739ee0e999fbf81494e2affcaf7c5b2080a0"
Nov 23 16:15:41 crc kubenswrapper[5050]: I1123 16:15:41.451699 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"8098c44a-b797-4fba-8d0d-2138ca9b367a","Type":"ContainerStarted","Data":"6b4b736b2f6317c29343fa48432ccdef3758c170dc6360ed6cb25caec383b0eb"}
Nov 23 16:15:41 crc kubenswrapper[5050]: I1123 16:15:41.451769 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"8098c44a-b797-4fba-8d0d-2138ca9b367a","Type":"ContainerStarted","Data":"13f460433dc3293b2854c9d73e0084d0f750ebd30de0e229dfc7cfd9257c9b61"}
Nov 23 16:15:41 crc kubenswrapper[5050]: I1123 16:15:41.451791 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"8098c44a-b797-4fba-8d0d-2138ca9b367a","Type":"ContainerStarted","Data":"45fa5f45e24923a6c1ca202a72ce406f17a5be3147314c86610275116cd2dbeb"}
Nov 23 16:15:41 crc kubenswrapper[5050]: I1123 16:15:41.543936 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.543903811 podStartE2EDuration="2.543903811s" podCreationTimestamp="2025-11-23 16:15:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 16:15:41.515489899 +0000 UTC m=+5636.682486394" watchObservedRunningTime="2025-11-23 16:15:41.543903811 +0000 UTC m=+5636.710900296"
+0000 UTC" observedRunningTime="2025-11-23 16:15:41.515489899 +0000 UTC m=+5636.682486394" watchObservedRunningTime="2025-11-23 16:15:41.543903811 +0000 UTC m=+5636.710900296" Nov 23 16:15:41 crc kubenswrapper[5050]: I1123 16:15:41.545238 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gjqdm\" (UniqueName: \"kubernetes.io/projected/b8269f32-ce74-4977-aa1b-e7324d6d9935-kube-api-access-gjqdm\") pod \"b8269f32-ce74-4977-aa1b-e7324d6d9935\" (UID: \"b8269f32-ce74-4977-aa1b-e7324d6d9935\") " Nov 23 16:15:41 crc kubenswrapper[5050]: I1123 16:15:41.545317 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b8269f32-ce74-4977-aa1b-e7324d6d9935-config-data\") pod \"b8269f32-ce74-4977-aa1b-e7324d6d9935\" (UID: \"b8269f32-ce74-4977-aa1b-e7324d6d9935\") " Nov 23 16:15:41 crc kubenswrapper[5050]: I1123 16:15:41.545409 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b8269f32-ce74-4977-aa1b-e7324d6d9935-combined-ca-bundle\") pod \"b8269f32-ce74-4977-aa1b-e7324d6d9935\" (UID: \"b8269f32-ce74-4977-aa1b-e7324d6d9935\") " Nov 23 16:15:41 crc kubenswrapper[5050]: I1123 16:15:41.572683 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b8269f32-ce74-4977-aa1b-e7324d6d9935-kube-api-access-gjqdm" (OuterVolumeSpecName: "kube-api-access-gjqdm") pod "b8269f32-ce74-4977-aa1b-e7324d6d9935" (UID: "b8269f32-ce74-4977-aa1b-e7324d6d9935"). InnerVolumeSpecName "kube-api-access-gjqdm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 16:15:41 crc kubenswrapper[5050]: I1123 16:15:41.581325 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b8269f32-ce74-4977-aa1b-e7324d6d9935-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b8269f32-ce74-4977-aa1b-e7324d6d9935" (UID: "b8269f32-ce74-4977-aa1b-e7324d6d9935"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:15:41 crc kubenswrapper[5050]: I1123 16:15:41.583044 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b8269f32-ce74-4977-aa1b-e7324d6d9935-config-data" (OuterVolumeSpecName: "config-data") pod "b8269f32-ce74-4977-aa1b-e7324d6d9935" (UID: "b8269f32-ce74-4977-aa1b-e7324d6d9935"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:15:41 crc kubenswrapper[5050]: I1123 16:15:41.648772 5050 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b8269f32-ce74-4977-aa1b-e7324d6d9935-config-data\") on node \"crc\" DevicePath \"\"" Nov 23 16:15:41 crc kubenswrapper[5050]: I1123 16:15:41.648821 5050 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b8269f32-ce74-4977-aa1b-e7324d6d9935-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 16:15:41 crc kubenswrapper[5050]: I1123 16:15:41.648836 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gjqdm\" (UniqueName: \"kubernetes.io/projected/b8269f32-ce74-4977-aa1b-e7324d6d9935-kube-api-access-gjqdm\") on node \"crc\" DevicePath \"\"" Nov 23 16:15:41 crc kubenswrapper[5050]: I1123 16:15:41.708535 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Nov 23 16:15:41 crc kubenswrapper[5050]: I1123 16:15:41.783699 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Nov 23 16:15:41 crc kubenswrapper[5050]: I1123 16:15:41.841572 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-95c9fdf89-5ptdz" Nov 23 16:15:41 crc kubenswrapper[5050]: I1123 16:15:41.901968 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-c57cbf6dc-hspdk"] Nov 23 16:15:41 crc kubenswrapper[5050]: I1123 16:15:41.902394 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-c57cbf6dc-hspdk" podUID="10fbcf8c-4e29-4982-adf6-5d9240dc4293" containerName="dnsmasq-dns" containerID="cri-o://35cdde4c34663833cf2bdc0774d1abc0d1adfa4ab8d562168e173aaf0697119d" gracePeriod=10 Nov 23 16:15:42 crc kubenswrapper[5050]: I1123 16:15:42.428553 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-c57cbf6dc-hspdk" Nov 23 16:15:42 crc kubenswrapper[5050]: I1123 16:15:42.463387 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 23 16:15:42 crc kubenswrapper[5050]: I1123 16:15:42.478779 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-c57cbf6dc-hspdk" Nov 23 16:15:42 crc kubenswrapper[5050]: I1123 16:15:42.478795 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-c57cbf6dc-hspdk" event={"ID":"10fbcf8c-4e29-4982-adf6-5d9240dc4293","Type":"ContainerDied","Data":"35cdde4c34663833cf2bdc0774d1abc0d1adfa4ab8d562168e173aaf0697119d"} Nov 23 16:15:42 crc kubenswrapper[5050]: I1123 16:15:42.478872 5050 scope.go:117] "RemoveContainer" containerID="35cdde4c34663833cf2bdc0774d1abc0d1adfa4ab8d562168e173aaf0697119d" Nov 23 16:15:42 crc kubenswrapper[5050]: I1123 16:15:42.478657 5050 generic.go:334] "Generic (PLEG): container finished" podID="10fbcf8c-4e29-4982-adf6-5d9240dc4293" containerID="35cdde4c34663833cf2bdc0774d1abc0d1adfa4ab8d562168e173aaf0697119d" exitCode=0 Nov 23 16:15:42 crc kubenswrapper[5050]: I1123 16:15:42.480818 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-c57cbf6dc-hspdk" event={"ID":"10fbcf8c-4e29-4982-adf6-5d9240dc4293","Type":"ContainerDied","Data":"58c9db494fac2369e87feb73c52f5d5ba7f69b383a64307cb6d475faed9ea418"} Nov 23 16:15:42 crc kubenswrapper[5050]: I1123 16:15:42.511203 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 23 16:15:42 crc kubenswrapper[5050]: I1123 16:15:42.516741 5050 scope.go:117] "RemoveContainer" containerID="5a89951850408cd8024ff1744f69ceab8edac64952afa6e849cfa98fb783e273" Nov 23 16:15:42 crc kubenswrapper[5050]: I1123 16:15:42.522970 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 23 16:15:42 crc kubenswrapper[5050]: I1123 16:15:42.532065 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 23 16:15:42 crc kubenswrapper[5050]: E1123 16:15:42.532871 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="10fbcf8c-4e29-4982-adf6-5d9240dc4293" containerName="dnsmasq-dns" Nov 23 16:15:42 crc kubenswrapper[5050]: I1123 16:15:42.532888 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="10fbcf8c-4e29-4982-adf6-5d9240dc4293" containerName="dnsmasq-dns" Nov 23 16:15:42 crc kubenswrapper[5050]: E1123 16:15:42.532912 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b8269f32-ce74-4977-aa1b-e7324d6d9935" containerName="nova-cell1-conductor-conductor" Nov 23 16:15:42 crc kubenswrapper[5050]: I1123 16:15:42.532918 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="b8269f32-ce74-4977-aa1b-e7324d6d9935" containerName="nova-cell1-conductor-conductor" Nov 23 16:15:42 crc kubenswrapper[5050]: E1123 16:15:42.532940 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="10fbcf8c-4e29-4982-adf6-5d9240dc4293" containerName="init" Nov 23 16:15:42 crc kubenswrapper[5050]: I1123 16:15:42.532946 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="10fbcf8c-4e29-4982-adf6-5d9240dc4293" containerName="init" Nov 23 16:15:42 crc kubenswrapper[5050]: I1123 16:15:42.533135 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="b8269f32-ce74-4977-aa1b-e7324d6d9935" containerName="nova-cell1-conductor-conductor" Nov 23 16:15:42 crc kubenswrapper[5050]: I1123 16:15:42.533149 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="10fbcf8c-4e29-4982-adf6-5d9240dc4293" containerName="dnsmasq-dns" Nov 23 16:15:42 crc kubenswrapper[5050]: I1123 16:15:42.533949 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 23 16:15:42 crc kubenswrapper[5050]: I1123 16:15:42.540154 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 23 16:15:42 crc kubenswrapper[5050]: I1123 16:15:42.546334 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Nov 23 16:15:42 crc kubenswrapper[5050]: I1123 16:15:42.565781 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/10fbcf8c-4e29-4982-adf6-5d9240dc4293-dns-svc\") pod \"10fbcf8c-4e29-4982-adf6-5d9240dc4293\" (UID: \"10fbcf8c-4e29-4982-adf6-5d9240dc4293\") " Nov 23 16:15:42 crc kubenswrapper[5050]: I1123 16:15:42.565861 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/10fbcf8c-4e29-4982-adf6-5d9240dc4293-ovsdbserver-sb\") pod \"10fbcf8c-4e29-4982-adf6-5d9240dc4293\" (UID: \"10fbcf8c-4e29-4982-adf6-5d9240dc4293\") " Nov 23 16:15:42 crc kubenswrapper[5050]: I1123 16:15:42.566009 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/10fbcf8c-4e29-4982-adf6-5d9240dc4293-ovsdbserver-nb\") pod \"10fbcf8c-4e29-4982-adf6-5d9240dc4293\" (UID: \"10fbcf8c-4e29-4982-adf6-5d9240dc4293\") " Nov 23 16:15:42 crc kubenswrapper[5050]: I1123 16:15:42.566068 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p7l8n\" (UniqueName: \"kubernetes.io/projected/10fbcf8c-4e29-4982-adf6-5d9240dc4293-kube-api-access-p7l8n\") pod \"10fbcf8c-4e29-4982-adf6-5d9240dc4293\" (UID: \"10fbcf8c-4e29-4982-adf6-5d9240dc4293\") " Nov 23 16:15:42 crc kubenswrapper[5050]: I1123 16:15:42.566106 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/10fbcf8c-4e29-4982-adf6-5d9240dc4293-config\") pod \"10fbcf8c-4e29-4982-adf6-5d9240dc4293\" (UID: \"10fbcf8c-4e29-4982-adf6-5d9240dc4293\") " Nov 23 16:15:42 crc kubenswrapper[5050]: I1123 16:15:42.571264 5050 scope.go:117] "RemoveContainer" containerID="35cdde4c34663833cf2bdc0774d1abc0d1adfa4ab8d562168e173aaf0697119d" Nov 23 16:15:42 crc kubenswrapper[5050]: E1123 16:15:42.572133 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"35cdde4c34663833cf2bdc0774d1abc0d1adfa4ab8d562168e173aaf0697119d\": container with ID starting with 35cdde4c34663833cf2bdc0774d1abc0d1adfa4ab8d562168e173aaf0697119d not found: ID does not exist" containerID="35cdde4c34663833cf2bdc0774d1abc0d1adfa4ab8d562168e173aaf0697119d" Nov 23 16:15:42 crc kubenswrapper[5050]: I1123 16:15:42.572213 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"35cdde4c34663833cf2bdc0774d1abc0d1adfa4ab8d562168e173aaf0697119d"} err="failed to get container status \"35cdde4c34663833cf2bdc0774d1abc0d1adfa4ab8d562168e173aaf0697119d\": rpc error: code = NotFound desc = could not find container \"35cdde4c34663833cf2bdc0774d1abc0d1adfa4ab8d562168e173aaf0697119d\": container with ID starting with 35cdde4c34663833cf2bdc0774d1abc0d1adfa4ab8d562168e173aaf0697119d not found: ID does not exist" Nov 23 16:15:42 crc kubenswrapper[5050]: I1123 16:15:42.572252 5050 scope.go:117] "RemoveContainer" 
containerID="5a89951850408cd8024ff1744f69ceab8edac64952afa6e849cfa98fb783e273" Nov 23 16:15:42 crc kubenswrapper[5050]: E1123 16:15:42.574185 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5a89951850408cd8024ff1744f69ceab8edac64952afa6e849cfa98fb783e273\": container with ID starting with 5a89951850408cd8024ff1744f69ceab8edac64952afa6e849cfa98fb783e273 not found: ID does not exist" containerID="5a89951850408cd8024ff1744f69ceab8edac64952afa6e849cfa98fb783e273" Nov 23 16:15:42 crc kubenswrapper[5050]: I1123 16:15:42.574274 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5a89951850408cd8024ff1744f69ceab8edac64952afa6e849cfa98fb783e273"} err="failed to get container status \"5a89951850408cd8024ff1744f69ceab8edac64952afa6e849cfa98fb783e273\": rpc error: code = NotFound desc = could not find container \"5a89951850408cd8024ff1744f69ceab8edac64952afa6e849cfa98fb783e273\": container with ID starting with 5a89951850408cd8024ff1744f69ceab8edac64952afa6e849cfa98fb783e273 not found: ID does not exist" Nov 23 16:15:42 crc kubenswrapper[5050]: I1123 16:15:42.576595 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/10fbcf8c-4e29-4982-adf6-5d9240dc4293-kube-api-access-p7l8n" (OuterVolumeSpecName: "kube-api-access-p7l8n") pod "10fbcf8c-4e29-4982-adf6-5d9240dc4293" (UID: "10fbcf8c-4e29-4982-adf6-5d9240dc4293"). InnerVolumeSpecName "kube-api-access-p7l8n". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 16:15:42 crc kubenswrapper[5050]: I1123 16:15:42.613803 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/10fbcf8c-4e29-4982-adf6-5d9240dc4293-config" (OuterVolumeSpecName: "config") pod "10fbcf8c-4e29-4982-adf6-5d9240dc4293" (UID: "10fbcf8c-4e29-4982-adf6-5d9240dc4293"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 16:15:42 crc kubenswrapper[5050]: I1123 16:15:42.614509 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/10fbcf8c-4e29-4982-adf6-5d9240dc4293-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "10fbcf8c-4e29-4982-adf6-5d9240dc4293" (UID: "10fbcf8c-4e29-4982-adf6-5d9240dc4293"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 16:15:42 crc kubenswrapper[5050]: I1123 16:15:42.615219 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/10fbcf8c-4e29-4982-adf6-5d9240dc4293-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "10fbcf8c-4e29-4982-adf6-5d9240dc4293" (UID: "10fbcf8c-4e29-4982-adf6-5d9240dc4293"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 16:15:42 crc kubenswrapper[5050]: I1123 16:15:42.616614 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/10fbcf8c-4e29-4982-adf6-5d9240dc4293-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "10fbcf8c-4e29-4982-adf6-5d9240dc4293" (UID: "10fbcf8c-4e29-4982-adf6-5d9240dc4293"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 16:15:42 crc kubenswrapper[5050]: I1123 16:15:42.671866 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/183ebe7a-5aa3-4a3f-8392-bbd7e3433b15-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"183ebe7a-5aa3-4a3f-8392-bbd7e3433b15\") " pod="openstack/nova-cell1-conductor-0" Nov 23 16:15:42 crc kubenswrapper[5050]: I1123 16:15:42.672172 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/183ebe7a-5aa3-4a3f-8392-bbd7e3433b15-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"183ebe7a-5aa3-4a3f-8392-bbd7e3433b15\") " pod="openstack/nova-cell1-conductor-0" Nov 23 16:15:42 crc kubenswrapper[5050]: I1123 16:15:42.672257 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hxqx6\" (UniqueName: \"kubernetes.io/projected/183ebe7a-5aa3-4a3f-8392-bbd7e3433b15-kube-api-access-hxqx6\") pod \"nova-cell1-conductor-0\" (UID: \"183ebe7a-5aa3-4a3f-8392-bbd7e3433b15\") " pod="openstack/nova-cell1-conductor-0" Nov 23 16:15:42 crc kubenswrapper[5050]: I1123 16:15:42.672375 5050 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/10fbcf8c-4e29-4982-adf6-5d9240dc4293-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 23 16:15:42 crc kubenswrapper[5050]: I1123 16:15:42.672404 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p7l8n\" (UniqueName: \"kubernetes.io/projected/10fbcf8c-4e29-4982-adf6-5d9240dc4293-kube-api-access-p7l8n\") on node \"crc\" DevicePath \"\"" Nov 23 16:15:42 crc kubenswrapper[5050]: I1123 16:15:42.672421 5050 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/10fbcf8c-4e29-4982-adf6-5d9240dc4293-config\") on node \"crc\" DevicePath \"\"" Nov 23 16:15:42 crc kubenswrapper[5050]: I1123 16:15:42.672957 5050 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/10fbcf8c-4e29-4982-adf6-5d9240dc4293-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 23 16:15:42 crc kubenswrapper[5050]: I1123 16:15:42.672976 5050 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/10fbcf8c-4e29-4982-adf6-5d9240dc4293-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 23 16:15:42 crc kubenswrapper[5050]: I1123 16:15:42.774403 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/183ebe7a-5aa3-4a3f-8392-bbd7e3433b15-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"183ebe7a-5aa3-4a3f-8392-bbd7e3433b15\") " pod="openstack/nova-cell1-conductor-0" Nov 23 16:15:42 crc kubenswrapper[5050]: I1123 16:15:42.774505 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hxqx6\" (UniqueName: \"kubernetes.io/projected/183ebe7a-5aa3-4a3f-8392-bbd7e3433b15-kube-api-access-hxqx6\") pod \"nova-cell1-conductor-0\" (UID: \"183ebe7a-5aa3-4a3f-8392-bbd7e3433b15\") " pod="openstack/nova-cell1-conductor-0" Nov 23 16:15:42 crc kubenswrapper[5050]: I1123 16:15:42.774569 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/183ebe7a-5aa3-4a3f-8392-bbd7e3433b15-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"183ebe7a-5aa3-4a3f-8392-bbd7e3433b15\") " pod="openstack/nova-cell1-conductor-0" Nov 23 16:15:42 crc kubenswrapper[5050]: I1123 16:15:42.779942 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/183ebe7a-5aa3-4a3f-8392-bbd7e3433b15-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"183ebe7a-5aa3-4a3f-8392-bbd7e3433b15\") " pod="openstack/nova-cell1-conductor-0" Nov 23 16:15:42 crc kubenswrapper[5050]: I1123 16:15:42.786515 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/183ebe7a-5aa3-4a3f-8392-bbd7e3433b15-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"183ebe7a-5aa3-4a3f-8392-bbd7e3433b15\") " pod="openstack/nova-cell1-conductor-0" Nov 23 16:15:42 crc kubenswrapper[5050]: I1123 16:15:42.792503 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hxqx6\" (UniqueName: \"kubernetes.io/projected/183ebe7a-5aa3-4a3f-8392-bbd7e3433b15-kube-api-access-hxqx6\") pod \"nova-cell1-conductor-0\" (UID: \"183ebe7a-5aa3-4a3f-8392-bbd7e3433b15\") " pod="openstack/nova-cell1-conductor-0" Nov 23 16:15:42 crc kubenswrapper[5050]: I1123 16:15:42.813650 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-c57cbf6dc-hspdk"] Nov 23 16:15:42 crc kubenswrapper[5050]: I1123 16:15:42.826547 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-c57cbf6dc-hspdk"] Nov 23 16:15:42 crc kubenswrapper[5050]: I1123 16:15:42.928262 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 23 16:15:43 crc kubenswrapper[5050]: I1123 16:15:43.471241 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 23 16:15:43 crc kubenswrapper[5050]: I1123 16:15:43.493239 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"183ebe7a-5aa3-4a3f-8392-bbd7e3433b15","Type":"ContainerStarted","Data":"baf8437cd84cdd6c08362052a56b76dd8d2fcb882ddc022c5ae4282f47a42d5a"} Nov 23 16:15:43 crc kubenswrapper[5050]: I1123 16:15:43.563873 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="10fbcf8c-4e29-4982-adf6-5d9240dc4293" path="/var/lib/kubelet/pods/10fbcf8c-4e29-4982-adf6-5d9240dc4293/volumes" Nov 23 16:15:43 crc kubenswrapper[5050]: I1123 16:15:43.564819 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b8269f32-ce74-4977-aa1b-e7324d6d9935" path="/var/lib/kubelet/pods/b8269f32-ce74-4977-aa1b-e7324d6d9935/volumes" Nov 23 16:15:43 crc kubenswrapper[5050]: I1123 16:15:43.799857 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 23 16:15:43 crc kubenswrapper[5050]: I1123 16:15:43.800332 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 23 16:15:44 crc kubenswrapper[5050]: I1123 16:15:44.016264 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0" Nov 23 16:15:44 crc kubenswrapper[5050]: I1123 16:15:44.508774 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" 
event={"ID":"183ebe7a-5aa3-4a3f-8392-bbd7e3433b15","Type":"ContainerStarted","Data":"7ebf5479e4cd1274bcd94267b382aefadd914349b5cce33cceeaa72468c30166"} Nov 23 16:15:44 crc kubenswrapper[5050]: I1123 16:15:44.508851 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-conductor-0" Nov 23 16:15:44 crc kubenswrapper[5050]: I1123 16:15:44.531571 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-0" podStartSLOduration=2.531549033 podStartE2EDuration="2.531549033s" podCreationTimestamp="2025-11-23 16:15:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 16:15:44.527062376 +0000 UTC m=+5639.694058861" watchObservedRunningTime="2025-11-23 16:15:44.531549033 +0000 UTC m=+5639.698545518" Nov 23 16:15:46 crc kubenswrapper[5050]: I1123 16:15:46.708661 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Nov 23 16:15:46 crc kubenswrapper[5050]: I1123 16:15:46.768866 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Nov 23 16:15:46 crc kubenswrapper[5050]: I1123 16:15:46.783066 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-cell1-novncproxy-0" Nov 23 16:15:46 crc kubenswrapper[5050]: I1123 16:15:46.806300 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-cell1-novncproxy-0" Nov 23 16:15:47 crc kubenswrapper[5050]: I1123 16:15:47.581425 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-novncproxy-0" Nov 23 16:15:47 crc kubenswrapper[5050]: I1123 16:15:47.650247 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Nov 23 16:15:48 crc kubenswrapper[5050]: I1123 16:15:48.800161 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 23 16:15:48 crc kubenswrapper[5050]: I1123 16:15:48.800623 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 23 16:15:48 crc kubenswrapper[5050]: I1123 16:15:48.921394 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell0-conductor-0" Nov 23 16:15:49 crc kubenswrapper[5050]: I1123 16:15:49.881782 5050 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="48851bd1-4829-4f8d-ae93-a6cb3266ce68" containerName="nova-metadata-log" probeResult="failure" output="Get \"http://10.217.1.82:8775/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 23 16:15:49 crc kubenswrapper[5050]: I1123 16:15:49.881783 5050 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="48851bd1-4829-4f8d-ae93-a6cb3266ce68" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"http://10.217.1.82:8775/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 23 16:15:49 crc kubenswrapper[5050]: I1123 16:15:49.936828 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 23 16:15:49 crc kubenswrapper[5050]: I1123 16:15:49.936876 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 23 16:15:51 crc 
kubenswrapper[5050]: I1123 16:15:51.019755 5050 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="8098c44a-b797-4fba-8d0d-2138ca9b367a" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.1.84:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 23 16:15:51 crc kubenswrapper[5050]: I1123 16:15:51.019823 5050 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="8098c44a-b797-4fba-8d0d-2138ca9b367a" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.1.84:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 23 16:15:51 crc kubenswrapper[5050]: I1123 16:15:51.562201 5050 scope.go:117] "RemoveContainer" containerID="131b4bb8956deb8c3bd7bd951e055fca3b66c01a0141c3f85cb0b1152065de00" Nov 23 16:15:51 crc kubenswrapper[5050]: E1123 16:15:51.562539 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 16:15:52 crc kubenswrapper[5050]: I1123 16:15:52.978885 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-conductor-0" Nov 23 16:15:57 crc kubenswrapper[5050]: E1123 16:15:57.186094 5050 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.32:37606->38.102.83.32:33067: write tcp 38.102.83.32:37606->38.102.83.32:33067: write: connection reset by peer Nov 23 16:15:58 crc kubenswrapper[5050]: I1123 16:15:58.803629 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 23 16:15:58 crc kubenswrapper[5050]: I1123 16:15:58.803736 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 23 16:15:58 crc kubenswrapper[5050]: I1123 16:15:58.805827 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 23 16:15:58 crc kubenswrapper[5050]: I1123 16:15:58.806424 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 23 16:15:59 crc kubenswrapper[5050]: I1123 16:15:59.990965 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 23 16:15:59 crc kubenswrapper[5050]: I1123 16:15:59.991963 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 23 16:16:00 crc kubenswrapper[5050]: I1123 16:16:00.011215 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 23 16:16:00 crc kubenswrapper[5050]: I1123 16:16:00.034981 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 23 16:16:00 crc kubenswrapper[5050]: I1123 16:16:00.726995 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 23 16:16:00 crc kubenswrapper[5050]: I1123 16:16:00.731776 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 23 16:16:03 crc kubenswrapper[5050]: I1123 16:16:03.305923 5050 kubelet.go:2421] 
"SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Nov 23 16:16:03 crc kubenswrapper[5050]: I1123 16:16:03.307900 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 23 16:16:03 crc kubenswrapper[5050]: I1123 16:16:03.312023 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Nov 23 16:16:03 crc kubenswrapper[5050]: I1123 16:16:03.347928 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 23 16:16:03 crc kubenswrapper[5050]: I1123 16:16:03.475112 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f6krd\" (UniqueName: \"kubernetes.io/projected/5af01ad6-c261-4d39-b3d8-4fe7a73524cf-kube-api-access-f6krd\") pod \"cinder-scheduler-0\" (UID: \"5af01ad6-c261-4d39-b3d8-4fe7a73524cf\") " pod="openstack/cinder-scheduler-0" Nov 23 16:16:03 crc kubenswrapper[5050]: I1123 16:16:03.475554 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/5af01ad6-c261-4d39-b3d8-4fe7a73524cf-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"5af01ad6-c261-4d39-b3d8-4fe7a73524cf\") " pod="openstack/cinder-scheduler-0" Nov 23 16:16:03 crc kubenswrapper[5050]: I1123 16:16:03.475904 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5af01ad6-c261-4d39-b3d8-4fe7a73524cf-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"5af01ad6-c261-4d39-b3d8-4fe7a73524cf\") " pod="openstack/cinder-scheduler-0" Nov 23 16:16:03 crc kubenswrapper[5050]: I1123 16:16:03.476110 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5af01ad6-c261-4d39-b3d8-4fe7a73524cf-config-data\") pod \"cinder-scheduler-0\" (UID: \"5af01ad6-c261-4d39-b3d8-4fe7a73524cf\") " pod="openstack/cinder-scheduler-0" Nov 23 16:16:03 crc kubenswrapper[5050]: I1123 16:16:03.476196 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5af01ad6-c261-4d39-b3d8-4fe7a73524cf-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"5af01ad6-c261-4d39-b3d8-4fe7a73524cf\") " pod="openstack/cinder-scheduler-0" Nov 23 16:16:03 crc kubenswrapper[5050]: I1123 16:16:03.476293 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5af01ad6-c261-4d39-b3d8-4fe7a73524cf-scripts\") pod \"cinder-scheduler-0\" (UID: \"5af01ad6-c261-4d39-b3d8-4fe7a73524cf\") " pod="openstack/cinder-scheduler-0" Nov 23 16:16:03 crc kubenswrapper[5050]: I1123 16:16:03.549339 5050 scope.go:117] "RemoveContainer" containerID="131b4bb8956deb8c3bd7bd951e055fca3b66c01a0141c3f85cb0b1152065de00" Nov 23 16:16:03 crc kubenswrapper[5050]: E1123 16:16:03.549888 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" 
podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 16:16:03 crc kubenswrapper[5050]: I1123 16:16:03.578548 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f6krd\" (UniqueName: \"kubernetes.io/projected/5af01ad6-c261-4d39-b3d8-4fe7a73524cf-kube-api-access-f6krd\") pod \"cinder-scheduler-0\" (UID: \"5af01ad6-c261-4d39-b3d8-4fe7a73524cf\") " pod="openstack/cinder-scheduler-0" Nov 23 16:16:03 crc kubenswrapper[5050]: I1123 16:16:03.578676 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/5af01ad6-c261-4d39-b3d8-4fe7a73524cf-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"5af01ad6-c261-4d39-b3d8-4fe7a73524cf\") " pod="openstack/cinder-scheduler-0" Nov 23 16:16:03 crc kubenswrapper[5050]: I1123 16:16:03.578765 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5af01ad6-c261-4d39-b3d8-4fe7a73524cf-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"5af01ad6-c261-4d39-b3d8-4fe7a73524cf\") " pod="openstack/cinder-scheduler-0" Nov 23 16:16:03 crc kubenswrapper[5050]: I1123 16:16:03.578840 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5af01ad6-c261-4d39-b3d8-4fe7a73524cf-config-data\") pod \"cinder-scheduler-0\" (UID: \"5af01ad6-c261-4d39-b3d8-4fe7a73524cf\") " pod="openstack/cinder-scheduler-0" Nov 23 16:16:03 crc kubenswrapper[5050]: I1123 16:16:03.578896 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5af01ad6-c261-4d39-b3d8-4fe7a73524cf-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"5af01ad6-c261-4d39-b3d8-4fe7a73524cf\") " pod="openstack/cinder-scheduler-0" Nov 23 16:16:03 crc kubenswrapper[5050]: I1123 16:16:03.578909 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/5af01ad6-c261-4d39-b3d8-4fe7a73524cf-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"5af01ad6-c261-4d39-b3d8-4fe7a73524cf\") " pod="openstack/cinder-scheduler-0" Nov 23 16:16:03 crc kubenswrapper[5050]: I1123 16:16:03.578929 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5af01ad6-c261-4d39-b3d8-4fe7a73524cf-scripts\") pod \"cinder-scheduler-0\" (UID: \"5af01ad6-c261-4d39-b3d8-4fe7a73524cf\") " pod="openstack/cinder-scheduler-0" Nov 23 16:16:03 crc kubenswrapper[5050]: I1123 16:16:03.585521 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5af01ad6-c261-4d39-b3d8-4fe7a73524cf-scripts\") pod \"cinder-scheduler-0\" (UID: \"5af01ad6-c261-4d39-b3d8-4fe7a73524cf\") " pod="openstack/cinder-scheduler-0" Nov 23 16:16:03 crc kubenswrapper[5050]: I1123 16:16:03.585730 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5af01ad6-c261-4d39-b3d8-4fe7a73524cf-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"5af01ad6-c261-4d39-b3d8-4fe7a73524cf\") " pod="openstack/cinder-scheduler-0" Nov 23 16:16:03 crc kubenswrapper[5050]: I1123 16:16:03.587698 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/5af01ad6-c261-4d39-b3d8-4fe7a73524cf-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"5af01ad6-c261-4d39-b3d8-4fe7a73524cf\") " pod="openstack/cinder-scheduler-0" Nov 23 16:16:03 crc kubenswrapper[5050]: I1123 16:16:03.589222 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5af01ad6-c261-4d39-b3d8-4fe7a73524cf-config-data\") pod \"cinder-scheduler-0\" (UID: \"5af01ad6-c261-4d39-b3d8-4fe7a73524cf\") " pod="openstack/cinder-scheduler-0" Nov 23 16:16:03 crc kubenswrapper[5050]: I1123 16:16:03.608223 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f6krd\" (UniqueName: \"kubernetes.io/projected/5af01ad6-c261-4d39-b3d8-4fe7a73524cf-kube-api-access-f6krd\") pod \"cinder-scheduler-0\" (UID: \"5af01ad6-c261-4d39-b3d8-4fe7a73524cf\") " pod="openstack/cinder-scheduler-0" Nov 23 16:16:03 crc kubenswrapper[5050]: I1123 16:16:03.629816 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 23 16:16:04 crc kubenswrapper[5050]: I1123 16:16:04.168921 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 23 16:16:04 crc kubenswrapper[5050]: I1123 16:16:04.794935 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"5af01ad6-c261-4d39-b3d8-4fe7a73524cf","Type":"ContainerStarted","Data":"4518ddefdf9304c84885bc85c22aa2e193d0f76936b4afeb2642479a2f34d798"} Nov 23 16:16:05 crc kubenswrapper[5050]: I1123 16:16:05.329396 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Nov 23 16:16:05 crc kubenswrapper[5050]: I1123 16:16:05.330214 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="8a75ff78-417c-4487-9b35-c9dbae5dcca6" containerName="cinder-api-log" containerID="cri-o://6e8f1df424a95618ab5da6c8694c5e37743478a264f86c301cf216d0ac7756ec" gracePeriod=30 Nov 23 16:16:05 crc kubenswrapper[5050]: I1123 16:16:05.330410 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="8a75ff78-417c-4487-9b35-c9dbae5dcca6" containerName="cinder-api" containerID="cri-o://b0a894b01da730960d9113a2ac70f24be262176d6d594e532be7ac3dc95f7c77" gracePeriod=30 Nov 23 16:16:05 crc kubenswrapper[5050]: I1123 16:16:05.808534 5050 generic.go:334] "Generic (PLEG): container finished" podID="8a75ff78-417c-4487-9b35-c9dbae5dcca6" containerID="6e8f1df424a95618ab5da6c8694c5e37743478a264f86c301cf216d0ac7756ec" exitCode=143 Nov 23 16:16:05 crc kubenswrapper[5050]: I1123 16:16:05.808625 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"8a75ff78-417c-4487-9b35-c9dbae5dcca6","Type":"ContainerDied","Data":"6e8f1df424a95618ab5da6c8694c5e37743478a264f86c301cf216d0ac7756ec"} Nov 23 16:16:05 crc kubenswrapper[5050]: I1123 16:16:05.812734 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"5af01ad6-c261-4d39-b3d8-4fe7a73524cf","Type":"ContainerStarted","Data":"4eaf80a1bcd5f62c60185ecb37c51df8529829694a56d6dd5702eed631f63f9d"} Nov 23 16:16:05 crc kubenswrapper[5050]: I1123 16:16:05.812795 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"5af01ad6-c261-4d39-b3d8-4fe7a73524cf","Type":"ContainerStarted","Data":"c206774ae0b9813bda1fce04e9c5974f141ba16a926981b101f7a40459d9531d"} 
Nov 23 16:16:05 crc kubenswrapper[5050]: I1123 16:16:05.845850 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=2.845822135 podStartE2EDuration="2.845822135s" podCreationTimestamp="2025-11-23 16:16:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 16:16:05.832776837 +0000 UTC m=+5660.999773332" watchObservedRunningTime="2025-11-23 16:16:05.845822135 +0000 UTC m=+5661.012818620" Nov 23 16:16:05 crc kubenswrapper[5050]: I1123 16:16:05.913181 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-volume-volume1-0"] Nov 23 16:16:05 crc kubenswrapper[5050]: I1123 16:16:05.914932 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-volume-volume1-0" Nov 23 16:16:05 crc kubenswrapper[5050]: I1123 16:16:05.919748 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-volume-volume1-config-data" Nov 23 16:16:05 crc kubenswrapper[5050]: I1123 16:16:05.931668 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-volume-volume1-0"] Nov 23 16:16:06 crc kubenswrapper[5050]: I1123 16:16:06.026628 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-c962f"] Nov 23 16:16:06 crc kubenswrapper[5050]: I1123 16:16:06.033261 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-c962f" Nov 23 16:16:06 crc kubenswrapper[5050]: I1123 16:16:06.034978 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/05c9da2b-2e38-45c6-924b-641d802e03fd-dev\") pod \"cinder-volume-volume1-0\" (UID: \"05c9da2b-2e38-45c6-924b-641d802e03fd\") " pod="openstack/cinder-volume-volume1-0" Nov 23 16:16:06 crc kubenswrapper[5050]: I1123 16:16:06.035036 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/05c9da2b-2e38-45c6-924b-641d802e03fd-var-locks-brick\") pod \"cinder-volume-volume1-0\" (UID: \"05c9da2b-2e38-45c6-924b-641d802e03fd\") " pod="openstack/cinder-volume-volume1-0" Nov 23 16:16:06 crc kubenswrapper[5050]: I1123 16:16:06.035071 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/05c9da2b-2e38-45c6-924b-641d802e03fd-config-data\") pod \"cinder-volume-volume1-0\" (UID: \"05c9da2b-2e38-45c6-924b-641d802e03fd\") " pod="openstack/cinder-volume-volume1-0" Nov 23 16:16:06 crc kubenswrapper[5050]: I1123 16:16:06.035094 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/05c9da2b-2e38-45c6-924b-641d802e03fd-etc-nvme\") pod \"cinder-volume-volume1-0\" (UID: \"05c9da2b-2e38-45c6-924b-641d802e03fd\") " pod="openstack/cinder-volume-volume1-0" Nov 23 16:16:06 crc kubenswrapper[5050]: I1123 16:16:06.035108 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/05c9da2b-2e38-45c6-924b-641d802e03fd-etc-iscsi\") pod \"cinder-volume-volume1-0\" (UID: \"05c9da2b-2e38-45c6-924b-641d802e03fd\") " pod="openstack/cinder-volume-volume1-0" Nov 23 16:16:06 crc 
kubenswrapper[5050]: I1123 16:16:06.035143 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/05c9da2b-2e38-45c6-924b-641d802e03fd-run\") pod \"cinder-volume-volume1-0\" (UID: \"05c9da2b-2e38-45c6-924b-641d802e03fd\") " pod="openstack/cinder-volume-volume1-0" Nov 23 16:16:06 crc kubenswrapper[5050]: I1123 16:16:06.035169 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/05c9da2b-2e38-45c6-924b-641d802e03fd-combined-ca-bundle\") pod \"cinder-volume-volume1-0\" (UID: \"05c9da2b-2e38-45c6-924b-641d802e03fd\") " pod="openstack/cinder-volume-volume1-0" Nov 23 16:16:06 crc kubenswrapper[5050]: I1123 16:16:06.035191 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/05c9da2b-2e38-45c6-924b-641d802e03fd-sys\") pod \"cinder-volume-volume1-0\" (UID: \"05c9da2b-2e38-45c6-924b-641d802e03fd\") " pod="openstack/cinder-volume-volume1-0" Nov 23 16:16:06 crc kubenswrapper[5050]: I1123 16:16:06.035217 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/05c9da2b-2e38-45c6-924b-641d802e03fd-var-lib-cinder\") pod \"cinder-volume-volume1-0\" (UID: \"05c9da2b-2e38-45c6-924b-641d802e03fd\") " pod="openstack/cinder-volume-volume1-0" Nov 23 16:16:06 crc kubenswrapper[5050]: I1123 16:16:06.035234 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/05c9da2b-2e38-45c6-924b-641d802e03fd-scripts\") pod \"cinder-volume-volume1-0\" (UID: \"05c9da2b-2e38-45c6-924b-641d802e03fd\") " pod="openstack/cinder-volume-volume1-0" Nov 23 16:16:06 crc kubenswrapper[5050]: I1123 16:16:06.035255 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/05c9da2b-2e38-45c6-924b-641d802e03fd-config-data-custom\") pod \"cinder-volume-volume1-0\" (UID: \"05c9da2b-2e38-45c6-924b-641d802e03fd\") " pod="openstack/cinder-volume-volume1-0" Nov 23 16:16:06 crc kubenswrapper[5050]: I1123 16:16:06.035275 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/05c9da2b-2e38-45c6-924b-641d802e03fd-var-locks-cinder\") pod \"cinder-volume-volume1-0\" (UID: \"05c9da2b-2e38-45c6-924b-641d802e03fd\") " pod="openstack/cinder-volume-volume1-0" Nov 23 16:16:06 crc kubenswrapper[5050]: I1123 16:16:06.035298 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/05c9da2b-2e38-45c6-924b-641d802e03fd-ceph\") pod \"cinder-volume-volume1-0\" (UID: \"05c9da2b-2e38-45c6-924b-641d802e03fd\") " pod="openstack/cinder-volume-volume1-0" Nov 23 16:16:06 crc kubenswrapper[5050]: I1123 16:16:06.035359 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/05c9da2b-2e38-45c6-924b-641d802e03fd-lib-modules\") pod \"cinder-volume-volume1-0\" (UID: \"05c9da2b-2e38-45c6-924b-641d802e03fd\") " pod="openstack/cinder-volume-volume1-0" Nov 23 16:16:06 crc kubenswrapper[5050]: I1123 16:16:06.035386 
5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mm6b7\" (UniqueName: \"kubernetes.io/projected/05c9da2b-2e38-45c6-924b-641d802e03fd-kube-api-access-mm6b7\") pod \"cinder-volume-volume1-0\" (UID: \"05c9da2b-2e38-45c6-924b-641d802e03fd\") " pod="openstack/cinder-volume-volume1-0" Nov 23 16:16:06 crc kubenswrapper[5050]: I1123 16:16:06.035412 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/05c9da2b-2e38-45c6-924b-641d802e03fd-etc-machine-id\") pod \"cinder-volume-volume1-0\" (UID: \"05c9da2b-2e38-45c6-924b-641d802e03fd\") " pod="openstack/cinder-volume-volume1-0" Nov 23 16:16:06 crc kubenswrapper[5050]: I1123 16:16:06.041560 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-c962f"] Nov 23 16:16:06 crc kubenswrapper[5050]: I1123 16:16:06.137869 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/05c9da2b-2e38-45c6-924b-641d802e03fd-config-data\") pod \"cinder-volume-volume1-0\" (UID: \"05c9da2b-2e38-45c6-924b-641d802e03fd\") " pod="openstack/cinder-volume-volume1-0" Nov 23 16:16:06 crc kubenswrapper[5050]: I1123 16:16:06.137948 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/05c9da2b-2e38-45c6-924b-641d802e03fd-etc-nvme\") pod \"cinder-volume-volume1-0\" (UID: \"05c9da2b-2e38-45c6-924b-641d802e03fd\") " pod="openstack/cinder-volume-volume1-0" Nov 23 16:16:06 crc kubenswrapper[5050]: I1123 16:16:06.137971 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/05c9da2b-2e38-45c6-924b-641d802e03fd-etc-iscsi\") pod \"cinder-volume-volume1-0\" (UID: \"05c9da2b-2e38-45c6-924b-641d802e03fd\") " pod="openstack/cinder-volume-volume1-0" Nov 23 16:16:06 crc kubenswrapper[5050]: I1123 16:16:06.138000 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4051fc5f-7beb-46ce-8c91-bb54c6b5003a-utilities\") pod \"redhat-marketplace-c962f\" (UID: \"4051fc5f-7beb-46ce-8c91-bb54c6b5003a\") " pod="openshift-marketplace/redhat-marketplace-c962f" Nov 23 16:16:06 crc kubenswrapper[5050]: I1123 16:16:06.138017 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wjp4j\" (UniqueName: \"kubernetes.io/projected/4051fc5f-7beb-46ce-8c91-bb54c6b5003a-kube-api-access-wjp4j\") pod \"redhat-marketplace-c962f\" (UID: \"4051fc5f-7beb-46ce-8c91-bb54c6b5003a\") " pod="openshift-marketplace/redhat-marketplace-c962f" Nov 23 16:16:06 crc kubenswrapper[5050]: I1123 16:16:06.138050 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/05c9da2b-2e38-45c6-924b-641d802e03fd-run\") pod \"cinder-volume-volume1-0\" (UID: \"05c9da2b-2e38-45c6-924b-641d802e03fd\") " pod="openstack/cinder-volume-volume1-0" Nov 23 16:16:06 crc kubenswrapper[5050]: I1123 16:16:06.138079 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/05c9da2b-2e38-45c6-924b-641d802e03fd-combined-ca-bundle\") pod \"cinder-volume-volume1-0\" (UID: \"05c9da2b-2e38-45c6-924b-641d802e03fd\") " 
pod="openstack/cinder-volume-volume1-0" Nov 23 16:16:06 crc kubenswrapper[5050]: I1123 16:16:06.138103 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/05c9da2b-2e38-45c6-924b-641d802e03fd-sys\") pod \"cinder-volume-volume1-0\" (UID: \"05c9da2b-2e38-45c6-924b-641d802e03fd\") " pod="openstack/cinder-volume-volume1-0" Nov 23 16:16:06 crc kubenswrapper[5050]: I1123 16:16:06.138132 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/05c9da2b-2e38-45c6-924b-641d802e03fd-var-lib-cinder\") pod \"cinder-volume-volume1-0\" (UID: \"05c9da2b-2e38-45c6-924b-641d802e03fd\") " pod="openstack/cinder-volume-volume1-0" Nov 23 16:16:06 crc kubenswrapper[5050]: I1123 16:16:06.138152 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/05c9da2b-2e38-45c6-924b-641d802e03fd-scripts\") pod \"cinder-volume-volume1-0\" (UID: \"05c9da2b-2e38-45c6-924b-641d802e03fd\") " pod="openstack/cinder-volume-volume1-0" Nov 23 16:16:06 crc kubenswrapper[5050]: I1123 16:16:06.138170 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/05c9da2b-2e38-45c6-924b-641d802e03fd-config-data-custom\") pod \"cinder-volume-volume1-0\" (UID: \"05c9da2b-2e38-45c6-924b-641d802e03fd\") " pod="openstack/cinder-volume-volume1-0" Nov 23 16:16:06 crc kubenswrapper[5050]: I1123 16:16:06.138189 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/05c9da2b-2e38-45c6-924b-641d802e03fd-var-locks-cinder\") pod \"cinder-volume-volume1-0\" (UID: \"05c9da2b-2e38-45c6-924b-641d802e03fd\") " pod="openstack/cinder-volume-volume1-0" Nov 23 16:16:06 crc kubenswrapper[5050]: I1123 16:16:06.138211 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/05c9da2b-2e38-45c6-924b-641d802e03fd-ceph\") pod \"cinder-volume-volume1-0\" (UID: \"05c9da2b-2e38-45c6-924b-641d802e03fd\") " pod="openstack/cinder-volume-volume1-0" Nov 23 16:16:06 crc kubenswrapper[5050]: I1123 16:16:06.138235 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4051fc5f-7beb-46ce-8c91-bb54c6b5003a-catalog-content\") pod \"redhat-marketplace-c962f\" (UID: \"4051fc5f-7beb-46ce-8c91-bb54c6b5003a\") " pod="openshift-marketplace/redhat-marketplace-c962f" Nov 23 16:16:06 crc kubenswrapper[5050]: I1123 16:16:06.138291 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/05c9da2b-2e38-45c6-924b-641d802e03fd-lib-modules\") pod \"cinder-volume-volume1-0\" (UID: \"05c9da2b-2e38-45c6-924b-641d802e03fd\") " pod="openstack/cinder-volume-volume1-0" Nov 23 16:16:06 crc kubenswrapper[5050]: I1123 16:16:06.138317 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mm6b7\" (UniqueName: \"kubernetes.io/projected/05c9da2b-2e38-45c6-924b-641d802e03fd-kube-api-access-mm6b7\") pod \"cinder-volume-volume1-0\" (UID: \"05c9da2b-2e38-45c6-924b-641d802e03fd\") " pod="openstack/cinder-volume-volume1-0" Nov 23 16:16:06 crc kubenswrapper[5050]: I1123 16:16:06.138342 5050 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/05c9da2b-2e38-45c6-924b-641d802e03fd-etc-machine-id\") pod \"cinder-volume-volume1-0\" (UID: \"05c9da2b-2e38-45c6-924b-641d802e03fd\") " pod="openstack/cinder-volume-volume1-0" Nov 23 16:16:06 crc kubenswrapper[5050]: I1123 16:16:06.138371 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/05c9da2b-2e38-45c6-924b-641d802e03fd-dev\") pod \"cinder-volume-volume1-0\" (UID: \"05c9da2b-2e38-45c6-924b-641d802e03fd\") " pod="openstack/cinder-volume-volume1-0" Nov 23 16:16:06 crc kubenswrapper[5050]: I1123 16:16:06.138389 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/05c9da2b-2e38-45c6-924b-641d802e03fd-var-locks-brick\") pod \"cinder-volume-volume1-0\" (UID: \"05c9da2b-2e38-45c6-924b-641d802e03fd\") " pod="openstack/cinder-volume-volume1-0" Nov 23 16:16:06 crc kubenswrapper[5050]: I1123 16:16:06.138764 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/05c9da2b-2e38-45c6-924b-641d802e03fd-var-locks-brick\") pod \"cinder-volume-volume1-0\" (UID: \"05c9da2b-2e38-45c6-924b-641d802e03fd\") " pod="openstack/cinder-volume-volume1-0" Nov 23 16:16:06 crc kubenswrapper[5050]: I1123 16:16:06.138877 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/05c9da2b-2e38-45c6-924b-641d802e03fd-var-lib-cinder\") pod \"cinder-volume-volume1-0\" (UID: \"05c9da2b-2e38-45c6-924b-641d802e03fd\") " pod="openstack/cinder-volume-volume1-0" Nov 23 16:16:06 crc kubenswrapper[5050]: I1123 16:16:06.141728 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/05c9da2b-2e38-45c6-924b-641d802e03fd-etc-nvme\") pod \"cinder-volume-volume1-0\" (UID: \"05c9da2b-2e38-45c6-924b-641d802e03fd\") " pod="openstack/cinder-volume-volume1-0" Nov 23 16:16:06 crc kubenswrapper[5050]: I1123 16:16:06.141806 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/05c9da2b-2e38-45c6-924b-641d802e03fd-etc-iscsi\") pod \"cinder-volume-volume1-0\" (UID: \"05c9da2b-2e38-45c6-924b-641d802e03fd\") " pod="openstack/cinder-volume-volume1-0" Nov 23 16:16:06 crc kubenswrapper[5050]: I1123 16:16:06.141964 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/05c9da2b-2e38-45c6-924b-641d802e03fd-var-locks-cinder\") pod \"cinder-volume-volume1-0\" (UID: \"05c9da2b-2e38-45c6-924b-641d802e03fd\") " pod="openstack/cinder-volume-volume1-0" Nov 23 16:16:06 crc kubenswrapper[5050]: I1123 16:16:06.142555 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/05c9da2b-2e38-45c6-924b-641d802e03fd-dev\") pod \"cinder-volume-volume1-0\" (UID: \"05c9da2b-2e38-45c6-924b-641d802e03fd\") " pod="openstack/cinder-volume-volume1-0" Nov 23 16:16:06 crc kubenswrapper[5050]: I1123 16:16:06.142589 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/05c9da2b-2e38-45c6-924b-641d802e03fd-etc-machine-id\") pod \"cinder-volume-volume1-0\" (UID: \"05c9da2b-2e38-45c6-924b-641d802e03fd\") " 
pod="openstack/cinder-volume-volume1-0" Nov 23 16:16:06 crc kubenswrapper[5050]: I1123 16:16:06.142612 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/05c9da2b-2e38-45c6-924b-641d802e03fd-lib-modules\") pod \"cinder-volume-volume1-0\" (UID: \"05c9da2b-2e38-45c6-924b-641d802e03fd\") " pod="openstack/cinder-volume-volume1-0" Nov 23 16:16:06 crc kubenswrapper[5050]: I1123 16:16:06.142712 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run\" (UniqueName: \"kubernetes.io/host-path/05c9da2b-2e38-45c6-924b-641d802e03fd-run\") pod \"cinder-volume-volume1-0\" (UID: \"05c9da2b-2e38-45c6-924b-641d802e03fd\") " pod="openstack/cinder-volume-volume1-0" Nov 23 16:16:06 crc kubenswrapper[5050]: I1123 16:16:06.142746 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/05c9da2b-2e38-45c6-924b-641d802e03fd-sys\") pod \"cinder-volume-volume1-0\" (UID: \"05c9da2b-2e38-45c6-924b-641d802e03fd\") " pod="openstack/cinder-volume-volume1-0" Nov 23 16:16:06 crc kubenswrapper[5050]: I1123 16:16:06.148972 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/05c9da2b-2e38-45c6-924b-641d802e03fd-ceph\") pod \"cinder-volume-volume1-0\" (UID: \"05c9da2b-2e38-45c6-924b-641d802e03fd\") " pod="openstack/cinder-volume-volume1-0" Nov 23 16:16:06 crc kubenswrapper[5050]: I1123 16:16:06.149127 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/05c9da2b-2e38-45c6-924b-641d802e03fd-combined-ca-bundle\") pod \"cinder-volume-volume1-0\" (UID: \"05c9da2b-2e38-45c6-924b-641d802e03fd\") " pod="openstack/cinder-volume-volume1-0" Nov 23 16:16:06 crc kubenswrapper[5050]: I1123 16:16:06.149156 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/05c9da2b-2e38-45c6-924b-641d802e03fd-scripts\") pod \"cinder-volume-volume1-0\" (UID: \"05c9da2b-2e38-45c6-924b-641d802e03fd\") " pod="openstack/cinder-volume-volume1-0" Nov 23 16:16:06 crc kubenswrapper[5050]: I1123 16:16:06.159662 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/05c9da2b-2e38-45c6-924b-641d802e03fd-config-data\") pod \"cinder-volume-volume1-0\" (UID: \"05c9da2b-2e38-45c6-924b-641d802e03fd\") " pod="openstack/cinder-volume-volume1-0" Nov 23 16:16:06 crc kubenswrapper[5050]: I1123 16:16:06.172477 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mm6b7\" (UniqueName: \"kubernetes.io/projected/05c9da2b-2e38-45c6-924b-641d802e03fd-kube-api-access-mm6b7\") pod \"cinder-volume-volume1-0\" (UID: \"05c9da2b-2e38-45c6-924b-641d802e03fd\") " pod="openstack/cinder-volume-volume1-0" Nov 23 16:16:06 crc kubenswrapper[5050]: I1123 16:16:06.176220 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/05c9da2b-2e38-45c6-924b-641d802e03fd-config-data-custom\") pod \"cinder-volume-volume1-0\" (UID: \"05c9da2b-2e38-45c6-924b-641d802e03fd\") " pod="openstack/cinder-volume-volume1-0" Nov 23 16:16:06 crc kubenswrapper[5050]: I1123 16:16:06.240717 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/4051fc5f-7beb-46ce-8c91-bb54c6b5003a-catalog-content\") pod \"redhat-marketplace-c962f\" (UID: \"4051fc5f-7beb-46ce-8c91-bb54c6b5003a\") " pod="openshift-marketplace/redhat-marketplace-c962f" Nov 23 16:16:06 crc kubenswrapper[5050]: I1123 16:16:06.240917 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4051fc5f-7beb-46ce-8c91-bb54c6b5003a-utilities\") pod \"redhat-marketplace-c962f\" (UID: \"4051fc5f-7beb-46ce-8c91-bb54c6b5003a\") " pod="openshift-marketplace/redhat-marketplace-c962f" Nov 23 16:16:06 crc kubenswrapper[5050]: I1123 16:16:06.240944 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wjp4j\" (UniqueName: \"kubernetes.io/projected/4051fc5f-7beb-46ce-8c91-bb54c6b5003a-kube-api-access-wjp4j\") pod \"redhat-marketplace-c962f\" (UID: \"4051fc5f-7beb-46ce-8c91-bb54c6b5003a\") " pod="openshift-marketplace/redhat-marketplace-c962f" Nov 23 16:16:06 crc kubenswrapper[5050]: I1123 16:16:06.241979 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4051fc5f-7beb-46ce-8c91-bb54c6b5003a-catalog-content\") pod \"redhat-marketplace-c962f\" (UID: \"4051fc5f-7beb-46ce-8c91-bb54c6b5003a\") " pod="openshift-marketplace/redhat-marketplace-c962f" Nov 23 16:16:06 crc kubenswrapper[5050]: I1123 16:16:06.242103 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4051fc5f-7beb-46ce-8c91-bb54c6b5003a-utilities\") pod \"redhat-marketplace-c962f\" (UID: \"4051fc5f-7beb-46ce-8c91-bb54c6b5003a\") " pod="openshift-marketplace/redhat-marketplace-c962f" Nov 23 16:16:06 crc kubenswrapper[5050]: I1123 16:16:06.242224 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-volume-volume1-0" Nov 23 16:16:06 crc kubenswrapper[5050]: I1123 16:16:06.272710 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wjp4j\" (UniqueName: \"kubernetes.io/projected/4051fc5f-7beb-46ce-8c91-bb54c6b5003a-kube-api-access-wjp4j\") pod \"redhat-marketplace-c962f\" (UID: \"4051fc5f-7beb-46ce-8c91-bb54c6b5003a\") " pod="openshift-marketplace/redhat-marketplace-c962f" Nov 23 16:16:06 crc kubenswrapper[5050]: I1123 16:16:06.371440 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-c962f" Nov 23 16:16:06 crc kubenswrapper[5050]: I1123 16:16:06.667592 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-backup-0"] Nov 23 16:16:06 crc kubenswrapper[5050]: I1123 16:16:06.670738 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-backup-0" Nov 23 16:16:06 crc kubenswrapper[5050]: I1123 16:16:06.673337 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-backup-config-data" Nov 23 16:16:06 crc kubenswrapper[5050]: I1123 16:16:06.702677 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-backup-0"] Nov 23 16:16:06 crc kubenswrapper[5050]: I1123 16:16:06.759037 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/ebd99884-0673-4d7b-acf0-077be572599f-etc-iscsi\") pod \"cinder-backup-0\" (UID: \"ebd99884-0673-4d7b-acf0-077be572599f\") " pod="openstack/cinder-backup-0" Nov 23 16:16:06 crc kubenswrapper[5050]: I1123 16:16:06.759091 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/ebd99884-0673-4d7b-acf0-077be572599f-var-locks-cinder\") pod \"cinder-backup-0\" (UID: \"ebd99884-0673-4d7b-acf0-077be572599f\") " pod="openstack/cinder-backup-0" Nov 23 16:16:06 crc kubenswrapper[5050]: I1123 16:16:06.759131 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/ebd99884-0673-4d7b-acf0-077be572599f-dev\") pod \"cinder-backup-0\" (UID: \"ebd99884-0673-4d7b-acf0-077be572599f\") " pod="openstack/cinder-backup-0" Nov 23 16:16:06 crc kubenswrapper[5050]: I1123 16:16:06.759160 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ebd99884-0673-4d7b-acf0-077be572599f-combined-ca-bundle\") pod \"cinder-backup-0\" (UID: \"ebd99884-0673-4d7b-acf0-077be572599f\") " pod="openstack/cinder-backup-0" Nov 23 16:16:06 crc kubenswrapper[5050]: I1123 16:16:06.759229 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/ebd99884-0673-4d7b-acf0-077be572599f-lib-modules\") pod \"cinder-backup-0\" (UID: \"ebd99884-0673-4d7b-acf0-077be572599f\") " pod="openstack/cinder-backup-0" Nov 23 16:16:06 crc kubenswrapper[5050]: I1123 16:16:06.759265 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ebd99884-0673-4d7b-acf0-077be572599f-config-data-custom\") pod \"cinder-backup-0\" (UID: \"ebd99884-0673-4d7b-acf0-077be572599f\") " pod="openstack/cinder-backup-0" Nov 23 16:16:06 crc kubenswrapper[5050]: I1123 16:16:06.759291 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ebd99884-0673-4d7b-acf0-077be572599f-scripts\") pod \"cinder-backup-0\" (UID: \"ebd99884-0673-4d7b-acf0-077be572599f\") " pod="openstack/cinder-backup-0" Nov 23 16:16:06 crc kubenswrapper[5050]: I1123 16:16:06.759354 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/ebd99884-0673-4d7b-acf0-077be572599f-etc-machine-id\") pod \"cinder-backup-0\" (UID: \"ebd99884-0673-4d7b-acf0-077be572599f\") " pod="openstack/cinder-backup-0" Nov 23 16:16:06 crc kubenswrapper[5050]: I1123 16:16:06.759383 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"kube-api-access-p67vw\" (UniqueName: \"kubernetes.io/projected/ebd99884-0673-4d7b-acf0-077be572599f-kube-api-access-p67vw\") pod \"cinder-backup-0\" (UID: \"ebd99884-0673-4d7b-acf0-077be572599f\") " pod="openstack/cinder-backup-0" Nov 23 16:16:06 crc kubenswrapper[5050]: I1123 16:16:06.759408 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/ebd99884-0673-4d7b-acf0-077be572599f-sys\") pod \"cinder-backup-0\" (UID: \"ebd99884-0673-4d7b-acf0-077be572599f\") " pod="openstack/cinder-backup-0" Nov 23 16:16:06 crc kubenswrapper[5050]: I1123 16:16:06.759430 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/ebd99884-0673-4d7b-acf0-077be572599f-run\") pod \"cinder-backup-0\" (UID: \"ebd99884-0673-4d7b-acf0-077be572599f\") " pod="openstack/cinder-backup-0" Nov 23 16:16:06 crc kubenswrapper[5050]: I1123 16:16:06.759476 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/ebd99884-0673-4d7b-acf0-077be572599f-var-lib-cinder\") pod \"cinder-backup-0\" (UID: \"ebd99884-0673-4d7b-acf0-077be572599f\") " pod="openstack/cinder-backup-0" Nov 23 16:16:06 crc kubenswrapper[5050]: I1123 16:16:06.759510 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/ebd99884-0673-4d7b-acf0-077be572599f-etc-nvme\") pod \"cinder-backup-0\" (UID: \"ebd99884-0673-4d7b-acf0-077be572599f\") " pod="openstack/cinder-backup-0" Nov 23 16:16:06 crc kubenswrapper[5050]: I1123 16:16:06.759537 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/ebd99884-0673-4d7b-acf0-077be572599f-var-locks-brick\") pod \"cinder-backup-0\" (UID: \"ebd99884-0673-4d7b-acf0-077be572599f\") " pod="openstack/cinder-backup-0" Nov 23 16:16:06 crc kubenswrapper[5050]: I1123 16:16:06.759557 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ebd99884-0673-4d7b-acf0-077be572599f-config-data\") pod \"cinder-backup-0\" (UID: \"ebd99884-0673-4d7b-acf0-077be572599f\") " pod="openstack/cinder-backup-0" Nov 23 16:16:06 crc kubenswrapper[5050]: I1123 16:16:06.759581 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/ebd99884-0673-4d7b-acf0-077be572599f-ceph\") pod \"cinder-backup-0\" (UID: \"ebd99884-0673-4d7b-acf0-077be572599f\") " pod="openstack/cinder-backup-0" Nov 23 16:16:06 crc kubenswrapper[5050]: I1123 16:16:06.861584 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/ebd99884-0673-4d7b-acf0-077be572599f-etc-machine-id\") pod \"cinder-backup-0\" (UID: \"ebd99884-0673-4d7b-acf0-077be572599f\") " pod="openstack/cinder-backup-0" Nov 23 16:16:06 crc kubenswrapper[5050]: I1123 16:16:06.861653 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p67vw\" (UniqueName: \"kubernetes.io/projected/ebd99884-0673-4d7b-acf0-077be572599f-kube-api-access-p67vw\") pod \"cinder-backup-0\" (UID: \"ebd99884-0673-4d7b-acf0-077be572599f\") " 
pod="openstack/cinder-backup-0" Nov 23 16:16:06 crc kubenswrapper[5050]: I1123 16:16:06.861680 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/ebd99884-0673-4d7b-acf0-077be572599f-sys\") pod \"cinder-backup-0\" (UID: \"ebd99884-0673-4d7b-acf0-077be572599f\") " pod="openstack/cinder-backup-0" Nov 23 16:16:06 crc kubenswrapper[5050]: I1123 16:16:06.861701 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/ebd99884-0673-4d7b-acf0-077be572599f-run\") pod \"cinder-backup-0\" (UID: \"ebd99884-0673-4d7b-acf0-077be572599f\") " pod="openstack/cinder-backup-0" Nov 23 16:16:06 crc kubenswrapper[5050]: I1123 16:16:06.861722 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/ebd99884-0673-4d7b-acf0-077be572599f-var-lib-cinder\") pod \"cinder-backup-0\" (UID: \"ebd99884-0673-4d7b-acf0-077be572599f\") " pod="openstack/cinder-backup-0" Nov 23 16:16:06 crc kubenswrapper[5050]: I1123 16:16:06.861755 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/ebd99884-0673-4d7b-acf0-077be572599f-etc-nvme\") pod \"cinder-backup-0\" (UID: \"ebd99884-0673-4d7b-acf0-077be572599f\") " pod="openstack/cinder-backup-0" Nov 23 16:16:06 crc kubenswrapper[5050]: I1123 16:16:06.861804 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/ebd99884-0673-4d7b-acf0-077be572599f-var-locks-brick\") pod \"cinder-backup-0\" (UID: \"ebd99884-0673-4d7b-acf0-077be572599f\") " pod="openstack/cinder-backup-0" Nov 23 16:16:06 crc kubenswrapper[5050]: I1123 16:16:06.861826 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ebd99884-0673-4d7b-acf0-077be572599f-config-data\") pod \"cinder-backup-0\" (UID: \"ebd99884-0673-4d7b-acf0-077be572599f\") " pod="openstack/cinder-backup-0" Nov 23 16:16:06 crc kubenswrapper[5050]: I1123 16:16:06.861828 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/ebd99884-0673-4d7b-acf0-077be572599f-sys\") pod \"cinder-backup-0\" (UID: \"ebd99884-0673-4d7b-acf0-077be572599f\") " pod="openstack/cinder-backup-0" Nov 23 16:16:06 crc kubenswrapper[5050]: I1123 16:16:06.861866 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/ebd99884-0673-4d7b-acf0-077be572599f-ceph\") pod \"cinder-backup-0\" (UID: \"ebd99884-0673-4d7b-acf0-077be572599f\") " pod="openstack/cinder-backup-0" Nov 23 16:16:06 crc kubenswrapper[5050]: I1123 16:16:06.861903 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/ebd99884-0673-4d7b-acf0-077be572599f-var-lib-cinder\") pod \"cinder-backup-0\" (UID: \"ebd99884-0673-4d7b-acf0-077be572599f\") " pod="openstack/cinder-backup-0" Nov 23 16:16:06 crc kubenswrapper[5050]: I1123 16:16:06.862606 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run\" (UniqueName: \"kubernetes.io/host-path/ebd99884-0673-4d7b-acf0-077be572599f-run\") pod \"cinder-backup-0\" (UID: \"ebd99884-0673-4d7b-acf0-077be572599f\") " pod="openstack/cinder-backup-0" Nov 23 16:16:06 crc kubenswrapper[5050]: 
I1123 16:16:06.862713 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/ebd99884-0673-4d7b-acf0-077be572599f-etc-machine-id\") pod \"cinder-backup-0\" (UID: \"ebd99884-0673-4d7b-acf0-077be572599f\") " pod="openstack/cinder-backup-0" Nov 23 16:16:06 crc kubenswrapper[5050]: I1123 16:16:06.862752 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/ebd99884-0673-4d7b-acf0-077be572599f-etc-nvme\") pod \"cinder-backup-0\" (UID: \"ebd99884-0673-4d7b-acf0-077be572599f\") " pod="openstack/cinder-backup-0" Nov 23 16:16:06 crc kubenswrapper[5050]: I1123 16:16:06.862760 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/ebd99884-0673-4d7b-acf0-077be572599f-etc-iscsi\") pod \"cinder-backup-0\" (UID: \"ebd99884-0673-4d7b-acf0-077be572599f\") " pod="openstack/cinder-backup-0" Nov 23 16:16:06 crc kubenswrapper[5050]: I1123 16:16:06.862736 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/ebd99884-0673-4d7b-acf0-077be572599f-etc-iscsi\") pod \"cinder-backup-0\" (UID: \"ebd99884-0673-4d7b-acf0-077be572599f\") " pod="openstack/cinder-backup-0" Nov 23 16:16:06 crc kubenswrapper[5050]: I1123 16:16:06.862726 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/ebd99884-0673-4d7b-acf0-077be572599f-var-locks-brick\") pod \"cinder-backup-0\" (UID: \"ebd99884-0673-4d7b-acf0-077be572599f\") " pod="openstack/cinder-backup-0" Nov 23 16:16:06 crc kubenswrapper[5050]: I1123 16:16:06.862801 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/ebd99884-0673-4d7b-acf0-077be572599f-var-locks-cinder\") pod \"cinder-backup-0\" (UID: \"ebd99884-0673-4d7b-acf0-077be572599f\") " pod="openstack/cinder-backup-0" Nov 23 16:16:06 crc kubenswrapper[5050]: I1123 16:16:06.862884 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/ebd99884-0673-4d7b-acf0-077be572599f-dev\") pod \"cinder-backup-0\" (UID: \"ebd99884-0673-4d7b-acf0-077be572599f\") " pod="openstack/cinder-backup-0" Nov 23 16:16:06 crc kubenswrapper[5050]: I1123 16:16:06.862914 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/ebd99884-0673-4d7b-acf0-077be572599f-dev\") pod \"cinder-backup-0\" (UID: \"ebd99884-0673-4d7b-acf0-077be572599f\") " pod="openstack/cinder-backup-0" Nov 23 16:16:06 crc kubenswrapper[5050]: I1123 16:16:06.862918 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ebd99884-0673-4d7b-acf0-077be572599f-combined-ca-bundle\") pod \"cinder-backup-0\" (UID: \"ebd99884-0673-4d7b-acf0-077be572599f\") " pod="openstack/cinder-backup-0" Nov 23 16:16:06 crc kubenswrapper[5050]: I1123 16:16:06.862920 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/ebd99884-0673-4d7b-acf0-077be572599f-var-locks-cinder\") pod \"cinder-backup-0\" (UID: \"ebd99884-0673-4d7b-acf0-077be572599f\") " pod="openstack/cinder-backup-0" Nov 23 16:16:06 crc kubenswrapper[5050]: I1123 16:16:06.863232 5050 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/ebd99884-0673-4d7b-acf0-077be572599f-lib-modules\") pod \"cinder-backup-0\" (UID: \"ebd99884-0673-4d7b-acf0-077be572599f\") " pod="openstack/cinder-backup-0" Nov 23 16:16:06 crc kubenswrapper[5050]: I1123 16:16:06.863293 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ebd99884-0673-4d7b-acf0-077be572599f-config-data-custom\") pod \"cinder-backup-0\" (UID: \"ebd99884-0673-4d7b-acf0-077be572599f\") " pod="openstack/cinder-backup-0" Nov 23 16:16:06 crc kubenswrapper[5050]: I1123 16:16:06.863318 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ebd99884-0673-4d7b-acf0-077be572599f-scripts\") pod \"cinder-backup-0\" (UID: \"ebd99884-0673-4d7b-acf0-077be572599f\") " pod="openstack/cinder-backup-0" Nov 23 16:16:06 crc kubenswrapper[5050]: I1123 16:16:06.863962 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/ebd99884-0673-4d7b-acf0-077be572599f-lib-modules\") pod \"cinder-backup-0\" (UID: \"ebd99884-0673-4d7b-acf0-077be572599f\") " pod="openstack/cinder-backup-0" Nov 23 16:16:06 crc kubenswrapper[5050]: I1123 16:16:06.869083 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ebd99884-0673-4d7b-acf0-077be572599f-scripts\") pod \"cinder-backup-0\" (UID: \"ebd99884-0673-4d7b-acf0-077be572599f\") " pod="openstack/cinder-backup-0" Nov 23 16:16:06 crc kubenswrapper[5050]: I1123 16:16:06.869618 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ebd99884-0673-4d7b-acf0-077be572599f-combined-ca-bundle\") pod \"cinder-backup-0\" (UID: \"ebd99884-0673-4d7b-acf0-077be572599f\") " pod="openstack/cinder-backup-0" Nov 23 16:16:06 crc kubenswrapper[5050]: I1123 16:16:06.870212 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ebd99884-0673-4d7b-acf0-077be572599f-config-data-custom\") pod \"cinder-backup-0\" (UID: \"ebd99884-0673-4d7b-acf0-077be572599f\") " pod="openstack/cinder-backup-0" Nov 23 16:16:06 crc kubenswrapper[5050]: I1123 16:16:06.871929 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ebd99884-0673-4d7b-acf0-077be572599f-config-data\") pod \"cinder-backup-0\" (UID: \"ebd99884-0673-4d7b-acf0-077be572599f\") " pod="openstack/cinder-backup-0" Nov 23 16:16:06 crc kubenswrapper[5050]: I1123 16:16:06.872985 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/ebd99884-0673-4d7b-acf0-077be572599f-ceph\") pod \"cinder-backup-0\" (UID: \"ebd99884-0673-4d7b-acf0-077be572599f\") " pod="openstack/cinder-backup-0" Nov 23 16:16:06 crc kubenswrapper[5050]: I1123 16:16:06.880075 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p67vw\" (UniqueName: \"kubernetes.io/projected/ebd99884-0673-4d7b-acf0-077be572599f-kube-api-access-p67vw\") pod \"cinder-backup-0\" (UID: \"ebd99884-0673-4d7b-acf0-077be572599f\") " pod="openstack/cinder-backup-0" Nov 23 16:16:06 crc kubenswrapper[5050]: I1123 16:16:06.892818 5050 kubelet.go:2428] "SyncLoop 
UPDATE" source="api" pods=["openstack/cinder-volume-volume1-0"] Nov 23 16:16:06 crc kubenswrapper[5050]: W1123 16:16:06.894895 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod05c9da2b_2e38_45c6_924b_641d802e03fd.slice/crio-b99e82d34410c37753092d53bf3e08fb594d86bb0fa2b24dde3e09e2e4fe7f8a WatchSource:0}: Error finding container b99e82d34410c37753092d53bf3e08fb594d86bb0fa2b24dde3e09e2e4fe7f8a: Status 404 returned error can't find the container with id b99e82d34410c37753092d53bf3e08fb594d86bb0fa2b24dde3e09e2e4fe7f8a Nov 23 16:16:06 crc kubenswrapper[5050]: I1123 16:16:06.897986 5050 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 23 16:16:06 crc kubenswrapper[5050]: I1123 16:16:06.963372 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-c962f"] Nov 23 16:16:07 crc kubenswrapper[5050]: I1123 16:16:07.040390 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-backup-0" Nov 23 16:16:07 crc kubenswrapper[5050]: I1123 16:16:07.658229 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-backup-0"] Nov 23 16:16:07 crc kubenswrapper[5050]: W1123 16:16:07.667930 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podebd99884_0673_4d7b_acf0_077be572599f.slice/crio-5414911a07f0402b86ab392cd6b1d86b03c636b02805910a144601e634e53784 WatchSource:0}: Error finding container 5414911a07f0402b86ab392cd6b1d86b03c636b02805910a144601e634e53784: Status 404 returned error can't find the container with id 5414911a07f0402b86ab392cd6b1d86b03c636b02805910a144601e634e53784 Nov 23 16:16:07 crc kubenswrapper[5050]: I1123 16:16:07.785378 5050 scope.go:117] "RemoveContainer" containerID="73bc63ceb9fc8f8e714dfdec3f92cac43b237fc21b48af024b52e2c287989b58" Nov 23 16:16:07 crc kubenswrapper[5050]: I1123 16:16:07.835505 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-backup-0" event={"ID":"ebd99884-0673-4d7b-acf0-077be572599f","Type":"ContainerStarted","Data":"5414911a07f0402b86ab392cd6b1d86b03c636b02805910a144601e634e53784"} Nov 23 16:16:07 crc kubenswrapper[5050]: I1123 16:16:07.836922 5050 generic.go:334] "Generic (PLEG): container finished" podID="4051fc5f-7beb-46ce-8c91-bb54c6b5003a" containerID="0dcab51a2dd8d2521ee2e0101458880081da887bd2039c8401504f9007fceec9" exitCode=0 Nov 23 16:16:07 crc kubenswrapper[5050]: I1123 16:16:07.836974 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-c962f" event={"ID":"4051fc5f-7beb-46ce-8c91-bb54c6b5003a","Type":"ContainerDied","Data":"0dcab51a2dd8d2521ee2e0101458880081da887bd2039c8401504f9007fceec9"} Nov 23 16:16:07 crc kubenswrapper[5050]: I1123 16:16:07.836992 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-c962f" event={"ID":"4051fc5f-7beb-46ce-8c91-bb54c6b5003a","Type":"ContainerStarted","Data":"32be0e5ca13e538eae76c0c36be2784490570a2f391e2124196782aca1553ca2"} Nov 23 16:16:07 crc kubenswrapper[5050]: I1123 16:16:07.840476 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-volume-volume1-0" event={"ID":"05c9da2b-2e38-45c6-924b-641d802e03fd","Type":"ContainerStarted","Data":"b99e82d34410c37753092d53bf3e08fb594d86bb0fa2b24dde3e09e2e4fe7f8a"} Nov 23 16:16:08 crc kubenswrapper[5050]: I1123 
16:16:08.028708 5050 scope.go:117] "RemoveContainer" containerID="5a60a1ec7afd185e23ae9692b30e5ca33b154e87cb9519568a201f6d399b917e" Nov 23 16:16:08 crc kubenswrapper[5050]: I1123 16:16:08.078167 5050 scope.go:117] "RemoveContainer" containerID="8fcfb38a51a934390fa271cf92ca33ff754fc1cce9c82eada066776b1a233546" Nov 23 16:16:08 crc kubenswrapper[5050]: I1123 16:16:08.493545 5050 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/cinder-api-0" podUID="8a75ff78-417c-4487-9b35-c9dbae5dcca6" containerName="cinder-api" probeResult="failure" output="Get \"http://10.217.1.79:8776/healthcheck\": read tcp 10.217.0.2:39096->10.217.1.79:8776: read: connection reset by peer" Nov 23 16:16:08 crc kubenswrapper[5050]: I1123 16:16:08.631033 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Nov 23 16:16:08 crc kubenswrapper[5050]: I1123 16:16:08.894879 5050 generic.go:334] "Generic (PLEG): container finished" podID="8a75ff78-417c-4487-9b35-c9dbae5dcca6" containerID="b0a894b01da730960d9113a2ac70f24be262176d6d594e532be7ac3dc95f7c77" exitCode=0 Nov 23 16:16:08 crc kubenswrapper[5050]: I1123 16:16:08.895266 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"8a75ff78-417c-4487-9b35-c9dbae5dcca6","Type":"ContainerDied","Data":"b0a894b01da730960d9113a2ac70f24be262176d6d594e532be7ac3dc95f7c77"} Nov 23 16:16:08 crc kubenswrapper[5050]: I1123 16:16:08.918056 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-volume-volume1-0" event={"ID":"05c9da2b-2e38-45c6-924b-641d802e03fd","Type":"ContainerStarted","Data":"6f9a04977803eb735ee9a744bfb5e69808be149e00391c9c8e469202eac5b7ee"} Nov 23 16:16:09 crc kubenswrapper[5050]: I1123 16:16:09.129678 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Nov 23 16:16:09 crc kubenswrapper[5050]: I1123 16:16:09.230148 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b8nh7\" (UniqueName: \"kubernetes.io/projected/8a75ff78-417c-4487-9b35-c9dbae5dcca6-kube-api-access-b8nh7\") pod \"8a75ff78-417c-4487-9b35-c9dbae5dcca6\" (UID: \"8a75ff78-417c-4487-9b35-c9dbae5dcca6\") " Nov 23 16:16:09 crc kubenswrapper[5050]: I1123 16:16:09.230404 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8a75ff78-417c-4487-9b35-c9dbae5dcca6-config-data-custom\") pod \"8a75ff78-417c-4487-9b35-c9dbae5dcca6\" (UID: \"8a75ff78-417c-4487-9b35-c9dbae5dcca6\") " Nov 23 16:16:09 crc kubenswrapper[5050]: I1123 16:16:09.230652 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8a75ff78-417c-4487-9b35-c9dbae5dcca6-scripts\") pod \"8a75ff78-417c-4487-9b35-c9dbae5dcca6\" (UID: \"8a75ff78-417c-4487-9b35-c9dbae5dcca6\") " Nov 23 16:16:09 crc kubenswrapper[5050]: I1123 16:16:09.230754 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8a75ff78-417c-4487-9b35-c9dbae5dcca6-combined-ca-bundle\") pod \"8a75ff78-417c-4487-9b35-c9dbae5dcca6\" (UID: \"8a75ff78-417c-4487-9b35-c9dbae5dcca6\") " Nov 23 16:16:09 crc kubenswrapper[5050]: I1123 16:16:09.231224 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8a75ff78-417c-4487-9b35-c9dbae5dcca6-logs\") pod \"8a75ff78-417c-4487-9b35-c9dbae5dcca6\" (UID: \"8a75ff78-417c-4487-9b35-c9dbae5dcca6\") " Nov 23 16:16:09 crc kubenswrapper[5050]: I1123 16:16:09.231277 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/8a75ff78-417c-4487-9b35-c9dbae5dcca6-etc-machine-id\") pod \"8a75ff78-417c-4487-9b35-c9dbae5dcca6\" (UID: \"8a75ff78-417c-4487-9b35-c9dbae5dcca6\") " Nov 23 16:16:09 crc kubenswrapper[5050]: I1123 16:16:09.231300 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8a75ff78-417c-4487-9b35-c9dbae5dcca6-config-data\") pod \"8a75ff78-417c-4487-9b35-c9dbae5dcca6\" (UID: \"8a75ff78-417c-4487-9b35-c9dbae5dcca6\") " Nov 23 16:16:09 crc kubenswrapper[5050]: I1123 16:16:09.231431 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8a75ff78-417c-4487-9b35-c9dbae5dcca6-logs" (OuterVolumeSpecName: "logs") pod "8a75ff78-417c-4487-9b35-c9dbae5dcca6" (UID: "8a75ff78-417c-4487-9b35-c9dbae5dcca6"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 16:16:09 crc kubenswrapper[5050]: I1123 16:16:09.231522 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/8a75ff78-417c-4487-9b35-c9dbae5dcca6-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "8a75ff78-417c-4487-9b35-c9dbae5dcca6" (UID: "8a75ff78-417c-4487-9b35-c9dbae5dcca6"). InnerVolumeSpecName "etc-machine-id". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 23 16:16:09 crc kubenswrapper[5050]: I1123 16:16:09.232163 5050 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8a75ff78-417c-4487-9b35-c9dbae5dcca6-logs\") on node \"crc\" DevicePath \"\"" Nov 23 16:16:09 crc kubenswrapper[5050]: I1123 16:16:09.232187 5050 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/8a75ff78-417c-4487-9b35-c9dbae5dcca6-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 23 16:16:09 crc kubenswrapper[5050]: I1123 16:16:09.236970 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8a75ff78-417c-4487-9b35-c9dbae5dcca6-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "8a75ff78-417c-4487-9b35-c9dbae5dcca6" (UID: "8a75ff78-417c-4487-9b35-c9dbae5dcca6"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:16:09 crc kubenswrapper[5050]: I1123 16:16:09.238561 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8a75ff78-417c-4487-9b35-c9dbae5dcca6-scripts" (OuterVolumeSpecName: "scripts") pod "8a75ff78-417c-4487-9b35-c9dbae5dcca6" (UID: "8a75ff78-417c-4487-9b35-c9dbae5dcca6"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:16:09 crc kubenswrapper[5050]: I1123 16:16:09.238849 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8a75ff78-417c-4487-9b35-c9dbae5dcca6-kube-api-access-b8nh7" (OuterVolumeSpecName: "kube-api-access-b8nh7") pod "8a75ff78-417c-4487-9b35-c9dbae5dcca6" (UID: "8a75ff78-417c-4487-9b35-c9dbae5dcca6"). InnerVolumeSpecName "kube-api-access-b8nh7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 16:16:09 crc kubenswrapper[5050]: I1123 16:16:09.297577 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8a75ff78-417c-4487-9b35-c9dbae5dcca6-config-data" (OuterVolumeSpecName: "config-data") pod "8a75ff78-417c-4487-9b35-c9dbae5dcca6" (UID: "8a75ff78-417c-4487-9b35-c9dbae5dcca6"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:16:09 crc kubenswrapper[5050]: I1123 16:16:09.306556 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8a75ff78-417c-4487-9b35-c9dbae5dcca6-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8a75ff78-417c-4487-9b35-c9dbae5dcca6" (UID: "8a75ff78-417c-4487-9b35-c9dbae5dcca6"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:16:09 crc kubenswrapper[5050]: I1123 16:16:09.335624 5050 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8a75ff78-417c-4487-9b35-c9dbae5dcca6-scripts\") on node \"crc\" DevicePath \"\"" Nov 23 16:16:09 crc kubenswrapper[5050]: I1123 16:16:09.336330 5050 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8a75ff78-417c-4487-9b35-c9dbae5dcca6-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 16:16:09 crc kubenswrapper[5050]: I1123 16:16:09.336416 5050 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8a75ff78-417c-4487-9b35-c9dbae5dcca6-config-data\") on node \"crc\" DevicePath \"\"" Nov 23 16:16:09 crc kubenswrapper[5050]: I1123 16:16:09.336516 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b8nh7\" (UniqueName: \"kubernetes.io/projected/8a75ff78-417c-4487-9b35-c9dbae5dcca6-kube-api-access-b8nh7\") on node \"crc\" DevicePath \"\"" Nov 23 16:16:09 crc kubenswrapper[5050]: I1123 16:16:09.336592 5050 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8a75ff78-417c-4487-9b35-c9dbae5dcca6-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 23 16:16:09 crc kubenswrapper[5050]: I1123 16:16:09.936800 5050 generic.go:334] "Generic (PLEG): container finished" podID="4051fc5f-7beb-46ce-8c91-bb54c6b5003a" containerID="ff7572bc24ca289e935af37d8b3478b89a03ba341b22c31e22f729adb6c5bab6" exitCode=0 Nov 23 16:16:09 crc kubenswrapper[5050]: I1123 16:16:09.936892 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-c962f" event={"ID":"4051fc5f-7beb-46ce-8c91-bb54c6b5003a","Type":"ContainerDied","Data":"ff7572bc24ca289e935af37d8b3478b89a03ba341b22c31e22f729adb6c5bab6"} Nov 23 16:16:09 crc kubenswrapper[5050]: I1123 16:16:09.946263 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-volume-volume1-0" event={"ID":"05c9da2b-2e38-45c6-924b-641d802e03fd","Type":"ContainerStarted","Data":"673bc5bc2dae9c3c7205b62d9ee0f4094db731272b431e63ac7c6dc6f9966914"} Nov 23 16:16:09 crc kubenswrapper[5050]: I1123 16:16:09.961757 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"8a75ff78-417c-4487-9b35-c9dbae5dcca6","Type":"ContainerDied","Data":"c5fe240b8f72ea1e3996bf54c8de480ca3019b37402e0d9a7ee205c902475f20"} Nov 23 16:16:09 crc kubenswrapper[5050]: I1123 16:16:09.961891 5050 scope.go:117] "RemoveContainer" containerID="b0a894b01da730960d9113a2ac70f24be262176d6d594e532be7ac3dc95f7c77" Nov 23 16:16:09 crc kubenswrapper[5050]: I1123 16:16:09.962246 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Nov 23 16:16:09 crc kubenswrapper[5050]: I1123 16:16:09.973259 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-backup-0" event={"ID":"ebd99884-0673-4d7b-acf0-077be572599f","Type":"ContainerStarted","Data":"e9b224a3a74cfe58c32072fc1678a7ae47356475d8719ef51ade23cd28fffcbf"} Nov 23 16:16:09 crc kubenswrapper[5050]: I1123 16:16:09.973344 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-backup-0" event={"ID":"ebd99884-0673-4d7b-acf0-077be572599f","Type":"ContainerStarted","Data":"7dd8f4be711c4329175dec6512ab55d22afe16ab72096a83418fc855f5ea5203"} Nov 23 16:16:10 crc kubenswrapper[5050]: I1123 16:16:10.001285 5050 scope.go:117] "RemoveContainer" containerID="6e8f1df424a95618ab5da6c8694c5e37743478a264f86c301cf216d0ac7756ec" Nov 23 16:16:10 crc kubenswrapper[5050]: I1123 16:16:10.027985 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-volume-volume1-0" podStartSLOduration=3.838531807 podStartE2EDuration="5.027961896s" podCreationTimestamp="2025-11-23 16:16:05 +0000 UTC" firstStartedPulling="2025-11-23 16:16:06.897711229 +0000 UTC m=+5662.064707704" lastFinishedPulling="2025-11-23 16:16:08.087141308 +0000 UTC m=+5663.254137793" observedRunningTime="2025-11-23 16:16:10.018053636 +0000 UTC m=+5665.185050161" watchObservedRunningTime="2025-11-23 16:16:10.027961896 +0000 UTC m=+5665.194958381" Nov 23 16:16:10 crc kubenswrapper[5050]: I1123 16:16:10.046655 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Nov 23 16:16:10 crc kubenswrapper[5050]: I1123 16:16:10.056025 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-api-0"] Nov 23 16:16:10 crc kubenswrapper[5050]: I1123 16:16:10.084400 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Nov 23 16:16:10 crc kubenswrapper[5050]: E1123 16:16:10.086058 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8a75ff78-417c-4487-9b35-c9dbae5dcca6" containerName="cinder-api-log" Nov 23 16:16:10 crc kubenswrapper[5050]: I1123 16:16:10.086085 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="8a75ff78-417c-4487-9b35-c9dbae5dcca6" containerName="cinder-api-log" Nov 23 16:16:10 crc kubenswrapper[5050]: E1123 16:16:10.086130 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8a75ff78-417c-4487-9b35-c9dbae5dcca6" containerName="cinder-api" Nov 23 16:16:10 crc kubenswrapper[5050]: I1123 16:16:10.086137 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="8a75ff78-417c-4487-9b35-c9dbae5dcca6" containerName="cinder-api" Nov 23 16:16:10 crc kubenswrapper[5050]: I1123 16:16:10.086338 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="8a75ff78-417c-4487-9b35-c9dbae5dcca6" containerName="cinder-api-log" Nov 23 16:16:10 crc kubenswrapper[5050]: I1123 16:16:10.086362 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="8a75ff78-417c-4487-9b35-c9dbae5dcca6" containerName="cinder-api" Nov 23 16:16:10 crc kubenswrapper[5050]: I1123 16:16:10.087369 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Nov 23 16:16:10 crc kubenswrapper[5050]: I1123 16:16:10.097770 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Nov 23 16:16:10 crc kubenswrapper[5050]: I1123 16:16:10.102818 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-backup-0" podStartSLOduration=3.289448647 podStartE2EDuration="4.102788088s" podCreationTimestamp="2025-11-23 16:16:06 +0000 UTC" firstStartedPulling="2025-11-23 16:16:07.672254705 +0000 UTC m=+5662.839251190" lastFinishedPulling="2025-11-23 16:16:08.485594146 +0000 UTC m=+5663.652590631" observedRunningTime="2025-11-23 16:16:10.07771521 +0000 UTC m=+5665.244711695" watchObservedRunningTime="2025-11-23 16:16:10.102788088 +0000 UTC m=+5665.269784563" Nov 23 16:16:10 crc kubenswrapper[5050]: I1123 16:16:10.125345 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 23 16:16:10 crc kubenswrapper[5050]: I1123 16:16:10.162695 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/530062c4-a69d-46a1-9508-dfd72558f046-etc-machine-id\") pod \"cinder-api-0\" (UID: \"530062c4-a69d-46a1-9508-dfd72558f046\") " pod="openstack/cinder-api-0" Nov 23 16:16:10 crc kubenswrapper[5050]: I1123 16:16:10.162752 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/530062c4-a69d-46a1-9508-dfd72558f046-config-data\") pod \"cinder-api-0\" (UID: \"530062c4-a69d-46a1-9508-dfd72558f046\") " pod="openstack/cinder-api-0" Nov 23 16:16:10 crc kubenswrapper[5050]: I1123 16:16:10.162890 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/530062c4-a69d-46a1-9508-dfd72558f046-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"530062c4-a69d-46a1-9508-dfd72558f046\") " pod="openstack/cinder-api-0" Nov 23 16:16:10 crc kubenswrapper[5050]: I1123 16:16:10.163061 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2bms4\" (UniqueName: \"kubernetes.io/projected/530062c4-a69d-46a1-9508-dfd72558f046-kube-api-access-2bms4\") pod \"cinder-api-0\" (UID: \"530062c4-a69d-46a1-9508-dfd72558f046\") " pod="openstack/cinder-api-0" Nov 23 16:16:10 crc kubenswrapper[5050]: I1123 16:16:10.163196 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/530062c4-a69d-46a1-9508-dfd72558f046-scripts\") pod \"cinder-api-0\" (UID: \"530062c4-a69d-46a1-9508-dfd72558f046\") " pod="openstack/cinder-api-0" Nov 23 16:16:10 crc kubenswrapper[5050]: I1123 16:16:10.163240 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/530062c4-a69d-46a1-9508-dfd72558f046-logs\") pod \"cinder-api-0\" (UID: \"530062c4-a69d-46a1-9508-dfd72558f046\") " pod="openstack/cinder-api-0" Nov 23 16:16:10 crc kubenswrapper[5050]: I1123 16:16:10.163312 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/530062c4-a69d-46a1-9508-dfd72558f046-config-data-custom\") pod \"cinder-api-0\" (UID: 
\"530062c4-a69d-46a1-9508-dfd72558f046\") " pod="openstack/cinder-api-0" Nov 23 16:16:10 crc kubenswrapper[5050]: I1123 16:16:10.266019 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/530062c4-a69d-46a1-9508-dfd72558f046-scripts\") pod \"cinder-api-0\" (UID: \"530062c4-a69d-46a1-9508-dfd72558f046\") " pod="openstack/cinder-api-0" Nov 23 16:16:10 crc kubenswrapper[5050]: I1123 16:16:10.266069 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/530062c4-a69d-46a1-9508-dfd72558f046-logs\") pod \"cinder-api-0\" (UID: \"530062c4-a69d-46a1-9508-dfd72558f046\") " pod="openstack/cinder-api-0" Nov 23 16:16:10 crc kubenswrapper[5050]: I1123 16:16:10.266103 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/530062c4-a69d-46a1-9508-dfd72558f046-config-data-custom\") pod \"cinder-api-0\" (UID: \"530062c4-a69d-46a1-9508-dfd72558f046\") " pod="openstack/cinder-api-0" Nov 23 16:16:10 crc kubenswrapper[5050]: I1123 16:16:10.266154 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/530062c4-a69d-46a1-9508-dfd72558f046-etc-machine-id\") pod \"cinder-api-0\" (UID: \"530062c4-a69d-46a1-9508-dfd72558f046\") " pod="openstack/cinder-api-0" Nov 23 16:16:10 crc kubenswrapper[5050]: I1123 16:16:10.266174 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/530062c4-a69d-46a1-9508-dfd72558f046-config-data\") pod \"cinder-api-0\" (UID: \"530062c4-a69d-46a1-9508-dfd72558f046\") " pod="openstack/cinder-api-0" Nov 23 16:16:10 crc kubenswrapper[5050]: I1123 16:16:10.266204 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/530062c4-a69d-46a1-9508-dfd72558f046-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"530062c4-a69d-46a1-9508-dfd72558f046\") " pod="openstack/cinder-api-0" Nov 23 16:16:10 crc kubenswrapper[5050]: I1123 16:16:10.266250 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2bms4\" (UniqueName: \"kubernetes.io/projected/530062c4-a69d-46a1-9508-dfd72558f046-kube-api-access-2bms4\") pod \"cinder-api-0\" (UID: \"530062c4-a69d-46a1-9508-dfd72558f046\") " pod="openstack/cinder-api-0" Nov 23 16:16:10 crc kubenswrapper[5050]: I1123 16:16:10.267752 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/530062c4-a69d-46a1-9508-dfd72558f046-etc-machine-id\") pod \"cinder-api-0\" (UID: \"530062c4-a69d-46a1-9508-dfd72558f046\") " pod="openstack/cinder-api-0" Nov 23 16:16:10 crc kubenswrapper[5050]: I1123 16:16:10.273553 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/530062c4-a69d-46a1-9508-dfd72558f046-logs\") pod \"cinder-api-0\" (UID: \"530062c4-a69d-46a1-9508-dfd72558f046\") " pod="openstack/cinder-api-0" Nov 23 16:16:10 crc kubenswrapper[5050]: I1123 16:16:10.281249 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/530062c4-a69d-46a1-9508-dfd72558f046-scripts\") pod \"cinder-api-0\" (UID: \"530062c4-a69d-46a1-9508-dfd72558f046\") " 
pod="openstack/cinder-api-0" Nov 23 16:16:10 crc kubenswrapper[5050]: I1123 16:16:10.283239 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/530062c4-a69d-46a1-9508-dfd72558f046-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"530062c4-a69d-46a1-9508-dfd72558f046\") " pod="openstack/cinder-api-0" Nov 23 16:16:10 crc kubenswrapper[5050]: I1123 16:16:10.284148 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/530062c4-a69d-46a1-9508-dfd72558f046-config-data\") pod \"cinder-api-0\" (UID: \"530062c4-a69d-46a1-9508-dfd72558f046\") " pod="openstack/cinder-api-0" Nov 23 16:16:10 crc kubenswrapper[5050]: I1123 16:16:10.295054 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/530062c4-a69d-46a1-9508-dfd72558f046-config-data-custom\") pod \"cinder-api-0\" (UID: \"530062c4-a69d-46a1-9508-dfd72558f046\") " pod="openstack/cinder-api-0" Nov 23 16:16:10 crc kubenswrapper[5050]: I1123 16:16:10.298339 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2bms4\" (UniqueName: \"kubernetes.io/projected/530062c4-a69d-46a1-9508-dfd72558f046-kube-api-access-2bms4\") pod \"cinder-api-0\" (UID: \"530062c4-a69d-46a1-9508-dfd72558f046\") " pod="openstack/cinder-api-0" Nov 23 16:16:10 crc kubenswrapper[5050]: I1123 16:16:10.416164 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 23 16:16:10 crc kubenswrapper[5050]: W1123 16:16:10.941368 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod530062c4_a69d_46a1_9508_dfd72558f046.slice/crio-505cfa4728e98dcde2cb9031760962362fdf8d25431e12235d126597b3d4e8f2 WatchSource:0}: Error finding container 505cfa4728e98dcde2cb9031760962362fdf8d25431e12235d126597b3d4e8f2: Status 404 returned error can't find the container with id 505cfa4728e98dcde2cb9031760962362fdf8d25431e12235d126597b3d4e8f2 Nov 23 16:16:10 crc kubenswrapper[5050]: I1123 16:16:10.951652 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 23 16:16:10 crc kubenswrapper[5050]: I1123 16:16:10.991594 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-c962f" event={"ID":"4051fc5f-7beb-46ce-8c91-bb54c6b5003a","Type":"ContainerStarted","Data":"8375c1ec9e676e0a5c8f15c84163bc2b1541b1117c6a54bc36d432856160b62a"} Nov 23 16:16:10 crc kubenswrapper[5050]: I1123 16:16:10.994808 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"530062c4-a69d-46a1-9508-dfd72558f046","Type":"ContainerStarted","Data":"505cfa4728e98dcde2cb9031760962362fdf8d25431e12235d126597b3d4e8f2"} Nov 23 16:16:11 crc kubenswrapper[5050]: I1123 16:16:11.242776 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-volume-volume1-0" Nov 23 16:16:11 crc kubenswrapper[5050]: I1123 16:16:11.561709 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8a75ff78-417c-4487-9b35-c9dbae5dcca6" path="/var/lib/kubelet/pods/8a75ff78-417c-4487-9b35-c9dbae5dcca6/volumes" Nov 23 16:16:12 crc kubenswrapper[5050]: I1123 16:16:12.035341 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" 
event={"ID":"530062c4-a69d-46a1-9508-dfd72558f046","Type":"ContainerStarted","Data":"965d8d77b324ba8ed9aef7e537f842f3957ddc12cfb35b686a0bcffa4f7c437e"} Nov 23 16:16:12 crc kubenswrapper[5050]: I1123 16:16:12.041469 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-backup-0" Nov 23 16:16:13 crc kubenswrapper[5050]: I1123 16:16:13.047323 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"530062c4-a69d-46a1-9508-dfd72558f046","Type":"ContainerStarted","Data":"f7ee480344dd6d69cfd878494a409edf490d089711c339902b2dd0b42f144a93"} Nov 23 16:16:13 crc kubenswrapper[5050]: I1123 16:16:13.048633 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Nov 23 16:16:13 crc kubenswrapper[5050]: I1123 16:16:13.071432 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=3.071407383 podStartE2EDuration="3.071407383s" podCreationTimestamp="2025-11-23 16:16:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 16:16:13.070291861 +0000 UTC m=+5668.237288356" watchObservedRunningTime="2025-11-23 16:16:13.071407383 +0000 UTC m=+5668.238403868" Nov 23 16:16:13 crc kubenswrapper[5050]: I1123 16:16:13.079609 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-c962f" podStartSLOduration=4.731637401 podStartE2EDuration="7.079580123s" podCreationTimestamp="2025-11-23 16:16:06 +0000 UTC" firstStartedPulling="2025-11-23 16:16:08.008747994 +0000 UTC m=+5663.175744489" lastFinishedPulling="2025-11-23 16:16:10.356690706 +0000 UTC m=+5665.523687211" observedRunningTime="2025-11-23 16:16:11.022755129 +0000 UTC m=+5666.189751644" watchObservedRunningTime="2025-11-23 16:16:13.079580123 +0000 UTC m=+5668.246576608" Nov 23 16:16:13 crc kubenswrapper[5050]: I1123 16:16:13.853023 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Nov 23 16:16:13 crc kubenswrapper[5050]: I1123 16:16:13.943703 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 23 16:16:14 crc kubenswrapper[5050]: I1123 16:16:14.060148 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="5af01ad6-c261-4d39-b3d8-4fe7a73524cf" containerName="cinder-scheduler" containerID="cri-o://c206774ae0b9813bda1fce04e9c5974f141ba16a926981b101f7a40459d9531d" gracePeriod=30 Nov 23 16:16:14 crc kubenswrapper[5050]: I1123 16:16:14.063623 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="5af01ad6-c261-4d39-b3d8-4fe7a73524cf" containerName="probe" containerID="cri-o://4eaf80a1bcd5f62c60185ecb37c51df8529829694a56d6dd5702eed631f63f9d" gracePeriod=30 Nov 23 16:16:15 crc kubenswrapper[5050]: I1123 16:16:15.073154 5050 generic.go:334] "Generic (PLEG): container finished" podID="5af01ad6-c261-4d39-b3d8-4fe7a73524cf" containerID="4eaf80a1bcd5f62c60185ecb37c51df8529829694a56d6dd5702eed631f63f9d" exitCode=0 Nov 23 16:16:15 crc kubenswrapper[5050]: I1123 16:16:15.073337 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"5af01ad6-c261-4d39-b3d8-4fe7a73524cf","Type":"ContainerDied","Data":"4eaf80a1bcd5f62c60185ecb37c51df8529829694a56d6dd5702eed631f63f9d"} 
Nov 23 16:16:16 crc kubenswrapper[5050]: I1123 16:16:16.093473 5050 generic.go:334] "Generic (PLEG): container finished" podID="5af01ad6-c261-4d39-b3d8-4fe7a73524cf" containerID="c206774ae0b9813bda1fce04e9c5974f141ba16a926981b101f7a40459d9531d" exitCode=0
Nov 23 16:16:16 crc kubenswrapper[5050]: I1123 16:16:16.093646 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"5af01ad6-c261-4d39-b3d8-4fe7a73524cf","Type":"ContainerDied","Data":"c206774ae0b9813bda1fce04e9c5974f141ba16a926981b101f7a40459d9531d"}
Nov 23 16:16:16 crc kubenswrapper[5050]: I1123 16:16:16.094279 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"5af01ad6-c261-4d39-b3d8-4fe7a73524cf","Type":"ContainerDied","Data":"4518ddefdf9304c84885bc85c22aa2e193d0f76936b4afeb2642479a2f34d798"}
Nov 23 16:16:16 crc kubenswrapper[5050]: I1123 16:16:16.094311 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4518ddefdf9304c84885bc85c22aa2e193d0f76936b4afeb2642479a2f34d798"
Nov 23 16:16:16 crc kubenswrapper[5050]: I1123 16:16:16.133140 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0"
Nov 23 16:16:16 crc kubenswrapper[5050]: I1123 16:16:16.232926 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5af01ad6-c261-4d39-b3d8-4fe7a73524cf-config-data\") pod \"5af01ad6-c261-4d39-b3d8-4fe7a73524cf\" (UID: \"5af01ad6-c261-4d39-b3d8-4fe7a73524cf\") "
Nov 23 16:16:16 crc kubenswrapper[5050]: I1123 16:16:16.233321 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5af01ad6-c261-4d39-b3d8-4fe7a73524cf-config-data-custom\") pod \"5af01ad6-c261-4d39-b3d8-4fe7a73524cf\" (UID: \"5af01ad6-c261-4d39-b3d8-4fe7a73524cf\") "
Nov 23 16:16:16 crc kubenswrapper[5050]: I1123 16:16:16.233358 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f6krd\" (UniqueName: \"kubernetes.io/projected/5af01ad6-c261-4d39-b3d8-4fe7a73524cf-kube-api-access-f6krd\") pod \"5af01ad6-c261-4d39-b3d8-4fe7a73524cf\" (UID: \"5af01ad6-c261-4d39-b3d8-4fe7a73524cf\") "
Nov 23 16:16:16 crc kubenswrapper[5050]: I1123 16:16:16.233397 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5af01ad6-c261-4d39-b3d8-4fe7a73524cf-combined-ca-bundle\") pod \"5af01ad6-c261-4d39-b3d8-4fe7a73524cf\" (UID: \"5af01ad6-c261-4d39-b3d8-4fe7a73524cf\") "
Nov 23 16:16:16 crc kubenswrapper[5050]: I1123 16:16:16.233457 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/5af01ad6-c261-4d39-b3d8-4fe7a73524cf-etc-machine-id\") pod \"5af01ad6-c261-4d39-b3d8-4fe7a73524cf\" (UID: \"5af01ad6-c261-4d39-b3d8-4fe7a73524cf\") "
Nov 23 16:16:16 crc kubenswrapper[5050]: I1123 16:16:16.233526 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5af01ad6-c261-4d39-b3d8-4fe7a73524cf-scripts\") pod \"5af01ad6-c261-4d39-b3d8-4fe7a73524cf\" (UID: \"5af01ad6-c261-4d39-b3d8-4fe7a73524cf\") "
Nov 23 16:16:16 crc kubenswrapper[5050]: I1123 16:16:16.235387 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5af01ad6-c261-4d39-b3d8-4fe7a73524cf-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "5af01ad6-c261-4d39-b3d8-4fe7a73524cf" (UID: "5af01ad6-c261-4d39-b3d8-4fe7a73524cf"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 23 16:16:16 crc kubenswrapper[5050]: I1123 16:16:16.247656 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5af01ad6-c261-4d39-b3d8-4fe7a73524cf-scripts" (OuterVolumeSpecName: "scripts") pod "5af01ad6-c261-4d39-b3d8-4fe7a73524cf" (UID: "5af01ad6-c261-4d39-b3d8-4fe7a73524cf"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 23 16:16:16 crc kubenswrapper[5050]: I1123 16:16:16.247750 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5af01ad6-c261-4d39-b3d8-4fe7a73524cf-kube-api-access-f6krd" (OuterVolumeSpecName: "kube-api-access-f6krd") pod "5af01ad6-c261-4d39-b3d8-4fe7a73524cf" (UID: "5af01ad6-c261-4d39-b3d8-4fe7a73524cf"). InnerVolumeSpecName "kube-api-access-f6krd". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 23 16:16:16 crc kubenswrapper[5050]: I1123 16:16:16.247755 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5af01ad6-c261-4d39-b3d8-4fe7a73524cf-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "5af01ad6-c261-4d39-b3d8-4fe7a73524cf" (UID: "5af01ad6-c261-4d39-b3d8-4fe7a73524cf"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 23 16:16:16 crc kubenswrapper[5050]: I1123 16:16:16.302533 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5af01ad6-c261-4d39-b3d8-4fe7a73524cf-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5af01ad6-c261-4d39-b3d8-4fe7a73524cf" (UID: "5af01ad6-c261-4d39-b3d8-4fe7a73524cf"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 23 16:16:16 crc kubenswrapper[5050]: I1123 16:16:16.337524 5050 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5af01ad6-c261-4d39-b3d8-4fe7a73524cf-scripts\") on node \"crc\" DevicePath \"\""
Nov 23 16:16:16 crc kubenswrapper[5050]: I1123 16:16:16.337566 5050 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5af01ad6-c261-4d39-b3d8-4fe7a73524cf-config-data-custom\") on node \"crc\" DevicePath \"\""
Nov 23 16:16:16 crc kubenswrapper[5050]: I1123 16:16:16.337580 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f6krd\" (UniqueName: \"kubernetes.io/projected/5af01ad6-c261-4d39-b3d8-4fe7a73524cf-kube-api-access-f6krd\") on node \"crc\" DevicePath \"\""
Nov 23 16:16:16 crc kubenswrapper[5050]: I1123 16:16:16.337591 5050 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5af01ad6-c261-4d39-b3d8-4fe7a73524cf-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 23 16:16:16 crc kubenswrapper[5050]: I1123 16:16:16.337600 5050 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/5af01ad6-c261-4d39-b3d8-4fe7a73524cf-etc-machine-id\") on node \"crc\" DevicePath \"\""
Nov 23 16:16:16 crc kubenswrapper[5050]: I1123 16:16:16.373367 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-c962f"
Nov 23 16:16:16 crc kubenswrapper[5050]: I1123 16:16:16.373426 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-c962f"
Nov 23 16:16:16 crc kubenswrapper[5050]: I1123 16:16:16.384743 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5af01ad6-c261-4d39-b3d8-4fe7a73524cf-config-data" (OuterVolumeSpecName: "config-data") pod "5af01ad6-c261-4d39-b3d8-4fe7a73524cf" (UID: "5af01ad6-c261-4d39-b3d8-4fe7a73524cf"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 23 16:16:16 crc kubenswrapper[5050]: I1123 16:16:16.423254 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-c962f"
Nov 23 16:16:16 crc kubenswrapper[5050]: I1123 16:16:16.440367 5050 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5af01ad6-c261-4d39-b3d8-4fe7a73524cf-config-data\") on node \"crc\" DevicePath \"\""
Nov 23 16:16:16 crc kubenswrapper[5050]: I1123 16:16:16.532958 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-volume-volume1-0"
Nov 23 16:16:17 crc kubenswrapper[5050]: I1123 16:16:17.102918 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0"
Nov 23 16:16:17 crc kubenswrapper[5050]: I1123 16:16:17.148517 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"]
Nov 23 16:16:17 crc kubenswrapper[5050]: I1123 16:16:17.156892 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-scheduler-0"]
Nov 23 16:16:17 crc kubenswrapper[5050]: I1123 16:16:17.165547 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-c962f"
Nov 23 16:16:17 crc kubenswrapper[5050]: I1123 16:16:17.165611 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"]
Nov 23 16:16:17 crc kubenswrapper[5050]: E1123 16:16:17.166160 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5af01ad6-c261-4d39-b3d8-4fe7a73524cf" containerName="probe"
Nov 23 16:16:17 crc kubenswrapper[5050]: I1123 16:16:17.166178 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="5af01ad6-c261-4d39-b3d8-4fe7a73524cf" containerName="probe"
Nov 23 16:16:17 crc kubenswrapper[5050]: E1123 16:16:17.166206 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5af01ad6-c261-4d39-b3d8-4fe7a73524cf" containerName="cinder-scheduler"
Nov 23 16:16:17 crc kubenswrapper[5050]: I1123 16:16:17.166213 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="5af01ad6-c261-4d39-b3d8-4fe7a73524cf" containerName="cinder-scheduler"
Nov 23 16:16:17 crc kubenswrapper[5050]: I1123 16:16:17.166422 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="5af01ad6-c261-4d39-b3d8-4fe7a73524cf" containerName="cinder-scheduler"
Nov 23 16:16:17 crc kubenswrapper[5050]: I1123 16:16:17.166454 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="5af01ad6-c261-4d39-b3d8-4fe7a73524cf" containerName="probe"
Nov 23 16:16:17 crc kubenswrapper[5050]: I1123 16:16:17.167738 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0"
Nov 23 16:16:17 crc kubenswrapper[5050]: I1123 16:16:17.179233 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data"
Nov 23 16:16:17 crc kubenswrapper[5050]: I1123 16:16:17.190398 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"]
Nov 23 16:16:17 crc kubenswrapper[5050]: I1123 16:16:17.257981 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8w8bf\" (UniqueName: \"kubernetes.io/projected/832cbdc7-a2fb-41ba-93fa-342961b53fea-kube-api-access-8w8bf\") pod \"cinder-scheduler-0\" (UID: \"832cbdc7-a2fb-41ba-93fa-342961b53fea\") " pod="openstack/cinder-scheduler-0"
Nov 23 16:16:17 crc kubenswrapper[5050]: I1123 16:16:17.258153 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/832cbdc7-a2fb-41ba-93fa-342961b53fea-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"832cbdc7-a2fb-41ba-93fa-342961b53fea\") " pod="openstack/cinder-scheduler-0"
Nov 23 16:16:17 crc kubenswrapper[5050]: I1123 16:16:17.258205 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/832cbdc7-a2fb-41ba-93fa-342961b53fea-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"832cbdc7-a2fb-41ba-93fa-342961b53fea\") " pod="openstack/cinder-scheduler-0"
Nov 23 16:16:17 crc kubenswrapper[5050]: I1123 16:16:17.258258 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/832cbdc7-a2fb-41ba-93fa-342961b53fea-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"832cbdc7-a2fb-41ba-93fa-342961b53fea\") " pod="openstack/cinder-scheduler-0"
Nov 23 16:16:17 crc kubenswrapper[5050]: I1123 16:16:17.258313 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/832cbdc7-a2fb-41ba-93fa-342961b53fea-scripts\") pod \"cinder-scheduler-0\" (UID: \"832cbdc7-a2fb-41ba-93fa-342961b53fea\") " pod="openstack/cinder-scheduler-0"
Nov 23 16:16:17 crc kubenswrapper[5050]: I1123 16:16:17.258352 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/832cbdc7-a2fb-41ba-93fa-342961b53fea-config-data\") pod \"cinder-scheduler-0\" (UID: \"832cbdc7-a2fb-41ba-93fa-342961b53fea\") " pod="openstack/cinder-scheduler-0"
Nov 23 16:16:17 crc kubenswrapper[5050]: I1123 16:16:17.258763 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-c962f"]
Nov 23 16:16:17 crc kubenswrapper[5050]: I1123 16:16:17.312500 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-backup-0"
Nov 23 16:16:17 crc kubenswrapper[5050]: I1123 16:16:17.360813 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8w8bf\" (UniqueName: \"kubernetes.io/projected/832cbdc7-a2fb-41ba-93fa-342961b53fea-kube-api-access-8w8bf\") pod \"cinder-scheduler-0\" (UID: \"832cbdc7-a2fb-41ba-93fa-342961b53fea\") " pod="openstack/cinder-scheduler-0"
Nov 23 16:16:17 crc kubenswrapper[5050]: I1123 16:16:17.360937 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/832cbdc7-a2fb-41ba-93fa-342961b53fea-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"832cbdc7-a2fb-41ba-93fa-342961b53fea\") " pod="openstack/cinder-scheduler-0"
Nov 23 16:16:17 crc kubenswrapper[5050]: I1123 16:16:17.360978 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/832cbdc7-a2fb-41ba-93fa-342961b53fea-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"832cbdc7-a2fb-41ba-93fa-342961b53fea\") " pod="openstack/cinder-scheduler-0"
Nov 23 16:16:17 crc kubenswrapper[5050]: I1123 16:16:17.361022 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/832cbdc7-a2fb-41ba-93fa-342961b53fea-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"832cbdc7-a2fb-41ba-93fa-342961b53fea\") " pod="openstack/cinder-scheduler-0"
Nov 23 16:16:17 crc kubenswrapper[5050]: I1123 16:16:17.361071 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/832cbdc7-a2fb-41ba-93fa-342961b53fea-scripts\") pod \"cinder-scheduler-0\" (UID: \"832cbdc7-a2fb-41ba-93fa-342961b53fea\") " pod="openstack/cinder-scheduler-0"
Nov 23 16:16:17 crc kubenswrapper[5050]: I1123 16:16:17.361096 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/832cbdc7-a2fb-41ba-93fa-342961b53fea-config-data\") pod \"cinder-scheduler-0\" (UID: \"832cbdc7-a2fb-41ba-93fa-342961b53fea\") " pod="openstack/cinder-scheduler-0"
Nov 23 16:16:17 crc kubenswrapper[5050]: I1123 16:16:17.361594 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/832cbdc7-a2fb-41ba-93fa-342961b53fea-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"832cbdc7-a2fb-41ba-93fa-342961b53fea\") " pod="openstack/cinder-scheduler-0"
Nov 23 16:16:17 crc kubenswrapper[5050]: I1123 16:16:17.367692 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/832cbdc7-a2fb-41ba-93fa-342961b53fea-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"832cbdc7-a2fb-41ba-93fa-342961b53fea\") " pod="openstack/cinder-scheduler-0"
Nov 23 16:16:17 crc kubenswrapper[5050]: I1123 16:16:17.378342 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/832cbdc7-a2fb-41ba-93fa-342961b53fea-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"832cbdc7-a2fb-41ba-93fa-342961b53fea\") " pod="openstack/cinder-scheduler-0"
Nov 23 16:16:17 crc kubenswrapper[5050]: I1123 16:16:17.382346 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/832cbdc7-a2fb-41ba-93fa-342961b53fea-scripts\") pod \"cinder-scheduler-0\" (UID: \"832cbdc7-a2fb-41ba-93fa-342961b53fea\") " pod="openstack/cinder-scheduler-0"
Nov 23 16:16:17 crc kubenswrapper[5050]: I1123 16:16:17.383693 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8w8bf\" (UniqueName: \"kubernetes.io/projected/832cbdc7-a2fb-41ba-93fa-342961b53fea-kube-api-access-8w8bf\") pod \"cinder-scheduler-0\" (UID: \"832cbdc7-a2fb-41ba-93fa-342961b53fea\") " pod="openstack/cinder-scheduler-0"
Nov 23 16:16:17 crc kubenswrapper[5050]: I1123 16:16:17.383876 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/832cbdc7-a2fb-41ba-93fa-342961b53fea-config-data\") pod \"cinder-scheduler-0\" (UID: \"832cbdc7-a2fb-41ba-93fa-342961b53fea\") " pod="openstack/cinder-scheduler-0"
Nov 23 16:16:17 crc kubenswrapper[5050]: I1123 16:16:17.494228 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0"
Nov 23 16:16:17 crc kubenswrapper[5050]: I1123 16:16:17.570086 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5af01ad6-c261-4d39-b3d8-4fe7a73524cf" path="/var/lib/kubelet/pods/5af01ad6-c261-4d39-b3d8-4fe7a73524cf/volumes"
Nov 23 16:16:18 crc kubenswrapper[5050]: I1123 16:16:18.011892 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"]
Nov 23 16:16:18 crc kubenswrapper[5050]: I1123 16:16:18.118947 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"832cbdc7-a2fb-41ba-93fa-342961b53fea","Type":"ContainerStarted","Data":"2f72b08cc37717ef125a3947e91e0e4128f20fbf054b858ec5406776cd5fb4b4"}
Nov 23 16:16:18 crc kubenswrapper[5050]: I1123 16:16:18.549559 5050 scope.go:117] "RemoveContainer" containerID="131b4bb8956deb8c3bd7bd951e055fca3b66c01a0141c3f85cb0b1152065de00"
Nov 23 16:16:18 crc kubenswrapper[5050]: E1123 16:16:18.550163 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f"
Nov 23 16:16:19 crc kubenswrapper[5050]: I1123 16:16:19.137603 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"832cbdc7-a2fb-41ba-93fa-342961b53fea","Type":"ContainerStarted","Data":"5a296e5dfaa75840c401a39996ff47114469931beb7d69495860982f0717d171"}
Nov 23 16:16:19 crc kubenswrapper[5050]: I1123 16:16:19.137670 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-c962f" podUID="4051fc5f-7beb-46ce-8c91-bb54c6b5003a" containerName="registry-server" containerID="cri-o://8375c1ec9e676e0a5c8f15c84163bc2b1541b1117c6a54bc36d432856160b62a" gracePeriod=2
Nov 23 16:16:19 crc kubenswrapper[5050]: I1123 16:16:19.646125 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-c962f"
Nov 23 16:16:19 crc kubenswrapper[5050]: I1123 16:16:19.723577 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4051fc5f-7beb-46ce-8c91-bb54c6b5003a-catalog-content\") pod \"4051fc5f-7beb-46ce-8c91-bb54c6b5003a\" (UID: \"4051fc5f-7beb-46ce-8c91-bb54c6b5003a\") "
Nov 23 16:16:19 crc kubenswrapper[5050]: I1123 16:16:19.723725 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wjp4j\" (UniqueName: \"kubernetes.io/projected/4051fc5f-7beb-46ce-8c91-bb54c6b5003a-kube-api-access-wjp4j\") pod \"4051fc5f-7beb-46ce-8c91-bb54c6b5003a\" (UID: \"4051fc5f-7beb-46ce-8c91-bb54c6b5003a\") "
Nov 23 16:16:19 crc kubenswrapper[5050]: I1123 16:16:19.723827 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4051fc5f-7beb-46ce-8c91-bb54c6b5003a-utilities\") pod \"4051fc5f-7beb-46ce-8c91-bb54c6b5003a\" (UID: \"4051fc5f-7beb-46ce-8c91-bb54c6b5003a\") "
Nov 23 16:16:19 crc kubenswrapper[5050]: I1123 16:16:19.724789 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4051fc5f-7beb-46ce-8c91-bb54c6b5003a-utilities" (OuterVolumeSpecName: "utilities") pod "4051fc5f-7beb-46ce-8c91-bb54c6b5003a" (UID: "4051fc5f-7beb-46ce-8c91-bb54c6b5003a"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 23 16:16:19 crc kubenswrapper[5050]: I1123 16:16:19.745068 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4051fc5f-7beb-46ce-8c91-bb54c6b5003a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4051fc5f-7beb-46ce-8c91-bb54c6b5003a" (UID: "4051fc5f-7beb-46ce-8c91-bb54c6b5003a"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 23 16:16:19 crc kubenswrapper[5050]: I1123 16:16:19.745771 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4051fc5f-7beb-46ce-8c91-bb54c6b5003a-kube-api-access-wjp4j" (OuterVolumeSpecName: "kube-api-access-wjp4j") pod "4051fc5f-7beb-46ce-8c91-bb54c6b5003a" (UID: "4051fc5f-7beb-46ce-8c91-bb54c6b5003a"). InnerVolumeSpecName "kube-api-access-wjp4j". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 23 16:16:19 crc kubenswrapper[5050]: I1123 16:16:19.831204 5050 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4051fc5f-7beb-46ce-8c91-bb54c6b5003a-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 23 16:16:19 crc kubenswrapper[5050]: I1123 16:16:19.831284 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wjp4j\" (UniqueName: \"kubernetes.io/projected/4051fc5f-7beb-46ce-8c91-bb54c6b5003a-kube-api-access-wjp4j\") on node \"crc\" DevicePath \"\""
Nov 23 16:16:19 crc kubenswrapper[5050]: I1123 16:16:19.831298 5050 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4051fc5f-7beb-46ce-8c91-bb54c6b5003a-utilities\") on node \"crc\" DevicePath \"\""
Nov 23 16:16:20 crc kubenswrapper[5050]: I1123 16:16:20.155013 5050 generic.go:334] "Generic (PLEG): container finished" podID="4051fc5f-7beb-46ce-8c91-bb54c6b5003a" containerID="8375c1ec9e676e0a5c8f15c84163bc2b1541b1117c6a54bc36d432856160b62a" exitCode=0
Nov 23 16:16:20 crc kubenswrapper[5050]: I1123 16:16:20.155104 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-c962f" event={"ID":"4051fc5f-7beb-46ce-8c91-bb54c6b5003a","Type":"ContainerDied","Data":"8375c1ec9e676e0a5c8f15c84163bc2b1541b1117c6a54bc36d432856160b62a"}
Nov 23 16:16:20 crc kubenswrapper[5050]: I1123 16:16:20.155147 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-c962f"
Nov 23 16:16:20 crc kubenswrapper[5050]: I1123 16:16:20.155189 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-c962f" event={"ID":"4051fc5f-7beb-46ce-8c91-bb54c6b5003a","Type":"ContainerDied","Data":"32be0e5ca13e538eae76c0c36be2784490570a2f391e2124196782aca1553ca2"}
Nov 23 16:16:20 crc kubenswrapper[5050]: I1123 16:16:20.155219 5050 scope.go:117] "RemoveContainer" containerID="8375c1ec9e676e0a5c8f15c84163bc2b1541b1117c6a54bc36d432856160b62a"
Nov 23 16:16:20 crc kubenswrapper[5050]: I1123 16:16:20.166215 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"832cbdc7-a2fb-41ba-93fa-342961b53fea","Type":"ContainerStarted","Data":"9c2e8e92995af1847bf6bf7d5a5468e654437fdd284f89d7bec09e4f6b9edcc7"}
Nov 23 16:16:20 crc kubenswrapper[5050]: I1123 16:16:20.201103 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=3.201078663 podStartE2EDuration="3.201078663s" podCreationTimestamp="2025-11-23 16:16:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 16:16:20.196565216 +0000 UTC m=+5675.363561701" watchObservedRunningTime="2025-11-23 16:16:20.201078663 +0000 UTC m=+5675.368075158"
Nov 23 16:16:20 crc kubenswrapper[5050]: I1123 16:16:20.226128 5050 scope.go:117] "RemoveContainer" containerID="ff7572bc24ca289e935af37d8b3478b89a03ba341b22c31e22f729adb6c5bab6"
Nov 23 16:16:20 crc kubenswrapper[5050]: I1123 16:16:20.238174 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-c962f"]
Nov 23 16:16:20 crc kubenswrapper[5050]: I1123 16:16:20.249082 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-c962f"]
Nov 23 16:16:20 crc kubenswrapper[5050]: I1123 16:16:20.273621 5050 scope.go:117] "RemoveContainer" containerID="0dcab51a2dd8d2521ee2e0101458880081da887bd2039c8401504f9007fceec9"
Nov 23 16:16:20 crc kubenswrapper[5050]: I1123 16:16:20.316402 5050 scope.go:117] "RemoveContainer" containerID="8375c1ec9e676e0a5c8f15c84163bc2b1541b1117c6a54bc36d432856160b62a"
Nov 23 16:16:20 crc kubenswrapper[5050]: E1123 16:16:20.318901 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8375c1ec9e676e0a5c8f15c84163bc2b1541b1117c6a54bc36d432856160b62a\": container with ID starting with 8375c1ec9e676e0a5c8f15c84163bc2b1541b1117c6a54bc36d432856160b62a not found: ID does not exist" containerID="8375c1ec9e676e0a5c8f15c84163bc2b1541b1117c6a54bc36d432856160b62a"
Nov 23 16:16:20 crc kubenswrapper[5050]: I1123 16:16:20.318965 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8375c1ec9e676e0a5c8f15c84163bc2b1541b1117c6a54bc36d432856160b62a"} err="failed to get container status \"8375c1ec9e676e0a5c8f15c84163bc2b1541b1117c6a54bc36d432856160b62a\": rpc error: code = NotFound desc = could not find container \"8375c1ec9e676e0a5c8f15c84163bc2b1541b1117c6a54bc36d432856160b62a\": container with ID starting with 8375c1ec9e676e0a5c8f15c84163bc2b1541b1117c6a54bc36d432856160b62a not found: ID does not exist"
Nov 23 16:16:20 crc kubenswrapper[5050]: I1123 16:16:20.319001 5050 scope.go:117] "RemoveContainer" containerID="ff7572bc24ca289e935af37d8b3478b89a03ba341b22c31e22f729adb6c5bab6"
Nov 23 16:16:20 crc kubenswrapper[5050]: E1123 16:16:20.321650 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ff7572bc24ca289e935af37d8b3478b89a03ba341b22c31e22f729adb6c5bab6\": container with ID starting with ff7572bc24ca289e935af37d8b3478b89a03ba341b22c31e22f729adb6c5bab6 not found: ID does not exist" containerID="ff7572bc24ca289e935af37d8b3478b89a03ba341b22c31e22f729adb6c5bab6"
Nov 23 16:16:20 crc kubenswrapper[5050]: I1123 16:16:20.321722 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ff7572bc24ca289e935af37d8b3478b89a03ba341b22c31e22f729adb6c5bab6"} err="failed to get container status \"ff7572bc24ca289e935af37d8b3478b89a03ba341b22c31e22f729adb6c5bab6\": rpc error: code = NotFound desc = could not find container \"ff7572bc24ca289e935af37d8b3478b89a03ba341b22c31e22f729adb6c5bab6\": container with ID starting with ff7572bc24ca289e935af37d8b3478b89a03ba341b22c31e22f729adb6c5bab6 not found: ID does not exist"
Nov 23 16:16:20 crc kubenswrapper[5050]: I1123 16:16:20.321761 5050 scope.go:117] "RemoveContainer" containerID="0dcab51a2dd8d2521ee2e0101458880081da887bd2039c8401504f9007fceec9"
Nov 23 16:16:20 crc kubenswrapper[5050]: E1123 16:16:20.322837 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0dcab51a2dd8d2521ee2e0101458880081da887bd2039c8401504f9007fceec9\": container with ID starting with 0dcab51a2dd8d2521ee2e0101458880081da887bd2039c8401504f9007fceec9 not found: ID does not exist" containerID="0dcab51a2dd8d2521ee2e0101458880081da887bd2039c8401504f9007fceec9"
Nov 23 16:16:20 crc kubenswrapper[5050]: I1123 16:16:20.322950 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0dcab51a2dd8d2521ee2e0101458880081da887bd2039c8401504f9007fceec9"} err="failed to get container status \"0dcab51a2dd8d2521ee2e0101458880081da887bd2039c8401504f9007fceec9\": rpc error: code = NotFound desc = could not find container \"0dcab51a2dd8d2521ee2e0101458880081da887bd2039c8401504f9007fceec9\": container with ID starting with 0dcab51a2dd8d2521ee2e0101458880081da887bd2039c8401504f9007fceec9 not found: ID does not exist"
Nov 23 16:16:21 crc kubenswrapper[5050]: I1123 16:16:21.564893 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4051fc5f-7beb-46ce-8c91-bb54c6b5003a" path="/var/lib/kubelet/pods/4051fc5f-7beb-46ce-8c91-bb54c6b5003a/volumes"
Nov 23 16:16:22 crc kubenswrapper[5050]: I1123 16:16:22.424249 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0"
Nov 23 16:16:22 crc kubenswrapper[5050]: I1123 16:16:22.495348 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0"
Nov 23 16:16:27 crc kubenswrapper[5050]: I1123 16:16:27.742756 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0"
Nov 23 16:16:33 crc kubenswrapper[5050]: I1123 16:16:33.549696 5050 scope.go:117] "RemoveContainer" containerID="131b4bb8956deb8c3bd7bd951e055fca3b66c01a0141c3f85cb0b1152065de00"
Nov 23 16:16:33 crc kubenswrapper[5050]: E1123 16:16:33.552677 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f"
Nov 23 16:16:47 crc kubenswrapper[5050]: I1123 16:16:47.552747 5050 scope.go:117] "RemoveContainer" containerID="131b4bb8956deb8c3bd7bd951e055fca3b66c01a0141c3f85cb0b1152065de00"
Nov 23 16:16:47 crc kubenswrapper[5050]: E1123 16:16:47.554155 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f"
Nov 23 16:17:00 crc kubenswrapper[5050]: I1123 16:17:00.549286 5050 scope.go:117] "RemoveContainer" containerID="131b4bb8956deb8c3bd7bd951e055fca3b66c01a0141c3f85cb0b1152065de00"
Nov 23 16:17:00 crc kubenswrapper[5050]: E1123 16:17:00.550666 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f"
Nov 23 16:17:08 crc kubenswrapper[5050]: I1123 16:17:08.800117 5050 scope.go:117] "RemoveContainer" containerID="d7663179605ac4be880286abb3618037a11f5166eec1e523180355db80284be2"
Nov 23 16:17:08 crc kubenswrapper[5050]: I1123 16:17:08.831639 5050 scope.go:117] "RemoveContainer" containerID="916e1300c6d72dce22b47536fe1f32a2705ed7a380c524d954cf7ce9d888f283"
Nov 23 16:17:13 crc kubenswrapper[5050]: I1123 16:17:13.552590 5050 scope.go:117] "RemoveContainer" containerID="131b4bb8956deb8c3bd7bd951e055fca3b66c01a0141c3f85cb0b1152065de00"
Nov 23 16:17:13 crc kubenswrapper[5050]: E1123 16:17:13.554019 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f"
Nov 23 16:17:25 crc kubenswrapper[5050]: I1123 16:17:25.566658 5050 scope.go:117] "RemoveContainer" containerID="131b4bb8956deb8c3bd7bd951e055fca3b66c01a0141c3f85cb0b1152065de00"
Nov 23 16:17:25 crc kubenswrapper[5050]: E1123 16:17:25.567950 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f"
Nov 23 16:17:36 crc kubenswrapper[5050]: I1123 16:17:36.548808 5050 scope.go:117] "RemoveContainer" containerID="131b4bb8956deb8c3bd7bd951e055fca3b66c01a0141c3f85cb0b1152065de00"
Nov 23 16:17:36 crc kubenswrapper[5050]: E1123 16:17:36.549993 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f"
Nov 23 16:17:40 crc kubenswrapper[5050]: I1123 16:17:40.100665 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-8bb4-account-create-hgrkr"]
Nov 23 16:17:40 crc kubenswrapper[5050]: I1123 16:17:40.111002 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-8bb4-account-create-hgrkr"]
Nov 23 16:17:41 crc kubenswrapper[5050]: I1123 16:17:41.045360 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-create-vgsft"]
Nov 23 16:17:41 crc kubenswrapper[5050]: I1123 16:17:41.063232 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-create-vgsft"]
Nov 23 16:17:41 crc kubenswrapper[5050]: I1123 16:17:41.572158 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8a88f6a6-0d39-4175-aa16-62d64b8ca2d7" path="/var/lib/kubelet/pods/8a88f6a6-0d39-4175-aa16-62d64b8ca2d7/volumes"
Nov 23 16:17:41 crc kubenswrapper[5050]: I1123 16:17:41.573752 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f261955d-669f-47e5-9a2d-9c7971293dc9" path="/var/lib/kubelet/pods/f261955d-669f-47e5-9a2d-9c7971293dc9/volumes"
Nov 23 16:17:51 crc kubenswrapper[5050]: I1123 16:17:51.042221 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-sync-wsz5j"]
Nov 23 16:17:51 crc kubenswrapper[5050]: I1123 16:17:51.052178 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-sync-wsz5j"]
Nov 23 16:17:51 crc kubenswrapper[5050]: I1123 16:17:51.548374 5050 scope.go:117] "RemoveContainer" containerID="131b4bb8956deb8c3bd7bd951e055fca3b66c01a0141c3f85cb0b1152065de00"
Nov 23 16:17:51 crc kubenswrapper[5050]: E1123 16:17:51.548893 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f"
Nov 23 16:17:51 crc kubenswrapper[5050]: I1123 16:17:51.566226 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e6624adb-caff-44b3-89dc-0cdd87aa56e7" path="/var/lib/kubelet/pods/e6624adb-caff-44b3-89dc-0cdd87aa56e7/volumes"
Nov 23 16:18:03 crc kubenswrapper[5050]: I1123 16:18:03.550010 5050 scope.go:117] "RemoveContainer" containerID="131b4bb8956deb8c3bd7bd951e055fca3b66c01a0141c3f85cb0b1152065de00"
Nov 23 16:18:03 crc kubenswrapper[5050]: E1123 16:18:03.551566 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f"
Nov 23 16:18:05 crc kubenswrapper[5050]: I1123 16:18:05.109430 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-bskzx"]
Nov 23 16:18:05 crc kubenswrapper[5050]: I1123 16:18:05.126216 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-bskzx"]
Nov 23 16:18:05 crc kubenswrapper[5050]: I1123 16:18:05.571213 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d0643b76-5863-43b7-b8a9-cdc50659bc95" path="/var/lib/kubelet/pods/d0643b76-5863-43b7-b8a9-cdc50659bc95/volumes"
Nov 23 16:18:08 crc kubenswrapper[5050]: I1123 16:18:08.951122 5050 scope.go:117] "RemoveContainer" containerID="4067dac9832390b9f4abb04f8b909d5a6b02f1da6f8ddb6c096247ef85e3ad20"
Nov 23 16:18:09 crc kubenswrapper[5050]: I1123 16:18:09.006362 5050 scope.go:117] "RemoveContainer" containerID="58a7a4c46aa9f634166bd2e00d75e57043f0f439be060bf2f0359dae331c5488"
Nov 23 16:18:09 crc kubenswrapper[5050]: I1123 16:18:09.051071 5050 scope.go:117] "RemoveContainer" containerID="82813006167f2355ac50e44e9399807f591cc7e00b9e6cb62a096324a162d1a4"
Nov 23 16:18:09 crc kubenswrapper[5050]: I1123 16:18:09.103725 5050 scope.go:117] "RemoveContainer" containerID="ca200ab6a5fe18c0f0660b43a31ccea3c0d72af2ab3902ebee55b9215300203b"
Nov 23 16:18:11 crc kubenswrapper[5050]: I1123 16:18:11.945624 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-jzkdv"]
Nov 23 16:18:11 crc kubenswrapper[5050]: E1123 16:18:11.946415 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4051fc5f-7beb-46ce-8c91-bb54c6b5003a" containerName="registry-server"
Nov 23 16:18:11 crc kubenswrapper[5050]: I1123 16:18:11.946434 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="4051fc5f-7beb-46ce-8c91-bb54c6b5003a" containerName="registry-server"
Nov 23 16:18:11 crc kubenswrapper[5050]: E1123 16:18:11.946493 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4051fc5f-7beb-46ce-8c91-bb54c6b5003a" containerName="extract-utilities"
Nov 23 16:18:11 crc kubenswrapper[5050]: I1123 16:18:11.946503 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="4051fc5f-7beb-46ce-8c91-bb54c6b5003a" containerName="extract-utilities"
Nov 23 16:18:11 crc kubenswrapper[5050]: E1123 16:18:11.946536 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4051fc5f-7beb-46ce-8c91-bb54c6b5003a" containerName="extract-content"
Nov 23 16:18:11 crc kubenswrapper[5050]: I1123 16:18:11.946544 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="4051fc5f-7beb-46ce-8c91-bb54c6b5003a" containerName="extract-content"
Nov 23 16:18:11 crc kubenswrapper[5050]: I1123 16:18:11.946776 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="4051fc5f-7beb-46ce-8c91-bb54c6b5003a" containerName="registry-server"
Nov 23 16:18:11 crc kubenswrapper[5050]: I1123 16:18:11.947908 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-jzkdv"
Nov 23 16:18:11 crc kubenswrapper[5050]: I1123 16:18:11.951839 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-scripts"
Nov 23 16:18:11 crc kubenswrapper[5050]: I1123 16:18:11.952151 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncontroller-ovncontroller-dockercfg-x4x9b"
Nov 23 16:18:11 crc kubenswrapper[5050]: I1123 16:18:11.969204 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-jzkdv"]
Nov 23 16:18:12 crc kubenswrapper[5050]: I1123 16:18:12.014157 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-ovs-4r95w"]
Nov 23 16:18:12 crc kubenswrapper[5050]: I1123 16:18:12.018086 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-4r95w"
Nov 23 16:18:12 crc kubenswrapper[5050]: I1123 16:18:12.033378 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-4r95w"]
Nov 23 16:18:12 crc kubenswrapper[5050]: I1123 16:18:12.049931 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/67fb633c-03ee-4b5f-acd1-930b6112cd19-var-log-ovn\") pod \"ovn-controller-jzkdv\" (UID: \"67fb633c-03ee-4b5f-acd1-930b6112cd19\") " pod="openstack/ovn-controller-jzkdv"
Nov 23 16:18:12 crc kubenswrapper[5050]: I1123 16:18:12.050060 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/67fb633c-03ee-4b5f-acd1-930b6112cd19-var-run-ovn\") pod \"ovn-controller-jzkdv\" (UID: \"67fb633c-03ee-4b5f-acd1-930b6112cd19\") " pod="openstack/ovn-controller-jzkdv"
Nov 23 16:18:12 crc kubenswrapper[5050]: I1123 16:18:12.050109 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p6246\" (UniqueName: \"kubernetes.io/projected/67fb633c-03ee-4b5f-acd1-930b6112cd19-kube-api-access-p6246\") pod \"ovn-controller-jzkdv\" (UID: \"67fb633c-03ee-4b5f-acd1-930b6112cd19\") " pod="openstack/ovn-controller-jzkdv"
Nov 23 16:18:12 crc kubenswrapper[5050]: I1123 16:18:12.050130 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/67fb633c-03ee-4b5f-acd1-930b6112cd19-var-run\") pod \"ovn-controller-jzkdv\" (UID: \"67fb633c-03ee-4b5f-acd1-930b6112cd19\") " pod="openstack/ovn-controller-jzkdv"
Nov 23 16:18:12 crc kubenswrapper[5050]: I1123 16:18:12.050153 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/67fb633c-03ee-4b5f-acd1-930b6112cd19-scripts\") pod \"ovn-controller-jzkdv\" (UID: \"67fb633c-03ee-4b5f-acd1-930b6112cd19\") " pod="openstack/ovn-controller-jzkdv"
Nov 23 16:18:12 crc kubenswrapper[5050]: I1123 16:18:12.152039 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/67fb633c-03ee-4b5f-acd1-930b6112cd19-var-run\") pod \"ovn-controller-jzkdv\" (UID: \"67fb633c-03ee-4b5f-acd1-930b6112cd19\") " pod="openstack/ovn-controller-jzkdv"
Nov 23 16:18:12 crc kubenswrapper[5050]: I1123 16:18:12.152583 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/67fb633c-03ee-4b5f-acd1-930b6112cd19-scripts\") pod \"ovn-controller-jzkdv\" (UID: \"67fb633c-03ee-4b5f-acd1-930b6112cd19\") " pod="openstack/ovn-controller-jzkdv"
Nov 23 16:18:12 crc kubenswrapper[5050]: I1123 16:18:12.152636 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/9f33288a-10e9-4c41-878f-554f583430a1-var-run\") pod \"ovn-controller-ovs-4r95w\" (UID: \"9f33288a-10e9-4c41-878f-554f583430a1\") " pod="openstack/ovn-controller-ovs-4r95w"
Nov 23 16:18:12 crc kubenswrapper[5050]: I1123 16:18:12.152646 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/67fb633c-03ee-4b5f-acd1-930b6112cd19-var-run\") pod \"ovn-controller-jzkdv\" (UID: \"67fb633c-03ee-4b5f-acd1-930b6112cd19\") " pod="openstack/ovn-controller-jzkdv"
Nov 23 16:18:12 crc kubenswrapper[5050]: I1123 16:18:12.152683 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/67fb633c-03ee-4b5f-acd1-930b6112cd19-var-log-ovn\") pod \"ovn-controller-jzkdv\" (UID: \"67fb633c-03ee-4b5f-acd1-930b6112cd19\") " pod="openstack/ovn-controller-jzkdv"
Nov 23 16:18:12 crc kubenswrapper[5050]: I1123 16:18:12.152850 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/67fb633c-03ee-4b5f-acd1-930b6112cd19-var-log-ovn\") pod \"ovn-controller-jzkdv\" (UID: \"67fb633c-03ee-4b5f-acd1-930b6112cd19\") " pod="openstack/ovn-controller-jzkdv"
Nov 23 16:18:12 crc kubenswrapper[5050]: I1123 16:18:12.152984 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9f33288a-10e9-4c41-878f-554f583430a1-scripts\") pod \"ovn-controller-ovs-4r95w\" (UID: \"9f33288a-10e9-4c41-878f-554f583430a1\") " pod="openstack/ovn-controller-ovs-4r95w"
Nov 23 16:18:12 crc kubenswrapper[5050]: I1123 16:18:12.153131 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/9f33288a-10e9-4c41-878f-554f583430a1-var-log\") pod \"ovn-controller-ovs-4r95w\" (UID: \"9f33288a-10e9-4c41-878f-554f583430a1\") " pod="openstack/ovn-controller-ovs-4r95w"
Nov 23 16:18:12 crc kubenswrapper[5050]: I1123 16:18:12.153297 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/67fb633c-03ee-4b5f-acd1-930b6112cd19-var-run-ovn\") pod \"ovn-controller-jzkdv\" (UID: \"67fb633c-03ee-4b5f-acd1-930b6112cd19\") " pod="openstack/ovn-controller-jzkdv"
Nov 23 16:18:12 crc kubenswrapper[5050]: I1123 16:18:12.153363 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jbhlq\" (UniqueName: \"kubernetes.io/projected/9f33288a-10e9-4c41-878f-554f583430a1-kube-api-access-jbhlq\") pod \"ovn-controller-ovs-4r95w\" (UID: \"9f33288a-10e9-4c41-878f-554f583430a1\") " pod="openstack/ovn-controller-ovs-4r95w"
Nov 23 16:18:12 crc kubenswrapper[5050]: I1123 16:18:12.153391 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/9f33288a-10e9-4c41-878f-554f583430a1-var-lib\") pod \"ovn-controller-ovs-4r95w\" (UID: \"9f33288a-10e9-4c41-878f-554f583430a1\") " pod="openstack/ovn-controller-ovs-4r95w"
Nov 23 16:18:12 crc kubenswrapper[5050]: I1123 16:18:12.153421 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/9f33288a-10e9-4c41-878f-554f583430a1-etc-ovs\") pod \"ovn-controller-ovs-4r95w\" (UID: \"9f33288a-10e9-4c41-878f-554f583430a1\") " pod="openstack/ovn-controller-ovs-4r95w"
Nov 23 16:18:12 crc kubenswrapper[5050]: I1123 16:18:12.153408 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/67fb633c-03ee-4b5f-acd1-930b6112cd19-var-run-ovn\") pod \"ovn-controller-jzkdv\" (UID: \"67fb633c-03ee-4b5f-acd1-930b6112cd19\") " pod="openstack/ovn-controller-jzkdv"
Nov 23 16:18:12 crc kubenswrapper[5050]: I1123 16:18:12.153476 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p6246\" (UniqueName: \"kubernetes.io/projected/67fb633c-03ee-4b5f-acd1-930b6112cd19-kube-api-access-p6246\") pod \"ovn-controller-jzkdv\" (UID: \"67fb633c-03ee-4b5f-acd1-930b6112cd19\") " pod="openstack/ovn-controller-jzkdv"
Nov 23 16:18:12 crc kubenswrapper[5050]: I1123 16:18:12.155471 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/67fb633c-03ee-4b5f-acd1-930b6112cd19-scripts\") pod \"ovn-controller-jzkdv\" (UID: \"67fb633c-03ee-4b5f-acd1-930b6112cd19\") " pod="openstack/ovn-controller-jzkdv"
Nov 23 16:18:12 crc kubenswrapper[5050]: I1123 16:18:12.182467 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p6246\" (UniqueName: \"kubernetes.io/projected/67fb633c-03ee-4b5f-acd1-930b6112cd19-kube-api-access-p6246\") pod \"ovn-controller-jzkdv\" (UID: \"67fb633c-03ee-4b5f-acd1-930b6112cd19\") " pod="openstack/ovn-controller-jzkdv"
Nov 23 16:18:12 crc kubenswrapper[5050]: I1123 16:18:12.255460 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jbhlq\" (UniqueName: \"kubernetes.io/projected/9f33288a-10e9-4c41-878f-554f583430a1-kube-api-access-jbhlq\") pod \"ovn-controller-ovs-4r95w\" (UID: \"9f33288a-10e9-4c41-878f-554f583430a1\") " pod="openstack/ovn-controller-ovs-4r95w"
Nov 23 16:18:12 crc kubenswrapper[5050]: I1123 16:18:12.255800 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/9f33288a-10e9-4c41-878f-554f583430a1-var-lib\") pod \"ovn-controller-ovs-4r95w\" (UID: \"9f33288a-10e9-4c41-878f-554f583430a1\") " pod="openstack/ovn-controller-ovs-4r95w"
Nov 23 16:18:12 crc kubenswrapper[5050]: I1123 16:18:12.255891 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/9f33288a-10e9-4c41-878f-554f583430a1-etc-ovs\") pod \"ovn-controller-ovs-4r95w\" (UID: \"9f33288a-10e9-4c41-878f-554f583430a1\") " pod="openstack/ovn-controller-ovs-4r95w"
Nov 23 16:18:12 crc kubenswrapper[5050]: I1123 16:18:12.256006 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/9f33288a-10e9-4c41-878f-554f583430a1-var-run\") pod \"ovn-controller-ovs-4r95w\" (UID: \"9f33288a-10e9-4c41-878f-554f583430a1\") " pod="openstack/ovn-controller-ovs-4r95w"
Nov 23 16:18:12 crc kubenswrapper[5050]: I1123 16:18:12.256068 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/9f33288a-10e9-4c41-878f-554f583430a1-var-run\") pod \"ovn-controller-ovs-4r95w\" (UID: \"9f33288a-10e9-4c41-878f-554f583430a1\") " pod="openstack/ovn-controller-ovs-4r95w"
Nov 23 16:18:12 crc kubenswrapper[5050]: I1123 16:18:12.256017 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/9f33288a-10e9-4c41-878f-554f583430a1-var-lib\") pod \"ovn-controller-ovs-4r95w\" (UID: \"9f33288a-10e9-4c41-878f-554f583430a1\") " pod="openstack/ovn-controller-ovs-4r95w"
Nov 23 16:18:12 crc kubenswrapper[5050]: I1123 16:18:12.256165 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/9f33288a-10e9-4c41-878f-554f583430a1-etc-ovs\") pod \"ovn-controller-ovs-4r95w\" (UID: \"9f33288a-10e9-4c41-878f-554f583430a1\") " pod="openstack/ovn-controller-ovs-4r95w"
Nov 23 16:18:12 crc kubenswrapper[5050]: I1123 16:18:12.256393 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9f33288a-10e9-4c41-878f-554f583430a1-scripts\") pod \"ovn-controller-ovs-4r95w\" (UID: \"9f33288a-10e9-4c41-878f-554f583430a1\") " pod="openstack/ovn-controller-ovs-4r95w"
Nov 23 16:18:12 crc kubenswrapper[5050]: I1123 16:18:12.256525 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/9f33288a-10e9-4c41-878f-554f583430a1-var-log\") pod \"ovn-controller-ovs-4r95w\" (UID: \"9f33288a-10e9-4c41-878f-554f583430a1\") " pod="openstack/ovn-controller-ovs-4r95w"
Nov 23 16:18:12 crc kubenswrapper[5050]: I1123 16:18:12.256782 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/9f33288a-10e9-4c41-878f-554f583430a1-var-log\") pod \"ovn-controller-ovs-4r95w\" (UID: \"9f33288a-10e9-4c41-878f-554f583430a1\") " pod="openstack/ovn-controller-ovs-4r95w"
Nov 23 16:18:12 crc kubenswrapper[5050]: I1123 16:18:12.259502 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9f33288a-10e9-4c41-878f-554f583430a1-scripts\") pod \"ovn-controller-ovs-4r95w\" (UID: \"9f33288a-10e9-4c41-878f-554f583430a1\") " pod="openstack/ovn-controller-ovs-4r95w"
Nov 23 16:18:12 crc kubenswrapper[5050]: I1123 16:18:12.271827 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-jzkdv"
Nov 23 16:18:12 crc kubenswrapper[5050]: I1123 16:18:12.275824 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jbhlq\" (UniqueName: \"kubernetes.io/projected/9f33288a-10e9-4c41-878f-554f583430a1-kube-api-access-jbhlq\") pod \"ovn-controller-ovs-4r95w\" (UID: \"9f33288a-10e9-4c41-878f-554f583430a1\") " pod="openstack/ovn-controller-ovs-4r95w"
Nov 23 16:18:12 crc kubenswrapper[5050]: I1123 16:18:12.349055 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-4r95w"
Nov 23 16:18:12 crc kubenswrapper[5050]: I1123 16:18:12.856471 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-jzkdv"]
Nov 23 16:18:13 crc kubenswrapper[5050]: I1123 16:18:13.261458 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-4r95w"]
Nov 23 16:18:13 crc kubenswrapper[5050]: I1123 16:18:13.443783 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-metrics-j6l2h"]
Nov 23 16:18:13 crc kubenswrapper[5050]: I1123 16:18:13.445306 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-j6l2h"
Nov 23 16:18:13 crc kubenswrapper[5050]: I1123 16:18:13.447660 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-metrics-config"
Nov 23 16:18:13 crc kubenswrapper[5050]: I1123 16:18:13.470253 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-j6l2h"]
Nov 23 16:18:13 crc kubenswrapper[5050]: I1123 16:18:13.588890 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/df823f17-83c9-444e-8e5e-610dd679a890-ovn-rundir\") pod \"ovn-controller-metrics-j6l2h\" (UID: \"df823f17-83c9-444e-8e5e-610dd679a890\") " pod="openstack/ovn-controller-metrics-j6l2h"
Nov 23 16:18:13 crc kubenswrapper[5050]: I1123 16:18:13.589038 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/df823f17-83c9-444e-8e5e-610dd679a890-ovs-rundir\") pod \"ovn-controller-metrics-j6l2h\" (UID: \"df823f17-83c9-444e-8e5e-610dd679a890\") " pod="openstack/ovn-controller-metrics-j6l2h"
Nov 23 16:18:13 crc kubenswrapper[5050]: I1123 16:18:13.589537 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/df823f17-83c9-444e-8e5e-610dd679a890-config\") pod \"ovn-controller-metrics-j6l2h\" (UID: \"df823f17-83c9-444e-8e5e-610dd679a890\") " pod="openstack/ovn-controller-metrics-j6l2h"
Nov 23 16:18:13 crc kubenswrapper[5050]: I1123 16:18:13.589591 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ts7p5\" (UniqueName: \"kubernetes.io/projected/df823f17-83c9-444e-8e5e-610dd679a890-kube-api-access-ts7p5\") pod \"ovn-controller-metrics-j6l2h\" (UID: \"df823f17-83c9-444e-8e5e-610dd679a890\") " pod="openstack/ovn-controller-metrics-j6l2h"
Nov 23 16:18:13 crc kubenswrapper[5050]: I1123 16:18:13.645212 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-jzkdv" event={"ID":"67fb633c-03ee-4b5f-acd1-930b6112cd19","Type":"ContainerStarted","Data":"5a0cb838b7378a34de0a9e9421c2be6d85147dece546fc4a63eedd24c9272eec"}
Nov 23 16:18:13 crc kubenswrapper[5050]: I1123 16:18:13.645261 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-jzkdv" event={"ID":"67fb633c-03ee-4b5f-acd1-930b6112cd19","Type":"ContainerStarted","Data":"62f05eb6a71b2a38681060cd9c67a2c753ef91b774ab0eb026138e2ae15bc39c"}
Nov 23 16:18:13 crc kubenswrapper[5050]: I1123 16:18:13.645659 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-jzkdv"
Nov 23 16:18:13 crc kubenswrapper[5050]: I1123 16:18:13.647351 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-4r95w" event={"ID":"9f33288a-10e9-4c41-878f-554f583430a1","Type":"ContainerStarted","Data":"c1dfc3aba1cc07ac9fe09f745f0a571ad2465b85a693a2d7e814f9b5e428fede"}
Nov 23 16:18:13 crc kubenswrapper[5050]: I1123 16:18:13.672092 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-jzkdv" podStartSLOduration=2.672065798 podStartE2EDuration="2.672065798s" podCreationTimestamp="2025-11-23 16:18:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 16:18:13.662957131 +0000 UTC m=+5788.829953616" watchObservedRunningTime="2025-11-23 16:18:13.672065798 +0000 UTC m=+5788.839062293"
Nov 23 16:18:13 crc kubenswrapper[5050]: I1123 16:18:13.692580 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/df823f17-83c9-444e-8e5e-610dd679a890-ovn-rundir\") pod \"ovn-controller-metrics-j6l2h\" (UID: \"df823f17-83c9-444e-8e5e-610dd679a890\") " pod="openstack/ovn-controller-metrics-j6l2h"
Nov 23 16:18:13 crc kubenswrapper[5050]: I1123 16:18:13.693305 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/df823f17-83c9-444e-8e5e-610dd679a890-ovs-rundir\") pod \"ovn-controller-metrics-j6l2h\" (UID: \"df823f17-83c9-444e-8e5e-610dd679a890\") " pod="openstack/ovn-controller-metrics-j6l2h"
Nov 23 16:18:13 crc kubenswrapper[5050]: I1123 16:18:13.693475 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/df823f17-83c9-444e-8e5e-610dd679a890-config\") pod \"ovn-controller-metrics-j6l2h\" (UID: \"df823f17-83c9-444e-8e5e-610dd679a890\") " pod="openstack/ovn-controller-metrics-j6l2h"
Nov 23 16:18:13 crc kubenswrapper[5050]: I1123 16:18:13.693618 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ts7p5\" (UniqueName: \"kubernetes.io/projected/df823f17-83c9-444e-8e5e-610dd679a890-kube-api-access-ts7p5\") pod \"ovn-controller-metrics-j6l2h\" (UID: \"df823f17-83c9-444e-8e5e-610dd679a890\") " pod="openstack/ovn-controller-metrics-j6l2h"
Nov 23 16:18:13 crc kubenswrapper[5050]: I1123 16:18:13.694063 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/df823f17-83c9-444e-8e5e-610dd679a890-ovn-rundir\") pod \"ovn-controller-metrics-j6l2h\" (UID: \"df823f17-83c9-444e-8e5e-610dd679a890\") " pod="openstack/ovn-controller-metrics-j6l2h"
Nov 23 16:18:13 crc kubenswrapper[5050]: I1123 16:18:13.694240 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/df823f17-83c9-444e-8e5e-610dd679a890-ovs-rundir\") pod \"ovn-controller-metrics-j6l2h\" (UID: \"df823f17-83c9-444e-8e5e-610dd679a890\") " pod="openstack/ovn-controller-metrics-j6l2h"
Nov 23 16:18:13 crc kubenswrapper[5050]: I1123 16:18:13.695331 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/df823f17-83c9-444e-8e5e-610dd679a890-config\") pod \"ovn-controller-metrics-j6l2h\" (UID: \"df823f17-83c9-444e-8e5e-610dd679a890\") " pod="openstack/ovn-controller-metrics-j6l2h"
Nov 23 16:18:13 crc kubenswrapper[5050]: I1123 16:18:13.717085 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ts7p5\" (UniqueName: \"kubernetes.io/projected/df823f17-83c9-444e-8e5e-610dd679a890-kube-api-access-ts7p5\") pod \"ovn-controller-metrics-j6l2h\" (UID: \"df823f17-83c9-444e-8e5e-610dd679a890\") " pod="openstack/ovn-controller-metrics-j6l2h"
Nov 23 16:18:13 crc kubenswrapper[5050]: I1123 16:18:13.781399 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-j6l2h"
Nov 23 16:18:14 crc kubenswrapper[5050]: I1123 16:18:14.296075 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-j6l2h"]
Nov 23 16:18:14 crc kubenswrapper[5050]: W1123 16:18:14.297395 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddf823f17_83c9_444e_8e5e_610dd679a890.slice/crio-30ccce06b02251f6a6031137c680953a9e703b1bbec4cdbffdab2283182bb40e WatchSource:0}: Error finding container 30ccce06b02251f6a6031137c680953a9e703b1bbec4cdbffdab2283182bb40e: Status 404 returned error can't find the container with id 30ccce06b02251f6a6031137c680953a9e703b1bbec4cdbffdab2283182bb40e
Nov 23 16:18:14 crc kubenswrapper[5050]: I1123 16:18:14.417371 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-db-create-bw9cx"]
Nov 23 16:18:14 crc kubenswrapper[5050]: I1123 16:18:14.419700 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-db-create-bw9cx"
Nov 23 16:18:14 crc kubenswrapper[5050]: I1123 16:18:14.429760 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-db-create-bw9cx"]
Nov 23 16:18:14 crc kubenswrapper[5050]: I1123 16:18:14.517392 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m4nrj\" (UniqueName: \"kubernetes.io/projected/5ba0989c-deb9-4516-9d8b-8e59e0565cd1-kube-api-access-m4nrj\") pod \"octavia-db-create-bw9cx\" (UID: \"5ba0989c-deb9-4516-9d8b-8e59e0565cd1\") " pod="openstack/octavia-db-create-bw9cx"
Nov 23 16:18:14 crc kubenswrapper[5050]: I1123 16:18:14.517553 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5ba0989c-deb9-4516-9d8b-8e59e0565cd1-operator-scripts\") pod \"octavia-db-create-bw9cx\" (UID: \"5ba0989c-deb9-4516-9d8b-8e59e0565cd1\") " pod="openstack/octavia-db-create-bw9cx"
Nov 23 16:18:14 crc kubenswrapper[5050]: I1123 16:18:14.618923 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m4nrj\" (UniqueName: \"kubernetes.io/projected/5ba0989c-deb9-4516-9d8b-8e59e0565cd1-kube-api-access-m4nrj\") pod \"octavia-db-create-bw9cx\" (UID: \"5ba0989c-deb9-4516-9d8b-8e59e0565cd1\") " pod="openstack/octavia-db-create-bw9cx"
Nov 23 16:18:14 crc kubenswrapper[5050]: I1123 16:18:14.619078 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5ba0989c-deb9-4516-9d8b-8e59e0565cd1-operator-scripts\") pod \"octavia-db-create-bw9cx\" (UID: \"5ba0989c-deb9-4516-9d8b-8e59e0565cd1\") " pod="openstack/octavia-db-create-bw9cx"
Nov 23 16:18:14 crc kubenswrapper[5050]: I1123 16:18:14.620107 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5ba0989c-deb9-4516-9d8b-8e59e0565cd1-operator-scripts\") pod \"octavia-db-create-bw9cx\" (UID: \"5ba0989c-deb9-4516-9d8b-8e59e0565cd1\") " pod="openstack/octavia-db-create-bw9cx"
Nov 23 16:18:14 crc kubenswrapper[5050]: I1123 16:18:14.639537 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m4nrj\" (UniqueName: \"kubernetes.io/projected/5ba0989c-deb9-4516-9d8b-8e59e0565cd1-kube-api-access-m4nrj\") pod \"octavia-db-create-bw9cx\" (UID: \"5ba0989c-deb9-4516-9d8b-8e59e0565cd1\") " pod="openstack/octavia-db-create-bw9cx"
Nov 23 16:18:14 crc kubenswrapper[5050]: I1123 16:18:14.659500 5050 generic.go:334] "Generic (PLEG): container finished" podID="9f33288a-10e9-4c41-878f-554f583430a1" containerID="2835a4c38048ec4294c1bb356f6840300fc20eae1ca045114ca64415b64f7479" exitCode=0
Nov 23 16:18:14 crc kubenswrapper[5050]: I1123 16:18:14.659626 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-4r95w" event={"ID":"9f33288a-10e9-4c41-878f-554f583430a1","Type":"ContainerDied","Data":"2835a4c38048ec4294c1bb356f6840300fc20eae1ca045114ca64415b64f7479"}
Nov 23 16:18:14 crc kubenswrapper[5050]: I1123 16:18:14.662138 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-j6l2h" event={"ID":"df823f17-83c9-444e-8e5e-610dd679a890","Type":"ContainerStarted","Data":"f99e8a7811e7b04b8fbe41efce03066e135731fa7f489af320236a9b0c8b5f5a"}
Nov 23 16:18:14 crc kubenswrapper[5050]: I1123 16:18:14.662195 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-j6l2h" event={"ID":"df823f17-83c9-444e-8e5e-610dd679a890","Type":"ContainerStarted","Data":"30ccce06b02251f6a6031137c680953a9e703b1bbec4cdbffdab2283182bb40e"}
Nov 23 16:18:14 crc kubenswrapper[5050]: I1123 16:18:14.710088 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-metrics-j6l2h" podStartSLOduration=1.710065636 podStartE2EDuration="1.710065636s" podCreationTimestamp="2025-11-23 16:18:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 16:18:14.705440875 +0000 UTC m=+5789.872437380" watchObservedRunningTime="2025-11-23 16:18:14.710065636 +0000 UTC m=+5789.877062131"
Nov 23 16:18:14 crc kubenswrapper[5050]: I1123 16:18:14.751202 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-db-create-bw9cx"
Nov 23 16:18:15 crc kubenswrapper[5050]: W1123 16:18:15.907220 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5ba0989c_deb9_4516_9d8b_8e59e0565cd1.slice/crio-399e32f1153851c1a7660636e7edb5276b4de12dd36901dc2100fd0e15b1a470 WatchSource:0}: Error finding container 399e32f1153851c1a7660636e7edb5276b4de12dd36901dc2100fd0e15b1a470: Status 404 returned error can't find the container with id 399e32f1153851c1a7660636e7edb5276b4de12dd36901dc2100fd0e15b1a470
Nov 23 16:18:15 crc kubenswrapper[5050]: I1123 16:18:15.916568 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-db-create-bw9cx"]
Nov 23 16:18:16 crc kubenswrapper[5050]: I1123 16:18:16.421371 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-add8-account-create-hgsgf"]
Nov 23 16:18:16 crc kubenswrapper[5050]: I1123 16:18:16.423519 5050 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openstack/octavia-add8-account-create-hgsgf" Nov 23 16:18:16 crc kubenswrapper[5050]: I1123 16:18:16.425916 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-db-secret" Nov 23 16:18:16 crc kubenswrapper[5050]: I1123 16:18:16.431141 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-add8-account-create-hgsgf"] Nov 23 16:18:16 crc kubenswrapper[5050]: I1123 16:18:16.504741 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0e91873a-a795-426c-b90c-bd25dec9386e-operator-scripts\") pod \"octavia-add8-account-create-hgsgf\" (UID: \"0e91873a-a795-426c-b90c-bd25dec9386e\") " pod="openstack/octavia-add8-account-create-hgsgf" Nov 23 16:18:16 crc kubenswrapper[5050]: I1123 16:18:16.504892 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pm2wd\" (UniqueName: \"kubernetes.io/projected/0e91873a-a795-426c-b90c-bd25dec9386e-kube-api-access-pm2wd\") pod \"octavia-add8-account-create-hgsgf\" (UID: \"0e91873a-a795-426c-b90c-bd25dec9386e\") " pod="openstack/octavia-add8-account-create-hgsgf" Nov 23 16:18:16 crc kubenswrapper[5050]: I1123 16:18:16.607242 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0e91873a-a795-426c-b90c-bd25dec9386e-operator-scripts\") pod \"octavia-add8-account-create-hgsgf\" (UID: \"0e91873a-a795-426c-b90c-bd25dec9386e\") " pod="openstack/octavia-add8-account-create-hgsgf" Nov 23 16:18:16 crc kubenswrapper[5050]: I1123 16:18:16.607327 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pm2wd\" (UniqueName: \"kubernetes.io/projected/0e91873a-a795-426c-b90c-bd25dec9386e-kube-api-access-pm2wd\") pod \"octavia-add8-account-create-hgsgf\" (UID: \"0e91873a-a795-426c-b90c-bd25dec9386e\") " pod="openstack/octavia-add8-account-create-hgsgf" Nov 23 16:18:16 crc kubenswrapper[5050]: I1123 16:18:16.609616 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0e91873a-a795-426c-b90c-bd25dec9386e-operator-scripts\") pod \"octavia-add8-account-create-hgsgf\" (UID: \"0e91873a-a795-426c-b90c-bd25dec9386e\") " pod="openstack/octavia-add8-account-create-hgsgf" Nov 23 16:18:16 crc kubenswrapper[5050]: I1123 16:18:16.641476 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pm2wd\" (UniqueName: \"kubernetes.io/projected/0e91873a-a795-426c-b90c-bd25dec9386e-kube-api-access-pm2wd\") pod \"octavia-add8-account-create-hgsgf\" (UID: \"0e91873a-a795-426c-b90c-bd25dec9386e\") " pod="openstack/octavia-add8-account-create-hgsgf" Nov 23 16:18:16 crc kubenswrapper[5050]: I1123 16:18:16.695263 5050 generic.go:334] "Generic (PLEG): container finished" podID="5ba0989c-deb9-4516-9d8b-8e59e0565cd1" containerID="969c7d5af6d321a0b0a60235c193dd3d84a266286ec3cdb1723362c111437d6e" exitCode=0 Nov 23 16:18:16 crc kubenswrapper[5050]: I1123 16:18:16.695998 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-db-create-bw9cx" event={"ID":"5ba0989c-deb9-4516-9d8b-8e59e0565cd1","Type":"ContainerDied","Data":"969c7d5af6d321a0b0a60235c193dd3d84a266286ec3cdb1723362c111437d6e"} Nov 23 16:18:16 crc kubenswrapper[5050]: I1123 16:18:16.696036 5050 kubelet.go:2453] "SyncLoop (PLEG): event for 
pod" pod="openstack/octavia-db-create-bw9cx" event={"ID":"5ba0989c-deb9-4516-9d8b-8e59e0565cd1","Type":"ContainerStarted","Data":"399e32f1153851c1a7660636e7edb5276b4de12dd36901dc2100fd0e15b1a470"} Nov 23 16:18:16 crc kubenswrapper[5050]: I1123 16:18:16.702969 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-4r95w" event={"ID":"9f33288a-10e9-4c41-878f-554f583430a1","Type":"ContainerStarted","Data":"c8f245ddbd632c82bf44dbc3feb2c11ba08ded3bd67f90ad5278b93c2052d075"} Nov 23 16:18:16 crc kubenswrapper[5050]: I1123 16:18:16.703014 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-4r95w" event={"ID":"9f33288a-10e9-4c41-878f-554f583430a1","Type":"ContainerStarted","Data":"76ca76c89c9a5eff48a4c67c420784b8f9f130a765a8d32cb04bacd5613cb76c"} Nov 23 16:18:16 crc kubenswrapper[5050]: I1123 16:18:16.703950 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-4r95w" Nov 23 16:18:16 crc kubenswrapper[5050]: I1123 16:18:16.704001 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-4r95w" Nov 23 16:18:16 crc kubenswrapper[5050]: I1123 16:18:16.741020 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-ovs-4r95w" podStartSLOduration=5.741003169 podStartE2EDuration="5.741003169s" podCreationTimestamp="2025-11-23 16:18:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 16:18:16.735884675 +0000 UTC m=+5791.902881160" watchObservedRunningTime="2025-11-23 16:18:16.741003169 +0000 UTC m=+5791.907999654" Nov 23 16:18:16 crc kubenswrapper[5050]: I1123 16:18:16.742236 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-add8-account-create-hgsgf" Nov 23 16:18:17 crc kubenswrapper[5050]: I1123 16:18:17.227727 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-add8-account-create-hgsgf"] Nov 23 16:18:17 crc kubenswrapper[5050]: I1123 16:18:17.549604 5050 scope.go:117] "RemoveContainer" containerID="131b4bb8956deb8c3bd7bd951e055fca3b66c01a0141c3f85cb0b1152065de00" Nov 23 16:18:17 crc kubenswrapper[5050]: E1123 16:18:17.550358 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 16:18:17 crc kubenswrapper[5050]: I1123 16:18:17.715029 5050 generic.go:334] "Generic (PLEG): container finished" podID="0e91873a-a795-426c-b90c-bd25dec9386e" containerID="8657cdc0fa967668e658b8ff3329e6362417f03d33c237685c535d594fdd620d" exitCode=0 Nov 23 16:18:17 crc kubenswrapper[5050]: I1123 16:18:17.715171 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-add8-account-create-hgsgf" event={"ID":"0e91873a-a795-426c-b90c-bd25dec9386e","Type":"ContainerDied","Data":"8657cdc0fa967668e658b8ff3329e6362417f03d33c237685c535d594fdd620d"} Nov 23 16:18:17 crc kubenswrapper[5050]: I1123 16:18:17.715227 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-add8-account-create-hgsgf" event={"ID":"0e91873a-a795-426c-b90c-bd25dec9386e","Type":"ContainerStarted","Data":"208c3f7f6f83f43396c0839701d0a7ca6cbfb7a7e191c810d4be3d84fe78c401"} Nov 23 16:18:18 crc kubenswrapper[5050]: I1123 16:18:18.105327 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-db-create-bw9cx" Nov 23 16:18:18 crc kubenswrapper[5050]: I1123 16:18:18.140976 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5ba0989c-deb9-4516-9d8b-8e59e0565cd1-operator-scripts\") pod \"5ba0989c-deb9-4516-9d8b-8e59e0565cd1\" (UID: \"5ba0989c-deb9-4516-9d8b-8e59e0565cd1\") " Nov 23 16:18:18 crc kubenswrapper[5050]: I1123 16:18:18.141118 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m4nrj\" (UniqueName: \"kubernetes.io/projected/5ba0989c-deb9-4516-9d8b-8e59e0565cd1-kube-api-access-m4nrj\") pod \"5ba0989c-deb9-4516-9d8b-8e59e0565cd1\" (UID: \"5ba0989c-deb9-4516-9d8b-8e59e0565cd1\") " Nov 23 16:18:18 crc kubenswrapper[5050]: I1123 16:18:18.141839 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5ba0989c-deb9-4516-9d8b-8e59e0565cd1-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "5ba0989c-deb9-4516-9d8b-8e59e0565cd1" (UID: "5ba0989c-deb9-4516-9d8b-8e59e0565cd1"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 16:18:18 crc kubenswrapper[5050]: I1123 16:18:18.142142 5050 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5ba0989c-deb9-4516-9d8b-8e59e0565cd1-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 23 16:18:18 crc kubenswrapper[5050]: I1123 16:18:18.148458 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5ba0989c-deb9-4516-9d8b-8e59e0565cd1-kube-api-access-m4nrj" (OuterVolumeSpecName: "kube-api-access-m4nrj") pod "5ba0989c-deb9-4516-9d8b-8e59e0565cd1" (UID: "5ba0989c-deb9-4516-9d8b-8e59e0565cd1"). InnerVolumeSpecName "kube-api-access-m4nrj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 16:18:18 crc kubenswrapper[5050]: I1123 16:18:18.243141 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m4nrj\" (UniqueName: \"kubernetes.io/projected/5ba0989c-deb9-4516-9d8b-8e59e0565cd1-kube-api-access-m4nrj\") on node \"crc\" DevicePath \"\"" Nov 23 16:18:18 crc kubenswrapper[5050]: I1123 16:18:18.730065 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-db-create-bw9cx" Nov 23 16:18:18 crc kubenswrapper[5050]: I1123 16:18:18.730061 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-db-create-bw9cx" event={"ID":"5ba0989c-deb9-4516-9d8b-8e59e0565cd1","Type":"ContainerDied","Data":"399e32f1153851c1a7660636e7edb5276b4de12dd36901dc2100fd0e15b1a470"} Nov 23 16:18:18 crc kubenswrapper[5050]: I1123 16:18:18.730856 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="399e32f1153851c1a7660636e7edb5276b4de12dd36901dc2100fd0e15b1a470" Nov 23 16:18:19 crc kubenswrapper[5050]: I1123 16:18:19.179015 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-add8-account-create-hgsgf" Nov 23 16:18:19 crc kubenswrapper[5050]: I1123 16:18:19.274743 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pm2wd\" (UniqueName: \"kubernetes.io/projected/0e91873a-a795-426c-b90c-bd25dec9386e-kube-api-access-pm2wd\") pod \"0e91873a-a795-426c-b90c-bd25dec9386e\" (UID: \"0e91873a-a795-426c-b90c-bd25dec9386e\") " Nov 23 16:18:19 crc kubenswrapper[5050]: I1123 16:18:19.274930 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0e91873a-a795-426c-b90c-bd25dec9386e-operator-scripts\") pod \"0e91873a-a795-426c-b90c-bd25dec9386e\" (UID: \"0e91873a-a795-426c-b90c-bd25dec9386e\") " Nov 23 16:18:19 crc kubenswrapper[5050]: I1123 16:18:19.276079 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0e91873a-a795-426c-b90c-bd25dec9386e-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "0e91873a-a795-426c-b90c-bd25dec9386e" (UID: "0e91873a-a795-426c-b90c-bd25dec9386e"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 16:18:19 crc kubenswrapper[5050]: I1123 16:18:19.276671 5050 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0e91873a-a795-426c-b90c-bd25dec9386e-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 23 16:18:19 crc kubenswrapper[5050]: I1123 16:18:19.283074 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0e91873a-a795-426c-b90c-bd25dec9386e-kube-api-access-pm2wd" (OuterVolumeSpecName: "kube-api-access-pm2wd") pod "0e91873a-a795-426c-b90c-bd25dec9386e" (UID: "0e91873a-a795-426c-b90c-bd25dec9386e"). InnerVolumeSpecName "kube-api-access-pm2wd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 16:18:19 crc kubenswrapper[5050]: I1123 16:18:19.378027 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pm2wd\" (UniqueName: \"kubernetes.io/projected/0e91873a-a795-426c-b90c-bd25dec9386e-kube-api-access-pm2wd\") on node \"crc\" DevicePath \"\"" Nov 23 16:18:19 crc kubenswrapper[5050]: I1123 16:18:19.747788 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-add8-account-create-hgsgf" event={"ID":"0e91873a-a795-426c-b90c-bd25dec9386e","Type":"ContainerDied","Data":"208c3f7f6f83f43396c0839701d0a7ca6cbfb7a7e191c810d4be3d84fe78c401"} Nov 23 16:18:19 crc kubenswrapper[5050]: I1123 16:18:19.747851 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="208c3f7f6f83f43396c0839701d0a7ca6cbfb7a7e191c810d4be3d84fe78c401" Nov 23 16:18:19 crc kubenswrapper[5050]: I1123 16:18:19.747901 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-add8-account-create-hgsgf" Nov 23 16:18:22 crc kubenswrapper[5050]: I1123 16:18:22.120196 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-persistence-db-create-n99mq"] Nov 23 16:18:22 crc kubenswrapper[5050]: E1123 16:18:22.121087 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0e91873a-a795-426c-b90c-bd25dec9386e" containerName="mariadb-account-create" Nov 23 16:18:22 crc kubenswrapper[5050]: I1123 16:18:22.121107 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="0e91873a-a795-426c-b90c-bd25dec9386e" containerName="mariadb-account-create" Nov 23 16:18:22 crc kubenswrapper[5050]: E1123 16:18:22.121134 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5ba0989c-deb9-4516-9d8b-8e59e0565cd1" containerName="mariadb-database-create" Nov 23 16:18:22 crc kubenswrapper[5050]: I1123 16:18:22.121140 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="5ba0989c-deb9-4516-9d8b-8e59e0565cd1" containerName="mariadb-database-create" Nov 23 16:18:22 crc kubenswrapper[5050]: I1123 16:18:22.121370 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="0e91873a-a795-426c-b90c-bd25dec9386e" containerName="mariadb-account-create" Nov 23 16:18:22 crc kubenswrapper[5050]: I1123 16:18:22.121387 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="5ba0989c-deb9-4516-9d8b-8e59e0565cd1" containerName="mariadb-database-create" Nov 23 16:18:22 crc kubenswrapper[5050]: I1123 16:18:22.122192 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-persistence-db-create-n99mq" Nov 23 16:18:22 crc kubenswrapper[5050]: I1123 16:18:22.140802 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-persistence-db-create-n99mq"] Nov 23 16:18:22 crc kubenswrapper[5050]: I1123 16:18:22.148208 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7w8d8\" (UniqueName: \"kubernetes.io/projected/b33829ce-bce6-479e-915b-aee1ffe96e5f-kube-api-access-7w8d8\") pod \"octavia-persistence-db-create-n99mq\" (UID: \"b33829ce-bce6-479e-915b-aee1ffe96e5f\") " pod="openstack/octavia-persistence-db-create-n99mq" Nov 23 16:18:22 crc kubenswrapper[5050]: I1123 16:18:22.148276 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b33829ce-bce6-479e-915b-aee1ffe96e5f-operator-scripts\") pod \"octavia-persistence-db-create-n99mq\" (UID: \"b33829ce-bce6-479e-915b-aee1ffe96e5f\") " pod="openstack/octavia-persistence-db-create-n99mq" Nov 23 16:18:22 crc kubenswrapper[5050]: I1123 16:18:22.251559 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7w8d8\" (UniqueName: \"kubernetes.io/projected/b33829ce-bce6-479e-915b-aee1ffe96e5f-kube-api-access-7w8d8\") pod \"octavia-persistence-db-create-n99mq\" (UID: \"b33829ce-bce6-479e-915b-aee1ffe96e5f\") " pod="openstack/octavia-persistence-db-create-n99mq" Nov 23 16:18:22 crc kubenswrapper[5050]: I1123 16:18:22.251641 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b33829ce-bce6-479e-915b-aee1ffe96e5f-operator-scripts\") pod \"octavia-persistence-db-create-n99mq\" (UID: \"b33829ce-bce6-479e-915b-aee1ffe96e5f\") " pod="openstack/octavia-persistence-db-create-n99mq" Nov 23 16:18:22 crc kubenswrapper[5050]: I1123 16:18:22.252990 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b33829ce-bce6-479e-915b-aee1ffe96e5f-operator-scripts\") pod \"octavia-persistence-db-create-n99mq\" (UID: \"b33829ce-bce6-479e-915b-aee1ffe96e5f\") " pod="openstack/octavia-persistence-db-create-n99mq" Nov 23 16:18:22 crc kubenswrapper[5050]: I1123 16:18:22.272292 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7w8d8\" (UniqueName: \"kubernetes.io/projected/b33829ce-bce6-479e-915b-aee1ffe96e5f-kube-api-access-7w8d8\") pod \"octavia-persistence-db-create-n99mq\" (UID: \"b33829ce-bce6-479e-915b-aee1ffe96e5f\") " pod="openstack/octavia-persistence-db-create-n99mq" Nov 23 16:18:22 crc kubenswrapper[5050]: I1123 16:18:22.443529 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-persistence-db-create-n99mq" Nov 23 16:18:22 crc kubenswrapper[5050]: I1123 16:18:22.614090 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-4b68-account-create-4466z"] Nov 23 16:18:22 crc kubenswrapper[5050]: I1123 16:18:22.616480 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-4b68-account-create-4466z" Nov 23 16:18:22 crc kubenswrapper[5050]: I1123 16:18:22.624894 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-4b68-account-create-4466z"] Nov 23 16:18:22 crc kubenswrapper[5050]: I1123 16:18:22.630492 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-persistence-db-secret" Nov 23 16:18:22 crc kubenswrapper[5050]: I1123 16:18:22.661890 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-87hln\" (UniqueName: \"kubernetes.io/projected/e6abf46b-e82a-404d-ae3b-5adddf6f4960-kube-api-access-87hln\") pod \"octavia-4b68-account-create-4466z\" (UID: \"e6abf46b-e82a-404d-ae3b-5adddf6f4960\") " pod="openstack/octavia-4b68-account-create-4466z" Nov 23 16:18:22 crc kubenswrapper[5050]: I1123 16:18:22.661955 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e6abf46b-e82a-404d-ae3b-5adddf6f4960-operator-scripts\") pod \"octavia-4b68-account-create-4466z\" (UID: \"e6abf46b-e82a-404d-ae3b-5adddf6f4960\") " pod="openstack/octavia-4b68-account-create-4466z" Nov 23 16:18:22 crc kubenswrapper[5050]: I1123 16:18:22.764596 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e6abf46b-e82a-404d-ae3b-5adddf6f4960-operator-scripts\") pod \"octavia-4b68-account-create-4466z\" (UID: \"e6abf46b-e82a-404d-ae3b-5adddf6f4960\") " pod="openstack/octavia-4b68-account-create-4466z" Nov 23 16:18:22 crc kubenswrapper[5050]: I1123 16:18:22.765008 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-87hln\" (UniqueName: \"kubernetes.io/projected/e6abf46b-e82a-404d-ae3b-5adddf6f4960-kube-api-access-87hln\") pod \"octavia-4b68-account-create-4466z\" (UID: \"e6abf46b-e82a-404d-ae3b-5adddf6f4960\") " pod="openstack/octavia-4b68-account-create-4466z" Nov 23 16:18:22 crc kubenswrapper[5050]: I1123 16:18:22.765582 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e6abf46b-e82a-404d-ae3b-5adddf6f4960-operator-scripts\") pod \"octavia-4b68-account-create-4466z\" (UID: \"e6abf46b-e82a-404d-ae3b-5adddf6f4960\") " pod="openstack/octavia-4b68-account-create-4466z" Nov 23 16:18:22 crc kubenswrapper[5050]: I1123 16:18:22.816672 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-87hln\" (UniqueName: \"kubernetes.io/projected/e6abf46b-e82a-404d-ae3b-5adddf6f4960-kube-api-access-87hln\") pod \"octavia-4b68-account-create-4466z\" (UID: \"e6abf46b-e82a-404d-ae3b-5adddf6f4960\") " pod="openstack/octavia-4b68-account-create-4466z" Nov 23 16:18:22 crc kubenswrapper[5050]: I1123 16:18:22.951252 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-4b68-account-create-4466z" Nov 23 16:18:22 crc kubenswrapper[5050]: I1123 16:18:22.969067 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-persistence-db-create-n99mq"] Nov 23 16:18:23 crc kubenswrapper[5050]: I1123 16:18:23.533636 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-4b68-account-create-4466z"] Nov 23 16:18:23 crc kubenswrapper[5050]: W1123 16:18:23.537652 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode6abf46b_e82a_404d_ae3b_5adddf6f4960.slice/crio-d842e0799539d080072031131e56aafc245dda3efe86cacd49e44b15e9030d5e WatchSource:0}: Error finding container d842e0799539d080072031131e56aafc245dda3efe86cacd49e44b15e9030d5e: Status 404 returned error can't find the container with id d842e0799539d080072031131e56aafc245dda3efe86cacd49e44b15e9030d5e Nov 23 16:18:23 crc kubenswrapper[5050]: I1123 16:18:23.810078 5050 generic.go:334] "Generic (PLEG): container finished" podID="b33829ce-bce6-479e-915b-aee1ffe96e5f" containerID="4229c28564dcdb076c51d9999be27df633bb55ec6c736898cc5b93c7ae28a14a" exitCode=0 Nov 23 16:18:23 crc kubenswrapper[5050]: I1123 16:18:23.810721 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-persistence-db-create-n99mq" event={"ID":"b33829ce-bce6-479e-915b-aee1ffe96e5f","Type":"ContainerDied","Data":"4229c28564dcdb076c51d9999be27df633bb55ec6c736898cc5b93c7ae28a14a"} Nov 23 16:18:23 crc kubenswrapper[5050]: I1123 16:18:23.810763 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-persistence-db-create-n99mq" event={"ID":"b33829ce-bce6-479e-915b-aee1ffe96e5f","Type":"ContainerStarted","Data":"7c8ede479f03d1390e0872000e793d0595af738d7939adedcba5fa355a91d6da"} Nov 23 16:18:23 crc kubenswrapper[5050]: I1123 16:18:23.813940 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-4b68-account-create-4466z" event={"ID":"e6abf46b-e82a-404d-ae3b-5adddf6f4960","Type":"ContainerStarted","Data":"cb88b2e579978ea61e7fee63962165a38d9a7985b737800808b9cc9ed8718904"} Nov 23 16:18:23 crc kubenswrapper[5050]: I1123 16:18:23.814009 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-4b68-account-create-4466z" event={"ID":"e6abf46b-e82a-404d-ae3b-5adddf6f4960","Type":"ContainerStarted","Data":"d842e0799539d080072031131e56aafc245dda3efe86cacd49e44b15e9030d5e"} Nov 23 16:18:23 crc kubenswrapper[5050]: I1123 16:18:23.868537 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/octavia-4b68-account-create-4466z" podStartSLOduration=1.868514746 podStartE2EDuration="1.868514746s" podCreationTimestamp="2025-11-23 16:18:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 16:18:23.861957681 +0000 UTC m=+5799.028954196" watchObservedRunningTime="2025-11-23 16:18:23.868514746 +0000 UTC m=+5799.035511241" Nov 23 16:18:24 crc kubenswrapper[5050]: I1123 16:18:24.853586 5050 generic.go:334] "Generic (PLEG): container finished" podID="e6abf46b-e82a-404d-ae3b-5adddf6f4960" containerID="cb88b2e579978ea61e7fee63962165a38d9a7985b737800808b9cc9ed8718904" exitCode=0 Nov 23 16:18:24 crc kubenswrapper[5050]: I1123 16:18:24.853675 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-4b68-account-create-4466z" 
event={"ID":"e6abf46b-e82a-404d-ae3b-5adddf6f4960","Type":"ContainerDied","Data":"cb88b2e579978ea61e7fee63962165a38d9a7985b737800808b9cc9ed8718904"} Nov 23 16:18:25 crc kubenswrapper[5050]: I1123 16:18:25.239317 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-persistence-db-create-n99mq" Nov 23 16:18:25 crc kubenswrapper[5050]: I1123 16:18:25.365370 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7w8d8\" (UniqueName: \"kubernetes.io/projected/b33829ce-bce6-479e-915b-aee1ffe96e5f-kube-api-access-7w8d8\") pod \"b33829ce-bce6-479e-915b-aee1ffe96e5f\" (UID: \"b33829ce-bce6-479e-915b-aee1ffe96e5f\") " Nov 23 16:18:25 crc kubenswrapper[5050]: I1123 16:18:25.365507 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b33829ce-bce6-479e-915b-aee1ffe96e5f-operator-scripts\") pod \"b33829ce-bce6-479e-915b-aee1ffe96e5f\" (UID: \"b33829ce-bce6-479e-915b-aee1ffe96e5f\") " Nov 23 16:18:25 crc kubenswrapper[5050]: I1123 16:18:25.366285 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b33829ce-bce6-479e-915b-aee1ffe96e5f-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "b33829ce-bce6-479e-915b-aee1ffe96e5f" (UID: "b33829ce-bce6-479e-915b-aee1ffe96e5f"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 16:18:25 crc kubenswrapper[5050]: I1123 16:18:25.374381 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b33829ce-bce6-479e-915b-aee1ffe96e5f-kube-api-access-7w8d8" (OuterVolumeSpecName: "kube-api-access-7w8d8") pod "b33829ce-bce6-479e-915b-aee1ffe96e5f" (UID: "b33829ce-bce6-479e-915b-aee1ffe96e5f"). InnerVolumeSpecName "kube-api-access-7w8d8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 16:18:25 crc kubenswrapper[5050]: I1123 16:18:25.468065 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7w8d8\" (UniqueName: \"kubernetes.io/projected/b33829ce-bce6-479e-915b-aee1ffe96e5f-kube-api-access-7w8d8\") on node \"crc\" DevicePath \"\"" Nov 23 16:18:25 crc kubenswrapper[5050]: I1123 16:18:25.468122 5050 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b33829ce-bce6-479e-915b-aee1ffe96e5f-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 23 16:18:25 crc kubenswrapper[5050]: E1123 16:18:25.660005 5050 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb33829ce_bce6_479e_915b_aee1ffe96e5f.slice\": RecentStats: unable to find data in memory cache]" Nov 23 16:18:25 crc kubenswrapper[5050]: I1123 16:18:25.879727 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-persistence-db-create-n99mq" event={"ID":"b33829ce-bce6-479e-915b-aee1ffe96e5f","Type":"ContainerDied","Data":"7c8ede479f03d1390e0872000e793d0595af738d7939adedcba5fa355a91d6da"} Nov 23 16:18:25 crc kubenswrapper[5050]: I1123 16:18:25.880115 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7c8ede479f03d1390e0872000e793d0595af738d7939adedcba5fa355a91d6da" Nov 23 16:18:25 crc kubenswrapper[5050]: I1123 16:18:25.879798 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-persistence-db-create-n99mq" Nov 23 16:18:26 crc kubenswrapper[5050]: I1123 16:18:26.357901 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-4b68-account-create-4466z" Nov 23 16:18:26 crc kubenswrapper[5050]: I1123 16:18:26.512985 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-87hln\" (UniqueName: \"kubernetes.io/projected/e6abf46b-e82a-404d-ae3b-5adddf6f4960-kube-api-access-87hln\") pod \"e6abf46b-e82a-404d-ae3b-5adddf6f4960\" (UID: \"e6abf46b-e82a-404d-ae3b-5adddf6f4960\") " Nov 23 16:18:26 crc kubenswrapper[5050]: I1123 16:18:26.513332 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e6abf46b-e82a-404d-ae3b-5adddf6f4960-operator-scripts\") pod \"e6abf46b-e82a-404d-ae3b-5adddf6f4960\" (UID: \"e6abf46b-e82a-404d-ae3b-5adddf6f4960\") " Nov 23 16:18:26 crc kubenswrapper[5050]: I1123 16:18:26.515217 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e6abf46b-e82a-404d-ae3b-5adddf6f4960-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "e6abf46b-e82a-404d-ae3b-5adddf6f4960" (UID: "e6abf46b-e82a-404d-ae3b-5adddf6f4960"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 16:18:26 crc kubenswrapper[5050]: I1123 16:18:26.522536 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e6abf46b-e82a-404d-ae3b-5adddf6f4960-kube-api-access-87hln" (OuterVolumeSpecName: "kube-api-access-87hln") pod "e6abf46b-e82a-404d-ae3b-5adddf6f4960" (UID: "e6abf46b-e82a-404d-ae3b-5adddf6f4960"). InnerVolumeSpecName "kube-api-access-87hln". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 16:18:26 crc kubenswrapper[5050]: I1123 16:18:26.616456 5050 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e6abf46b-e82a-404d-ae3b-5adddf6f4960-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 23 16:18:26 crc kubenswrapper[5050]: I1123 16:18:26.616514 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-87hln\" (UniqueName: \"kubernetes.io/projected/e6abf46b-e82a-404d-ae3b-5adddf6f4960-kube-api-access-87hln\") on node \"crc\" DevicePath \"\"" Nov 23 16:18:26 crc kubenswrapper[5050]: I1123 16:18:26.912192 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-4b68-account-create-4466z" event={"ID":"e6abf46b-e82a-404d-ae3b-5adddf6f4960","Type":"ContainerDied","Data":"d842e0799539d080072031131e56aafc245dda3efe86cacd49e44b15e9030d5e"} Nov 23 16:18:26 crc kubenswrapper[5050]: I1123 16:18:26.913595 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d842e0799539d080072031131e56aafc245dda3efe86cacd49e44b15e9030d5e" Nov 23 16:18:26 crc kubenswrapper[5050]: I1123 16:18:26.912946 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-4b68-account-create-4466z" Nov 23 16:18:27 crc kubenswrapper[5050]: I1123 16:18:27.960450 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-api-78c74877d6-ndfk4"] Nov 23 16:18:27 crc kubenswrapper[5050]: E1123 16:18:27.962621 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e6abf46b-e82a-404d-ae3b-5adddf6f4960" containerName="mariadb-account-create" Nov 23 16:18:27 crc kubenswrapper[5050]: I1123 16:18:27.962637 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="e6abf46b-e82a-404d-ae3b-5adddf6f4960" containerName="mariadb-account-create" Nov 23 16:18:27 crc kubenswrapper[5050]: E1123 16:18:27.962670 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b33829ce-bce6-479e-915b-aee1ffe96e5f" containerName="mariadb-database-create" Nov 23 16:18:27 crc kubenswrapper[5050]: I1123 16:18:27.962676 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="b33829ce-bce6-479e-915b-aee1ffe96e5f" containerName="mariadb-database-create" Nov 23 16:18:27 crc kubenswrapper[5050]: I1123 16:18:27.962881 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="e6abf46b-e82a-404d-ae3b-5adddf6f4960" containerName="mariadb-account-create" Nov 23 16:18:27 crc kubenswrapper[5050]: I1123 16:18:27.962914 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="b33829ce-bce6-479e-915b-aee1ffe96e5f" containerName="mariadb-database-create" Nov 23 16:18:27 crc kubenswrapper[5050]: I1123 16:18:27.967747 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-api-78c74877d6-ndfk4" Nov 23 16:18:27 crc kubenswrapper[5050]: I1123 16:18:27.970707 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-api-config-data" Nov 23 16:18:27 crc kubenswrapper[5050]: I1123 16:18:27.971163 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-api-scripts" Nov 23 16:18:27 crc kubenswrapper[5050]: I1123 16:18:27.977717 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-api-78c74877d6-ndfk4"] Nov 23 16:18:27 crc kubenswrapper[5050]: I1123 16:18:27.980111 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-octavia-dockercfg-2wjsc" Nov 23 16:18:28 crc kubenswrapper[5050]: I1123 16:18:28.057162 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/45c153e5-8ce2-49a3-8476-eb9cb8a406ab-config-data\") pod \"octavia-api-78c74877d6-ndfk4\" (UID: \"45c153e5-8ce2-49a3-8476-eb9cb8a406ab\") " pod="openstack/octavia-api-78c74877d6-ndfk4" Nov 23 16:18:28 crc kubenswrapper[5050]: I1123 16:18:28.057255 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/45c153e5-8ce2-49a3-8476-eb9cb8a406ab-config-data-merged\") pod \"octavia-api-78c74877d6-ndfk4\" (UID: \"45c153e5-8ce2-49a3-8476-eb9cb8a406ab\") " pod="openstack/octavia-api-78c74877d6-ndfk4" Nov 23 16:18:28 crc kubenswrapper[5050]: I1123 16:18:28.057306 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"octavia-run\" (UniqueName: \"kubernetes.io/empty-dir/45c153e5-8ce2-49a3-8476-eb9cb8a406ab-octavia-run\") pod \"octavia-api-78c74877d6-ndfk4\" (UID: \"45c153e5-8ce2-49a3-8476-eb9cb8a406ab\") " pod="openstack/octavia-api-78c74877d6-ndfk4" 
Nov 23 16:18:28 crc kubenswrapper[5050]: I1123 16:18:28.057337 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/45c153e5-8ce2-49a3-8476-eb9cb8a406ab-scripts\") pod \"octavia-api-78c74877d6-ndfk4\" (UID: \"45c153e5-8ce2-49a3-8476-eb9cb8a406ab\") " pod="openstack/octavia-api-78c74877d6-ndfk4"
Nov 23 16:18:28 crc kubenswrapper[5050]: I1123 16:18:28.057363 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/45c153e5-8ce2-49a3-8476-eb9cb8a406ab-combined-ca-bundle\") pod \"octavia-api-78c74877d6-ndfk4\" (UID: \"45c153e5-8ce2-49a3-8476-eb9cb8a406ab\") " pod="openstack/octavia-api-78c74877d6-ndfk4"
Nov 23 16:18:28 crc kubenswrapper[5050]: I1123 16:18:28.159936 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/45c153e5-8ce2-49a3-8476-eb9cb8a406ab-scripts\") pod \"octavia-api-78c74877d6-ndfk4\" (UID: \"45c153e5-8ce2-49a3-8476-eb9cb8a406ab\") " pod="openstack/octavia-api-78c74877d6-ndfk4"
Nov 23 16:18:28 crc kubenswrapper[5050]: I1123 16:18:28.160015 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/45c153e5-8ce2-49a3-8476-eb9cb8a406ab-combined-ca-bundle\") pod \"octavia-api-78c74877d6-ndfk4\" (UID: \"45c153e5-8ce2-49a3-8476-eb9cb8a406ab\") " pod="openstack/octavia-api-78c74877d6-ndfk4"
Nov 23 16:18:28 crc kubenswrapper[5050]: I1123 16:18:28.160126 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/45c153e5-8ce2-49a3-8476-eb9cb8a406ab-config-data\") pod \"octavia-api-78c74877d6-ndfk4\" (UID: \"45c153e5-8ce2-49a3-8476-eb9cb8a406ab\") " pod="openstack/octavia-api-78c74877d6-ndfk4"
Nov 23 16:18:28 crc kubenswrapper[5050]: I1123 16:18:28.160199 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/45c153e5-8ce2-49a3-8476-eb9cb8a406ab-config-data-merged\") pod \"octavia-api-78c74877d6-ndfk4\" (UID: \"45c153e5-8ce2-49a3-8476-eb9cb8a406ab\") " pod="openstack/octavia-api-78c74877d6-ndfk4"
Nov 23 16:18:28 crc kubenswrapper[5050]: I1123 16:18:28.160251 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"octavia-run\" (UniqueName: \"kubernetes.io/empty-dir/45c153e5-8ce2-49a3-8476-eb9cb8a406ab-octavia-run\") pod \"octavia-api-78c74877d6-ndfk4\" (UID: \"45c153e5-8ce2-49a3-8476-eb9cb8a406ab\") " pod="openstack/octavia-api-78c74877d6-ndfk4"
Nov 23 16:18:28 crc kubenswrapper[5050]: I1123 16:18:28.160741 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"octavia-run\" (UniqueName: \"kubernetes.io/empty-dir/45c153e5-8ce2-49a3-8476-eb9cb8a406ab-octavia-run\") pod \"octavia-api-78c74877d6-ndfk4\" (UID: \"45c153e5-8ce2-49a3-8476-eb9cb8a406ab\") " pod="openstack/octavia-api-78c74877d6-ndfk4"
Nov 23 16:18:28 crc kubenswrapper[5050]: I1123 16:18:28.160792 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/45c153e5-8ce2-49a3-8476-eb9cb8a406ab-config-data-merged\") pod \"octavia-api-78c74877d6-ndfk4\" (UID: \"45c153e5-8ce2-49a3-8476-eb9cb8a406ab\") " pod="openstack/octavia-api-78c74877d6-ndfk4"
Nov 23 16:18:28 crc kubenswrapper[5050]: I1123 16:18:28.166715 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/45c153e5-8ce2-49a3-8476-eb9cb8a406ab-combined-ca-bundle\") pod \"octavia-api-78c74877d6-ndfk4\" (UID: \"45c153e5-8ce2-49a3-8476-eb9cb8a406ab\") " pod="openstack/octavia-api-78c74877d6-ndfk4"
Nov 23 16:18:28 crc kubenswrapper[5050]: I1123 16:18:28.171087 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/45c153e5-8ce2-49a3-8476-eb9cb8a406ab-config-data\") pod \"octavia-api-78c74877d6-ndfk4\" (UID: \"45c153e5-8ce2-49a3-8476-eb9cb8a406ab\") " pod="openstack/octavia-api-78c74877d6-ndfk4"
Nov 23 16:18:28 crc kubenswrapper[5050]: I1123 16:18:28.178839 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/45c153e5-8ce2-49a3-8476-eb9cb8a406ab-scripts\") pod \"octavia-api-78c74877d6-ndfk4\" (UID: \"45c153e5-8ce2-49a3-8476-eb9cb8a406ab\") " pod="openstack/octavia-api-78c74877d6-ndfk4"
Nov 23 16:18:28 crc kubenswrapper[5050]: I1123 16:18:28.305340 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-api-78c74877d6-ndfk4"
Nov 23 16:18:28 crc kubenswrapper[5050]: I1123 16:18:28.548955 5050 scope.go:117] "RemoveContainer" containerID="131b4bb8956deb8c3bd7bd951e055fca3b66c01a0141c3f85cb0b1152065de00"
Nov 23 16:18:28 crc kubenswrapper[5050]: E1123 16:18:28.549636 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f"
Nov 23 16:18:28 crc kubenswrapper[5050]: I1123 16:18:28.879284 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-api-78c74877d6-ndfk4"]
Nov 23 16:18:28 crc kubenswrapper[5050]: W1123 16:18:28.889773 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod45c153e5_8ce2_49a3_8476_eb9cb8a406ab.slice/crio-bc9a2a1f1ae7b13328793158552782e7d040a6e7835cd96e06aa0639ff883bad WatchSource:0}: Error finding container bc9a2a1f1ae7b13328793158552782e7d040a6e7835cd96e06aa0639ff883bad: Status 404 returned error can't find the container with id bc9a2a1f1ae7b13328793158552782e7d040a6e7835cd96e06aa0639ff883bad
Nov 23 16:18:28 crc kubenswrapper[5050]: I1123 16:18:28.938914 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-api-78c74877d6-ndfk4" event={"ID":"45c153e5-8ce2-49a3-8476-eb9cb8a406ab","Type":"ContainerStarted","Data":"bc9a2a1f1ae7b13328793158552782e7d040a6e7835cd96e06aa0639ff883bad"}
Nov 23 16:18:39 crc kubenswrapper[5050]: I1123 16:18:39.552142 5050 scope.go:117] "RemoveContainer" containerID="131b4bb8956deb8c3bd7bd951e055fca3b66c01a0141c3f85cb0b1152065de00"
Nov 23 16:18:39 crc kubenswrapper[5050]: E1123 16:18:39.553153 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f"
Nov 23 16:18:40 crc kubenswrapper[5050]: I1123 16:18:40.108533 5050 generic.go:334] "Generic (PLEG): container finished" podID="45c153e5-8ce2-49a3-8476-eb9cb8a406ab" containerID="f4962f87a96d287c832196fd1c28949347aac39042cd6fa26c4975d4997873a2" exitCode=0
Nov 23 16:18:40 crc kubenswrapper[5050]: I1123 16:18:40.108977 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-api-78c74877d6-ndfk4" event={"ID":"45c153e5-8ce2-49a3-8476-eb9cb8a406ab","Type":"ContainerDied","Data":"f4962f87a96d287c832196fd1c28949347aac39042cd6fa26c4975d4997873a2"}
Nov 23 16:18:41 crc kubenswrapper[5050]: I1123 16:18:41.122538 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-api-78c74877d6-ndfk4" event={"ID":"45c153e5-8ce2-49a3-8476-eb9cb8a406ab","Type":"ContainerStarted","Data":"d689020f31ea261d1ea966fe3f3548f4edbf0f2276ce637fe4998c1e4c10ce40"}
Nov 23 16:18:41 crc kubenswrapper[5050]: I1123 16:18:41.122906 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-api-78c74877d6-ndfk4" event={"ID":"45c153e5-8ce2-49a3-8476-eb9cb8a406ab","Type":"ContainerStarted","Data":"1f1c270b655f95c5ab54da9d0c648905d1808adedc0d0043841890cce71528b9"}
Nov 23 16:18:41 crc kubenswrapper[5050]: I1123 16:18:41.122969 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/octavia-api-78c74877d6-ndfk4"
Nov 23 16:18:41 crc kubenswrapper[5050]: I1123 16:18:41.124769 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/octavia-api-78c74877d6-ndfk4"
Nov 23 16:18:41 crc kubenswrapper[5050]: I1123 16:18:41.146055 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/octavia-api-78c74877d6-ndfk4" podStartSLOduration=3.564005288 podStartE2EDuration="14.146037591s" podCreationTimestamp="2025-11-23 16:18:27 +0000 UTC" firstStartedPulling="2025-11-23 16:18:28.892996626 +0000 UTC m=+5804.059993111" lastFinishedPulling="2025-11-23 16:18:39.475028929 +0000 UTC m=+5814.642025414" observedRunningTime="2025-11-23 16:18:41.14389198 +0000 UTC m=+5816.310888465" watchObservedRunningTime="2025-11-23 16:18:41.146037591 +0000 UTC m=+5816.313034076"
Nov 23 16:18:47 crc kubenswrapper[5050]: I1123 16:18:47.326231 5050 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-jzkdv" podUID="67fb633c-03ee-4b5f-acd1-930b6112cd19" containerName="ovn-controller" probeResult="failure" output=<
Nov 23 16:18:47 crc kubenswrapper[5050]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status
Nov 23 16:18:47 crc kubenswrapper[5050]: >
Nov 23 16:18:47 crc kubenswrapper[5050]: I1123 16:18:47.445108 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-4r95w"
Nov 23 16:18:47 crc kubenswrapper[5050]: I1123 16:18:47.452270 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-4r95w"
Nov 23 16:18:47 crc kubenswrapper[5050]: I1123 16:18:47.613377 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-jzkdv-config-v6b45"]
Nov 23 16:18:47 crc kubenswrapper[5050]: I1123 16:18:47.615066 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-jzkdv-config-v6b45"
Nov 23 16:18:47 crc kubenswrapper[5050]: I1123 16:18:47.618651 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts"
Nov 23 16:18:47 crc kubenswrapper[5050]: I1123 16:18:47.629062 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-jzkdv-config-v6b45"]
Nov 23 16:18:47 crc kubenswrapper[5050]: I1123 16:18:47.640505 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/556f4ebe-433e-4728-a823-cab0ce9e4610-var-run\") pod \"ovn-controller-jzkdv-config-v6b45\" (UID: \"556f4ebe-433e-4728-a823-cab0ce9e4610\") " pod="openstack/ovn-controller-jzkdv-config-v6b45"
Nov 23 16:18:47 crc kubenswrapper[5050]: I1123 16:18:47.640580 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/556f4ebe-433e-4728-a823-cab0ce9e4610-var-run-ovn\") pod \"ovn-controller-jzkdv-config-v6b45\" (UID: \"556f4ebe-433e-4728-a823-cab0ce9e4610\") " pod="openstack/ovn-controller-jzkdv-config-v6b45"
Nov 23 16:18:47 crc kubenswrapper[5050]: I1123 16:18:47.640619 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/556f4ebe-433e-4728-a823-cab0ce9e4610-scripts\") pod \"ovn-controller-jzkdv-config-v6b45\" (UID: \"556f4ebe-433e-4728-a823-cab0ce9e4610\") " pod="openstack/ovn-controller-jzkdv-config-v6b45"
Nov 23 16:18:47 crc kubenswrapper[5050]: I1123 16:18:47.640894 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/556f4ebe-433e-4728-a823-cab0ce9e4610-var-log-ovn\") pod \"ovn-controller-jzkdv-config-v6b45\" (UID: \"556f4ebe-433e-4728-a823-cab0ce9e4610\") " pod="openstack/ovn-controller-jzkdv-config-v6b45"
Nov 23 16:18:47 crc kubenswrapper[5050]: I1123 16:18:47.640979 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/556f4ebe-433e-4728-a823-cab0ce9e4610-additional-scripts\") pod \"ovn-controller-jzkdv-config-v6b45\" (UID: \"556f4ebe-433e-4728-a823-cab0ce9e4610\") " pod="openstack/ovn-controller-jzkdv-config-v6b45"
Nov 23 16:18:47 crc kubenswrapper[5050]: I1123 16:18:47.641089 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7mrbx\" (UniqueName: \"kubernetes.io/projected/556f4ebe-433e-4728-a823-cab0ce9e4610-kube-api-access-7mrbx\") pod \"ovn-controller-jzkdv-config-v6b45\" (UID: \"556f4ebe-433e-4728-a823-cab0ce9e4610\") " pod="openstack/ovn-controller-jzkdv-config-v6b45"
Nov 23 16:18:47 crc kubenswrapper[5050]: I1123 16:18:47.743612 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/556f4ebe-433e-4728-a823-cab0ce9e4610-var-run\") pod \"ovn-controller-jzkdv-config-v6b45\" (UID: \"556f4ebe-433e-4728-a823-cab0ce9e4610\") " pod="openstack/ovn-controller-jzkdv-config-v6b45"
Nov 23 16:18:47 crc kubenswrapper[5050]: I1123 16:18:47.743716 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/556f4ebe-433e-4728-a823-cab0ce9e4610-var-run-ovn\") pod \"ovn-controller-jzkdv-config-v6b45\" (UID: \"556f4ebe-433e-4728-a823-cab0ce9e4610\") " pod="openstack/ovn-controller-jzkdv-config-v6b45"
Nov 23 16:18:47 crc kubenswrapper[5050]: I1123 16:18:47.743756 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/556f4ebe-433e-4728-a823-cab0ce9e4610-scripts\") pod \"ovn-controller-jzkdv-config-v6b45\" (UID: \"556f4ebe-433e-4728-a823-cab0ce9e4610\") " pod="openstack/ovn-controller-jzkdv-config-v6b45"
Nov 23 16:18:47 crc kubenswrapper[5050]: I1123 16:18:47.743820 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/556f4ebe-433e-4728-a823-cab0ce9e4610-var-log-ovn\") pod \"ovn-controller-jzkdv-config-v6b45\" (UID: \"556f4ebe-433e-4728-a823-cab0ce9e4610\") " pod="openstack/ovn-controller-jzkdv-config-v6b45"
Nov 23 16:18:47 crc kubenswrapper[5050]: I1123 16:18:47.743861 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/556f4ebe-433e-4728-a823-cab0ce9e4610-additional-scripts\") pod \"ovn-controller-jzkdv-config-v6b45\" (UID: \"556f4ebe-433e-4728-a823-cab0ce9e4610\") " pod="openstack/ovn-controller-jzkdv-config-v6b45"
Nov 23 16:18:47 crc kubenswrapper[5050]: I1123 16:18:47.743917 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7mrbx\" (UniqueName: \"kubernetes.io/projected/556f4ebe-433e-4728-a823-cab0ce9e4610-kube-api-access-7mrbx\") pod \"ovn-controller-jzkdv-config-v6b45\" (UID: \"556f4ebe-433e-4728-a823-cab0ce9e4610\") " pod="openstack/ovn-controller-jzkdv-config-v6b45"
Nov 23 16:18:47 crc kubenswrapper[5050]: I1123 16:18:47.744068 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/556f4ebe-433e-4728-a823-cab0ce9e4610-var-run\") pod \"ovn-controller-jzkdv-config-v6b45\" (UID: \"556f4ebe-433e-4728-a823-cab0ce9e4610\") " pod="openstack/ovn-controller-jzkdv-config-v6b45"
Nov 23 16:18:47 crc kubenswrapper[5050]: I1123 16:18:47.744211 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/556f4ebe-433e-4728-a823-cab0ce9e4610-var-run-ovn\") pod \"ovn-controller-jzkdv-config-v6b45\" (UID: \"556f4ebe-433e-4728-a823-cab0ce9e4610\") " pod="openstack/ovn-controller-jzkdv-config-v6b45"
Nov 23 16:18:47 crc kubenswrapper[5050]: I1123 16:18:47.744748 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/556f4ebe-433e-4728-a823-cab0ce9e4610-var-log-ovn\") pod \"ovn-controller-jzkdv-config-v6b45\" (UID: \"556f4ebe-433e-4728-a823-cab0ce9e4610\") " pod="openstack/ovn-controller-jzkdv-config-v6b45"
Nov 23 16:18:47 crc kubenswrapper[5050]: I1123 16:18:47.752845 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/556f4ebe-433e-4728-a823-cab0ce9e4610-additional-scripts\") pod \"ovn-controller-jzkdv-config-v6b45\" (UID: \"556f4ebe-433e-4728-a823-cab0ce9e4610\") " pod="openstack/ovn-controller-jzkdv-config-v6b45"
Nov 23 16:18:47 crc kubenswrapper[5050]: I1123 16:18:47.754288 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/556f4ebe-433e-4728-a823-cab0ce9e4610-scripts\") pod \"ovn-controller-jzkdv-config-v6b45\" (UID: \"556f4ebe-433e-4728-a823-cab0ce9e4610\") " pod="openstack/ovn-controller-jzkdv-config-v6b45"
Nov 23 16:18:47 crc kubenswrapper[5050]: I1123 16:18:47.792902 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7mrbx\" (UniqueName: \"kubernetes.io/projected/556f4ebe-433e-4728-a823-cab0ce9e4610-kube-api-access-7mrbx\") pod \"ovn-controller-jzkdv-config-v6b45\" (UID: \"556f4ebe-433e-4728-a823-cab0ce9e4610\") " pod="openstack/ovn-controller-jzkdv-config-v6b45"
Nov 23 16:18:47 crc kubenswrapper[5050]: I1123 16:18:47.936614 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-jzkdv-config-v6b45"
Nov 23 16:18:48 crc kubenswrapper[5050]: W1123 16:18:48.411876 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod556f4ebe_433e_4728_a823_cab0ce9e4610.slice/crio-a0d7fa03df47255db7fe90f6bcb3ab5a959a9e7609ab0a26debbda6b894830be WatchSource:0}: Error finding container a0d7fa03df47255db7fe90f6bcb3ab5a959a9e7609ab0a26debbda6b894830be: Status 404 returned error can't find the container with id a0d7fa03df47255db7fe90f6bcb3ab5a959a9e7609ab0a26debbda6b894830be
Nov 23 16:18:48 crc kubenswrapper[5050]: I1123 16:18:48.412843 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-jzkdv-config-v6b45"]
Nov 23 16:18:49 crc kubenswrapper[5050]: I1123 16:18:49.202715 5050 generic.go:334] "Generic (PLEG): container finished" podID="556f4ebe-433e-4728-a823-cab0ce9e4610" containerID="c8f331d053776c911d8203fe6e1908a05a9f73bcb1ac58ffa155103c0bb0463e" exitCode=0
Nov 23 16:18:49 crc kubenswrapper[5050]: I1123 16:18:49.202775 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-jzkdv-config-v6b45" event={"ID":"556f4ebe-433e-4728-a823-cab0ce9e4610","Type":"ContainerDied","Data":"c8f331d053776c911d8203fe6e1908a05a9f73bcb1ac58ffa155103c0bb0463e"}
Nov 23 16:18:49 crc kubenswrapper[5050]: I1123 16:18:49.203017 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-jzkdv-config-v6b45" event={"ID":"556f4ebe-433e-4728-a823-cab0ce9e4610","Type":"ContainerStarted","Data":"a0d7fa03df47255db7fe90f6bcb3ab5a959a9e7609ab0a26debbda6b894830be"}
Nov 23 16:18:50 crc kubenswrapper[5050]: I1123 16:18:50.164087 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-rsyslog-rl599"]
Nov 23 16:18:50 crc kubenswrapper[5050]: I1123 16:18:50.168113 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-rsyslog-rl599"
Nov 23 16:18:50 crc kubenswrapper[5050]: I1123 16:18:50.170489 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-rsyslog-scripts"
Nov 23 16:18:50 crc kubenswrapper[5050]: I1123 16:18:50.172042 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"octavia-hmport-map"
Nov 23 16:18:50 crc kubenswrapper[5050]: I1123 16:18:50.172270 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-rsyslog-config-data"
Nov 23 16:18:50 crc kubenswrapper[5050]: I1123 16:18:50.178598 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-rsyslog-rl599"]
Nov 23 16:18:50 crc kubenswrapper[5050]: I1123 16:18:50.211294 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3a52da4e-bd4e-4faa-99d2-944528a2f797-scripts\") pod \"octavia-rsyslog-rl599\" (UID: \"3a52da4e-bd4e-4faa-99d2-944528a2f797\") " pod="openstack/octavia-rsyslog-rl599"
Nov 23 16:18:50 crc kubenswrapper[5050]: I1123 16:18:50.211404 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/3a52da4e-bd4e-4faa-99d2-944528a2f797-config-data-merged\") pod \"octavia-rsyslog-rl599\" (UID: \"3a52da4e-bd4e-4faa-99d2-944528a2f797\") " pod="openstack/octavia-rsyslog-rl599"
Nov 23 16:18:50 crc kubenswrapper[5050]: I1123 16:18:50.211531 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hm-ports\" (UniqueName: \"kubernetes.io/configmap/3a52da4e-bd4e-4faa-99d2-944528a2f797-hm-ports\") pod \"octavia-rsyslog-rl599\" (UID: \"3a52da4e-bd4e-4faa-99d2-944528a2f797\") " pod="openstack/octavia-rsyslog-rl599"
Nov 23 16:18:50 crc kubenswrapper[5050]: I1123 16:18:50.211741 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3a52da4e-bd4e-4faa-99d2-944528a2f797-config-data\") pod \"octavia-rsyslog-rl599\" (UID: \"3a52da4e-bd4e-4faa-99d2-944528a2f797\") " pod="openstack/octavia-rsyslog-rl599"
Nov 23 16:18:50 crc kubenswrapper[5050]: I1123 16:18:50.313036 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3a52da4e-bd4e-4faa-99d2-944528a2f797-scripts\") pod \"octavia-rsyslog-rl599\" (UID: \"3a52da4e-bd4e-4faa-99d2-944528a2f797\") " pod="openstack/octavia-rsyslog-rl599"
Nov 23 16:18:50 crc kubenswrapper[5050]: I1123 16:18:50.313091 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/3a52da4e-bd4e-4faa-99d2-944528a2f797-config-data-merged\") pod \"octavia-rsyslog-rl599\" (UID: \"3a52da4e-bd4e-4faa-99d2-944528a2f797\") " pod="openstack/octavia-rsyslog-rl599"
Nov 23 16:18:50 crc kubenswrapper[5050]: I1123 16:18:50.313131 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hm-ports\" (UniqueName: \"kubernetes.io/configmap/3a52da4e-bd4e-4faa-99d2-944528a2f797-hm-ports\") pod \"octavia-rsyslog-rl599\" (UID: \"3a52da4e-bd4e-4faa-99d2-944528a2f797\") " pod="openstack/octavia-rsyslog-rl599"
Nov 23 16:18:50 crc kubenswrapper[5050]: I1123 16:18:50.313221 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName:
\"kubernetes.io/secret/3a52da4e-bd4e-4faa-99d2-944528a2f797-config-data\") pod \"octavia-rsyslog-rl599\" (UID: \"3a52da4e-bd4e-4faa-99d2-944528a2f797\") " pod="openstack/octavia-rsyslog-rl599" Nov 23 16:18:50 crc kubenswrapper[5050]: I1123 16:18:50.313839 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/3a52da4e-bd4e-4faa-99d2-944528a2f797-config-data-merged\") pod \"octavia-rsyslog-rl599\" (UID: \"3a52da4e-bd4e-4faa-99d2-944528a2f797\") " pod="openstack/octavia-rsyslog-rl599" Nov 23 16:18:50 crc kubenswrapper[5050]: I1123 16:18:50.314794 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hm-ports\" (UniqueName: \"kubernetes.io/configmap/3a52da4e-bd4e-4faa-99d2-944528a2f797-hm-ports\") pod \"octavia-rsyslog-rl599\" (UID: \"3a52da4e-bd4e-4faa-99d2-944528a2f797\") " pod="openstack/octavia-rsyslog-rl599" Nov 23 16:18:50 crc kubenswrapper[5050]: I1123 16:18:50.335971 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3a52da4e-bd4e-4faa-99d2-944528a2f797-config-data\") pod \"octavia-rsyslog-rl599\" (UID: \"3a52da4e-bd4e-4faa-99d2-944528a2f797\") " pod="openstack/octavia-rsyslog-rl599" Nov 23 16:18:50 crc kubenswrapper[5050]: I1123 16:18:50.343519 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3a52da4e-bd4e-4faa-99d2-944528a2f797-scripts\") pod \"octavia-rsyslog-rl599\" (UID: \"3a52da4e-bd4e-4faa-99d2-944528a2f797\") " pod="openstack/octavia-rsyslog-rl599" Nov 23 16:18:50 crc kubenswrapper[5050]: I1123 16:18:50.490693 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-rsyslog-rl599" Nov 23 16:18:50 crc kubenswrapper[5050]: I1123 16:18:50.685972 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-jzkdv-config-v6b45" Nov 23 16:18:50 crc kubenswrapper[5050]: I1123 16:18:50.725456 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/556f4ebe-433e-4728-a823-cab0ce9e4610-var-run\") pod \"556f4ebe-433e-4728-a823-cab0ce9e4610\" (UID: \"556f4ebe-433e-4728-a823-cab0ce9e4610\") " Nov 23 16:18:50 crc kubenswrapper[5050]: I1123 16:18:50.725549 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/556f4ebe-433e-4728-a823-cab0ce9e4610-var-log-ovn\") pod \"556f4ebe-433e-4728-a823-cab0ce9e4610\" (UID: \"556f4ebe-433e-4728-a823-cab0ce9e4610\") " Nov 23 16:18:50 crc kubenswrapper[5050]: I1123 16:18:50.725689 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/556f4ebe-433e-4728-a823-cab0ce9e4610-additional-scripts\") pod \"556f4ebe-433e-4728-a823-cab0ce9e4610\" (UID: \"556f4ebe-433e-4728-a823-cab0ce9e4610\") " Nov 23 16:18:50 crc kubenswrapper[5050]: I1123 16:18:50.725870 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/556f4ebe-433e-4728-a823-cab0ce9e4610-var-run-ovn\") pod \"556f4ebe-433e-4728-a823-cab0ce9e4610\" (UID: \"556f4ebe-433e-4728-a823-cab0ce9e4610\") " Nov 23 16:18:50 crc kubenswrapper[5050]: I1123 16:18:50.725938 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/556f4ebe-433e-4728-a823-cab0ce9e4610-scripts\") pod \"556f4ebe-433e-4728-a823-cab0ce9e4610\" (UID: \"556f4ebe-433e-4728-a823-cab0ce9e4610\") " Nov 23 16:18:50 crc kubenswrapper[5050]: I1123 16:18:50.725996 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7mrbx\" (UniqueName: \"kubernetes.io/projected/556f4ebe-433e-4728-a823-cab0ce9e4610-kube-api-access-7mrbx\") pod \"556f4ebe-433e-4728-a823-cab0ce9e4610\" (UID: \"556f4ebe-433e-4728-a823-cab0ce9e4610\") " Nov 23 16:18:50 crc kubenswrapper[5050]: I1123 16:18:50.727900 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/556f4ebe-433e-4728-a823-cab0ce9e4610-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "556f4ebe-433e-4728-a823-cab0ce9e4610" (UID: "556f4ebe-433e-4728-a823-cab0ce9e4610"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 23 16:18:50 crc kubenswrapper[5050]: I1123 16:18:50.728099 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/556f4ebe-433e-4728-a823-cab0ce9e4610-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "556f4ebe-433e-4728-a823-cab0ce9e4610" (UID: "556f4ebe-433e-4728-a823-cab0ce9e4610"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 16:18:50 crc kubenswrapper[5050]: I1123 16:18:50.728149 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/556f4ebe-433e-4728-a823-cab0ce9e4610-var-run" (OuterVolumeSpecName: "var-run") pod "556f4ebe-433e-4728-a823-cab0ce9e4610" (UID: "556f4ebe-433e-4728-a823-cab0ce9e4610"). InnerVolumeSpecName "var-run". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 23 16:18:50 crc kubenswrapper[5050]: I1123 16:18:50.728172 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/556f4ebe-433e-4728-a823-cab0ce9e4610-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "556f4ebe-433e-4728-a823-cab0ce9e4610" (UID: "556f4ebe-433e-4728-a823-cab0ce9e4610"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 23 16:18:50 crc kubenswrapper[5050]: I1123 16:18:50.735254 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/556f4ebe-433e-4728-a823-cab0ce9e4610-scripts" (OuterVolumeSpecName: "scripts") pod "556f4ebe-433e-4728-a823-cab0ce9e4610" (UID: "556f4ebe-433e-4728-a823-cab0ce9e4610"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 16:18:50 crc kubenswrapper[5050]: I1123 16:18:50.735758 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/556f4ebe-433e-4728-a823-cab0ce9e4610-kube-api-access-7mrbx" (OuterVolumeSpecName: "kube-api-access-7mrbx") pod "556f4ebe-433e-4728-a823-cab0ce9e4610" (UID: "556f4ebe-433e-4728-a823-cab0ce9e4610"). InnerVolumeSpecName "kube-api-access-7mrbx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 16:18:50 crc kubenswrapper[5050]: I1123 16:18:50.828339 5050 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/556f4ebe-433e-4728-a823-cab0ce9e4610-additional-scripts\") on node \"crc\" DevicePath \"\"" Nov 23 16:18:50 crc kubenswrapper[5050]: I1123 16:18:50.828381 5050 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/556f4ebe-433e-4728-a823-cab0ce9e4610-var-run-ovn\") on node \"crc\" DevicePath \"\"" Nov 23 16:18:50 crc kubenswrapper[5050]: I1123 16:18:50.828392 5050 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/556f4ebe-433e-4728-a823-cab0ce9e4610-scripts\") on node \"crc\" DevicePath \"\"" Nov 23 16:18:50 crc kubenswrapper[5050]: I1123 16:18:50.828403 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7mrbx\" (UniqueName: \"kubernetes.io/projected/556f4ebe-433e-4728-a823-cab0ce9e4610-kube-api-access-7mrbx\") on node \"crc\" DevicePath \"\"" Nov 23 16:18:50 crc kubenswrapper[5050]: I1123 16:18:50.828417 5050 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/556f4ebe-433e-4728-a823-cab0ce9e4610-var-run\") on node \"crc\" DevicePath \"\"" Nov 23 16:18:50 crc kubenswrapper[5050]: I1123 16:18:50.828425 5050 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/556f4ebe-433e-4728-a823-cab0ce9e4610-var-log-ovn\") on node \"crc\" DevicePath \"\"" Nov 23 16:18:50 crc kubenswrapper[5050]: I1123 16:18:50.997697 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-image-upload-59f8cff499-7xrf5"] Nov 23 16:18:50 crc kubenswrapper[5050]: E1123 16:18:50.998498 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="556f4ebe-433e-4728-a823-cab0ce9e4610" containerName="ovn-config" Nov 23 16:18:50 crc kubenswrapper[5050]: I1123 16:18:50.998516 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="556f4ebe-433e-4728-a823-cab0ce9e4610" containerName="ovn-config" Nov 23 16:18:51 crc 
kubenswrapper[5050]: I1123 16:18:50.998718 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="556f4ebe-433e-4728-a823-cab0ce9e4610" containerName="ovn-config" Nov 23 16:18:51 crc kubenswrapper[5050]: I1123 16:18:51.001134 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-image-upload-59f8cff499-7xrf5" Nov 23 16:18:51 crc kubenswrapper[5050]: I1123 16:18:51.007255 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-config-data" Nov 23 16:18:51 crc kubenswrapper[5050]: I1123 16:18:51.019471 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-image-upload-59f8cff499-7xrf5"] Nov 23 16:18:51 crc kubenswrapper[5050]: I1123 16:18:51.033959 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/09e7acda-7cac-4ac8-bde7-befc324d955e-httpd-config\") pod \"octavia-image-upload-59f8cff499-7xrf5\" (UID: \"09e7acda-7cac-4ac8-bde7-befc324d955e\") " pod="openstack/octavia-image-upload-59f8cff499-7xrf5" Nov 23 16:18:51 crc kubenswrapper[5050]: I1123 16:18:51.034419 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"amphora-image\" (UniqueName: \"kubernetes.io/empty-dir/09e7acda-7cac-4ac8-bde7-befc324d955e-amphora-image\") pod \"octavia-image-upload-59f8cff499-7xrf5\" (UID: \"09e7acda-7cac-4ac8-bde7-befc324d955e\") " pod="openstack/octavia-image-upload-59f8cff499-7xrf5" Nov 23 16:18:51 crc kubenswrapper[5050]: I1123 16:18:51.136597 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"amphora-image\" (UniqueName: \"kubernetes.io/empty-dir/09e7acda-7cac-4ac8-bde7-befc324d955e-amphora-image\") pod \"octavia-image-upload-59f8cff499-7xrf5\" (UID: \"09e7acda-7cac-4ac8-bde7-befc324d955e\") " pod="openstack/octavia-image-upload-59f8cff499-7xrf5" Nov 23 16:18:51 crc kubenswrapper[5050]: I1123 16:18:51.136679 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/09e7acda-7cac-4ac8-bde7-befc324d955e-httpd-config\") pod \"octavia-image-upload-59f8cff499-7xrf5\" (UID: \"09e7acda-7cac-4ac8-bde7-befc324d955e\") " pod="openstack/octavia-image-upload-59f8cff499-7xrf5" Nov 23 16:18:51 crc kubenswrapper[5050]: I1123 16:18:51.137309 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"amphora-image\" (UniqueName: \"kubernetes.io/empty-dir/09e7acda-7cac-4ac8-bde7-befc324d955e-amphora-image\") pod \"octavia-image-upload-59f8cff499-7xrf5\" (UID: \"09e7acda-7cac-4ac8-bde7-befc324d955e\") " pod="openstack/octavia-image-upload-59f8cff499-7xrf5" Nov 23 16:18:51 crc kubenswrapper[5050]: I1123 16:18:51.143849 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/09e7acda-7cac-4ac8-bde7-befc324d955e-httpd-config\") pod \"octavia-image-upload-59f8cff499-7xrf5\" (UID: \"09e7acda-7cac-4ac8-bde7-befc324d955e\") " pod="openstack/octavia-image-upload-59f8cff499-7xrf5" Nov 23 16:18:51 crc kubenswrapper[5050]: I1123 16:18:51.155144 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-rsyslog-rl599"] Nov 23 16:18:51 crc kubenswrapper[5050]: W1123 16:18:51.162290 5050 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3a52da4e_bd4e_4faa_99d2_944528a2f797.slice/crio-cf8ac31082a5e6a89e661cb868806892466d6997efaa1147dc8e6ed10e507e06 WatchSource:0}: Error finding container cf8ac31082a5e6a89e661cb868806892466d6997efaa1147dc8e6ed10e507e06: Status 404 returned error can't find the container with id cf8ac31082a5e6a89e661cb868806892466d6997efaa1147dc8e6ed10e507e06 Nov 23 16:18:51 crc kubenswrapper[5050]: I1123 16:18:51.227795 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-jzkdv-config-v6b45" event={"ID":"556f4ebe-433e-4728-a823-cab0ce9e4610","Type":"ContainerDied","Data":"a0d7fa03df47255db7fe90f6bcb3ab5a959a9e7609ab0a26debbda6b894830be"} Nov 23 16:18:51 crc kubenswrapper[5050]: I1123 16:18:51.227846 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a0d7fa03df47255db7fe90f6bcb3ab5a959a9e7609ab0a26debbda6b894830be" Nov 23 16:18:51 crc kubenswrapper[5050]: I1123 16:18:51.227926 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-jzkdv-config-v6b45" Nov 23 16:18:51 crc kubenswrapper[5050]: I1123 16:18:51.231105 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-rsyslog-rl599" event={"ID":"3a52da4e-bd4e-4faa-99d2-944528a2f797","Type":"ContainerStarted","Data":"cf8ac31082a5e6a89e661cb868806892466d6997efaa1147dc8e6ed10e507e06"} Nov 23 16:18:51 crc kubenswrapper[5050]: I1123 16:18:51.333973 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-image-upload-59f8cff499-7xrf5" Nov 23 16:18:51 crc kubenswrapper[5050]: I1123 16:18:51.629897 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-db-sync-phxjz"] Nov 23 16:18:51 crc kubenswrapper[5050]: I1123 16:18:51.632566 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-db-sync-phxjz" Nov 23 16:18:51 crc kubenswrapper[5050]: I1123 16:18:51.634727 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-scripts" Nov 23 16:18:51 crc kubenswrapper[5050]: I1123 16:18:51.645468 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1a6211f3-cd96-4278-b870-6e8f17da26dd-config-data\") pod \"octavia-db-sync-phxjz\" (UID: \"1a6211f3-cd96-4278-b870-6e8f17da26dd\") " pod="openstack/octavia-db-sync-phxjz" Nov 23 16:18:51 crc kubenswrapper[5050]: I1123 16:18:51.645833 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/1a6211f3-cd96-4278-b870-6e8f17da26dd-config-data-merged\") pod \"octavia-db-sync-phxjz\" (UID: \"1a6211f3-cd96-4278-b870-6e8f17da26dd\") " pod="openstack/octavia-db-sync-phxjz" Nov 23 16:18:51 crc kubenswrapper[5050]: I1123 16:18:51.646075 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1a6211f3-cd96-4278-b870-6e8f17da26dd-combined-ca-bundle\") pod \"octavia-db-sync-phxjz\" (UID: \"1a6211f3-cd96-4278-b870-6e8f17da26dd\") " pod="openstack/octavia-db-sync-phxjz" Nov 23 16:18:51 crc kubenswrapper[5050]: I1123 16:18:51.667741 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1a6211f3-cd96-4278-b870-6e8f17da26dd-scripts\") pod \"octavia-db-sync-phxjz\" (UID: \"1a6211f3-cd96-4278-b870-6e8f17da26dd\") " pod="openstack/octavia-db-sync-phxjz" Nov 23 16:18:51 crc kubenswrapper[5050]: I1123 16:18:51.672308 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-db-sync-phxjz"] Nov 23 16:18:51 crc kubenswrapper[5050]: I1123 16:18:51.779968 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1a6211f3-cd96-4278-b870-6e8f17da26dd-scripts\") pod \"octavia-db-sync-phxjz\" (UID: \"1a6211f3-cd96-4278-b870-6e8f17da26dd\") " pod="openstack/octavia-db-sync-phxjz" Nov 23 16:18:51 crc kubenswrapper[5050]: I1123 16:18:51.780309 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1a6211f3-cd96-4278-b870-6e8f17da26dd-config-data\") pod \"octavia-db-sync-phxjz\" (UID: \"1a6211f3-cd96-4278-b870-6e8f17da26dd\") " pod="openstack/octavia-db-sync-phxjz" Nov 23 16:18:51 crc kubenswrapper[5050]: I1123 16:18:51.780424 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/1a6211f3-cd96-4278-b870-6e8f17da26dd-config-data-merged\") pod \"octavia-db-sync-phxjz\" (UID: \"1a6211f3-cd96-4278-b870-6e8f17da26dd\") " pod="openstack/octavia-db-sync-phxjz" Nov 23 16:18:51 crc kubenswrapper[5050]: I1123 16:18:51.780533 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1a6211f3-cd96-4278-b870-6e8f17da26dd-combined-ca-bundle\") pod \"octavia-db-sync-phxjz\" (UID: \"1a6211f3-cd96-4278-b870-6e8f17da26dd\") " pod="openstack/octavia-db-sync-phxjz" Nov 23 16:18:51 crc kubenswrapper[5050]: I1123 16:18:51.783856 5050 operation_generator.go:637] "MountVolume.SetUp succeeded 
for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/1a6211f3-cd96-4278-b870-6e8f17da26dd-config-data-merged\") pod \"octavia-db-sync-phxjz\" (UID: \"1a6211f3-cd96-4278-b870-6e8f17da26dd\") " pod="openstack/octavia-db-sync-phxjz" Nov 23 16:18:51 crc kubenswrapper[5050]: I1123 16:18:51.790409 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1a6211f3-cd96-4278-b870-6e8f17da26dd-config-data\") pod \"octavia-db-sync-phxjz\" (UID: \"1a6211f3-cd96-4278-b870-6e8f17da26dd\") " pod="openstack/octavia-db-sync-phxjz" Nov 23 16:18:51 crc kubenswrapper[5050]: I1123 16:18:51.794480 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1a6211f3-cd96-4278-b870-6e8f17da26dd-combined-ca-bundle\") pod \"octavia-db-sync-phxjz\" (UID: \"1a6211f3-cd96-4278-b870-6e8f17da26dd\") " pod="openstack/octavia-db-sync-phxjz" Nov 23 16:18:51 crc kubenswrapper[5050]: I1123 16:18:51.795341 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1a6211f3-cd96-4278-b870-6e8f17da26dd-scripts\") pod \"octavia-db-sync-phxjz\" (UID: \"1a6211f3-cd96-4278-b870-6e8f17da26dd\") " pod="openstack/octavia-db-sync-phxjz" Nov 23 16:18:51 crc kubenswrapper[5050]: I1123 16:18:51.968799 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-jzkdv-config-v6b45"] Nov 23 16:18:51 crc kubenswrapper[5050]: I1123 16:18:51.990374 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-jzkdv-config-v6b45"] Nov 23 16:18:52 crc kubenswrapper[5050]: I1123 16:18:52.012784 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-db-sync-phxjz" Nov 23 16:18:52 crc kubenswrapper[5050]: I1123 16:18:52.136866 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-jzkdv-config-pj9m9"] Nov 23 16:18:52 crc kubenswrapper[5050]: I1123 16:18:52.139143 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-jzkdv-config-pj9m9" Nov 23 16:18:52 crc kubenswrapper[5050]: I1123 16:18:52.143518 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts" Nov 23 16:18:52 crc kubenswrapper[5050]: I1123 16:18:52.153490 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-jzkdv-config-pj9m9"] Nov 23 16:18:52 crc kubenswrapper[5050]: I1123 16:18:52.215561 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/42805a74-0667-4e80-9cd8-0e60d2d6adeb-additional-scripts\") pod \"ovn-controller-jzkdv-config-pj9m9\" (UID: \"42805a74-0667-4e80-9cd8-0e60d2d6adeb\") " pod="openstack/ovn-controller-jzkdv-config-pj9m9" Nov 23 16:18:52 crc kubenswrapper[5050]: I1123 16:18:52.215936 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/42805a74-0667-4e80-9cd8-0e60d2d6adeb-var-run\") pod \"ovn-controller-jzkdv-config-pj9m9\" (UID: \"42805a74-0667-4e80-9cd8-0e60d2d6adeb\") " pod="openstack/ovn-controller-jzkdv-config-pj9m9" Nov 23 16:18:52 crc kubenswrapper[5050]: I1123 16:18:52.215974 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tx6mg\" (UniqueName: \"kubernetes.io/projected/42805a74-0667-4e80-9cd8-0e60d2d6adeb-kube-api-access-tx6mg\") pod \"ovn-controller-jzkdv-config-pj9m9\" (UID: \"42805a74-0667-4e80-9cd8-0e60d2d6adeb\") " pod="openstack/ovn-controller-jzkdv-config-pj9m9" Nov 23 16:18:52 crc kubenswrapper[5050]: I1123 16:18:52.216074 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/42805a74-0667-4e80-9cd8-0e60d2d6adeb-var-log-ovn\") pod \"ovn-controller-jzkdv-config-pj9m9\" (UID: \"42805a74-0667-4e80-9cd8-0e60d2d6adeb\") " pod="openstack/ovn-controller-jzkdv-config-pj9m9" Nov 23 16:18:52 crc kubenswrapper[5050]: I1123 16:18:52.216551 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/42805a74-0667-4e80-9cd8-0e60d2d6adeb-scripts\") pod \"ovn-controller-jzkdv-config-pj9m9\" (UID: \"42805a74-0667-4e80-9cd8-0e60d2d6adeb\") " pod="openstack/ovn-controller-jzkdv-config-pj9m9" Nov 23 16:18:52 crc kubenswrapper[5050]: I1123 16:18:52.216606 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/42805a74-0667-4e80-9cd8-0e60d2d6adeb-var-run-ovn\") pod \"ovn-controller-jzkdv-config-pj9m9\" (UID: \"42805a74-0667-4e80-9cd8-0e60d2d6adeb\") " pod="openstack/ovn-controller-jzkdv-config-pj9m9" Nov 23 16:18:52 crc kubenswrapper[5050]: I1123 16:18:52.319639 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/42805a74-0667-4e80-9cd8-0e60d2d6adeb-scripts\") pod \"ovn-controller-jzkdv-config-pj9m9\" (UID: \"42805a74-0667-4e80-9cd8-0e60d2d6adeb\") " pod="openstack/ovn-controller-jzkdv-config-pj9m9" Nov 23 16:18:52 crc kubenswrapper[5050]: I1123 16:18:52.319754 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/42805a74-0667-4e80-9cd8-0e60d2d6adeb-var-run-ovn\") pod 
\"ovn-controller-jzkdv-config-pj9m9\" (UID: \"42805a74-0667-4e80-9cd8-0e60d2d6adeb\") " pod="openstack/ovn-controller-jzkdv-config-pj9m9" Nov 23 16:18:52 crc kubenswrapper[5050]: I1123 16:18:52.319894 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/42805a74-0667-4e80-9cd8-0e60d2d6adeb-additional-scripts\") pod \"ovn-controller-jzkdv-config-pj9m9\" (UID: \"42805a74-0667-4e80-9cd8-0e60d2d6adeb\") " pod="openstack/ovn-controller-jzkdv-config-pj9m9" Nov 23 16:18:52 crc kubenswrapper[5050]: I1123 16:18:52.319940 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/42805a74-0667-4e80-9cd8-0e60d2d6adeb-var-run\") pod \"ovn-controller-jzkdv-config-pj9m9\" (UID: \"42805a74-0667-4e80-9cd8-0e60d2d6adeb\") " pod="openstack/ovn-controller-jzkdv-config-pj9m9" Nov 23 16:18:52 crc kubenswrapper[5050]: I1123 16:18:52.319977 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tx6mg\" (UniqueName: \"kubernetes.io/projected/42805a74-0667-4e80-9cd8-0e60d2d6adeb-kube-api-access-tx6mg\") pod \"ovn-controller-jzkdv-config-pj9m9\" (UID: \"42805a74-0667-4e80-9cd8-0e60d2d6adeb\") " pod="openstack/ovn-controller-jzkdv-config-pj9m9" Nov 23 16:18:52 crc kubenswrapper[5050]: I1123 16:18:52.320079 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/42805a74-0667-4e80-9cd8-0e60d2d6adeb-var-log-ovn\") pod \"ovn-controller-jzkdv-config-pj9m9\" (UID: \"42805a74-0667-4e80-9cd8-0e60d2d6adeb\") " pod="openstack/ovn-controller-jzkdv-config-pj9m9" Nov 23 16:18:52 crc kubenswrapper[5050]: I1123 16:18:52.320533 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/42805a74-0667-4e80-9cd8-0e60d2d6adeb-var-log-ovn\") pod \"ovn-controller-jzkdv-config-pj9m9\" (UID: \"42805a74-0667-4e80-9cd8-0e60d2d6adeb\") " pod="openstack/ovn-controller-jzkdv-config-pj9m9" Nov 23 16:18:52 crc kubenswrapper[5050]: I1123 16:18:52.320610 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/42805a74-0667-4e80-9cd8-0e60d2d6adeb-var-run\") pod \"ovn-controller-jzkdv-config-pj9m9\" (UID: \"42805a74-0667-4e80-9cd8-0e60d2d6adeb\") " pod="openstack/ovn-controller-jzkdv-config-pj9m9" Nov 23 16:18:52 crc kubenswrapper[5050]: I1123 16:18:52.320943 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/42805a74-0667-4e80-9cd8-0e60d2d6adeb-var-run-ovn\") pod \"ovn-controller-jzkdv-config-pj9m9\" (UID: \"42805a74-0667-4e80-9cd8-0e60d2d6adeb\") " pod="openstack/ovn-controller-jzkdv-config-pj9m9" Nov 23 16:18:52 crc kubenswrapper[5050]: I1123 16:18:52.321457 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/42805a74-0667-4e80-9cd8-0e60d2d6adeb-additional-scripts\") pod \"ovn-controller-jzkdv-config-pj9m9\" (UID: \"42805a74-0667-4e80-9cd8-0e60d2d6adeb\") " pod="openstack/ovn-controller-jzkdv-config-pj9m9" Nov 23 16:18:52 crc kubenswrapper[5050]: I1123 16:18:52.324862 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/42805a74-0667-4e80-9cd8-0e60d2d6adeb-scripts\") pod 
\"ovn-controller-jzkdv-config-pj9m9\" (UID: \"42805a74-0667-4e80-9cd8-0e60d2d6adeb\") " pod="openstack/ovn-controller-jzkdv-config-pj9m9" Nov 23 16:18:52 crc kubenswrapper[5050]: I1123 16:18:52.339682 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tx6mg\" (UniqueName: \"kubernetes.io/projected/42805a74-0667-4e80-9cd8-0e60d2d6adeb-kube-api-access-tx6mg\") pod \"ovn-controller-jzkdv-config-pj9m9\" (UID: \"42805a74-0667-4e80-9cd8-0e60d2d6adeb\") " pod="openstack/ovn-controller-jzkdv-config-pj9m9" Nov 23 16:18:52 crc kubenswrapper[5050]: I1123 16:18:52.359982 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-jzkdv" Nov 23 16:18:52 crc kubenswrapper[5050]: I1123 16:18:52.468001 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-jzkdv-config-pj9m9" Nov 23 16:18:52 crc kubenswrapper[5050]: I1123 16:18:52.621180 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-db-sync-phxjz"] Nov 23 16:18:52 crc kubenswrapper[5050]: W1123 16:18:52.633616 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1a6211f3_cd96_4278_b870_6e8f17da26dd.slice/crio-bc060c4dd201c9f38fe10fd2ac5b2d0de906047b279abb9fd6f1861deb7c437a WatchSource:0}: Error finding container bc060c4dd201c9f38fe10fd2ac5b2d0de906047b279abb9fd6f1861deb7c437a: Status 404 returned error can't find the container with id bc060c4dd201c9f38fe10fd2ac5b2d0de906047b279abb9fd6f1861deb7c437a Nov 23 16:18:52 crc kubenswrapper[5050]: I1123 16:18:52.757545 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-image-upload-59f8cff499-7xrf5"] Nov 23 16:18:52 crc kubenswrapper[5050]: W1123 16:18:52.775226 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod09e7acda_7cac_4ac8_bde7_befc324d955e.slice/crio-a5e21ce30538630b48bcab54798e9d1fea63e0c7cda05345cec77402deb0d4c5 WatchSource:0}: Error finding container a5e21ce30538630b48bcab54798e9d1fea63e0c7cda05345cec77402deb0d4c5: Status 404 returned error can't find the container with id a5e21ce30538630b48bcab54798e9d1fea63e0c7cda05345cec77402deb0d4c5 Nov 23 16:18:53 crc kubenswrapper[5050]: I1123 16:18:53.024882 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-jzkdv-config-pj9m9"] Nov 23 16:18:53 crc kubenswrapper[5050]: W1123 16:18:53.025359 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod42805a74_0667_4e80_9cd8_0e60d2d6adeb.slice/crio-ede49a31ab2e53ffac8d777a0d8391f562c233b1423ebe26d11a580fadefaaf3 WatchSource:0}: Error finding container ede49a31ab2e53ffac8d777a0d8391f562c233b1423ebe26d11a580fadefaaf3: Status 404 returned error can't find the container with id ede49a31ab2e53ffac8d777a0d8391f562c233b1423ebe26d11a580fadefaaf3 Nov 23 16:18:53 crc kubenswrapper[5050]: I1123 16:18:53.257853 5050 generic.go:334] "Generic (PLEG): container finished" podID="1a6211f3-cd96-4278-b870-6e8f17da26dd" containerID="205da7a70d393d0b009fb1cbdf28e73a3558cdd8a0505926985f1b8b7a4a8623" exitCode=0 Nov 23 16:18:53 crc kubenswrapper[5050]: I1123 16:18:53.257930 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-db-sync-phxjz" 
event={"ID":"1a6211f3-cd96-4278-b870-6e8f17da26dd","Type":"ContainerDied","Data":"205da7a70d393d0b009fb1cbdf28e73a3558cdd8a0505926985f1b8b7a4a8623"} Nov 23 16:18:53 crc kubenswrapper[5050]: I1123 16:18:53.257954 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-db-sync-phxjz" event={"ID":"1a6211f3-cd96-4278-b870-6e8f17da26dd","Type":"ContainerStarted","Data":"bc060c4dd201c9f38fe10fd2ac5b2d0de906047b279abb9fd6f1861deb7c437a"} Nov 23 16:18:53 crc kubenswrapper[5050]: I1123 16:18:53.260717 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-image-upload-59f8cff499-7xrf5" event={"ID":"09e7acda-7cac-4ac8-bde7-befc324d955e","Type":"ContainerStarted","Data":"a5e21ce30538630b48bcab54798e9d1fea63e0c7cda05345cec77402deb0d4c5"} Nov 23 16:18:53 crc kubenswrapper[5050]: I1123 16:18:53.262517 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-jzkdv-config-pj9m9" event={"ID":"42805a74-0667-4e80-9cd8-0e60d2d6adeb","Type":"ContainerStarted","Data":"ede49a31ab2e53ffac8d777a0d8391f562c233b1423ebe26d11a580fadefaaf3"} Nov 23 16:18:53 crc kubenswrapper[5050]: I1123 16:18:53.548435 5050 scope.go:117] "RemoveContainer" containerID="131b4bb8956deb8c3bd7bd951e055fca3b66c01a0141c3f85cb0b1152065de00" Nov 23 16:18:53 crc kubenswrapper[5050]: E1123 16:18:53.549272 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 16:18:53 crc kubenswrapper[5050]: I1123 16:18:53.582526 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="556f4ebe-433e-4728-a823-cab0ce9e4610" path="/var/lib/kubelet/pods/556f4ebe-433e-4728-a823-cab0ce9e4610/volumes" Nov 23 16:18:54 crc kubenswrapper[5050]: I1123 16:18:54.285927 5050 generic.go:334] "Generic (PLEG): container finished" podID="42805a74-0667-4e80-9cd8-0e60d2d6adeb" containerID="b0bad7799a99735bfbd956d80653de39fdb440cd0c61f2c75de64c0cd74e6481" exitCode=0 Nov 23 16:18:54 crc kubenswrapper[5050]: I1123 16:18:54.286338 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-jzkdv-config-pj9m9" event={"ID":"42805a74-0667-4e80-9cd8-0e60d2d6adeb","Type":"ContainerDied","Data":"b0bad7799a99735bfbd956d80653de39fdb440cd0c61f2c75de64c0cd74e6481"} Nov 23 16:18:54 crc kubenswrapper[5050]: I1123 16:18:54.294828 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-db-sync-phxjz" event={"ID":"1a6211f3-cd96-4278-b870-6e8f17da26dd","Type":"ContainerStarted","Data":"b8d4cd94669a986c203c808a2bb61a252807144e2a985a08aeeb6d41bc4a63b1"} Nov 23 16:18:54 crc kubenswrapper[5050]: I1123 16:18:54.331747 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/octavia-db-sync-phxjz" podStartSLOduration=3.331723143 podStartE2EDuration="3.331723143s" podCreationTimestamp="2025-11-23 16:18:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 16:18:54.3273859 +0000 UTC m=+5829.494382405" watchObservedRunningTime="2025-11-23 16:18:54.331723143 +0000 UTC m=+5829.498719638" Nov 23 16:18:56 crc kubenswrapper[5050]: I1123 16:18:56.177960 5050 util.go:48] "No ready 
sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-jzkdv-config-pj9m9" Nov 23 16:18:56 crc kubenswrapper[5050]: I1123 16:18:56.276637 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/42805a74-0667-4e80-9cd8-0e60d2d6adeb-var-run-ovn\") pod \"42805a74-0667-4e80-9cd8-0e60d2d6adeb\" (UID: \"42805a74-0667-4e80-9cd8-0e60d2d6adeb\") " Nov 23 16:18:56 crc kubenswrapper[5050]: I1123 16:18:56.276743 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/42805a74-0667-4e80-9cd8-0e60d2d6adeb-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "42805a74-0667-4e80-9cd8-0e60d2d6adeb" (UID: "42805a74-0667-4e80-9cd8-0e60d2d6adeb"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 23 16:18:56 crc kubenswrapper[5050]: I1123 16:18:56.276771 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/42805a74-0667-4e80-9cd8-0e60d2d6adeb-var-run\") pod \"42805a74-0667-4e80-9cd8-0e60d2d6adeb\" (UID: \"42805a74-0667-4e80-9cd8-0e60d2d6adeb\") " Nov 23 16:18:56 crc kubenswrapper[5050]: I1123 16:18:56.276814 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/42805a74-0667-4e80-9cd8-0e60d2d6adeb-additional-scripts\") pod \"42805a74-0667-4e80-9cd8-0e60d2d6adeb\" (UID: \"42805a74-0667-4e80-9cd8-0e60d2d6adeb\") " Nov 23 16:18:56 crc kubenswrapper[5050]: I1123 16:18:56.276863 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tx6mg\" (UniqueName: \"kubernetes.io/projected/42805a74-0667-4e80-9cd8-0e60d2d6adeb-kube-api-access-tx6mg\") pod \"42805a74-0667-4e80-9cd8-0e60d2d6adeb\" (UID: \"42805a74-0667-4e80-9cd8-0e60d2d6adeb\") " Nov 23 16:18:56 crc kubenswrapper[5050]: I1123 16:18:56.276890 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/42805a74-0667-4e80-9cd8-0e60d2d6adeb-var-log-ovn\") pod \"42805a74-0667-4e80-9cd8-0e60d2d6adeb\" (UID: \"42805a74-0667-4e80-9cd8-0e60d2d6adeb\") " Nov 23 16:18:56 crc kubenswrapper[5050]: I1123 16:18:56.276890 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/42805a74-0667-4e80-9cd8-0e60d2d6adeb-var-run" (OuterVolumeSpecName: "var-run") pod "42805a74-0667-4e80-9cd8-0e60d2d6adeb" (UID: "42805a74-0667-4e80-9cd8-0e60d2d6adeb"). InnerVolumeSpecName "var-run". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 23 16:18:56 crc kubenswrapper[5050]: I1123 16:18:56.276966 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/42805a74-0667-4e80-9cd8-0e60d2d6adeb-scripts\") pod \"42805a74-0667-4e80-9cd8-0e60d2d6adeb\" (UID: \"42805a74-0667-4e80-9cd8-0e60d2d6adeb\") " Nov 23 16:18:56 crc kubenswrapper[5050]: I1123 16:18:56.277618 5050 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/42805a74-0667-4e80-9cd8-0e60d2d6adeb-var-run-ovn\") on node \"crc\" DevicePath \"\"" Nov 23 16:18:56 crc kubenswrapper[5050]: I1123 16:18:56.277648 5050 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/42805a74-0667-4e80-9cd8-0e60d2d6adeb-var-run\") on node \"crc\" DevicePath \"\"" Nov 23 16:18:56 crc kubenswrapper[5050]: I1123 16:18:56.278339 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/42805a74-0667-4e80-9cd8-0e60d2d6adeb-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "42805a74-0667-4e80-9cd8-0e60d2d6adeb" (UID: "42805a74-0667-4e80-9cd8-0e60d2d6adeb"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 23 16:18:56 crc kubenswrapper[5050]: I1123 16:18:56.278569 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/42805a74-0667-4e80-9cd8-0e60d2d6adeb-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "42805a74-0667-4e80-9cd8-0e60d2d6adeb" (UID: "42805a74-0667-4e80-9cd8-0e60d2d6adeb"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 16:18:56 crc kubenswrapper[5050]: I1123 16:18:56.278681 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/42805a74-0667-4e80-9cd8-0e60d2d6adeb-scripts" (OuterVolumeSpecName: "scripts") pod "42805a74-0667-4e80-9cd8-0e60d2d6adeb" (UID: "42805a74-0667-4e80-9cd8-0e60d2d6adeb"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 16:18:56 crc kubenswrapper[5050]: I1123 16:18:56.289893 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/42805a74-0667-4e80-9cd8-0e60d2d6adeb-kube-api-access-tx6mg" (OuterVolumeSpecName: "kube-api-access-tx6mg") pod "42805a74-0667-4e80-9cd8-0e60d2d6adeb" (UID: "42805a74-0667-4e80-9cd8-0e60d2d6adeb"). InnerVolumeSpecName "kube-api-access-tx6mg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 16:18:56 crc kubenswrapper[5050]: I1123 16:18:56.329034 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-jzkdv-config-pj9m9" event={"ID":"42805a74-0667-4e80-9cd8-0e60d2d6adeb","Type":"ContainerDied","Data":"ede49a31ab2e53ffac8d777a0d8391f562c233b1423ebe26d11a580fadefaaf3"} Nov 23 16:18:56 crc kubenswrapper[5050]: I1123 16:18:56.329097 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ede49a31ab2e53ffac8d777a0d8391f562c233b1423ebe26d11a580fadefaaf3" Nov 23 16:18:56 crc kubenswrapper[5050]: I1123 16:18:56.329170 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-jzkdv-config-pj9m9" Nov 23 16:18:56 crc kubenswrapper[5050]: I1123 16:18:56.333134 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-rsyslog-rl599" event={"ID":"3a52da4e-bd4e-4faa-99d2-944528a2f797","Type":"ContainerStarted","Data":"11c8b60d39e845b4ef58a66fd1941ea194fbb60ea7eb464ab688452596841465"} Nov 23 16:18:56 crc kubenswrapper[5050]: I1123 16:18:56.380140 5050 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/42805a74-0667-4e80-9cd8-0e60d2d6adeb-additional-scripts\") on node \"crc\" DevicePath \"\"" Nov 23 16:18:56 crc kubenswrapper[5050]: I1123 16:18:56.380183 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tx6mg\" (UniqueName: \"kubernetes.io/projected/42805a74-0667-4e80-9cd8-0e60d2d6adeb-kube-api-access-tx6mg\") on node \"crc\" DevicePath \"\"" Nov 23 16:18:56 crc kubenswrapper[5050]: I1123 16:18:56.380201 5050 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/42805a74-0667-4e80-9cd8-0e60d2d6adeb-var-log-ovn\") on node \"crc\" DevicePath \"\"" Nov 23 16:18:56 crc kubenswrapper[5050]: I1123 16:18:56.380217 5050 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/42805a74-0667-4e80-9cd8-0e60d2d6adeb-scripts\") on node \"crc\" DevicePath \"\"" Nov 23 16:18:56 crc kubenswrapper[5050]: E1123 16:18:56.523966 5050 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1a6211f3_cd96_4278_b870_6e8f17da26dd.slice/crio-b8d4cd94669a986c203c808a2bb61a252807144e2a985a08aeeb6d41bc4a63b1.scope\": RecentStats: unable to find data in memory cache]" Nov 23 16:18:57 crc kubenswrapper[5050]: I1123 16:18:57.260920 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-jzkdv-config-pj9m9"] Nov 23 16:18:57 crc kubenswrapper[5050]: I1123 16:18:57.275054 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-jzkdv-config-pj9m9"] Nov 23 16:18:57 crc kubenswrapper[5050]: I1123 16:18:57.346829 5050 generic.go:334] "Generic (PLEG): container finished" podID="1a6211f3-cd96-4278-b870-6e8f17da26dd" containerID="b8d4cd94669a986c203c808a2bb61a252807144e2a985a08aeeb6d41bc4a63b1" exitCode=0 Nov 23 16:18:57 crc kubenswrapper[5050]: I1123 16:18:57.347547 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-db-sync-phxjz" event={"ID":"1a6211f3-cd96-4278-b870-6e8f17da26dd","Type":"ContainerDied","Data":"b8d4cd94669a986c203c808a2bb61a252807144e2a985a08aeeb6d41bc4a63b1"} Nov 23 16:18:57 crc kubenswrapper[5050]: I1123 16:18:57.561536 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="42805a74-0667-4e80-9cd8-0e60d2d6adeb" path="/var/lib/kubelet/pods/42805a74-0667-4e80-9cd8-0e60d2d6adeb/volumes" Nov 23 16:18:58 crc kubenswrapper[5050]: I1123 16:18:58.361516 5050 generic.go:334] "Generic (PLEG): container finished" podID="3a52da4e-bd4e-4faa-99d2-944528a2f797" containerID="11c8b60d39e845b4ef58a66fd1941ea194fbb60ea7eb464ab688452596841465" exitCode=0 Nov 23 16:18:58 crc kubenswrapper[5050]: I1123 16:18:58.361864 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-rsyslog-rl599" 
event={"ID":"3a52da4e-bd4e-4faa-99d2-944528a2f797","Type":"ContainerDied","Data":"11c8b60d39e845b4ef58a66fd1941ea194fbb60ea7eb464ab688452596841465"} Nov 23 16:18:58 crc kubenswrapper[5050]: I1123 16:18:58.921658 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-db-sync-phxjz" Nov 23 16:18:59 crc kubenswrapper[5050]: I1123 16:18:59.052411 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/1a6211f3-cd96-4278-b870-6e8f17da26dd-config-data-merged\") pod \"1a6211f3-cd96-4278-b870-6e8f17da26dd\" (UID: \"1a6211f3-cd96-4278-b870-6e8f17da26dd\") " Nov 23 16:18:59 crc kubenswrapper[5050]: I1123 16:18:59.052470 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1a6211f3-cd96-4278-b870-6e8f17da26dd-combined-ca-bundle\") pod \"1a6211f3-cd96-4278-b870-6e8f17da26dd\" (UID: \"1a6211f3-cd96-4278-b870-6e8f17da26dd\") " Nov 23 16:18:59 crc kubenswrapper[5050]: I1123 16:18:59.052727 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1a6211f3-cd96-4278-b870-6e8f17da26dd-scripts\") pod \"1a6211f3-cd96-4278-b870-6e8f17da26dd\" (UID: \"1a6211f3-cd96-4278-b870-6e8f17da26dd\") " Nov 23 16:18:59 crc kubenswrapper[5050]: I1123 16:18:59.052759 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1a6211f3-cd96-4278-b870-6e8f17da26dd-config-data\") pod \"1a6211f3-cd96-4278-b870-6e8f17da26dd\" (UID: \"1a6211f3-cd96-4278-b870-6e8f17da26dd\") " Nov 23 16:18:59 crc kubenswrapper[5050]: I1123 16:18:59.060117 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1a6211f3-cd96-4278-b870-6e8f17da26dd-scripts" (OuterVolumeSpecName: "scripts") pod "1a6211f3-cd96-4278-b870-6e8f17da26dd" (UID: "1a6211f3-cd96-4278-b870-6e8f17da26dd"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:18:59 crc kubenswrapper[5050]: I1123 16:18:59.072877 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1a6211f3-cd96-4278-b870-6e8f17da26dd-config-data" (OuterVolumeSpecName: "config-data") pod "1a6211f3-cd96-4278-b870-6e8f17da26dd" (UID: "1a6211f3-cd96-4278-b870-6e8f17da26dd"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:18:59 crc kubenswrapper[5050]: I1123 16:18:59.099676 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1a6211f3-cd96-4278-b870-6e8f17da26dd-config-data-merged" (OuterVolumeSpecName: "config-data-merged") pod "1a6211f3-cd96-4278-b870-6e8f17da26dd" (UID: "1a6211f3-cd96-4278-b870-6e8f17da26dd"). InnerVolumeSpecName "config-data-merged". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 16:18:59 crc kubenswrapper[5050]: I1123 16:18:59.103687 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1a6211f3-cd96-4278-b870-6e8f17da26dd-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1a6211f3-cd96-4278-b870-6e8f17da26dd" (UID: "1a6211f3-cd96-4278-b870-6e8f17da26dd"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:18:59 crc kubenswrapper[5050]: I1123 16:18:59.155634 5050 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1a6211f3-cd96-4278-b870-6e8f17da26dd-scripts\") on node \"crc\" DevicePath \"\"" Nov 23 16:18:59 crc kubenswrapper[5050]: I1123 16:18:59.155675 5050 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1a6211f3-cd96-4278-b870-6e8f17da26dd-config-data\") on node \"crc\" DevicePath \"\"" Nov 23 16:18:59 crc kubenswrapper[5050]: I1123 16:18:59.155688 5050 reconciler_common.go:293] "Volume detached for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/1a6211f3-cd96-4278-b870-6e8f17da26dd-config-data-merged\") on node \"crc\" DevicePath \"\"" Nov 23 16:18:59 crc kubenswrapper[5050]: I1123 16:18:59.155702 5050 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1a6211f3-cd96-4278-b870-6e8f17da26dd-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 16:18:59 crc kubenswrapper[5050]: I1123 16:18:59.379811 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-db-sync-phxjz" event={"ID":"1a6211f3-cd96-4278-b870-6e8f17da26dd","Type":"ContainerDied","Data":"bc060c4dd201c9f38fe10fd2ac5b2d0de906047b279abb9fd6f1861deb7c437a"} Nov 23 16:18:59 crc kubenswrapper[5050]: I1123 16:18:59.379878 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bc060c4dd201c9f38fe10fd2ac5b2d0de906047b279abb9fd6f1861deb7c437a" Nov 23 16:18:59 crc kubenswrapper[5050]: I1123 16:18:59.379971 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-db-sync-phxjz" Nov 23 16:19:02 crc kubenswrapper[5050]: I1123 16:19:02.585849 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/octavia-api-78c74877d6-ndfk4" Nov 23 16:19:02 crc kubenswrapper[5050]: I1123 16:19:02.951634 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/octavia-api-78c74877d6-ndfk4" Nov 23 16:19:03 crc kubenswrapper[5050]: I1123 16:19:03.428128 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-image-upload-59f8cff499-7xrf5" event={"ID":"09e7acda-7cac-4ac8-bde7-befc324d955e","Type":"ContainerStarted","Data":"0444e7c5fc112447fe3de77c6754552da39f363b52de39888364d1d648108a80"} Nov 23 16:19:04 crc kubenswrapper[5050]: I1123 16:19:04.442162 5050 generic.go:334] "Generic (PLEG): container finished" podID="09e7acda-7cac-4ac8-bde7-befc324d955e" containerID="0444e7c5fc112447fe3de77c6754552da39f363b52de39888364d1d648108a80" exitCode=0 Nov 23 16:19:04 crc kubenswrapper[5050]: I1123 16:19:04.442261 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-image-upload-59f8cff499-7xrf5" event={"ID":"09e7acda-7cac-4ac8-bde7-befc324d955e","Type":"ContainerDied","Data":"0444e7c5fc112447fe3de77c6754552da39f363b52de39888364d1d648108a80"} Nov 23 16:19:04 crc kubenswrapper[5050]: I1123 16:19:04.447697 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-rsyslog-rl599" event={"ID":"3a52da4e-bd4e-4faa-99d2-944528a2f797","Type":"ContainerStarted","Data":"c0f5acff7bae3030140664146ed5cfab07e47575ae9deb12153e2d6d78bb92c9"} Nov 23 16:19:04 crc kubenswrapper[5050]: I1123 16:19:04.447989 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openstack/octavia-rsyslog-rl599" Nov 23 16:19:04 crc kubenswrapper[5050]: I1123 16:19:04.508730 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/octavia-rsyslog-rl599" podStartSLOduration=2.117638413 podStartE2EDuration="14.508699013s" podCreationTimestamp="2025-11-23 16:18:50 +0000 UTC" firstStartedPulling="2025-11-23 16:18:51.164660823 +0000 UTC m=+5826.331657318" lastFinishedPulling="2025-11-23 16:19:03.555721433 +0000 UTC m=+5838.722717918" observedRunningTime="2025-11-23 16:19:04.493306559 +0000 UTC m=+5839.660303054" watchObservedRunningTime="2025-11-23 16:19:04.508699013 +0000 UTC m=+5839.675695528" Nov 23 16:19:06 crc kubenswrapper[5050]: I1123 16:19:06.484923 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-image-upload-59f8cff499-7xrf5" event={"ID":"09e7acda-7cac-4ac8-bde7-befc324d955e","Type":"ContainerStarted","Data":"8eb9fe5e649dc786ec698cd2ecfeda53c2ff809cb48eade1389fafe19b3351cd"} Nov 23 16:19:06 crc kubenswrapper[5050]: I1123 16:19:06.536707 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/octavia-image-upload-59f8cff499-7xrf5" podStartSLOduration=3.712542959 podStartE2EDuration="16.536678403s" podCreationTimestamp="2025-11-23 16:18:50 +0000 UTC" firstStartedPulling="2025-11-23 16:18:52.778151124 +0000 UTC m=+5827.945147609" lastFinishedPulling="2025-11-23 16:19:05.602286518 +0000 UTC m=+5840.769283053" observedRunningTime="2025-11-23 16:19:06.518176101 +0000 UTC m=+5841.685172646" watchObservedRunningTime="2025-11-23 16:19:06.536678403 +0000 UTC m=+5841.703674908" Nov 23 16:19:06 crc kubenswrapper[5050]: I1123 16:19:06.548968 5050 scope.go:117] "RemoveContainer" containerID="131b4bb8956deb8c3bd7bd951e055fca3b66c01a0141c3f85cb0b1152065de00" Nov 23 16:19:06 crc kubenswrapper[5050]: E1123 16:19:06.549600 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 16:19:18 crc kubenswrapper[5050]: I1123 16:19:18.549084 5050 scope.go:117] "RemoveContainer" containerID="131b4bb8956deb8c3bd7bd951e055fca3b66c01a0141c3f85cb0b1152065de00" Nov 23 16:19:18 crc kubenswrapper[5050]: E1123 16:19:18.550671 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 16:19:20 crc kubenswrapper[5050]: I1123 16:19:20.530862 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/octavia-rsyslog-rl599" Nov 23 16:19:29 crc kubenswrapper[5050]: I1123 16:19:29.576909 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/octavia-image-upload-59f8cff499-7xrf5"] Nov 23 16:19:29 crc kubenswrapper[5050]: I1123 16:19:29.578359 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/octavia-image-upload-59f8cff499-7xrf5" podUID="09e7acda-7cac-4ac8-bde7-befc324d955e" 
containerName="octavia-amphora-httpd" containerID="cri-o://8eb9fe5e649dc786ec698cd2ecfeda53c2ff809cb48eade1389fafe19b3351cd" gracePeriod=30 Nov 23 16:19:29 crc kubenswrapper[5050]: I1123 16:19:29.818429 5050 generic.go:334] "Generic (PLEG): container finished" podID="09e7acda-7cac-4ac8-bde7-befc324d955e" containerID="8eb9fe5e649dc786ec698cd2ecfeda53c2ff809cb48eade1389fafe19b3351cd" exitCode=0 Nov 23 16:19:29 crc kubenswrapper[5050]: I1123 16:19:29.818544 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-image-upload-59f8cff499-7xrf5" event={"ID":"09e7acda-7cac-4ac8-bde7-befc324d955e","Type":"ContainerDied","Data":"8eb9fe5e649dc786ec698cd2ecfeda53c2ff809cb48eade1389fafe19b3351cd"} Nov 23 16:19:30 crc kubenswrapper[5050]: I1123 16:19:30.270410 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-image-upload-59f8cff499-7xrf5" Nov 23 16:19:30 crc kubenswrapper[5050]: I1123 16:19:30.363402 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/09e7acda-7cac-4ac8-bde7-befc324d955e-httpd-config\") pod \"09e7acda-7cac-4ac8-bde7-befc324d955e\" (UID: \"09e7acda-7cac-4ac8-bde7-befc324d955e\") " Nov 23 16:19:30 crc kubenswrapper[5050]: I1123 16:19:30.363512 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"amphora-image\" (UniqueName: \"kubernetes.io/empty-dir/09e7acda-7cac-4ac8-bde7-befc324d955e-amphora-image\") pod \"09e7acda-7cac-4ac8-bde7-befc324d955e\" (UID: \"09e7acda-7cac-4ac8-bde7-befc324d955e\") " Nov 23 16:19:30 crc kubenswrapper[5050]: I1123 16:19:30.395248 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09e7acda-7cac-4ac8-bde7-befc324d955e-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "09e7acda-7cac-4ac8-bde7-befc324d955e" (UID: "09e7acda-7cac-4ac8-bde7-befc324d955e"). InnerVolumeSpecName "httpd-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:19:30 crc kubenswrapper[5050]: I1123 16:19:30.458349 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/09e7acda-7cac-4ac8-bde7-befc324d955e-amphora-image" (OuterVolumeSpecName: "amphora-image") pod "09e7acda-7cac-4ac8-bde7-befc324d955e" (UID: "09e7acda-7cac-4ac8-bde7-befc324d955e"). InnerVolumeSpecName "amphora-image". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 16:19:30 crc kubenswrapper[5050]: I1123 16:19:30.465846 5050 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/09e7acda-7cac-4ac8-bde7-befc324d955e-httpd-config\") on node \"crc\" DevicePath \"\"" Nov 23 16:19:30 crc kubenswrapper[5050]: I1123 16:19:30.465891 5050 reconciler_common.go:293] "Volume detached for volume \"amphora-image\" (UniqueName: \"kubernetes.io/empty-dir/09e7acda-7cac-4ac8-bde7-befc324d955e-amphora-image\") on node \"crc\" DevicePath \"\"" Nov 23 16:19:30 crc kubenswrapper[5050]: I1123 16:19:30.832141 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-image-upload-59f8cff499-7xrf5" event={"ID":"09e7acda-7cac-4ac8-bde7-befc324d955e","Type":"ContainerDied","Data":"a5e21ce30538630b48bcab54798e9d1fea63e0c7cda05345cec77402deb0d4c5"} Nov 23 16:19:30 crc kubenswrapper[5050]: I1123 16:19:30.832197 5050 scope.go:117] "RemoveContainer" containerID="8eb9fe5e649dc786ec698cd2ecfeda53c2ff809cb48eade1389fafe19b3351cd" Nov 23 16:19:30 crc kubenswrapper[5050]: I1123 16:19:30.832196 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-image-upload-59f8cff499-7xrf5" Nov 23 16:19:30 crc kubenswrapper[5050]: I1123 16:19:30.880755 5050 scope.go:117] "RemoveContainer" containerID="0444e7c5fc112447fe3de77c6754552da39f363b52de39888364d1d648108a80" Nov 23 16:19:30 crc kubenswrapper[5050]: I1123 16:19:30.885050 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/octavia-image-upload-59f8cff499-7xrf5"] Nov 23 16:19:30 crc kubenswrapper[5050]: I1123 16:19:30.907075 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/octavia-image-upload-59f8cff499-7xrf5"] Nov 23 16:19:31 crc kubenswrapper[5050]: I1123 16:19:31.570857 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09e7acda-7cac-4ac8-bde7-befc324d955e" path="/var/lib/kubelet/pods/09e7acda-7cac-4ac8-bde7-befc324d955e/volumes" Nov 23 16:19:33 crc kubenswrapper[5050]: I1123 16:19:33.038717 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-image-upload-59f8cff499-wlqsk"] Nov 23 16:19:33 crc kubenswrapper[5050]: E1123 16:19:33.039177 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="09e7acda-7cac-4ac8-bde7-befc324d955e" containerName="init" Nov 23 16:19:33 crc kubenswrapper[5050]: I1123 16:19:33.039193 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="09e7acda-7cac-4ac8-bde7-befc324d955e" containerName="init" Nov 23 16:19:33 crc kubenswrapper[5050]: E1123 16:19:33.039206 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1a6211f3-cd96-4278-b870-6e8f17da26dd" containerName="octavia-db-sync" Nov 23 16:19:33 crc kubenswrapper[5050]: I1123 16:19:33.039212 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="1a6211f3-cd96-4278-b870-6e8f17da26dd" containerName="octavia-db-sync" Nov 23 16:19:33 crc kubenswrapper[5050]: E1123 16:19:33.039220 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="09e7acda-7cac-4ac8-bde7-befc324d955e" containerName="octavia-amphora-httpd" Nov 23 16:19:33 crc kubenswrapper[5050]: I1123 16:19:33.039227 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="09e7acda-7cac-4ac8-bde7-befc324d955e" containerName="octavia-amphora-httpd" Nov 23 16:19:33 crc kubenswrapper[5050]: E1123 16:19:33.039238 5050 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="42805a74-0667-4e80-9cd8-0e60d2d6adeb" containerName="ovn-config" Nov 23 16:19:33 crc kubenswrapper[5050]: I1123 16:19:33.039244 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="42805a74-0667-4e80-9cd8-0e60d2d6adeb" containerName="ovn-config" Nov 23 16:19:33 crc kubenswrapper[5050]: E1123 16:19:33.039259 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1a6211f3-cd96-4278-b870-6e8f17da26dd" containerName="init" Nov 23 16:19:33 crc kubenswrapper[5050]: I1123 16:19:33.039267 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="1a6211f3-cd96-4278-b870-6e8f17da26dd" containerName="init" Nov 23 16:19:33 crc kubenswrapper[5050]: I1123 16:19:33.039528 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="1a6211f3-cd96-4278-b870-6e8f17da26dd" containerName="octavia-db-sync" Nov 23 16:19:33 crc kubenswrapper[5050]: I1123 16:19:33.039559 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="42805a74-0667-4e80-9cd8-0e60d2d6adeb" containerName="ovn-config" Nov 23 16:19:33 crc kubenswrapper[5050]: I1123 16:19:33.039577 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="09e7acda-7cac-4ac8-bde7-befc324d955e" containerName="octavia-amphora-httpd" Nov 23 16:19:33 crc kubenswrapper[5050]: I1123 16:19:33.040698 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-image-upload-59f8cff499-wlqsk" Nov 23 16:19:33 crc kubenswrapper[5050]: I1123 16:19:33.044116 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-config-data" Nov 23 16:19:33 crc kubenswrapper[5050]: I1123 16:19:33.059594 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-image-upload-59f8cff499-wlqsk"] Nov 23 16:19:33 crc kubenswrapper[5050]: I1123 16:19:33.228662 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"amphora-image\" (UniqueName: \"kubernetes.io/empty-dir/33b0e351-a0e3-4bae-abe7-080ac3620f92-amphora-image\") pod \"octavia-image-upload-59f8cff499-wlqsk\" (UID: \"33b0e351-a0e3-4bae-abe7-080ac3620f92\") " pod="openstack/octavia-image-upload-59f8cff499-wlqsk" Nov 23 16:19:33 crc kubenswrapper[5050]: I1123 16:19:33.229068 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/33b0e351-a0e3-4bae-abe7-080ac3620f92-httpd-config\") pod \"octavia-image-upload-59f8cff499-wlqsk\" (UID: \"33b0e351-a0e3-4bae-abe7-080ac3620f92\") " pod="openstack/octavia-image-upload-59f8cff499-wlqsk" Nov 23 16:19:33 crc kubenswrapper[5050]: I1123 16:19:33.331982 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"amphora-image\" (UniqueName: \"kubernetes.io/empty-dir/33b0e351-a0e3-4bae-abe7-080ac3620f92-amphora-image\") pod \"octavia-image-upload-59f8cff499-wlqsk\" (UID: \"33b0e351-a0e3-4bae-abe7-080ac3620f92\") " pod="openstack/octavia-image-upload-59f8cff499-wlqsk" Nov 23 16:19:33 crc kubenswrapper[5050]: I1123 16:19:33.332105 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/33b0e351-a0e3-4bae-abe7-080ac3620f92-httpd-config\") pod \"octavia-image-upload-59f8cff499-wlqsk\" (UID: \"33b0e351-a0e3-4bae-abe7-080ac3620f92\") " pod="openstack/octavia-image-upload-59f8cff499-wlqsk" Nov 23 16:19:33 crc kubenswrapper[5050]: I1123 16:19:33.332747 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"amphora-image\" (UniqueName: \"kubernetes.io/empty-dir/33b0e351-a0e3-4bae-abe7-080ac3620f92-amphora-image\") pod \"octavia-image-upload-59f8cff499-wlqsk\" (UID: \"33b0e351-a0e3-4bae-abe7-080ac3620f92\") " pod="openstack/octavia-image-upload-59f8cff499-wlqsk" Nov 23 16:19:33 crc kubenswrapper[5050]: I1123 16:19:33.338855 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/33b0e351-a0e3-4bae-abe7-080ac3620f92-httpd-config\") pod \"octavia-image-upload-59f8cff499-wlqsk\" (UID: \"33b0e351-a0e3-4bae-abe7-080ac3620f92\") " pod="openstack/octavia-image-upload-59f8cff499-wlqsk" Nov 23 16:19:33 crc kubenswrapper[5050]: I1123 16:19:33.364765 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-image-upload-59f8cff499-wlqsk" Nov 23 16:19:33 crc kubenswrapper[5050]: I1123 16:19:33.550276 5050 scope.go:117] "RemoveContainer" containerID="131b4bb8956deb8c3bd7bd951e055fca3b66c01a0141c3f85cb0b1152065de00" Nov 23 16:19:33 crc kubenswrapper[5050]: E1123 16:19:33.551204 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 16:19:33 crc kubenswrapper[5050]: I1123 16:19:33.889545 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-image-upload-59f8cff499-wlqsk"] Nov 23 16:19:34 crc kubenswrapper[5050]: I1123 16:19:34.894800 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-image-upload-59f8cff499-wlqsk" event={"ID":"33b0e351-a0e3-4bae-abe7-080ac3620f92","Type":"ContainerStarted","Data":"94459242212bdca668355f15b20f133ec2d04b275398295cd538789e01792471"} Nov 23 16:19:34 crc kubenswrapper[5050]: I1123 16:19:34.894896 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-image-upload-59f8cff499-wlqsk" event={"ID":"33b0e351-a0e3-4bae-abe7-080ac3620f92","Type":"ContainerStarted","Data":"43770aa159badbc80da20970638754703d3055971ab8099adca0279aab0b9a2d"} Nov 23 16:19:35 crc kubenswrapper[5050]: I1123 16:19:35.906943 5050 generic.go:334] "Generic (PLEG): container finished" podID="33b0e351-a0e3-4bae-abe7-080ac3620f92" containerID="94459242212bdca668355f15b20f133ec2d04b275398295cd538789e01792471" exitCode=0 Nov 23 16:19:35 crc kubenswrapper[5050]: I1123 16:19:35.907023 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-image-upload-59f8cff499-wlqsk" event={"ID":"33b0e351-a0e3-4bae-abe7-080ac3620f92","Type":"ContainerDied","Data":"94459242212bdca668355f15b20f133ec2d04b275398295cd538789e01792471"} Nov 23 16:19:37 crc kubenswrapper[5050]: I1123 16:19:37.938042 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-image-upload-59f8cff499-wlqsk" event={"ID":"33b0e351-a0e3-4bae-abe7-080ac3620f92","Type":"ContainerStarted","Data":"ad6c127c502c4dfc1c8852901661978c6ff93588c2c486a9bdce7121a288efce"} Nov 23 16:19:45 crc kubenswrapper[5050]: I1123 16:19:45.573654 5050 scope.go:117] "RemoveContainer" containerID="131b4bb8956deb8c3bd7bd951e055fca3b66c01a0141c3f85cb0b1152065de00" Nov 23 16:19:45 crc kubenswrapper[5050]: E1123 16:19:45.575200 5050 pod_workers.go:1301] "Error syncing pod, skipping" 
err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 16:19:46 crc kubenswrapper[5050]: I1123 16:19:46.903167 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/octavia-image-upload-59f8cff499-wlqsk" podStartSLOduration=10.198164968 podStartE2EDuration="13.903139619s" podCreationTimestamp="2025-11-23 16:19:33 +0000 UTC" firstStartedPulling="2025-11-23 16:19:33.898176774 +0000 UTC m=+5869.065173279" lastFinishedPulling="2025-11-23 16:19:37.603151445 +0000 UTC m=+5872.770147930" observedRunningTime="2025-11-23 16:19:37.957033087 +0000 UTC m=+5873.124029572" watchObservedRunningTime="2025-11-23 16:19:46.903139619 +0000 UTC m=+5882.070136104" Nov 23 16:19:46 crc kubenswrapper[5050]: I1123 16:19:46.915114 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-b25tk"] Nov 23 16:19:46 crc kubenswrapper[5050]: I1123 16:19:46.920576 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-b25tk" Nov 23 16:19:46 crc kubenswrapper[5050]: I1123 16:19:46.924896 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-b25tk"] Nov 23 16:19:46 crc kubenswrapper[5050]: I1123 16:19:46.991902 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f7345504-9bde-40fb-b98e-9939c35fc411-utilities\") pod \"community-operators-b25tk\" (UID: \"f7345504-9bde-40fb-b98e-9939c35fc411\") " pod="openshift-marketplace/community-operators-b25tk" Nov 23 16:19:46 crc kubenswrapper[5050]: I1123 16:19:46.992061 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pbdm8\" (UniqueName: \"kubernetes.io/projected/f7345504-9bde-40fb-b98e-9939c35fc411-kube-api-access-pbdm8\") pod \"community-operators-b25tk\" (UID: \"f7345504-9bde-40fb-b98e-9939c35fc411\") " pod="openshift-marketplace/community-operators-b25tk" Nov 23 16:19:46 crc kubenswrapper[5050]: I1123 16:19:46.992158 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f7345504-9bde-40fb-b98e-9939c35fc411-catalog-content\") pod \"community-operators-b25tk\" (UID: \"f7345504-9bde-40fb-b98e-9939c35fc411\") " pod="openshift-marketplace/community-operators-b25tk" Nov 23 16:19:47 crc kubenswrapper[5050]: I1123 16:19:47.094357 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f7345504-9bde-40fb-b98e-9939c35fc411-utilities\") pod \"community-operators-b25tk\" (UID: \"f7345504-9bde-40fb-b98e-9939c35fc411\") " pod="openshift-marketplace/community-operators-b25tk" Nov 23 16:19:47 crc kubenswrapper[5050]: I1123 16:19:47.094594 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pbdm8\" (UniqueName: \"kubernetes.io/projected/f7345504-9bde-40fb-b98e-9939c35fc411-kube-api-access-pbdm8\") pod \"community-operators-b25tk\" (UID: \"f7345504-9bde-40fb-b98e-9939c35fc411\") " 
pod="openshift-marketplace/community-operators-b25tk" Nov 23 16:19:47 crc kubenswrapper[5050]: I1123 16:19:47.094680 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f7345504-9bde-40fb-b98e-9939c35fc411-catalog-content\") pod \"community-operators-b25tk\" (UID: \"f7345504-9bde-40fb-b98e-9939c35fc411\") " pod="openshift-marketplace/community-operators-b25tk" Nov 23 16:19:47 crc kubenswrapper[5050]: I1123 16:19:47.095055 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f7345504-9bde-40fb-b98e-9939c35fc411-utilities\") pod \"community-operators-b25tk\" (UID: \"f7345504-9bde-40fb-b98e-9939c35fc411\") " pod="openshift-marketplace/community-operators-b25tk" Nov 23 16:19:47 crc kubenswrapper[5050]: I1123 16:19:47.095647 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f7345504-9bde-40fb-b98e-9939c35fc411-catalog-content\") pod \"community-operators-b25tk\" (UID: \"f7345504-9bde-40fb-b98e-9939c35fc411\") " pod="openshift-marketplace/community-operators-b25tk" Nov 23 16:19:47 crc kubenswrapper[5050]: I1123 16:19:47.125809 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pbdm8\" (UniqueName: \"kubernetes.io/projected/f7345504-9bde-40fb-b98e-9939c35fc411-kube-api-access-pbdm8\") pod \"community-operators-b25tk\" (UID: \"f7345504-9bde-40fb-b98e-9939c35fc411\") " pod="openshift-marketplace/community-operators-b25tk" Nov 23 16:19:47 crc kubenswrapper[5050]: I1123 16:19:47.249840 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-b25tk" Nov 23 16:19:47 crc kubenswrapper[5050]: I1123 16:19:47.652053 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-b25tk"] Nov 23 16:19:48 crc kubenswrapper[5050]: I1123 16:19:48.073200 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-b25tk" event={"ID":"f7345504-9bde-40fb-b98e-9939c35fc411","Type":"ContainerStarted","Data":"62585c711bff5b8ca1220ef0c9b7b21a6a55d97e47fc2c899839e2c92f0e2407"} Nov 23 16:19:49 crc kubenswrapper[5050]: I1123 16:19:49.087172 5050 generic.go:334] "Generic (PLEG): container finished" podID="f7345504-9bde-40fb-b98e-9939c35fc411" containerID="dd9b7734ab6b9f3673fd13e930e938f34b91641d71ea9c5834427236393cab61" exitCode=0 Nov 23 16:19:49 crc kubenswrapper[5050]: I1123 16:19:49.087298 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-b25tk" event={"ID":"f7345504-9bde-40fb-b98e-9939c35fc411","Type":"ContainerDied","Data":"dd9b7734ab6b9f3673fd13e930e938f34b91641d71ea9c5834427236393cab61"} Nov 23 16:19:50 crc kubenswrapper[5050]: I1123 16:19:50.099787 5050 generic.go:334] "Generic (PLEG): container finished" podID="f7345504-9bde-40fb-b98e-9939c35fc411" containerID="c49ba611418d8c675fdd2119dea20ee597de1ddfaf5c57ddaf45f1f9e2997c2f" exitCode=0 Nov 23 16:19:50 crc kubenswrapper[5050]: I1123 16:19:50.099886 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-b25tk" event={"ID":"f7345504-9bde-40fb-b98e-9939c35fc411","Type":"ContainerDied","Data":"c49ba611418d8c675fdd2119dea20ee597de1ddfaf5c57ddaf45f1f9e2997c2f"} Nov 23 16:19:52 crc kubenswrapper[5050]: I1123 16:19:52.129600 5050 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-b25tk" event={"ID":"f7345504-9bde-40fb-b98e-9939c35fc411","Type":"ContainerStarted","Data":"2860bef6c2b9f5a66ad9ae0b92cc8097ee196a6a21302ad08f9f316797a4ee40"} Nov 23 16:19:52 crc kubenswrapper[5050]: I1123 16:19:52.162983 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-b25tk" podStartSLOduration=3.7164549190000002 podStartE2EDuration="6.162957405s" podCreationTimestamp="2025-11-23 16:19:46 +0000 UTC" firstStartedPulling="2025-11-23 16:19:49.09176017 +0000 UTC m=+5884.258756695" lastFinishedPulling="2025-11-23 16:19:51.538262696 +0000 UTC m=+5886.705259181" observedRunningTime="2025-11-23 16:19:52.1535325 +0000 UTC m=+5887.320528985" watchObservedRunningTime="2025-11-23 16:19:52.162957405 +0000 UTC m=+5887.329953890" Nov 23 16:19:55 crc kubenswrapper[5050]: I1123 16:19:55.360889 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-healthmanager-z4gjs"] Nov 23 16:19:55 crc kubenswrapper[5050]: I1123 16:19:55.363267 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-healthmanager-z4gjs" Nov 23 16:19:55 crc kubenswrapper[5050]: I1123 16:19:55.367169 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-healthmanager-config-data" Nov 23 16:19:55 crc kubenswrapper[5050]: I1123 16:19:55.367384 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-certs-secret" Nov 23 16:19:55 crc kubenswrapper[5050]: I1123 16:19:55.375894 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-healthmanager-scripts" Nov 23 16:19:55 crc kubenswrapper[5050]: I1123 16:19:55.385841 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-healthmanager-z4gjs"] Nov 23 16:19:55 crc kubenswrapper[5050]: I1123 16:19:55.413342 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hm-ports\" (UniqueName: \"kubernetes.io/configmap/4123706e-1979-4f48-96d7-bd5b53ebd04c-hm-ports\") pod \"octavia-healthmanager-z4gjs\" (UID: \"4123706e-1979-4f48-96d7-bd5b53ebd04c\") " pod="openstack/octavia-healthmanager-z4gjs" Nov 23 16:19:55 crc kubenswrapper[5050]: I1123 16:19:55.413587 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4123706e-1979-4f48-96d7-bd5b53ebd04c-scripts\") pod \"octavia-healthmanager-z4gjs\" (UID: \"4123706e-1979-4f48-96d7-bd5b53ebd04c\") " pod="openstack/octavia-healthmanager-z4gjs" Nov 23 16:19:55 crc kubenswrapper[5050]: I1123 16:19:55.413684 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/4123706e-1979-4f48-96d7-bd5b53ebd04c-config-data-merged\") pod \"octavia-healthmanager-z4gjs\" (UID: \"4123706e-1979-4f48-96d7-bd5b53ebd04c\") " pod="openstack/octavia-healthmanager-z4gjs" Nov 23 16:19:55 crc kubenswrapper[5050]: I1123 16:19:55.413742 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4123706e-1979-4f48-96d7-bd5b53ebd04c-combined-ca-bundle\") pod \"octavia-healthmanager-z4gjs\" (UID: \"4123706e-1979-4f48-96d7-bd5b53ebd04c\") " pod="openstack/octavia-healthmanager-z4gjs" Nov 23 16:19:55 crc kubenswrapper[5050]: 
I1123 16:19:55.413787 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4123706e-1979-4f48-96d7-bd5b53ebd04c-config-data\") pod \"octavia-healthmanager-z4gjs\" (UID: \"4123706e-1979-4f48-96d7-bd5b53ebd04c\") " pod="openstack/octavia-healthmanager-z4gjs" Nov 23 16:19:55 crc kubenswrapper[5050]: I1123 16:19:55.414079 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"amphora-certs\" (UniqueName: \"kubernetes.io/secret/4123706e-1979-4f48-96d7-bd5b53ebd04c-amphora-certs\") pod \"octavia-healthmanager-z4gjs\" (UID: \"4123706e-1979-4f48-96d7-bd5b53ebd04c\") " pod="openstack/octavia-healthmanager-z4gjs" Nov 23 16:19:55 crc kubenswrapper[5050]: I1123 16:19:55.516424 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4123706e-1979-4f48-96d7-bd5b53ebd04c-scripts\") pod \"octavia-healthmanager-z4gjs\" (UID: \"4123706e-1979-4f48-96d7-bd5b53ebd04c\") " pod="openstack/octavia-healthmanager-z4gjs" Nov 23 16:19:55 crc kubenswrapper[5050]: I1123 16:19:55.516592 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/4123706e-1979-4f48-96d7-bd5b53ebd04c-config-data-merged\") pod \"octavia-healthmanager-z4gjs\" (UID: \"4123706e-1979-4f48-96d7-bd5b53ebd04c\") " pod="openstack/octavia-healthmanager-z4gjs" Nov 23 16:19:55 crc kubenswrapper[5050]: I1123 16:19:55.516668 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4123706e-1979-4f48-96d7-bd5b53ebd04c-combined-ca-bundle\") pod \"octavia-healthmanager-z4gjs\" (UID: \"4123706e-1979-4f48-96d7-bd5b53ebd04c\") " pod="openstack/octavia-healthmanager-z4gjs" Nov 23 16:19:55 crc kubenswrapper[5050]: I1123 16:19:55.516696 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4123706e-1979-4f48-96d7-bd5b53ebd04c-config-data\") pod \"octavia-healthmanager-z4gjs\" (UID: \"4123706e-1979-4f48-96d7-bd5b53ebd04c\") " pod="openstack/octavia-healthmanager-z4gjs" Nov 23 16:19:55 crc kubenswrapper[5050]: I1123 16:19:55.516991 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"amphora-certs\" (UniqueName: \"kubernetes.io/secret/4123706e-1979-4f48-96d7-bd5b53ebd04c-amphora-certs\") pod \"octavia-healthmanager-z4gjs\" (UID: \"4123706e-1979-4f48-96d7-bd5b53ebd04c\") " pod="openstack/octavia-healthmanager-z4gjs" Nov 23 16:19:55 crc kubenswrapper[5050]: I1123 16:19:55.517264 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hm-ports\" (UniqueName: \"kubernetes.io/configmap/4123706e-1979-4f48-96d7-bd5b53ebd04c-hm-ports\") pod \"octavia-healthmanager-z4gjs\" (UID: \"4123706e-1979-4f48-96d7-bd5b53ebd04c\") " pod="openstack/octavia-healthmanager-z4gjs" Nov 23 16:19:55 crc kubenswrapper[5050]: I1123 16:19:55.517788 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/4123706e-1979-4f48-96d7-bd5b53ebd04c-config-data-merged\") pod \"octavia-healthmanager-z4gjs\" (UID: \"4123706e-1979-4f48-96d7-bd5b53ebd04c\") " pod="openstack/octavia-healthmanager-z4gjs" Nov 23 16:19:55 crc kubenswrapper[5050]: I1123 16:19:55.518492 5050 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"hm-ports\" (UniqueName: \"kubernetes.io/configmap/4123706e-1979-4f48-96d7-bd5b53ebd04c-hm-ports\") pod \"octavia-healthmanager-z4gjs\" (UID: \"4123706e-1979-4f48-96d7-bd5b53ebd04c\") " pod="openstack/octavia-healthmanager-z4gjs" Nov 23 16:19:55 crc kubenswrapper[5050]: I1123 16:19:55.524098 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4123706e-1979-4f48-96d7-bd5b53ebd04c-combined-ca-bundle\") pod \"octavia-healthmanager-z4gjs\" (UID: \"4123706e-1979-4f48-96d7-bd5b53ebd04c\") " pod="openstack/octavia-healthmanager-z4gjs" Nov 23 16:19:55 crc kubenswrapper[5050]: I1123 16:19:55.524477 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"amphora-certs\" (UniqueName: \"kubernetes.io/secret/4123706e-1979-4f48-96d7-bd5b53ebd04c-amphora-certs\") pod \"octavia-healthmanager-z4gjs\" (UID: \"4123706e-1979-4f48-96d7-bd5b53ebd04c\") " pod="openstack/octavia-healthmanager-z4gjs" Nov 23 16:19:55 crc kubenswrapper[5050]: I1123 16:19:55.527246 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4123706e-1979-4f48-96d7-bd5b53ebd04c-scripts\") pod \"octavia-healthmanager-z4gjs\" (UID: \"4123706e-1979-4f48-96d7-bd5b53ebd04c\") " pod="openstack/octavia-healthmanager-z4gjs" Nov 23 16:19:55 crc kubenswrapper[5050]: I1123 16:19:55.540399 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4123706e-1979-4f48-96d7-bd5b53ebd04c-config-data\") pod \"octavia-healthmanager-z4gjs\" (UID: \"4123706e-1979-4f48-96d7-bd5b53ebd04c\") " pod="openstack/octavia-healthmanager-z4gjs" Nov 23 16:19:55 crc kubenswrapper[5050]: I1123 16:19:55.701440 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-healthmanager-z4gjs" Nov 23 16:19:56 crc kubenswrapper[5050]: I1123 16:19:56.485890 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-healthmanager-z4gjs"] Nov 23 16:19:56 crc kubenswrapper[5050]: W1123 16:19:56.493198 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4123706e_1979_4f48_96d7_bd5b53ebd04c.slice/crio-d6c7dd28cda19844ccfa4eae2e18f158c922f185cd35846ab7aa5220a62e03ad WatchSource:0}: Error finding container d6c7dd28cda19844ccfa4eae2e18f158c922f185cd35846ab7aa5220a62e03ad: Status 404 returned error can't find the container with id d6c7dd28cda19844ccfa4eae2e18f158c922f185cd35846ab7aa5220a62e03ad Nov 23 16:19:57 crc kubenswrapper[5050]: I1123 16:19:57.212931 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-healthmanager-z4gjs" event={"ID":"4123706e-1979-4f48-96d7-bd5b53ebd04c","Type":"ContainerStarted","Data":"706fc87c9c720f1614330fe579f43b57bb905941800b604c979e3e143b2f3e8c"} Nov 23 16:19:57 crc kubenswrapper[5050]: I1123 16:19:57.213606 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-healthmanager-z4gjs" event={"ID":"4123706e-1979-4f48-96d7-bd5b53ebd04c","Type":"ContainerStarted","Data":"d6c7dd28cda19844ccfa4eae2e18f158c922f185cd35846ab7aa5220a62e03ad"} Nov 23 16:19:57 crc kubenswrapper[5050]: I1123 16:19:57.250241 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-b25tk" Nov 23 16:19:57 crc kubenswrapper[5050]: I1123 16:19:57.250715 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-b25tk" Nov 23 16:19:57 crc kubenswrapper[5050]: I1123 16:19:57.314555 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-b25tk" Nov 23 16:19:57 crc kubenswrapper[5050]: I1123 16:19:57.393764 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-housekeeping-96ckg"] Nov 23 16:19:57 crc kubenswrapper[5050]: I1123 16:19:57.396216 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-housekeeping-96ckg" Nov 23 16:19:57 crc kubenswrapper[5050]: I1123 16:19:57.401873 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-housekeeping-scripts" Nov 23 16:19:57 crc kubenswrapper[5050]: I1123 16:19:57.402175 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-housekeeping-config-data" Nov 23 16:19:57 crc kubenswrapper[5050]: I1123 16:19:57.411760 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-housekeeping-96ckg"] Nov 23 16:19:57 crc kubenswrapper[5050]: I1123 16:19:57.471379 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/31208d48-e815-461a-b520-436b71bc90ce-scripts\") pod \"octavia-housekeeping-96ckg\" (UID: \"31208d48-e815-461a-b520-436b71bc90ce\") " pod="openstack/octavia-housekeeping-96ckg" Nov 23 16:19:57 crc kubenswrapper[5050]: I1123 16:19:57.471489 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/31208d48-e815-461a-b520-436b71bc90ce-combined-ca-bundle\") pod \"octavia-housekeeping-96ckg\" (UID: \"31208d48-e815-461a-b520-436b71bc90ce\") " pod="openstack/octavia-housekeeping-96ckg" Nov 23 16:19:57 crc kubenswrapper[5050]: I1123 16:19:57.471518 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/31208d48-e815-461a-b520-436b71bc90ce-config-data-merged\") pod \"octavia-housekeeping-96ckg\" (UID: \"31208d48-e815-461a-b520-436b71bc90ce\") " pod="openstack/octavia-housekeeping-96ckg" Nov 23 16:19:57 crc kubenswrapper[5050]: I1123 16:19:57.471547 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"amphora-certs\" (UniqueName: \"kubernetes.io/secret/31208d48-e815-461a-b520-436b71bc90ce-amphora-certs\") pod \"octavia-housekeeping-96ckg\" (UID: \"31208d48-e815-461a-b520-436b71bc90ce\") " pod="openstack/octavia-housekeeping-96ckg" Nov 23 16:19:57 crc kubenswrapper[5050]: I1123 16:19:57.471767 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hm-ports\" (UniqueName: \"kubernetes.io/configmap/31208d48-e815-461a-b520-436b71bc90ce-hm-ports\") pod \"octavia-housekeeping-96ckg\" (UID: \"31208d48-e815-461a-b520-436b71bc90ce\") " pod="openstack/octavia-housekeeping-96ckg" Nov 23 16:19:57 crc kubenswrapper[5050]: I1123 16:19:57.471939 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/31208d48-e815-461a-b520-436b71bc90ce-config-data\") pod \"octavia-housekeeping-96ckg\" (UID: \"31208d48-e815-461a-b520-436b71bc90ce\") " pod="openstack/octavia-housekeeping-96ckg" Nov 23 16:19:57 crc kubenswrapper[5050]: I1123 16:19:57.573564 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/31208d48-e815-461a-b520-436b71bc90ce-config-data\") pod \"octavia-housekeeping-96ckg\" (UID: \"31208d48-e815-461a-b520-436b71bc90ce\") " pod="openstack/octavia-housekeeping-96ckg" Nov 23 16:19:57 crc kubenswrapper[5050]: I1123 16:19:57.573706 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/31208d48-e815-461a-b520-436b71bc90ce-scripts\") pod \"octavia-housekeeping-96ckg\" (UID: \"31208d48-e815-461a-b520-436b71bc90ce\") " pod="openstack/octavia-housekeeping-96ckg" Nov 23 16:19:57 crc kubenswrapper[5050]: I1123 16:19:57.573821 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/31208d48-e815-461a-b520-436b71bc90ce-combined-ca-bundle\") pod \"octavia-housekeeping-96ckg\" (UID: \"31208d48-e815-461a-b520-436b71bc90ce\") " pod="openstack/octavia-housekeeping-96ckg" Nov 23 16:19:57 crc kubenswrapper[5050]: I1123 16:19:57.573852 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/31208d48-e815-461a-b520-436b71bc90ce-config-data-merged\") pod \"octavia-housekeeping-96ckg\" (UID: \"31208d48-e815-461a-b520-436b71bc90ce\") " pod="openstack/octavia-housekeeping-96ckg" Nov 23 16:19:57 crc kubenswrapper[5050]: I1123 16:19:57.573887 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"amphora-certs\" (UniqueName: \"kubernetes.io/secret/31208d48-e815-461a-b520-436b71bc90ce-amphora-certs\") pod \"octavia-housekeeping-96ckg\" (UID: \"31208d48-e815-461a-b520-436b71bc90ce\") " pod="openstack/octavia-housekeeping-96ckg" Nov 23 16:19:57 crc kubenswrapper[5050]: I1123 16:19:57.573950 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hm-ports\" (UniqueName: \"kubernetes.io/configmap/31208d48-e815-461a-b520-436b71bc90ce-hm-ports\") pod \"octavia-housekeeping-96ckg\" (UID: \"31208d48-e815-461a-b520-436b71bc90ce\") " pod="openstack/octavia-housekeeping-96ckg" Nov 23 16:19:57 crc kubenswrapper[5050]: I1123 16:19:57.576084 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/31208d48-e815-461a-b520-436b71bc90ce-config-data-merged\") pod \"octavia-housekeeping-96ckg\" (UID: \"31208d48-e815-461a-b520-436b71bc90ce\") " pod="openstack/octavia-housekeeping-96ckg" Nov 23 16:19:57 crc kubenswrapper[5050]: I1123 16:19:57.576748 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hm-ports\" (UniqueName: \"kubernetes.io/configmap/31208d48-e815-461a-b520-436b71bc90ce-hm-ports\") pod \"octavia-housekeeping-96ckg\" (UID: \"31208d48-e815-461a-b520-436b71bc90ce\") " pod="openstack/octavia-housekeeping-96ckg" Nov 23 16:19:57 crc kubenswrapper[5050]: I1123 16:19:57.579832 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/31208d48-e815-461a-b520-436b71bc90ce-config-data\") pod \"octavia-housekeeping-96ckg\" (UID: \"31208d48-e815-461a-b520-436b71bc90ce\") " pod="openstack/octavia-housekeeping-96ckg" Nov 23 16:19:57 crc kubenswrapper[5050]: I1123 16:19:57.581051 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/31208d48-e815-461a-b520-436b71bc90ce-scripts\") pod \"octavia-housekeeping-96ckg\" (UID: \"31208d48-e815-461a-b520-436b71bc90ce\") " pod="openstack/octavia-housekeeping-96ckg" Nov 23 16:19:57 crc kubenswrapper[5050]: I1123 16:19:57.581941 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"amphora-certs\" (UniqueName: \"kubernetes.io/secret/31208d48-e815-461a-b520-436b71bc90ce-amphora-certs\") pod \"octavia-housekeeping-96ckg\" (UID: \"31208d48-e815-461a-b520-436b71bc90ce\") " 
pod="openstack/octavia-housekeeping-96ckg" Nov 23 16:19:57 crc kubenswrapper[5050]: I1123 16:19:57.587588 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/31208d48-e815-461a-b520-436b71bc90ce-combined-ca-bundle\") pod \"octavia-housekeeping-96ckg\" (UID: \"31208d48-e815-461a-b520-436b71bc90ce\") " pod="openstack/octavia-housekeeping-96ckg" Nov 23 16:19:57 crc kubenswrapper[5050]: I1123 16:19:57.725715 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-housekeeping-96ckg" Nov 23 16:19:58 crc kubenswrapper[5050]: I1123 16:19:58.298347 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-housekeeping-96ckg"] Nov 23 16:19:58 crc kubenswrapper[5050]: I1123 16:19:58.301468 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-b25tk" Nov 23 16:19:58 crc kubenswrapper[5050]: I1123 16:19:58.362766 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-b25tk"] Nov 23 16:19:58 crc kubenswrapper[5050]: I1123 16:19:58.577182 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-worker-gtlmz"] Nov 23 16:19:58 crc kubenswrapper[5050]: I1123 16:19:58.593135 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-worker-gtlmz" Nov 23 16:19:58 crc kubenswrapper[5050]: I1123 16:19:58.597120 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-worker-scripts" Nov 23 16:19:58 crc kubenswrapper[5050]: I1123 16:19:58.598034 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-worker-config-data" Nov 23 16:19:58 crc kubenswrapper[5050]: I1123 16:19:58.604566 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-worker-gtlmz"] Nov 23 16:19:58 crc kubenswrapper[5050]: I1123 16:19:58.608631 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8cf1b9f8-28c6-4b6e-b268-cfe4dee35d9d-config-data\") pod \"octavia-worker-gtlmz\" (UID: \"8cf1b9f8-28c6-4b6e-b268-cfe4dee35d9d\") " pod="openstack/octavia-worker-gtlmz" Nov 23 16:19:58 crc kubenswrapper[5050]: I1123 16:19:58.608776 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8cf1b9f8-28c6-4b6e-b268-cfe4dee35d9d-combined-ca-bundle\") pod \"octavia-worker-gtlmz\" (UID: \"8cf1b9f8-28c6-4b6e-b268-cfe4dee35d9d\") " pod="openstack/octavia-worker-gtlmz" Nov 23 16:19:58 crc kubenswrapper[5050]: I1123 16:19:58.609178 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hm-ports\" (UniqueName: \"kubernetes.io/configmap/8cf1b9f8-28c6-4b6e-b268-cfe4dee35d9d-hm-ports\") pod \"octavia-worker-gtlmz\" (UID: \"8cf1b9f8-28c6-4b6e-b268-cfe4dee35d9d\") " pod="openstack/octavia-worker-gtlmz" Nov 23 16:19:58 crc kubenswrapper[5050]: I1123 16:19:58.609267 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/8cf1b9f8-28c6-4b6e-b268-cfe4dee35d9d-config-data-merged\") pod \"octavia-worker-gtlmz\" (UID: \"8cf1b9f8-28c6-4b6e-b268-cfe4dee35d9d\") " pod="openstack/octavia-worker-gtlmz" Nov 23 16:19:58 crc 
kubenswrapper[5050]: I1123 16:19:58.609364 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"amphora-certs\" (UniqueName: \"kubernetes.io/secret/8cf1b9f8-28c6-4b6e-b268-cfe4dee35d9d-amphora-certs\") pod \"octavia-worker-gtlmz\" (UID: \"8cf1b9f8-28c6-4b6e-b268-cfe4dee35d9d\") " pod="openstack/octavia-worker-gtlmz" Nov 23 16:19:58 crc kubenswrapper[5050]: I1123 16:19:58.609426 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8cf1b9f8-28c6-4b6e-b268-cfe4dee35d9d-scripts\") pod \"octavia-worker-gtlmz\" (UID: \"8cf1b9f8-28c6-4b6e-b268-cfe4dee35d9d\") " pod="openstack/octavia-worker-gtlmz" Nov 23 16:19:58 crc kubenswrapper[5050]: I1123 16:19:58.729928 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hm-ports\" (UniqueName: \"kubernetes.io/configmap/8cf1b9f8-28c6-4b6e-b268-cfe4dee35d9d-hm-ports\") pod \"octavia-worker-gtlmz\" (UID: \"8cf1b9f8-28c6-4b6e-b268-cfe4dee35d9d\") " pod="openstack/octavia-worker-gtlmz" Nov 23 16:19:58 crc kubenswrapper[5050]: I1123 16:19:58.730112 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/8cf1b9f8-28c6-4b6e-b268-cfe4dee35d9d-config-data-merged\") pod \"octavia-worker-gtlmz\" (UID: \"8cf1b9f8-28c6-4b6e-b268-cfe4dee35d9d\") " pod="openstack/octavia-worker-gtlmz" Nov 23 16:19:58 crc kubenswrapper[5050]: I1123 16:19:58.730177 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"amphora-certs\" (UniqueName: \"kubernetes.io/secret/8cf1b9f8-28c6-4b6e-b268-cfe4dee35d9d-amphora-certs\") pod \"octavia-worker-gtlmz\" (UID: \"8cf1b9f8-28c6-4b6e-b268-cfe4dee35d9d\") " pod="openstack/octavia-worker-gtlmz" Nov 23 16:19:58 crc kubenswrapper[5050]: I1123 16:19:58.730206 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8cf1b9f8-28c6-4b6e-b268-cfe4dee35d9d-scripts\") pod \"octavia-worker-gtlmz\" (UID: \"8cf1b9f8-28c6-4b6e-b268-cfe4dee35d9d\") " pod="openstack/octavia-worker-gtlmz" Nov 23 16:19:58 crc kubenswrapper[5050]: I1123 16:19:58.730316 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8cf1b9f8-28c6-4b6e-b268-cfe4dee35d9d-config-data\") pod \"octavia-worker-gtlmz\" (UID: \"8cf1b9f8-28c6-4b6e-b268-cfe4dee35d9d\") " pod="openstack/octavia-worker-gtlmz" Nov 23 16:19:58 crc kubenswrapper[5050]: I1123 16:19:58.730371 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8cf1b9f8-28c6-4b6e-b268-cfe4dee35d9d-combined-ca-bundle\") pod \"octavia-worker-gtlmz\" (UID: \"8cf1b9f8-28c6-4b6e-b268-cfe4dee35d9d\") " pod="openstack/octavia-worker-gtlmz" Nov 23 16:19:58 crc kubenswrapper[5050]: I1123 16:19:58.731757 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/8cf1b9f8-28c6-4b6e-b268-cfe4dee35d9d-config-data-merged\") pod \"octavia-worker-gtlmz\" (UID: \"8cf1b9f8-28c6-4b6e-b268-cfe4dee35d9d\") " pod="openstack/octavia-worker-gtlmz" Nov 23 16:19:58 crc kubenswrapper[5050]: I1123 16:19:58.731793 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hm-ports\" (UniqueName: 
\"kubernetes.io/configmap/8cf1b9f8-28c6-4b6e-b268-cfe4dee35d9d-hm-ports\") pod \"octavia-worker-gtlmz\" (UID: \"8cf1b9f8-28c6-4b6e-b268-cfe4dee35d9d\") " pod="openstack/octavia-worker-gtlmz" Nov 23 16:19:58 crc kubenswrapper[5050]: I1123 16:19:58.737580 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8cf1b9f8-28c6-4b6e-b268-cfe4dee35d9d-combined-ca-bundle\") pod \"octavia-worker-gtlmz\" (UID: \"8cf1b9f8-28c6-4b6e-b268-cfe4dee35d9d\") " pod="openstack/octavia-worker-gtlmz" Nov 23 16:19:58 crc kubenswrapper[5050]: I1123 16:19:58.738699 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8cf1b9f8-28c6-4b6e-b268-cfe4dee35d9d-config-data\") pod \"octavia-worker-gtlmz\" (UID: \"8cf1b9f8-28c6-4b6e-b268-cfe4dee35d9d\") " pod="openstack/octavia-worker-gtlmz" Nov 23 16:19:58 crc kubenswrapper[5050]: I1123 16:19:58.741898 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"amphora-certs\" (UniqueName: \"kubernetes.io/secret/8cf1b9f8-28c6-4b6e-b268-cfe4dee35d9d-amphora-certs\") pod \"octavia-worker-gtlmz\" (UID: \"8cf1b9f8-28c6-4b6e-b268-cfe4dee35d9d\") " pod="openstack/octavia-worker-gtlmz" Nov 23 16:19:58 crc kubenswrapper[5050]: I1123 16:19:58.752154 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8cf1b9f8-28c6-4b6e-b268-cfe4dee35d9d-scripts\") pod \"octavia-worker-gtlmz\" (UID: \"8cf1b9f8-28c6-4b6e-b268-cfe4dee35d9d\") " pod="openstack/octavia-worker-gtlmz" Nov 23 16:19:58 crc kubenswrapper[5050]: I1123 16:19:58.946524 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-worker-gtlmz" Nov 23 16:19:59 crc kubenswrapper[5050]: I1123 16:19:59.242785 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-housekeeping-96ckg" event={"ID":"31208d48-e815-461a-b520-436b71bc90ce","Type":"ContainerStarted","Data":"7dbfc398e8509852d92bc6e00dd5e2d885fac371131d6f3855ec4825f54dcd52"} Nov 23 16:19:59 crc kubenswrapper[5050]: I1123 16:19:59.246988 5050 generic.go:334] "Generic (PLEG): container finished" podID="4123706e-1979-4f48-96d7-bd5b53ebd04c" containerID="706fc87c9c720f1614330fe579f43b57bb905941800b604c979e3e143b2f3e8c" exitCode=0 Nov 23 16:19:59 crc kubenswrapper[5050]: I1123 16:19:59.247020 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-healthmanager-z4gjs" event={"ID":"4123706e-1979-4f48-96d7-bd5b53ebd04c","Type":"ContainerDied","Data":"706fc87c9c720f1614330fe579f43b57bb905941800b604c979e3e143b2f3e8c"} Nov 23 16:19:59 crc kubenswrapper[5050]: W1123 16:19:59.564557 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8cf1b9f8_28c6_4b6e_b268_cfe4dee35d9d.slice/crio-d4c96faadbe674ce39beeaf5ee9222f0e32485012491f975a6d0a4f7a21c1d7b WatchSource:0}: Error finding container d4c96faadbe674ce39beeaf5ee9222f0e32485012491f975a6d0a4f7a21c1d7b: Status 404 returned error can't find the container with id d4c96faadbe674ce39beeaf5ee9222f0e32485012491f975a6d0a4f7a21c1d7b Nov 23 16:19:59 crc kubenswrapper[5050]: I1123 16:19:59.565922 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-worker-gtlmz"] Nov 23 16:20:00 crc kubenswrapper[5050]: I1123 16:20:00.259000 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-worker-gtlmz" 
event={"ID":"8cf1b9f8-28c6-4b6e-b268-cfe4dee35d9d","Type":"ContainerStarted","Data":"d4c96faadbe674ce39beeaf5ee9222f0e32485012491f975a6d0a4f7a21c1d7b"} Nov 23 16:20:00 crc kubenswrapper[5050]: I1123 16:20:00.262089 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-healthmanager-z4gjs" event={"ID":"4123706e-1979-4f48-96d7-bd5b53ebd04c","Type":"ContainerStarted","Data":"8db14e9ac1fa90fed042e82bbb5be044a39874651dff9c063fc378fba681a09e"} Nov 23 16:20:00 crc kubenswrapper[5050]: I1123 16:20:00.262216 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-b25tk" podUID="f7345504-9bde-40fb-b98e-9939c35fc411" containerName="registry-server" containerID="cri-o://2860bef6c2b9f5a66ad9ae0b92cc8097ee196a6a21302ad08f9f316797a4ee40" gracePeriod=2 Nov 23 16:20:00 crc kubenswrapper[5050]: I1123 16:20:00.262732 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/octavia-healthmanager-z4gjs" Nov 23 16:20:00 crc kubenswrapper[5050]: I1123 16:20:00.548607 5050 scope.go:117] "RemoveContainer" containerID="131b4bb8956deb8c3bd7bd951e055fca3b66c01a0141c3f85cb0b1152065de00" Nov 23 16:20:01 crc kubenswrapper[5050]: I1123 16:20:01.126024 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-b25tk" Nov 23 16:20:01 crc kubenswrapper[5050]: I1123 16:20:01.164590 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/octavia-healthmanager-z4gjs" podStartSLOduration=6.164565893 podStartE2EDuration="6.164565893s" podCreationTimestamp="2025-11-23 16:19:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 16:20:00.292709882 +0000 UTC m=+5895.459706377" watchObservedRunningTime="2025-11-23 16:20:01.164565893 +0000 UTC m=+5896.331562378" Nov 23 16:20:01 crc kubenswrapper[5050]: I1123 16:20:01.200249 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pbdm8\" (UniqueName: \"kubernetes.io/projected/f7345504-9bde-40fb-b98e-9939c35fc411-kube-api-access-pbdm8\") pod \"f7345504-9bde-40fb-b98e-9939c35fc411\" (UID: \"f7345504-9bde-40fb-b98e-9939c35fc411\") " Nov 23 16:20:01 crc kubenswrapper[5050]: I1123 16:20:01.200521 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f7345504-9bde-40fb-b98e-9939c35fc411-utilities\") pod \"f7345504-9bde-40fb-b98e-9939c35fc411\" (UID: \"f7345504-9bde-40fb-b98e-9939c35fc411\") " Nov 23 16:20:01 crc kubenswrapper[5050]: I1123 16:20:01.200591 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f7345504-9bde-40fb-b98e-9939c35fc411-catalog-content\") pod \"f7345504-9bde-40fb-b98e-9939c35fc411\" (UID: \"f7345504-9bde-40fb-b98e-9939c35fc411\") " Nov 23 16:20:01 crc kubenswrapper[5050]: I1123 16:20:01.201674 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f7345504-9bde-40fb-b98e-9939c35fc411-utilities" (OuterVolumeSpecName: "utilities") pod "f7345504-9bde-40fb-b98e-9939c35fc411" (UID: "f7345504-9bde-40fb-b98e-9939c35fc411"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 16:20:01 crc kubenswrapper[5050]: I1123 16:20:01.207784 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f7345504-9bde-40fb-b98e-9939c35fc411-kube-api-access-pbdm8" (OuterVolumeSpecName: "kube-api-access-pbdm8") pod "f7345504-9bde-40fb-b98e-9939c35fc411" (UID: "f7345504-9bde-40fb-b98e-9939c35fc411"). InnerVolumeSpecName "kube-api-access-pbdm8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 16:20:01 crc kubenswrapper[5050]: I1123 16:20:01.251228 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f7345504-9bde-40fb-b98e-9939c35fc411-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f7345504-9bde-40fb-b98e-9939c35fc411" (UID: "f7345504-9bde-40fb-b98e-9939c35fc411"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 16:20:01 crc kubenswrapper[5050]: I1123 16:20:01.278096 5050 generic.go:334] "Generic (PLEG): container finished" podID="f7345504-9bde-40fb-b98e-9939c35fc411" containerID="2860bef6c2b9f5a66ad9ae0b92cc8097ee196a6a21302ad08f9f316797a4ee40" exitCode=0 Nov 23 16:20:01 crc kubenswrapper[5050]: I1123 16:20:01.278254 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-b25tk" Nov 23 16:20:01 crc kubenswrapper[5050]: I1123 16:20:01.278274 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-b25tk" event={"ID":"f7345504-9bde-40fb-b98e-9939c35fc411","Type":"ContainerDied","Data":"2860bef6c2b9f5a66ad9ae0b92cc8097ee196a6a21302ad08f9f316797a4ee40"} Nov 23 16:20:01 crc kubenswrapper[5050]: I1123 16:20:01.278764 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-b25tk" event={"ID":"f7345504-9bde-40fb-b98e-9939c35fc411","Type":"ContainerDied","Data":"62585c711bff5b8ca1220ef0c9b7b21a6a55d97e47fc2c899839e2c92f0e2407"} Nov 23 16:20:01 crc kubenswrapper[5050]: I1123 16:20:01.278806 5050 scope.go:117] "RemoveContainer" containerID="2860bef6c2b9f5a66ad9ae0b92cc8097ee196a6a21302ad08f9f316797a4ee40" Nov 23 16:20:01 crc kubenswrapper[5050]: I1123 16:20:01.286281 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" event={"ID":"1d998909-9470-47ef-87e8-b34f0473682f","Type":"ContainerStarted","Data":"6c3e7a6f5a62c9666075227e68f8208f00db4be7096dab6e83261908dedc75a8"} Nov 23 16:20:01 crc kubenswrapper[5050]: I1123 16:20:01.304419 5050 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f7345504-9bde-40fb-b98e-9939c35fc411-utilities\") on node \"crc\" DevicePath \"\"" Nov 23 16:20:01 crc kubenswrapper[5050]: I1123 16:20:01.304571 5050 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f7345504-9bde-40fb-b98e-9939c35fc411-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 23 16:20:01 crc kubenswrapper[5050]: I1123 16:20:01.304592 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pbdm8\" (UniqueName: \"kubernetes.io/projected/f7345504-9bde-40fb-b98e-9939c35fc411-kube-api-access-pbdm8\") on node \"crc\" DevicePath \"\"" Nov 23 16:20:01 crc kubenswrapper[5050]: I1123 16:20:01.324810 5050 scope.go:117] "RemoveContainer" 
containerID="c49ba611418d8c675fdd2119dea20ee597de1ddfaf5c57ddaf45f1f9e2997c2f" Nov 23 16:20:01 crc kubenswrapper[5050]: I1123 16:20:01.338837 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-b25tk"] Nov 23 16:20:01 crc kubenswrapper[5050]: I1123 16:20:01.351542 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-b25tk"] Nov 23 16:20:01 crc kubenswrapper[5050]: I1123 16:20:01.393978 5050 scope.go:117] "RemoveContainer" containerID="dd9b7734ab6b9f3673fd13e930e938f34b91641d71ea9c5834427236393cab61" Nov 23 16:20:01 crc kubenswrapper[5050]: I1123 16:20:01.427428 5050 scope.go:117] "RemoveContainer" containerID="2860bef6c2b9f5a66ad9ae0b92cc8097ee196a6a21302ad08f9f316797a4ee40" Nov 23 16:20:01 crc kubenswrapper[5050]: E1123 16:20:01.429743 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2860bef6c2b9f5a66ad9ae0b92cc8097ee196a6a21302ad08f9f316797a4ee40\": container with ID starting with 2860bef6c2b9f5a66ad9ae0b92cc8097ee196a6a21302ad08f9f316797a4ee40 not found: ID does not exist" containerID="2860bef6c2b9f5a66ad9ae0b92cc8097ee196a6a21302ad08f9f316797a4ee40" Nov 23 16:20:01 crc kubenswrapper[5050]: I1123 16:20:01.429792 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2860bef6c2b9f5a66ad9ae0b92cc8097ee196a6a21302ad08f9f316797a4ee40"} err="failed to get container status \"2860bef6c2b9f5a66ad9ae0b92cc8097ee196a6a21302ad08f9f316797a4ee40\": rpc error: code = NotFound desc = could not find container \"2860bef6c2b9f5a66ad9ae0b92cc8097ee196a6a21302ad08f9f316797a4ee40\": container with ID starting with 2860bef6c2b9f5a66ad9ae0b92cc8097ee196a6a21302ad08f9f316797a4ee40 not found: ID does not exist" Nov 23 16:20:01 crc kubenswrapper[5050]: I1123 16:20:01.429822 5050 scope.go:117] "RemoveContainer" containerID="c49ba611418d8c675fdd2119dea20ee597de1ddfaf5c57ddaf45f1f9e2997c2f" Nov 23 16:20:01 crc kubenswrapper[5050]: E1123 16:20:01.431694 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c49ba611418d8c675fdd2119dea20ee597de1ddfaf5c57ddaf45f1f9e2997c2f\": container with ID starting with c49ba611418d8c675fdd2119dea20ee597de1ddfaf5c57ddaf45f1f9e2997c2f not found: ID does not exist" containerID="c49ba611418d8c675fdd2119dea20ee597de1ddfaf5c57ddaf45f1f9e2997c2f" Nov 23 16:20:01 crc kubenswrapper[5050]: I1123 16:20:01.431774 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c49ba611418d8c675fdd2119dea20ee597de1ddfaf5c57ddaf45f1f9e2997c2f"} err="failed to get container status \"c49ba611418d8c675fdd2119dea20ee597de1ddfaf5c57ddaf45f1f9e2997c2f\": rpc error: code = NotFound desc = could not find container \"c49ba611418d8c675fdd2119dea20ee597de1ddfaf5c57ddaf45f1f9e2997c2f\": container with ID starting with c49ba611418d8c675fdd2119dea20ee597de1ddfaf5c57ddaf45f1f9e2997c2f not found: ID does not exist" Nov 23 16:20:01 crc kubenswrapper[5050]: I1123 16:20:01.431823 5050 scope.go:117] "RemoveContainer" containerID="dd9b7734ab6b9f3673fd13e930e938f34b91641d71ea9c5834427236393cab61" Nov 23 16:20:01 crc kubenswrapper[5050]: E1123 16:20:01.432140 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dd9b7734ab6b9f3673fd13e930e938f34b91641d71ea9c5834427236393cab61\": container with ID starting with 
Nov 23 16:20:01 crc kubenswrapper[5050]: E1123 16:20:01.432140 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dd9b7734ab6b9f3673fd13e930e938f34b91641d71ea9c5834427236393cab61\": container with ID starting with dd9b7734ab6b9f3673fd13e930e938f34b91641d71ea9c5834427236393cab61 not found: ID does not exist" containerID="dd9b7734ab6b9f3673fd13e930e938f34b91641d71ea9c5834427236393cab61"
Nov 23 16:20:01 crc kubenswrapper[5050]: I1123 16:20:01.432161 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dd9b7734ab6b9f3673fd13e930e938f34b91641d71ea9c5834427236393cab61"} err="failed to get container status \"dd9b7734ab6b9f3673fd13e930e938f34b91641d71ea9c5834427236393cab61\": rpc error: code = NotFound desc = could not find container \"dd9b7734ab6b9f3673fd13e930e938f34b91641d71ea9c5834427236393cab61\": container with ID starting with dd9b7734ab6b9f3673fd13e930e938f34b91641d71ea9c5834427236393cab61 not found: ID does not exist"
Nov 23 16:20:01 crc kubenswrapper[5050]: I1123 16:20:01.562499 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f7345504-9bde-40fb-b98e-9939c35fc411" path="/var/lib/kubelet/pods/f7345504-9bde-40fb-b98e-9939c35fc411/volumes"
Nov 23 16:20:02 crc kubenswrapper[5050]: I1123 16:20:02.307164 5050 generic.go:334] "Generic (PLEG): container finished" podID="31208d48-e815-461a-b520-436b71bc90ce" containerID="08b7eaa1d9d8949a196cbd7e47cad5d2a0027cccc6bae89eb7ce54ac748283f7" exitCode=0
Nov 23 16:20:02 crc kubenswrapper[5050]: I1123 16:20:02.307762 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-housekeeping-96ckg" event={"ID":"31208d48-e815-461a-b520-436b71bc90ce","Type":"ContainerDied","Data":"08b7eaa1d9d8949a196cbd7e47cad5d2a0027cccc6bae89eb7ce54ac748283f7"}
Nov 23 16:20:03 crc kubenswrapper[5050]: I1123 16:20:03.318868 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-housekeeping-96ckg" event={"ID":"31208d48-e815-461a-b520-436b71bc90ce","Type":"ContainerStarted","Data":"33d2510bb5faca6d9bf3bcd2f33202b39b6312b39b5e7c73aa29a473d6b19f2b"}
Nov 23 16:20:03 crc kubenswrapper[5050]: I1123 16:20:03.319704 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/octavia-housekeeping-96ckg"
Nov 23 16:20:03 crc kubenswrapper[5050]: I1123 16:20:03.322113 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-worker-gtlmz" event={"ID":"8cf1b9f8-28c6-4b6e-b268-cfe4dee35d9d","Type":"ContainerStarted","Data":"a51a4d2996af13ec31f98b27f8eb65c02ff53c487ece62175251e56ef90efd08"}
Nov 23 16:20:03 crc kubenswrapper[5050]: I1123 16:20:03.354507 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/octavia-housekeeping-96ckg" podStartSLOduration=3.899157476 podStartE2EDuration="6.35444972s" podCreationTimestamp="2025-11-23 16:19:57 +0000 UTC" firstStartedPulling="2025-11-23 16:19:58.326844433 +0000 UTC m=+5893.493840918" lastFinishedPulling="2025-11-23 16:20:00.782136677 +0000 UTC m=+5895.949133162" observedRunningTime="2025-11-23 16:20:03.345025125 +0000 UTC m=+5898.512021660" watchObservedRunningTime="2025-11-23 16:20:03.35444972 +0000 UTC m=+5898.521446245"
Nov 23 16:20:04 crc kubenswrapper[5050]: I1123 16:20:04.335412 5050 generic.go:334] "Generic (PLEG): container finished" podID="8cf1b9f8-28c6-4b6e-b268-cfe4dee35d9d" containerID="a51a4d2996af13ec31f98b27f8eb65c02ff53c487ece62175251e56ef90efd08" exitCode=0
event={"ID":"8cf1b9f8-28c6-4b6e-b268-cfe4dee35d9d","Type":"ContainerDied","Data":"a51a4d2996af13ec31f98b27f8eb65c02ff53c487ece62175251e56ef90efd08"} Nov 23 16:20:05 crc kubenswrapper[5050]: I1123 16:20:05.350497 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-worker-gtlmz" event={"ID":"8cf1b9f8-28c6-4b6e-b268-cfe4dee35d9d","Type":"ContainerStarted","Data":"2acd80b867b81f0628be5a1611dafac79913fa12d613a49db751db6e2fa74c51"} Nov 23 16:20:05 crc kubenswrapper[5050]: I1123 16:20:05.351257 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/octavia-worker-gtlmz" Nov 23 16:20:05 crc kubenswrapper[5050]: I1123 16:20:05.368305 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/octavia-worker-gtlmz" podStartSLOduration=4.815113827 podStartE2EDuration="7.368274402s" podCreationTimestamp="2025-11-23 16:19:58 +0000 UTC" firstStartedPulling="2025-11-23 16:19:59.5672771 +0000 UTC m=+5894.734273585" lastFinishedPulling="2025-11-23 16:20:02.120437675 +0000 UTC m=+5897.287434160" observedRunningTime="2025-11-23 16:20:05.365545395 +0000 UTC m=+5900.532541880" watchObservedRunningTime="2025-11-23 16:20:05.368274402 +0000 UTC m=+5900.535270917" Nov 23 16:20:10 crc kubenswrapper[5050]: I1123 16:20:10.759970 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/octavia-healthmanager-z4gjs" Nov 23 16:20:12 crc kubenswrapper[5050]: I1123 16:20:12.776745 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/octavia-housekeeping-96ckg" Nov 23 16:20:13 crc kubenswrapper[5050]: I1123 16:20:13.983078 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/octavia-worker-gtlmz" Nov 23 16:20:31 crc kubenswrapper[5050]: I1123 16:20:31.067256 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-create-79q9n"] Nov 23 16:20:31 crc kubenswrapper[5050]: I1123 16:20:31.083233 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-e940-account-create-q8fld"] Nov 23 16:20:31 crc kubenswrapper[5050]: I1123 16:20:31.093700 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-create-79q9n"] Nov 23 16:20:31 crc kubenswrapper[5050]: I1123 16:20:31.104423 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-e940-account-create-q8fld"] Nov 23 16:20:31 crc kubenswrapper[5050]: I1123 16:20:31.569276 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="59aa3498-5147-46f0-9783-f0dbc9845bab" path="/var/lib/kubelet/pods/59aa3498-5147-46f0-9783-f0dbc9845bab/volumes" Nov 23 16:20:31 crc kubenswrapper[5050]: I1123 16:20:31.572022 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a69131bd-581a-4dbc-b6b2-a58ca7d5ef5c" path="/var/lib/kubelet/pods/a69131bd-581a-4dbc-b6b2-a58ca7d5ef5c/volumes" Nov 23 16:20:37 crc kubenswrapper[5050]: I1123 16:20:37.083367 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-sync-xtqpj"] Nov 23 16:20:37 crc kubenswrapper[5050]: I1123 16:20:37.094758 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-sync-xtqpj"] Nov 23 16:20:37 crc kubenswrapper[5050]: I1123 16:20:37.564788 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2730954f-204b-453e-b3a5-e7e50e378343" path="/var/lib/kubelet/pods/2730954f-204b-453e-b3a5-e7e50e378343/volumes" Nov 23 16:20:51 crc kubenswrapper[5050]: I1123 
Nov 23 16:20:51 crc kubenswrapper[5050]: I1123 16:20:51.013834 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-zg4kx"]
Nov 23 16:20:51 crc kubenswrapper[5050]: E1123 16:20:51.016419 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f7345504-9bde-40fb-b98e-9939c35fc411" containerName="extract-content"
Nov 23 16:20:51 crc kubenswrapper[5050]: I1123 16:20:51.016528 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="f7345504-9bde-40fb-b98e-9939c35fc411" containerName="extract-content"
Nov 23 16:20:51 crc kubenswrapper[5050]: E1123 16:20:51.016622 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f7345504-9bde-40fb-b98e-9939c35fc411" containerName="registry-server"
Nov 23 16:20:51 crc kubenswrapper[5050]: I1123 16:20:51.016685 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="f7345504-9bde-40fb-b98e-9939c35fc411" containerName="registry-server"
Nov 23 16:20:51 crc kubenswrapper[5050]: E1123 16:20:51.016767 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f7345504-9bde-40fb-b98e-9939c35fc411" containerName="extract-utilities"
Nov 23 16:20:51 crc kubenswrapper[5050]: I1123 16:20:51.016832 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="f7345504-9bde-40fb-b98e-9939c35fc411" containerName="extract-utilities"
Nov 23 16:20:51 crc kubenswrapper[5050]: I1123 16:20:51.017142 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="f7345504-9bde-40fb-b98e-9939c35fc411" containerName="registry-server"
Nov 23 16:20:51 crc kubenswrapper[5050]: I1123 16:20:51.019094 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-zg4kx"
Nov 23 16:20:51 crc kubenswrapper[5050]: I1123 16:20:51.027692 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-zg4kx"]
Nov 23 16:20:51 crc kubenswrapper[5050]: I1123 16:20:51.201237 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3c3b1ea4-2747-47b6-8dad-97d13d1070b8-utilities\") pod \"redhat-operators-zg4kx\" (UID: \"3c3b1ea4-2747-47b6-8dad-97d13d1070b8\") " pod="openshift-marketplace/redhat-operators-zg4kx"
Nov 23 16:20:51 crc kubenswrapper[5050]: I1123 16:20:51.201516 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sr7qd\" (UniqueName: \"kubernetes.io/projected/3c3b1ea4-2747-47b6-8dad-97d13d1070b8-kube-api-access-sr7qd\") pod \"redhat-operators-zg4kx\" (UID: \"3c3b1ea4-2747-47b6-8dad-97d13d1070b8\") " pod="openshift-marketplace/redhat-operators-zg4kx"
Nov 23 16:20:51 crc kubenswrapper[5050]: I1123 16:20:51.201757 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3c3b1ea4-2747-47b6-8dad-97d13d1070b8-catalog-content\") pod \"redhat-operators-zg4kx\" (UID: \"3c3b1ea4-2747-47b6-8dad-97d13d1070b8\") " pod="openshift-marketplace/redhat-operators-zg4kx"
Nov 23 16:20:51 crc kubenswrapper[5050]: I1123 16:20:51.303183 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sr7qd\" (UniqueName: \"kubernetes.io/projected/3c3b1ea4-2747-47b6-8dad-97d13d1070b8-kube-api-access-sr7qd\") pod \"redhat-operators-zg4kx\" (UID: \"3c3b1ea4-2747-47b6-8dad-97d13d1070b8\") " pod="openshift-marketplace/redhat-operators-zg4kx"
Nov 23 16:20:51 crc kubenswrapper[5050]: I1123 16:20:51.303299 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3c3b1ea4-2747-47b6-8dad-97d13d1070b8-catalog-content\") pod \"redhat-operators-zg4kx\" (UID: \"3c3b1ea4-2747-47b6-8dad-97d13d1070b8\") " pod="openshift-marketplace/redhat-operators-zg4kx"
Nov 23 16:20:51 crc kubenswrapper[5050]: I1123 16:20:51.303377 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3c3b1ea4-2747-47b6-8dad-97d13d1070b8-utilities\") pod \"redhat-operators-zg4kx\" (UID: \"3c3b1ea4-2747-47b6-8dad-97d13d1070b8\") " pod="openshift-marketplace/redhat-operators-zg4kx"
Nov 23 16:20:51 crc kubenswrapper[5050]: I1123 16:20:51.304035 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3c3b1ea4-2747-47b6-8dad-97d13d1070b8-utilities\") pod \"redhat-operators-zg4kx\" (UID: \"3c3b1ea4-2747-47b6-8dad-97d13d1070b8\") " pod="openshift-marketplace/redhat-operators-zg4kx"
Nov 23 16:20:51 crc kubenswrapper[5050]: I1123 16:20:51.304163 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3c3b1ea4-2747-47b6-8dad-97d13d1070b8-catalog-content\") pod \"redhat-operators-zg4kx\" (UID: \"3c3b1ea4-2747-47b6-8dad-97d13d1070b8\") " pod="openshift-marketplace/redhat-operators-zg4kx"
Nov 23 16:20:51 crc kubenswrapper[5050]: I1123 16:20:51.327755 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sr7qd\" (UniqueName: \"kubernetes.io/projected/3c3b1ea4-2747-47b6-8dad-97d13d1070b8-kube-api-access-sr7qd\") pod \"redhat-operators-zg4kx\" (UID: \"3c3b1ea4-2747-47b6-8dad-97d13d1070b8\") " pod="openshift-marketplace/redhat-operators-zg4kx"
Need to start a new one" pod="openshift-marketplace/redhat-operators-zg4kx" Nov 23 16:20:51 crc kubenswrapper[5050]: I1123 16:20:51.952467 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-zg4kx"] Nov 23 16:20:52 crc kubenswrapper[5050]: I1123 16:20:52.989186 5050 generic.go:334] "Generic (PLEG): container finished" podID="3c3b1ea4-2747-47b6-8dad-97d13d1070b8" containerID="99d561c3f529f9160eb3dacdb4b9b184168b264f60ab0722233e37d076351462" exitCode=0 Nov 23 16:20:52 crc kubenswrapper[5050]: I1123 16:20:52.989349 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zg4kx" event={"ID":"3c3b1ea4-2747-47b6-8dad-97d13d1070b8","Type":"ContainerDied","Data":"99d561c3f529f9160eb3dacdb4b9b184168b264f60ab0722233e37d076351462"} Nov 23 16:20:52 crc kubenswrapper[5050]: I1123 16:20:52.991761 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zg4kx" event={"ID":"3c3b1ea4-2747-47b6-8dad-97d13d1070b8","Type":"ContainerStarted","Data":"45fee6e7e130296234f20e924844febf20d8dd0dfabaeeec99d05132b3ae3fd5"} Nov 23 16:20:54 crc kubenswrapper[5050]: I1123 16:20:54.006260 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zg4kx" event={"ID":"3c3b1ea4-2747-47b6-8dad-97d13d1070b8","Type":"ContainerStarted","Data":"c1842dae52591ac085c42484f16c0751706c94adc76abb0f17a883e14f79d089"} Nov 23 16:20:55 crc kubenswrapper[5050]: I1123 16:20:55.026686 5050 generic.go:334] "Generic (PLEG): container finished" podID="3c3b1ea4-2747-47b6-8dad-97d13d1070b8" containerID="c1842dae52591ac085c42484f16c0751706c94adc76abb0f17a883e14f79d089" exitCode=0 Nov 23 16:20:55 crc kubenswrapper[5050]: I1123 16:20:55.026756 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zg4kx" event={"ID":"3c3b1ea4-2747-47b6-8dad-97d13d1070b8","Type":"ContainerDied","Data":"c1842dae52591ac085c42484f16c0751706c94adc76abb0f17a883e14f79d089"} Nov 23 16:20:55 crc kubenswrapper[5050]: I1123 16:20:55.796501 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-qnbkg"] Nov 23 16:20:55 crc kubenswrapper[5050]: I1123 16:20:55.799419 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-qnbkg" Nov 23 16:20:55 crc kubenswrapper[5050]: I1123 16:20:55.835895 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-qnbkg"] Nov 23 16:20:55 crc kubenswrapper[5050]: I1123 16:20:55.842292 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5afd3b44-f6a7-4c3d-acde-51fe6da338a2-catalog-content\") pod \"certified-operators-qnbkg\" (UID: \"5afd3b44-f6a7-4c3d-acde-51fe6da338a2\") " pod="openshift-marketplace/certified-operators-qnbkg" Nov 23 16:20:55 crc kubenswrapper[5050]: I1123 16:20:55.842370 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5afd3b44-f6a7-4c3d-acde-51fe6da338a2-utilities\") pod \"certified-operators-qnbkg\" (UID: \"5afd3b44-f6a7-4c3d-acde-51fe6da338a2\") " pod="openshift-marketplace/certified-operators-qnbkg" Nov 23 16:20:55 crc kubenswrapper[5050]: I1123 16:20:55.842915 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-prq8j\" (UniqueName: \"kubernetes.io/projected/5afd3b44-f6a7-4c3d-acde-51fe6da338a2-kube-api-access-prq8j\") pod \"certified-operators-qnbkg\" (UID: \"5afd3b44-f6a7-4c3d-acde-51fe6da338a2\") " pod="openshift-marketplace/certified-operators-qnbkg" Nov 23 16:20:55 crc kubenswrapper[5050]: I1123 16:20:55.944205 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5afd3b44-f6a7-4c3d-acde-51fe6da338a2-catalog-content\") pod \"certified-operators-qnbkg\" (UID: \"5afd3b44-f6a7-4c3d-acde-51fe6da338a2\") " pod="openshift-marketplace/certified-operators-qnbkg" Nov 23 16:20:55 crc kubenswrapper[5050]: I1123 16:20:55.944285 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5afd3b44-f6a7-4c3d-acde-51fe6da338a2-utilities\") pod \"certified-operators-qnbkg\" (UID: \"5afd3b44-f6a7-4c3d-acde-51fe6da338a2\") " pod="openshift-marketplace/certified-operators-qnbkg" Nov 23 16:20:55 crc kubenswrapper[5050]: I1123 16:20:55.944340 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-prq8j\" (UniqueName: \"kubernetes.io/projected/5afd3b44-f6a7-4c3d-acde-51fe6da338a2-kube-api-access-prq8j\") pod \"certified-operators-qnbkg\" (UID: \"5afd3b44-f6a7-4c3d-acde-51fe6da338a2\") " pod="openshift-marketplace/certified-operators-qnbkg" Nov 23 16:20:55 crc kubenswrapper[5050]: I1123 16:20:55.944763 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5afd3b44-f6a7-4c3d-acde-51fe6da338a2-catalog-content\") pod \"certified-operators-qnbkg\" (UID: \"5afd3b44-f6a7-4c3d-acde-51fe6da338a2\") " pod="openshift-marketplace/certified-operators-qnbkg" Nov 23 16:20:55 crc kubenswrapper[5050]: I1123 16:20:55.944807 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5afd3b44-f6a7-4c3d-acde-51fe6da338a2-utilities\") pod \"certified-operators-qnbkg\" (UID: \"5afd3b44-f6a7-4c3d-acde-51fe6da338a2\") " pod="openshift-marketplace/certified-operators-qnbkg" Nov 23 16:20:55 crc kubenswrapper[5050]: I1123 16:20:55.987160 5050 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-prq8j\" (UniqueName: \"kubernetes.io/projected/5afd3b44-f6a7-4c3d-acde-51fe6da338a2-kube-api-access-prq8j\") pod \"certified-operators-qnbkg\" (UID: \"5afd3b44-f6a7-4c3d-acde-51fe6da338a2\") " pod="openshift-marketplace/certified-operators-qnbkg" Nov 23 16:20:56 crc kubenswrapper[5050]: I1123 16:20:56.039480 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zg4kx" event={"ID":"3c3b1ea4-2747-47b6-8dad-97d13d1070b8","Type":"ContainerStarted","Data":"1fb179844c05a1dd01709e6b61e242844ae409ad1285ac83b34f6e28ab4ff246"} Nov 23 16:20:56 crc kubenswrapper[5050]: I1123 16:20:56.062603 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-zg4kx" podStartSLOduration=3.540551925 podStartE2EDuration="6.062573071s" podCreationTimestamp="2025-11-23 16:20:50 +0000 UTC" firstStartedPulling="2025-11-23 16:20:52.99225977 +0000 UTC m=+5948.159256285" lastFinishedPulling="2025-11-23 16:20:55.514280936 +0000 UTC m=+5950.681277431" observedRunningTime="2025-11-23 16:20:56.059652038 +0000 UTC m=+5951.226648523" watchObservedRunningTime="2025-11-23 16:20:56.062573071 +0000 UTC m=+5951.229569556" Nov 23 16:20:56 crc kubenswrapper[5050]: I1123 16:20:56.132771 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-qnbkg" Nov 23 16:20:56 crc kubenswrapper[5050]: I1123 16:20:56.715997 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-qnbkg"] Nov 23 16:20:56 crc kubenswrapper[5050]: W1123 16:20:56.726771 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5afd3b44_f6a7_4c3d_acde_51fe6da338a2.slice/crio-7af992f883acd5c50281fb0269926299e3502d1339ce5ad9a86ce59ed0a71750 WatchSource:0}: Error finding container 7af992f883acd5c50281fb0269926299e3502d1339ce5ad9a86ce59ed0a71750: Status 404 returned error can't find the container with id 7af992f883acd5c50281fb0269926299e3502d1339ce5ad9a86ce59ed0a71750 Nov 23 16:20:57 crc kubenswrapper[5050]: I1123 16:20:57.052966 5050 generic.go:334] "Generic (PLEG): container finished" podID="5afd3b44-f6a7-4c3d-acde-51fe6da338a2" containerID="2a51dc0cb25a783e9e597b4a83af703b56f008f54872206431b7a6b4d62b70c7" exitCode=0 Nov 23 16:20:57 crc kubenswrapper[5050]: I1123 16:20:57.053061 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qnbkg" event={"ID":"5afd3b44-f6a7-4c3d-acde-51fe6da338a2","Type":"ContainerDied","Data":"2a51dc0cb25a783e9e597b4a83af703b56f008f54872206431b7a6b4d62b70c7"} Nov 23 16:20:57 crc kubenswrapper[5050]: I1123 16:20:57.053612 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qnbkg" event={"ID":"5afd3b44-f6a7-4c3d-acde-51fe6da338a2","Type":"ContainerStarted","Data":"7af992f883acd5c50281fb0269926299e3502d1339ce5ad9a86ce59ed0a71750"} Nov 23 16:20:58 crc kubenswrapper[5050]: I1123 16:20:58.066951 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qnbkg" event={"ID":"5afd3b44-f6a7-4c3d-acde-51fe6da338a2","Type":"ContainerStarted","Data":"a1bd496c13586434e9a13f460cb1fed4b8d6065775128bed604945350d063b9b"} Nov 23 16:20:59 crc kubenswrapper[5050]: I1123 16:20:59.081559 5050 generic.go:334] "Generic (PLEG): container finished" podID="5afd3b44-f6a7-4c3d-acde-51fe6da338a2" 
containerID="a1bd496c13586434e9a13f460cb1fed4b8d6065775128bed604945350d063b9b" exitCode=0 Nov 23 16:20:59 crc kubenswrapper[5050]: I1123 16:20:59.081944 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qnbkg" event={"ID":"5afd3b44-f6a7-4c3d-acde-51fe6da338a2","Type":"ContainerDied","Data":"a1bd496c13586434e9a13f460cb1fed4b8d6065775128bed604945350d063b9b"} Nov 23 16:21:01 crc kubenswrapper[5050]: I1123 16:21:01.111874 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qnbkg" event={"ID":"5afd3b44-f6a7-4c3d-acde-51fe6da338a2","Type":"ContainerStarted","Data":"842b342ca8c3febc82b12e0082e5c6bdef58fa05dc1f46e77d4fad2ebd050376"} Nov 23 16:21:01 crc kubenswrapper[5050]: I1123 16:21:01.134159 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-qnbkg" podStartSLOduration=3.639529145 podStartE2EDuration="6.134138638s" podCreationTimestamp="2025-11-23 16:20:55 +0000 UTC" firstStartedPulling="2025-11-23 16:20:57.055017933 +0000 UTC m=+5952.222014418" lastFinishedPulling="2025-11-23 16:20:59.549627396 +0000 UTC m=+5954.716623911" observedRunningTime="2025-11-23 16:21:01.130853515 +0000 UTC m=+5956.297850000" watchObservedRunningTime="2025-11-23 16:21:01.134138638 +0000 UTC m=+5956.301135133" Nov 23 16:21:01 crc kubenswrapper[5050]: I1123 16:21:01.380689 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-zg4kx" Nov 23 16:21:01 crc kubenswrapper[5050]: I1123 16:21:01.380918 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-zg4kx" Nov 23 16:21:02 crc kubenswrapper[5050]: I1123 16:21:02.458420 5050 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-zg4kx" podUID="3c3b1ea4-2747-47b6-8dad-97d13d1070b8" containerName="registry-server" probeResult="failure" output=< Nov 23 16:21:02 crc kubenswrapper[5050]: timeout: failed to connect service ":50051" within 1s Nov 23 16:21:02 crc kubenswrapper[5050]: > Nov 23 16:21:05 crc kubenswrapper[5050]: I1123 16:21:05.625585 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-69568d7b7c-xt7nq"] Nov 23 16:21:05 crc kubenswrapper[5050]: I1123 16:21:05.641708 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-69568d7b7c-xt7nq" Nov 23 16:21:05 crc kubenswrapper[5050]: I1123 16:21:05.649883 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"horizon-config-data" Nov 23 16:21:05 crc kubenswrapper[5050]: I1123 16:21:05.654135 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"horizon-scripts" Nov 23 16:21:05 crc kubenswrapper[5050]: I1123 16:21:05.654912 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"horizon-horizon-dockercfg-t9wsn" Nov 23 16:21:05 crc kubenswrapper[5050]: I1123 16:21:05.655018 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"horizon" Nov 23 16:21:05 crc kubenswrapper[5050]: I1123 16:21:05.707004 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-69568d7b7c-xt7nq"] Nov 23 16:21:05 crc kubenswrapper[5050]: I1123 16:21:05.786549 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 23 16:21:05 crc kubenswrapper[5050]: I1123 16:21:05.787429 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="54c7819b-2cfc-4c76-8777-ec8ca37e418e" containerName="glance-log" containerID="cri-o://0123ddf672b0c9cd788e8ae216a52c37b57380502b85282f4f6d904a093eade8" gracePeriod=30 Nov 23 16:21:05 crc kubenswrapper[5050]: I1123 16:21:05.787736 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="54c7819b-2cfc-4c76-8777-ec8ca37e418e" containerName="glance-httpd" containerID="cri-o://44bfe94e167510c54c0a0ff25886435bcb8b77e0e097f04e95214b3e4cbc1175" gracePeriod=30 Nov 23 16:21:05 crc kubenswrapper[5050]: I1123 16:21:05.799864 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 23 16:21:05 crc kubenswrapper[5050]: I1123 16:21:05.800139 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="dd6ddc23-bc16-45ee-adc2-4e354e707c2d" containerName="glance-log" containerID="cri-o://01a7d8a04e112b855020224eb1edacb1527bc653d06ca87278860e7afd6a3cac" gracePeriod=30 Nov 23 16:21:05 crc kubenswrapper[5050]: I1123 16:21:05.802309 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="dd6ddc23-bc16-45ee-adc2-4e354e707c2d" containerName="glance-httpd" containerID="cri-o://d224f6bcfdea9e3c42bcb919fd0586bca9651f6ed43080511d95e49dac735628" gracePeriod=30 Nov 23 16:21:05 crc kubenswrapper[5050]: I1123 16:21:05.830180 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bmvpx\" (UniqueName: \"kubernetes.io/projected/9b167679-73f1-44f8-bd64-f8172c3f8b1b-kube-api-access-bmvpx\") pod \"horizon-69568d7b7c-xt7nq\" (UID: \"9b167679-73f1-44f8-bd64-f8172c3f8b1b\") " pod="openstack/horizon-69568d7b7c-xt7nq" Nov 23 16:21:05 crc kubenswrapper[5050]: I1123 16:21:05.830611 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/9b167679-73f1-44f8-bd64-f8172c3f8b1b-config-data\") pod \"horizon-69568d7b7c-xt7nq\" (UID: \"9b167679-73f1-44f8-bd64-f8172c3f8b1b\") " pod="openstack/horizon-69568d7b7c-xt7nq" Nov 23 16:21:05 crc kubenswrapper[5050]: I1123 16:21:05.835700 5050 kubelet.go:2421] 
"SyncLoop ADD" source="api" pods=["openstack/horizon-68c8987657-cthvh"] Nov 23 16:21:05 crc kubenswrapper[5050]: I1123 16:21:05.838198 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/9b167679-73f1-44f8-bd64-f8172c3f8b1b-horizon-secret-key\") pod \"horizon-69568d7b7c-xt7nq\" (UID: \"9b167679-73f1-44f8-bd64-f8172c3f8b1b\") " pod="openstack/horizon-69568d7b7c-xt7nq" Nov 23 16:21:05 crc kubenswrapper[5050]: I1123 16:21:05.838408 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9b167679-73f1-44f8-bd64-f8172c3f8b1b-logs\") pod \"horizon-69568d7b7c-xt7nq\" (UID: \"9b167679-73f1-44f8-bd64-f8172c3f8b1b\") " pod="openstack/horizon-69568d7b7c-xt7nq" Nov 23 16:21:05 crc kubenswrapper[5050]: I1123 16:21:05.838618 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9b167679-73f1-44f8-bd64-f8172c3f8b1b-scripts\") pod \"horizon-69568d7b7c-xt7nq\" (UID: \"9b167679-73f1-44f8-bd64-f8172c3f8b1b\") " pod="openstack/horizon-69568d7b7c-xt7nq" Nov 23 16:21:05 crc kubenswrapper[5050]: I1123 16:21:05.844623 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-68c8987657-cthvh" Nov 23 16:21:05 crc kubenswrapper[5050]: I1123 16:21:05.848702 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-68c8987657-cthvh"] Nov 23 16:21:05 crc kubenswrapper[5050]: I1123 16:21:05.940935 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/4e1a52e2-909e-440a-b0ba-e4426cc19355-horizon-secret-key\") pod \"horizon-68c8987657-cthvh\" (UID: \"4e1a52e2-909e-440a-b0ba-e4426cc19355\") " pod="openstack/horizon-68c8987657-cthvh" Nov 23 16:21:05 crc kubenswrapper[5050]: I1123 16:21:05.941152 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4e1a52e2-909e-440a-b0ba-e4426cc19355-scripts\") pod \"horizon-68c8987657-cthvh\" (UID: \"4e1a52e2-909e-440a-b0ba-e4426cc19355\") " pod="openstack/horizon-68c8987657-cthvh" Nov 23 16:21:05 crc kubenswrapper[5050]: I1123 16:21:05.941275 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/9b167679-73f1-44f8-bd64-f8172c3f8b1b-config-data\") pod \"horizon-69568d7b7c-xt7nq\" (UID: \"9b167679-73f1-44f8-bd64-f8172c3f8b1b\") " pod="openstack/horizon-69568d7b7c-xt7nq" Nov 23 16:21:05 crc kubenswrapper[5050]: I1123 16:21:05.941414 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/4e1a52e2-909e-440a-b0ba-e4426cc19355-config-data\") pod \"horizon-68c8987657-cthvh\" (UID: \"4e1a52e2-909e-440a-b0ba-e4426cc19355\") " pod="openstack/horizon-68c8987657-cthvh" Nov 23 16:21:05 crc kubenswrapper[5050]: I1123 16:21:05.941634 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/9b167679-73f1-44f8-bd64-f8172c3f8b1b-horizon-secret-key\") pod \"horizon-69568d7b7c-xt7nq\" (UID: \"9b167679-73f1-44f8-bd64-f8172c3f8b1b\") " pod="openstack/horizon-69568d7b7c-xt7nq" Nov 23 16:21:05 crc 
Nov 23 16:21:05 crc kubenswrapper[5050]: I1123 16:21:05.941771 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9b167679-73f1-44f8-bd64-f8172c3f8b1b-logs\") pod \"horizon-69568d7b7c-xt7nq\" (UID: \"9b167679-73f1-44f8-bd64-f8172c3f8b1b\") " pod="openstack/horizon-69568d7b7c-xt7nq"
Nov 23 16:21:05 crc kubenswrapper[5050]: I1123 16:21:05.941866 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xtkl4\" (UniqueName: \"kubernetes.io/projected/4e1a52e2-909e-440a-b0ba-e4426cc19355-kube-api-access-xtkl4\") pod \"horizon-68c8987657-cthvh\" (UID: \"4e1a52e2-909e-440a-b0ba-e4426cc19355\") " pod="openstack/horizon-68c8987657-cthvh"
Nov 23 16:21:05 crc kubenswrapper[5050]: I1123 16:21:05.941958 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4e1a52e2-909e-440a-b0ba-e4426cc19355-logs\") pod \"horizon-68c8987657-cthvh\" (UID: \"4e1a52e2-909e-440a-b0ba-e4426cc19355\") " pod="openstack/horizon-68c8987657-cthvh"
Nov 23 16:21:05 crc kubenswrapper[5050]: I1123 16:21:05.942039 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9b167679-73f1-44f8-bd64-f8172c3f8b1b-scripts\") pod \"horizon-69568d7b7c-xt7nq\" (UID: \"9b167679-73f1-44f8-bd64-f8172c3f8b1b\") " pod="openstack/horizon-69568d7b7c-xt7nq"
Nov 23 16:21:05 crc kubenswrapper[5050]: I1123 16:21:05.942140 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bmvpx\" (UniqueName: \"kubernetes.io/projected/9b167679-73f1-44f8-bd64-f8172c3f8b1b-kube-api-access-bmvpx\") pod \"horizon-69568d7b7c-xt7nq\" (UID: \"9b167679-73f1-44f8-bd64-f8172c3f8b1b\") " pod="openstack/horizon-69568d7b7c-xt7nq"
Nov 23 16:21:05 crc kubenswrapper[5050]: I1123 16:21:05.943965 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9b167679-73f1-44f8-bd64-f8172c3f8b1b-logs\") pod \"horizon-69568d7b7c-xt7nq\" (UID: \"9b167679-73f1-44f8-bd64-f8172c3f8b1b\") " pod="openstack/horizon-69568d7b7c-xt7nq"
Nov 23 16:21:05 crc kubenswrapper[5050]: I1123 16:21:05.944396 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/9b167679-73f1-44f8-bd64-f8172c3f8b1b-config-data\") pod \"horizon-69568d7b7c-xt7nq\" (UID: \"9b167679-73f1-44f8-bd64-f8172c3f8b1b\") " pod="openstack/horizon-69568d7b7c-xt7nq"
Nov 23 16:21:05 crc kubenswrapper[5050]: I1123 16:21:05.945250 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9b167679-73f1-44f8-bd64-f8172c3f8b1b-scripts\") pod \"horizon-69568d7b7c-xt7nq\" (UID: \"9b167679-73f1-44f8-bd64-f8172c3f8b1b\") " pod="openstack/horizon-69568d7b7c-xt7nq"
Nov 23 16:21:05 crc kubenswrapper[5050]: I1123 16:21:05.951926 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/9b167679-73f1-44f8-bd64-f8172c3f8b1b-horizon-secret-key\") pod \"horizon-69568d7b7c-xt7nq\" (UID: \"9b167679-73f1-44f8-bd64-f8172c3f8b1b\") " pod="openstack/horizon-69568d7b7c-xt7nq"
\"kubernetes.io/projected/9b167679-73f1-44f8-bd64-f8172c3f8b1b-kube-api-access-bmvpx\") pod \"horizon-69568d7b7c-xt7nq\" (UID: \"9b167679-73f1-44f8-bd64-f8172c3f8b1b\") " pod="openstack/horizon-69568d7b7c-xt7nq" Nov 23 16:21:05 crc kubenswrapper[5050]: I1123 16:21:05.968537 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-69568d7b7c-xt7nq" Nov 23 16:21:06 crc kubenswrapper[5050]: I1123 16:21:06.045072 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xtkl4\" (UniqueName: \"kubernetes.io/projected/4e1a52e2-909e-440a-b0ba-e4426cc19355-kube-api-access-xtkl4\") pod \"horizon-68c8987657-cthvh\" (UID: \"4e1a52e2-909e-440a-b0ba-e4426cc19355\") " pod="openstack/horizon-68c8987657-cthvh" Nov 23 16:21:06 crc kubenswrapper[5050]: I1123 16:21:06.045558 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4e1a52e2-909e-440a-b0ba-e4426cc19355-logs\") pod \"horizon-68c8987657-cthvh\" (UID: \"4e1a52e2-909e-440a-b0ba-e4426cc19355\") " pod="openstack/horizon-68c8987657-cthvh" Nov 23 16:21:06 crc kubenswrapper[5050]: I1123 16:21:06.045632 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/4e1a52e2-909e-440a-b0ba-e4426cc19355-horizon-secret-key\") pod \"horizon-68c8987657-cthvh\" (UID: \"4e1a52e2-909e-440a-b0ba-e4426cc19355\") " pod="openstack/horizon-68c8987657-cthvh" Nov 23 16:21:06 crc kubenswrapper[5050]: I1123 16:21:06.045650 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4e1a52e2-909e-440a-b0ba-e4426cc19355-scripts\") pod \"horizon-68c8987657-cthvh\" (UID: \"4e1a52e2-909e-440a-b0ba-e4426cc19355\") " pod="openstack/horizon-68c8987657-cthvh" Nov 23 16:21:06 crc kubenswrapper[5050]: I1123 16:21:06.045712 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/4e1a52e2-909e-440a-b0ba-e4426cc19355-config-data\") pod \"horizon-68c8987657-cthvh\" (UID: \"4e1a52e2-909e-440a-b0ba-e4426cc19355\") " pod="openstack/horizon-68c8987657-cthvh" Nov 23 16:21:06 crc kubenswrapper[5050]: I1123 16:21:06.046726 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4e1a52e2-909e-440a-b0ba-e4426cc19355-logs\") pod \"horizon-68c8987657-cthvh\" (UID: \"4e1a52e2-909e-440a-b0ba-e4426cc19355\") " pod="openstack/horizon-68c8987657-cthvh" Nov 23 16:21:06 crc kubenswrapper[5050]: I1123 16:21:06.047101 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4e1a52e2-909e-440a-b0ba-e4426cc19355-scripts\") pod \"horizon-68c8987657-cthvh\" (UID: \"4e1a52e2-909e-440a-b0ba-e4426cc19355\") " pod="openstack/horizon-68c8987657-cthvh" Nov 23 16:21:06 crc kubenswrapper[5050]: I1123 16:21:06.047403 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/4e1a52e2-909e-440a-b0ba-e4426cc19355-config-data\") pod \"horizon-68c8987657-cthvh\" (UID: \"4e1a52e2-909e-440a-b0ba-e4426cc19355\") " pod="openstack/horizon-68c8987657-cthvh" Nov 23 16:21:06 crc kubenswrapper[5050]: I1123 16:21:06.051086 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: 
\"kubernetes.io/secret/4e1a52e2-909e-440a-b0ba-e4426cc19355-horizon-secret-key\") pod \"horizon-68c8987657-cthvh\" (UID: \"4e1a52e2-909e-440a-b0ba-e4426cc19355\") " pod="openstack/horizon-68c8987657-cthvh" Nov 23 16:21:06 crc kubenswrapper[5050]: I1123 16:21:06.065244 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xtkl4\" (UniqueName: \"kubernetes.io/projected/4e1a52e2-909e-440a-b0ba-e4426cc19355-kube-api-access-xtkl4\") pod \"horizon-68c8987657-cthvh\" (UID: \"4e1a52e2-909e-440a-b0ba-e4426cc19355\") " pod="openstack/horizon-68c8987657-cthvh" Nov 23 16:21:06 crc kubenswrapper[5050]: I1123 16:21:06.133428 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-qnbkg" Nov 23 16:21:06 crc kubenswrapper[5050]: I1123 16:21:06.133498 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-qnbkg" Nov 23 16:21:06 crc kubenswrapper[5050]: I1123 16:21:06.137649 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-68c8987657-cthvh" Nov 23 16:21:06 crc kubenswrapper[5050]: I1123 16:21:06.202211 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-qnbkg" Nov 23 16:21:06 crc kubenswrapper[5050]: I1123 16:21:06.218731 5050 generic.go:334] "Generic (PLEG): container finished" podID="dd6ddc23-bc16-45ee-adc2-4e354e707c2d" containerID="01a7d8a04e112b855020224eb1edacb1527bc653d06ca87278860e7afd6a3cac" exitCode=143 Nov 23 16:21:06 crc kubenswrapper[5050]: I1123 16:21:06.219073 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"dd6ddc23-bc16-45ee-adc2-4e354e707c2d","Type":"ContainerDied","Data":"01a7d8a04e112b855020224eb1edacb1527bc653d06ca87278860e7afd6a3cac"} Nov 23 16:21:06 crc kubenswrapper[5050]: I1123 16:21:06.229808 5050 generic.go:334] "Generic (PLEG): container finished" podID="54c7819b-2cfc-4c76-8777-ec8ca37e418e" containerID="0123ddf672b0c9cd788e8ae216a52c37b57380502b85282f4f6d904a093eade8" exitCode=143 Nov 23 16:21:06 crc kubenswrapper[5050]: I1123 16:21:06.232521 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"54c7819b-2cfc-4c76-8777-ec8ca37e418e","Type":"ContainerDied","Data":"0123ddf672b0c9cd788e8ae216a52c37b57380502b85282f4f6d904a093eade8"} Nov 23 16:21:06 crc kubenswrapper[5050]: I1123 16:21:06.295679 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-qnbkg" Nov 23 16:21:06 crc kubenswrapper[5050]: I1123 16:21:06.491375 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-69568d7b7c-xt7nq"] Nov 23 16:21:06 crc kubenswrapper[5050]: W1123 16:21:06.496254 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9b167679_73f1_44f8_bd64_f8172c3f8b1b.slice/crio-70fbcbe064c69b4e18459ecaae782ff443571e566d8eee4e77d3131383b54b20 WatchSource:0}: Error finding container 70fbcbe064c69b4e18459ecaae782ff443571e566d8eee4e77d3131383b54b20: Status 404 returned error can't find the container with id 70fbcbe064c69b4e18459ecaae782ff443571e566d8eee4e77d3131383b54b20 Nov 23 16:21:06 crc kubenswrapper[5050]: I1123 16:21:06.519066 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-68c8987657-cthvh"] Nov 23 16:21:06 
Nov 23 16:21:06 crc kubenswrapper[5050]: I1123 16:21:06.574027 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-bf4fdc45f-c4g79"]
Nov 23 16:21:06 crc kubenswrapper[5050]: I1123 16:21:06.576480 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-bf4fdc45f-c4g79"
Nov 23 16:21:06 crc kubenswrapper[5050]: I1123 16:21:06.625152 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-bf4fdc45f-c4g79"]
Nov 23 16:21:06 crc kubenswrapper[5050]: I1123 16:21:06.665350 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/de915556-22d5-4ea1-a24e-b872c1fdbdae-scripts\") pod \"horizon-bf4fdc45f-c4g79\" (UID: \"de915556-22d5-4ea1-a24e-b872c1fdbdae\") " pod="openstack/horizon-bf4fdc45f-c4g79"
Nov 23 16:21:06 crc kubenswrapper[5050]: I1123 16:21:06.667096 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/de915556-22d5-4ea1-a24e-b872c1fdbdae-config-data\") pod \"horizon-bf4fdc45f-c4g79\" (UID: \"de915556-22d5-4ea1-a24e-b872c1fdbdae\") " pod="openstack/horizon-bf4fdc45f-c4g79"
Nov 23 16:21:06 crc kubenswrapper[5050]: I1123 16:21:06.667260 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/de915556-22d5-4ea1-a24e-b872c1fdbdae-horizon-secret-key\") pod \"horizon-bf4fdc45f-c4g79\" (UID: \"de915556-22d5-4ea1-a24e-b872c1fdbdae\") " pod="openstack/horizon-bf4fdc45f-c4g79"
Nov 23 16:21:06 crc kubenswrapper[5050]: I1123 16:21:06.667543 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tg24g\" (UniqueName: \"kubernetes.io/projected/de915556-22d5-4ea1-a24e-b872c1fdbdae-kube-api-access-tg24g\") pod \"horizon-bf4fdc45f-c4g79\" (UID: \"de915556-22d5-4ea1-a24e-b872c1fdbdae\") " pod="openstack/horizon-bf4fdc45f-c4g79"
Nov 23 16:21:06 crc kubenswrapper[5050]: I1123 16:21:06.667677 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/de915556-22d5-4ea1-a24e-b872c1fdbdae-logs\") pod \"horizon-bf4fdc45f-c4g79\" (UID: \"de915556-22d5-4ea1-a24e-b872c1fdbdae\") " pod="openstack/horizon-bf4fdc45f-c4g79"
Nov 23 16:21:06 crc kubenswrapper[5050]: I1123 16:21:06.685797 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-68c8987657-cthvh"]
Nov 23 16:21:06 crc kubenswrapper[5050]: I1123 16:21:06.772056 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/de915556-22d5-4ea1-a24e-b872c1fdbdae-scripts\") pod \"horizon-bf4fdc45f-c4g79\" (UID: \"de915556-22d5-4ea1-a24e-b872c1fdbdae\") " pod="openstack/horizon-bf4fdc45f-c4g79"
Nov 23 16:21:06 crc kubenswrapper[5050]: I1123 16:21:06.772120 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/de915556-22d5-4ea1-a24e-b872c1fdbdae-config-data\") pod \"horizon-bf4fdc45f-c4g79\" (UID: \"de915556-22d5-4ea1-a24e-b872c1fdbdae\") " pod="openstack/horizon-bf4fdc45f-c4g79"
\"kubernetes.io/secret/de915556-22d5-4ea1-a24e-b872c1fdbdae-horizon-secret-key\") pod \"horizon-bf4fdc45f-c4g79\" (UID: \"de915556-22d5-4ea1-a24e-b872c1fdbdae\") " pod="openstack/horizon-bf4fdc45f-c4g79" Nov 23 16:21:06 crc kubenswrapper[5050]: I1123 16:21:06.772280 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tg24g\" (UniqueName: \"kubernetes.io/projected/de915556-22d5-4ea1-a24e-b872c1fdbdae-kube-api-access-tg24g\") pod \"horizon-bf4fdc45f-c4g79\" (UID: \"de915556-22d5-4ea1-a24e-b872c1fdbdae\") " pod="openstack/horizon-bf4fdc45f-c4g79" Nov 23 16:21:06 crc kubenswrapper[5050]: I1123 16:21:06.772300 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/de915556-22d5-4ea1-a24e-b872c1fdbdae-logs\") pod \"horizon-bf4fdc45f-c4g79\" (UID: \"de915556-22d5-4ea1-a24e-b872c1fdbdae\") " pod="openstack/horizon-bf4fdc45f-c4g79" Nov 23 16:21:06 crc kubenswrapper[5050]: I1123 16:21:06.782128 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/de915556-22d5-4ea1-a24e-b872c1fdbdae-scripts\") pod \"horizon-bf4fdc45f-c4g79\" (UID: \"de915556-22d5-4ea1-a24e-b872c1fdbdae\") " pod="openstack/horizon-bf4fdc45f-c4g79" Nov 23 16:21:06 crc kubenswrapper[5050]: I1123 16:21:06.788359 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/de915556-22d5-4ea1-a24e-b872c1fdbdae-logs\") pod \"horizon-bf4fdc45f-c4g79\" (UID: \"de915556-22d5-4ea1-a24e-b872c1fdbdae\") " pod="openstack/horizon-bf4fdc45f-c4g79" Nov 23 16:21:06 crc kubenswrapper[5050]: I1123 16:21:06.789115 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/de915556-22d5-4ea1-a24e-b872c1fdbdae-config-data\") pod \"horizon-bf4fdc45f-c4g79\" (UID: \"de915556-22d5-4ea1-a24e-b872c1fdbdae\") " pod="openstack/horizon-bf4fdc45f-c4g79" Nov 23 16:21:06 crc kubenswrapper[5050]: I1123 16:21:06.797298 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/de915556-22d5-4ea1-a24e-b872c1fdbdae-horizon-secret-key\") pod \"horizon-bf4fdc45f-c4g79\" (UID: \"de915556-22d5-4ea1-a24e-b872c1fdbdae\") " pod="openstack/horizon-bf4fdc45f-c4g79" Nov 23 16:21:06 crc kubenswrapper[5050]: I1123 16:21:06.810154 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tg24g\" (UniqueName: \"kubernetes.io/projected/de915556-22d5-4ea1-a24e-b872c1fdbdae-kube-api-access-tg24g\") pod \"horizon-bf4fdc45f-c4g79\" (UID: \"de915556-22d5-4ea1-a24e-b872c1fdbdae\") " pod="openstack/horizon-bf4fdc45f-c4g79" Nov 23 16:21:06 crc kubenswrapper[5050]: I1123 16:21:06.895253 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-bf4fdc45f-c4g79" Nov 23 16:21:07 crc kubenswrapper[5050]: I1123 16:21:07.063099 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-create-6w8pk"] Nov 23 16:21:07 crc kubenswrapper[5050]: I1123 16:21:07.086098 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-8743-account-create-jsxsw"] Nov 23 16:21:07 crc kubenswrapper[5050]: I1123 16:21:07.097196 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-create-6w8pk"] Nov 23 16:21:07 crc kubenswrapper[5050]: I1123 16:21:07.108073 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-8743-account-create-jsxsw"] Nov 23 16:21:07 crc kubenswrapper[5050]: I1123 16:21:07.191789 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-qnbkg"] Nov 23 16:21:07 crc kubenswrapper[5050]: I1123 16:21:07.247337 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-68c8987657-cthvh" event={"ID":"4e1a52e2-909e-440a-b0ba-e4426cc19355","Type":"ContainerStarted","Data":"551d17a70823ae1137383eeb48609be2d7ff83f9156482b2d8de45f44d3ee4ac"} Nov 23 16:21:07 crc kubenswrapper[5050]: I1123 16:21:07.250574 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-69568d7b7c-xt7nq" event={"ID":"9b167679-73f1-44f8-bd64-f8172c3f8b1b","Type":"ContainerStarted","Data":"70fbcbe064c69b4e18459ecaae782ff443571e566d8eee4e77d3131383b54b20"} Nov 23 16:21:07 crc kubenswrapper[5050]: I1123 16:21:07.454117 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-bf4fdc45f-c4g79"] Nov 23 16:21:07 crc kubenswrapper[5050]: W1123 16:21:07.477592 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podde915556_22d5_4ea1_a24e_b872c1fdbdae.slice/crio-9e27fa55752d82d6a61c87526fc03615534b80de3a8c4b1bbb287f4f40f1f89a WatchSource:0}: Error finding container 9e27fa55752d82d6a61c87526fc03615534b80de3a8c4b1bbb287f4f40f1f89a: Status 404 returned error can't find the container with id 9e27fa55752d82d6a61c87526fc03615534b80de3a8c4b1bbb287f4f40f1f89a Nov 23 16:21:07 crc kubenswrapper[5050]: I1123 16:21:07.481485 5050 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 23 16:21:07 crc kubenswrapper[5050]: I1123 16:21:07.568082 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="469f11cd-455f-4c87-92fd-88a05570426a" path="/var/lib/kubelet/pods/469f11cd-455f-4c87-92fd-88a05570426a/volumes" Nov 23 16:21:07 crc kubenswrapper[5050]: I1123 16:21:07.569081 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e84edb55-d2c4-4f7d-bff8-e47b020fb8ae" path="/var/lib/kubelet/pods/e84edb55-d2c4-4f7d-bff8-e47b020fb8ae/volumes" Nov 23 16:21:08 crc kubenswrapper[5050]: I1123 16:21:08.264020 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-qnbkg" podUID="5afd3b44-f6a7-4c3d-acde-51fe6da338a2" containerName="registry-server" containerID="cri-o://842b342ca8c3febc82b12e0082e5c6bdef58fa05dc1f46e77d4fad2ebd050376" gracePeriod=2 Nov 23 16:21:08 crc kubenswrapper[5050]: I1123 16:21:08.264184 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-bf4fdc45f-c4g79" 
event={"ID":"de915556-22d5-4ea1-a24e-b872c1fdbdae","Type":"ContainerStarted","Data":"9e27fa55752d82d6a61c87526fc03615534b80de3a8c4b1bbb287f4f40f1f89a"} Nov 23 16:21:08 crc kubenswrapper[5050]: I1123 16:21:08.840995 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-qnbkg" Nov 23 16:21:08 crc kubenswrapper[5050]: I1123 16:21:08.945990 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5afd3b44-f6a7-4c3d-acde-51fe6da338a2-utilities\") pod \"5afd3b44-f6a7-4c3d-acde-51fe6da338a2\" (UID: \"5afd3b44-f6a7-4c3d-acde-51fe6da338a2\") " Nov 23 16:21:08 crc kubenswrapper[5050]: I1123 16:21:08.946038 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5afd3b44-f6a7-4c3d-acde-51fe6da338a2-catalog-content\") pod \"5afd3b44-f6a7-4c3d-acde-51fe6da338a2\" (UID: \"5afd3b44-f6a7-4c3d-acde-51fe6da338a2\") " Nov 23 16:21:08 crc kubenswrapper[5050]: I1123 16:21:08.946080 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-prq8j\" (UniqueName: \"kubernetes.io/projected/5afd3b44-f6a7-4c3d-acde-51fe6da338a2-kube-api-access-prq8j\") pod \"5afd3b44-f6a7-4c3d-acde-51fe6da338a2\" (UID: \"5afd3b44-f6a7-4c3d-acde-51fe6da338a2\") " Nov 23 16:21:08 crc kubenswrapper[5050]: I1123 16:21:08.948115 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5afd3b44-f6a7-4c3d-acde-51fe6da338a2-utilities" (OuterVolumeSpecName: "utilities") pod "5afd3b44-f6a7-4c3d-acde-51fe6da338a2" (UID: "5afd3b44-f6a7-4c3d-acde-51fe6da338a2"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 16:21:08 crc kubenswrapper[5050]: I1123 16:21:08.953357 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5afd3b44-f6a7-4c3d-acde-51fe6da338a2-kube-api-access-prq8j" (OuterVolumeSpecName: "kube-api-access-prq8j") pod "5afd3b44-f6a7-4c3d-acde-51fe6da338a2" (UID: "5afd3b44-f6a7-4c3d-acde-51fe6da338a2"). InnerVolumeSpecName "kube-api-access-prq8j". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 16:21:09 crc kubenswrapper[5050]: I1123 16:21:09.015922 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5afd3b44-f6a7-4c3d-acde-51fe6da338a2-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5afd3b44-f6a7-4c3d-acde-51fe6da338a2" (UID: "5afd3b44-f6a7-4c3d-acde-51fe6da338a2"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 16:21:09 crc kubenswrapper[5050]: I1123 16:21:09.047974 5050 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5afd3b44-f6a7-4c3d-acde-51fe6da338a2-utilities\") on node \"crc\" DevicePath \"\"" Nov 23 16:21:09 crc kubenswrapper[5050]: I1123 16:21:09.048045 5050 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5afd3b44-f6a7-4c3d-acde-51fe6da338a2-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 23 16:21:09 crc kubenswrapper[5050]: I1123 16:21:09.048060 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-prq8j\" (UniqueName: \"kubernetes.io/projected/5afd3b44-f6a7-4c3d-acde-51fe6da338a2-kube-api-access-prq8j\") on node \"crc\" DevicePath \"\"" Nov 23 16:21:09 crc kubenswrapper[5050]: I1123 16:21:09.283241 5050 generic.go:334] "Generic (PLEG): container finished" podID="5afd3b44-f6a7-4c3d-acde-51fe6da338a2" containerID="842b342ca8c3febc82b12e0082e5c6bdef58fa05dc1f46e77d4fad2ebd050376" exitCode=0 Nov 23 16:21:09 crc kubenswrapper[5050]: I1123 16:21:09.283355 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qnbkg" event={"ID":"5afd3b44-f6a7-4c3d-acde-51fe6da338a2","Type":"ContainerDied","Data":"842b342ca8c3febc82b12e0082e5c6bdef58fa05dc1f46e77d4fad2ebd050376"} Nov 23 16:21:09 crc kubenswrapper[5050]: I1123 16:21:09.283398 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qnbkg" event={"ID":"5afd3b44-f6a7-4c3d-acde-51fe6da338a2","Type":"ContainerDied","Data":"7af992f883acd5c50281fb0269926299e3502d1339ce5ad9a86ce59ed0a71750"} Nov 23 16:21:09 crc kubenswrapper[5050]: I1123 16:21:09.283426 5050 scope.go:117] "RemoveContainer" containerID="842b342ca8c3febc82b12e0082e5c6bdef58fa05dc1f46e77d4fad2ebd050376" Nov 23 16:21:09 crc kubenswrapper[5050]: I1123 16:21:09.283835 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-qnbkg" Nov 23 16:21:09 crc kubenswrapper[5050]: I1123 16:21:09.292941 5050 generic.go:334] "Generic (PLEG): container finished" podID="dd6ddc23-bc16-45ee-adc2-4e354e707c2d" containerID="d224f6bcfdea9e3c42bcb919fd0586bca9651f6ed43080511d95e49dac735628" exitCode=0 Nov 23 16:21:09 crc kubenswrapper[5050]: I1123 16:21:09.293006 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"dd6ddc23-bc16-45ee-adc2-4e354e707c2d","Type":"ContainerDied","Data":"d224f6bcfdea9e3c42bcb919fd0586bca9651f6ed43080511d95e49dac735628"} Nov 23 16:21:09 crc kubenswrapper[5050]: I1123 16:21:09.329340 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-qnbkg"] Nov 23 16:21:09 crc kubenswrapper[5050]: I1123 16:21:09.344504 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-qnbkg"] Nov 23 16:21:09 crc kubenswrapper[5050]: I1123 16:21:09.370901 5050 scope.go:117] "RemoveContainer" containerID="06eeb66169a349a79bdefa5d2af36c8631843103d54a811997de71f01c5933e6" Nov 23 16:21:09 crc kubenswrapper[5050]: I1123 16:21:09.393392 5050 scope.go:117] "RemoveContainer" containerID="a1bd496c13586434e9a13f460cb1fed4b8d6065775128bed604945350d063b9b" Nov 23 16:21:09 crc kubenswrapper[5050]: I1123 16:21:09.444509 5050 scope.go:117] "RemoveContainer" containerID="2a51dc0cb25a783e9e597b4a83af703b56f008f54872206431b7a6b4d62b70c7" Nov 23 16:21:09 crc kubenswrapper[5050]: I1123 16:21:09.482912 5050 scope.go:117] "RemoveContainer" containerID="59878a85985966908c44e4a90b5d44cb40a08524c405feaac487f8a639db9856" Nov 23 16:21:09 crc kubenswrapper[5050]: I1123 16:21:09.573254 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5afd3b44-f6a7-4c3d-acde-51fe6da338a2" path="/var/lib/kubelet/pods/5afd3b44-f6a7-4c3d-acde-51fe6da338a2/volumes" Nov 23 16:21:09 crc kubenswrapper[5050]: I1123 16:21:09.588355 5050 scope.go:117] "RemoveContainer" containerID="86b0c0be72b5eaea73831a0e2efec4a0ab1c1f8be5e2cc93a1c23fa3a6e79f40" Nov 23 16:21:09 crc kubenswrapper[5050]: I1123 16:21:09.602743 5050 scope.go:117] "RemoveContainer" containerID="842b342ca8c3febc82b12e0082e5c6bdef58fa05dc1f46e77d4fad2ebd050376" Nov 23 16:21:09 crc kubenswrapper[5050]: E1123 16:21:09.603376 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"842b342ca8c3febc82b12e0082e5c6bdef58fa05dc1f46e77d4fad2ebd050376\": container with ID starting with 842b342ca8c3febc82b12e0082e5c6bdef58fa05dc1f46e77d4fad2ebd050376 not found: ID does not exist" containerID="842b342ca8c3febc82b12e0082e5c6bdef58fa05dc1f46e77d4fad2ebd050376" Nov 23 16:21:09 crc kubenswrapper[5050]: I1123 16:21:09.603431 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"842b342ca8c3febc82b12e0082e5c6bdef58fa05dc1f46e77d4fad2ebd050376"} err="failed to get container status \"842b342ca8c3febc82b12e0082e5c6bdef58fa05dc1f46e77d4fad2ebd050376\": rpc error: code = NotFound desc = could not find container \"842b342ca8c3febc82b12e0082e5c6bdef58fa05dc1f46e77d4fad2ebd050376\": container with ID starting with 842b342ca8c3febc82b12e0082e5c6bdef58fa05dc1f46e77d4fad2ebd050376 not found: ID does not exist" Nov 23 16:21:09 crc kubenswrapper[5050]: I1123 16:21:09.603488 5050 scope.go:117] "RemoveContainer" containerID="a1bd496c13586434e9a13f460cb1fed4b8d6065775128bed604945350d063b9b" 
Nov 23 16:21:09 crc kubenswrapper[5050]: E1123 16:21:09.603808 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a1bd496c13586434e9a13f460cb1fed4b8d6065775128bed604945350d063b9b\": container with ID starting with a1bd496c13586434e9a13f460cb1fed4b8d6065775128bed604945350d063b9b not found: ID does not exist" containerID="a1bd496c13586434e9a13f460cb1fed4b8d6065775128bed604945350d063b9b"
Nov 23 16:21:09 crc kubenswrapper[5050]: I1123 16:21:09.603837 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a1bd496c13586434e9a13f460cb1fed4b8d6065775128bed604945350d063b9b"} err="failed to get container status \"a1bd496c13586434e9a13f460cb1fed4b8d6065775128bed604945350d063b9b\": rpc error: code = NotFound desc = could not find container \"a1bd496c13586434e9a13f460cb1fed4b8d6065775128bed604945350d063b9b\": container with ID starting with a1bd496c13586434e9a13f460cb1fed4b8d6065775128bed604945350d063b9b not found: ID does not exist"
Nov 23 16:21:09 crc kubenswrapper[5050]: I1123 16:21:09.603852 5050 scope.go:117] "RemoveContainer" containerID="2a51dc0cb25a783e9e597b4a83af703b56f008f54872206431b7a6b4d62b70c7"
Nov 23 16:21:09 crc kubenswrapper[5050]: E1123 16:21:09.604270 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2a51dc0cb25a783e9e597b4a83af703b56f008f54872206431b7a6b4d62b70c7\": container with ID starting with 2a51dc0cb25a783e9e597b4a83af703b56f008f54872206431b7a6b4d62b70c7 not found: ID does not exist" containerID="2a51dc0cb25a783e9e597b4a83af703b56f008f54872206431b7a6b4d62b70c7"
Nov 23 16:21:09 crc kubenswrapper[5050]: I1123 16:21:09.604293 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2a51dc0cb25a783e9e597b4a83af703b56f008f54872206431b7a6b4d62b70c7"} err="failed to get container status \"2a51dc0cb25a783e9e597b4a83af703b56f008f54872206431b7a6b4d62b70c7\": rpc error: code = NotFound desc = could not find container \"2a51dc0cb25a783e9e597b4a83af703b56f008f54872206431b7a6b4d62b70c7\": container with ID starting with 2a51dc0cb25a783e9e597b4a83af703b56f008f54872206431b7a6b4d62b70c7 not found: ID does not exist"
Nov 23 16:21:09 crc kubenswrapper[5050]: I1123 16:21:09.621249 5050 scope.go:117] "RemoveContainer" containerID="1fa29c3e03df4598e827f87c1230956f3a237e7f5c51dc82b2df8ece9c184337"
Nov 23 16:21:09 crc kubenswrapper[5050]: I1123 16:21:09.674237 5050 scope.go:117] "RemoveContainer" containerID="e3222612ce606cca9d830c9a71106e3a15f0f4ad7e1803fa600d8aea026e4915"
Nov 23 16:21:09 crc kubenswrapper[5050]: I1123 16:21:09.708168 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Nov 23 16:21:09 crc kubenswrapper[5050]: I1123 16:21:09.765110 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dd6ddc23-bc16-45ee-adc2-4e354e707c2d-combined-ca-bundle\") pod \"dd6ddc23-bc16-45ee-adc2-4e354e707c2d\" (UID: \"dd6ddc23-bc16-45ee-adc2-4e354e707c2d\") "
Nov 23 16:21:09 crc kubenswrapper[5050]: I1123 16:21:09.765280 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/dd6ddc23-bc16-45ee-adc2-4e354e707c2d-httpd-run\") pod \"dd6ddc23-bc16-45ee-adc2-4e354e707c2d\" (UID: \"dd6ddc23-bc16-45ee-adc2-4e354e707c2d\") "
Nov 23 16:21:09 crc kubenswrapper[5050]: I1123 16:21:09.765350 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dd6ddc23-bc16-45ee-adc2-4e354e707c2d-scripts\") pod \"dd6ddc23-bc16-45ee-adc2-4e354e707c2d\" (UID: \"dd6ddc23-bc16-45ee-adc2-4e354e707c2d\") "
Nov 23 16:21:09 crc kubenswrapper[5050]: I1123 16:21:09.765458 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dd6ddc23-bc16-45ee-adc2-4e354e707c2d-config-data\") pod \"dd6ddc23-bc16-45ee-adc2-4e354e707c2d\" (UID: \"dd6ddc23-bc16-45ee-adc2-4e354e707c2d\") "
Nov 23 16:21:09 crc kubenswrapper[5050]: I1123 16:21:09.765528 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wtzrr\" (UniqueName: \"kubernetes.io/projected/dd6ddc23-bc16-45ee-adc2-4e354e707c2d-kube-api-access-wtzrr\") pod \"dd6ddc23-bc16-45ee-adc2-4e354e707c2d\" (UID: \"dd6ddc23-bc16-45ee-adc2-4e354e707c2d\") "
Nov 23 16:21:09 crc kubenswrapper[5050]: I1123 16:21:09.765549 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/dd6ddc23-bc16-45ee-adc2-4e354e707c2d-ceph\") pod \"dd6ddc23-bc16-45ee-adc2-4e354e707c2d\" (UID: \"dd6ddc23-bc16-45ee-adc2-4e354e707c2d\") "
Nov 23 16:21:09 crc kubenswrapper[5050]: I1123 16:21:09.765727 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/dd6ddc23-bc16-45ee-adc2-4e354e707c2d-logs\") pod \"dd6ddc23-bc16-45ee-adc2-4e354e707c2d\" (UID: \"dd6ddc23-bc16-45ee-adc2-4e354e707c2d\") "
Nov 23 16:21:09 crc kubenswrapper[5050]: I1123 16:21:09.767614 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dd6ddc23-bc16-45ee-adc2-4e354e707c2d-logs" (OuterVolumeSpecName: "logs") pod "dd6ddc23-bc16-45ee-adc2-4e354e707c2d" (UID: "dd6ddc23-bc16-45ee-adc2-4e354e707c2d"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 23 16:21:09 crc kubenswrapper[5050]: I1123 16:21:09.768377 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dd6ddc23-bc16-45ee-adc2-4e354e707c2d-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "dd6ddc23-bc16-45ee-adc2-4e354e707c2d" (UID: "dd6ddc23-bc16-45ee-adc2-4e354e707c2d"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 23 16:21:09 crc kubenswrapper[5050]: I1123 16:21:09.775650 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dd6ddc23-bc16-45ee-adc2-4e354e707c2d-kube-api-access-wtzrr" (OuterVolumeSpecName: "kube-api-access-wtzrr") pod "dd6ddc23-bc16-45ee-adc2-4e354e707c2d" (UID: "dd6ddc23-bc16-45ee-adc2-4e354e707c2d"). InnerVolumeSpecName "kube-api-access-wtzrr". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 23 16:21:09 crc kubenswrapper[5050]: I1123 16:21:09.776981 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dd6ddc23-bc16-45ee-adc2-4e354e707c2d-scripts" (OuterVolumeSpecName: "scripts") pod "dd6ddc23-bc16-45ee-adc2-4e354e707c2d" (UID: "dd6ddc23-bc16-45ee-adc2-4e354e707c2d"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 23 16:21:09 crc kubenswrapper[5050]: I1123 16:21:09.777994 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dd6ddc23-bc16-45ee-adc2-4e354e707c2d-ceph" (OuterVolumeSpecName: "ceph") pod "dd6ddc23-bc16-45ee-adc2-4e354e707c2d" (UID: "dd6ddc23-bc16-45ee-adc2-4e354e707c2d"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 23 16:21:09 crc kubenswrapper[5050]: I1123 16:21:09.880801 5050 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/dd6ddc23-bc16-45ee-adc2-4e354e707c2d-logs\") on node \"crc\" DevicePath \"\""
Nov 23 16:21:09 crc kubenswrapper[5050]: I1123 16:21:09.880846 5050 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/dd6ddc23-bc16-45ee-adc2-4e354e707c2d-httpd-run\") on node \"crc\" DevicePath \"\""
Nov 23 16:21:09 crc kubenswrapper[5050]: I1123 16:21:09.880931 5050 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dd6ddc23-bc16-45ee-adc2-4e354e707c2d-scripts\") on node \"crc\" DevicePath \"\""
Nov 23 16:21:09 crc kubenswrapper[5050]: I1123 16:21:09.880940 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wtzrr\" (UniqueName: \"kubernetes.io/projected/dd6ddc23-bc16-45ee-adc2-4e354e707c2d-kube-api-access-wtzrr\") on node \"crc\" DevicePath \"\""
Nov 23 16:21:09 crc kubenswrapper[5050]: I1123 16:21:09.880969 5050 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/dd6ddc23-bc16-45ee-adc2-4e354e707c2d-ceph\") on node \"crc\" DevicePath \"\""
Nov 23 16:21:09 crc kubenswrapper[5050]: I1123 16:21:09.899839 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dd6ddc23-bc16-45ee-adc2-4e354e707c2d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "dd6ddc23-bc16-45ee-adc2-4e354e707c2d" (UID: "dd6ddc23-bc16-45ee-adc2-4e354e707c2d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 23 16:21:09 crc kubenswrapper[5050]: I1123 16:21:09.917088 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dd6ddc23-bc16-45ee-adc2-4e354e707c2d-config-data" (OuterVolumeSpecName: "config-data") pod "dd6ddc23-bc16-45ee-adc2-4e354e707c2d" (UID: "dd6ddc23-bc16-45ee-adc2-4e354e707c2d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 23 16:21:09 crc kubenswrapper[5050]: I1123 16:21:09.983388 5050 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dd6ddc23-bc16-45ee-adc2-4e354e707c2d-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 23 16:21:09 crc kubenswrapper[5050]: I1123 16:21:09.983423 5050 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dd6ddc23-bc16-45ee-adc2-4e354e707c2d-config-data\") on node \"crc\" DevicePath \"\""
Nov 23 16:21:10 crc kubenswrapper[5050]: I1123 16:21:10.312418 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"dd6ddc23-bc16-45ee-adc2-4e354e707c2d","Type":"ContainerDied","Data":"4f8f883108c7994e47117d265eafb1e516c08f05f30f3178f6bddc87826fabfe"}
Nov 23 16:21:10 crc kubenswrapper[5050]: I1123 16:21:10.312495 5050 scope.go:117] "RemoveContainer" containerID="d224f6bcfdea9e3c42bcb919fd0586bca9651f6ed43080511d95e49dac735628"
Nov 23 16:21:10 crc kubenswrapper[5050]: I1123 16:21:10.312636 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Nov 23 16:21:10 crc kubenswrapper[5050]: I1123 16:21:10.319710 5050 generic.go:334] "Generic (PLEG): container finished" podID="54c7819b-2cfc-4c76-8777-ec8ca37e418e" containerID="44bfe94e167510c54c0a0ff25886435bcb8b77e0e097f04e95214b3e4cbc1175" exitCode=0
Nov 23 16:21:10 crc kubenswrapper[5050]: I1123 16:21:10.319751 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"54c7819b-2cfc-4c76-8777-ec8ca37e418e","Type":"ContainerDied","Data":"44bfe94e167510c54c0a0ff25886435bcb8b77e0e097f04e95214b3e4cbc1175"}
Nov 23 16:21:10 crc kubenswrapper[5050]: I1123 16:21:10.367422 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"]
Nov 23 16:21:10 crc kubenswrapper[5050]: I1123 16:21:10.379662 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"]
Nov 23 16:21:10 crc kubenswrapper[5050]: I1123 16:21:10.391273 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"]
Nov 23 16:21:10 crc kubenswrapper[5050]: E1123 16:21:10.392085 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dd6ddc23-bc16-45ee-adc2-4e354e707c2d" containerName="glance-httpd"
Nov 23 16:21:10 crc kubenswrapper[5050]: I1123 16:21:10.392113 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="dd6ddc23-bc16-45ee-adc2-4e354e707c2d" containerName="glance-httpd"
Nov 23 16:21:10 crc kubenswrapper[5050]: E1123 16:21:10.392134 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5afd3b44-f6a7-4c3d-acde-51fe6da338a2" containerName="registry-server"
Nov 23 16:21:10 crc kubenswrapper[5050]: I1123 16:21:10.392142 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="5afd3b44-f6a7-4c3d-acde-51fe6da338a2" containerName="registry-server"
Nov 23 16:21:10 crc kubenswrapper[5050]: E1123 16:21:10.392200 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dd6ddc23-bc16-45ee-adc2-4e354e707c2d" containerName="glance-log"
Nov 23 16:21:10 crc kubenswrapper[5050]: I1123 16:21:10.392211 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="dd6ddc23-bc16-45ee-adc2-4e354e707c2d" containerName="glance-log"
Nov 23 16:21:10 crc kubenswrapper[5050]: E1123 16:21:10.392232 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5afd3b44-f6a7-4c3d-acde-51fe6da338a2" containerName="extract-content"
Nov 23 16:21:10 crc kubenswrapper[5050]: I1123 16:21:10.392239 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="5afd3b44-f6a7-4c3d-acde-51fe6da338a2" containerName="extract-content"
Nov 23 16:21:10 crc kubenswrapper[5050]: E1123 16:21:10.392274 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5afd3b44-f6a7-4c3d-acde-51fe6da338a2" containerName="extract-utilities"
Nov 23 16:21:10 crc kubenswrapper[5050]: I1123 16:21:10.392282 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="5afd3b44-f6a7-4c3d-acde-51fe6da338a2" containerName="extract-utilities"
Nov 23 16:21:10 crc kubenswrapper[5050]: I1123 16:21:10.392545 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="dd6ddc23-bc16-45ee-adc2-4e354e707c2d" containerName="glance-log"
Nov 23 16:21:10 crc kubenswrapper[5050]: I1123 16:21:10.392571 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="dd6ddc23-bc16-45ee-adc2-4e354e707c2d" containerName="glance-httpd"
Nov 23 16:21:10 crc kubenswrapper[5050]: I1123 16:21:10.392591 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="5afd3b44-f6a7-4c3d-acde-51fe6da338a2" containerName="registry-server"
Nov 23 16:21:10 crc kubenswrapper[5050]: I1123 16:21:10.394285 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Nov 23 16:21:10 crc kubenswrapper[5050]: I1123 16:21:10.399296 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data"
Nov 23 16:21:10 crc kubenswrapper[5050]: I1123 16:21:10.401122 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"]
Nov 23 16:21:10 crc kubenswrapper[5050]: I1123 16:21:10.500157 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/679ef9b6-a937-459d-86e7-125151f37744-config-data\") pod \"glance-default-internal-api-0\" (UID: \"679ef9b6-a937-459d-86e7-125151f37744\") " pod="openstack/glance-default-internal-api-0"
Nov 23 16:21:10 crc kubenswrapper[5050]: I1123 16:21:10.500240 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dfqd9\" (UniqueName: \"kubernetes.io/projected/679ef9b6-a937-459d-86e7-125151f37744-kube-api-access-dfqd9\") pod \"glance-default-internal-api-0\" (UID: \"679ef9b6-a937-459d-86e7-125151f37744\") " pod="openstack/glance-default-internal-api-0"
Nov 23 16:21:10 crc kubenswrapper[5050]: I1123 16:21:10.500417 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/679ef9b6-a937-459d-86e7-125151f37744-ceph\") pod \"glance-default-internal-api-0\" (UID: \"679ef9b6-a937-459d-86e7-125151f37744\") " pod="openstack/glance-default-internal-api-0"
Nov 23 16:21:10 crc kubenswrapper[5050]: I1123 16:21:10.500913 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/679ef9b6-a937-459d-86e7-125151f37744-scripts\") pod \"glance-default-internal-api-0\" (UID: \"679ef9b6-a937-459d-86e7-125151f37744\") " pod="openstack/glance-default-internal-api-0"
Nov 23 16:21:10 crc kubenswrapper[5050]: I1123 16:21:10.500964 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/679ef9b6-a937-459d-86e7-125151f37744-logs\") pod \"glance-default-internal-api-0\" (UID: \"679ef9b6-a937-459d-86e7-125151f37744\") " pod="openstack/glance-default-internal-api-0"
Nov 23 16:21:10 crc kubenswrapper[5050]: I1123 16:21:10.501177 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/679ef9b6-a937-459d-86e7-125151f37744-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"679ef9b6-a937-459d-86e7-125151f37744\") " pod="openstack/glance-default-internal-api-0"
Nov 23 16:21:10 crc kubenswrapper[5050]: I1123 16:21:10.501213 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/679ef9b6-a937-459d-86e7-125151f37744-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"679ef9b6-a937-459d-86e7-125151f37744\") " pod="openstack/glance-default-internal-api-0"
Nov 23 16:21:10 crc kubenswrapper[5050]: I1123 16:21:10.603905 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/679ef9b6-a937-459d-86e7-125151f37744-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"679ef9b6-a937-459d-86e7-125151f37744\") " pod="openstack/glance-default-internal-api-0"
Nov 23 16:21:10 crc kubenswrapper[5050]: I1123 16:21:10.604270 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/679ef9b6-a937-459d-86e7-125151f37744-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"679ef9b6-a937-459d-86e7-125151f37744\") " pod="openstack/glance-default-internal-api-0"
Nov 23 16:21:10 crc kubenswrapper[5050]: I1123 16:21:10.604488 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/679ef9b6-a937-459d-86e7-125151f37744-config-data\") pod \"glance-default-internal-api-0\" (UID: \"679ef9b6-a937-459d-86e7-125151f37744\") " pod="openstack/glance-default-internal-api-0"
Nov 23 16:21:10 crc kubenswrapper[5050]: I1123 16:21:10.604609 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/679ef9b6-a937-459d-86e7-125151f37744-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"679ef9b6-a937-459d-86e7-125151f37744\") " pod="openstack/glance-default-internal-api-0"
Nov 23 16:21:10 crc kubenswrapper[5050]: I1123 16:21:10.604627 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dfqd9\" (UniqueName: \"kubernetes.io/projected/679ef9b6-a937-459d-86e7-125151f37744-kube-api-access-dfqd9\") pod \"glance-default-internal-api-0\" (UID: \"679ef9b6-a937-459d-86e7-125151f37744\") " pod="openstack/glance-default-internal-api-0"
Nov 23 16:21:10 crc kubenswrapper[5050]: I1123 16:21:10.604808 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/679ef9b6-a937-459d-86e7-125151f37744-ceph\") pod \"glance-default-internal-api-0\" (UID: \"679ef9b6-a937-459d-86e7-125151f37744\") " pod="openstack/glance-default-internal-api-0"
Nov 23 16:21:10 crc kubenswrapper[5050]: I1123 16:21:10.605126 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/679ef9b6-a937-459d-86e7-125151f37744-scripts\") pod \"glance-default-internal-api-0\" (UID: \"679ef9b6-a937-459d-86e7-125151f37744\") " pod="openstack/glance-default-internal-api-0"
Nov 23 16:21:10 crc kubenswrapper[5050]: I1123 16:21:10.605211 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/679ef9b6-a937-459d-86e7-125151f37744-logs\") pod \"glance-default-internal-api-0\" (UID: \"679ef9b6-a937-459d-86e7-125151f37744\") " pod="openstack/glance-default-internal-api-0"
Nov 23 16:21:10 crc kubenswrapper[5050]: I1123 16:21:10.605650 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/679ef9b6-a937-459d-86e7-125151f37744-logs\") pod \"glance-default-internal-api-0\" (UID: \"679ef9b6-a937-459d-86e7-125151f37744\") " pod="openstack/glance-default-internal-api-0"
Nov 23 16:21:10 crc kubenswrapper[5050]: I1123 16:21:10.610316 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/679ef9b6-a937-459d-86e7-125151f37744-ceph\") pod \"glance-default-internal-api-0\" (UID: \"679ef9b6-a937-459d-86e7-125151f37744\") " pod="openstack/glance-default-internal-api-0"
Nov 23 16:21:10 crc kubenswrapper[5050]: I1123 16:21:10.617808 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/679ef9b6-a937-459d-86e7-125151f37744-scripts\") pod \"glance-default-internal-api-0\" (UID: \"679ef9b6-a937-459d-86e7-125151f37744\") " pod="openstack/glance-default-internal-api-0"
Nov 23 16:21:10 crc kubenswrapper[5050]: I1123 16:21:10.621308 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/679ef9b6-a937-459d-86e7-125151f37744-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"679ef9b6-a937-459d-86e7-125151f37744\") " pod="openstack/glance-default-internal-api-0"
Nov 23 16:21:10 crc kubenswrapper[5050]: I1123 16:21:10.621424 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dfqd9\" (UniqueName: \"kubernetes.io/projected/679ef9b6-a937-459d-86e7-125151f37744-kube-api-access-dfqd9\") pod \"glance-default-internal-api-0\" (UID: \"679ef9b6-a937-459d-86e7-125151f37744\") " pod="openstack/glance-default-internal-api-0"
Nov 23 16:21:10 crc kubenswrapper[5050]: I1123 16:21:10.622465 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/679ef9b6-a937-459d-86e7-125151f37744-config-data\") pod \"glance-default-internal-api-0\" (UID: \"679ef9b6-a937-459d-86e7-125151f37744\") " pod="openstack/glance-default-internal-api-0"
Nov 23 16:21:10 crc kubenswrapper[5050]: I1123 16:21:10.783558 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Nov 23 16:21:11 crc kubenswrapper[5050]: I1123 16:21:11.460245 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-zg4kx"
Nov 23 16:21:11 crc kubenswrapper[5050]: I1123 16:21:11.515671 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-zg4kx"
Nov 23 16:21:11 crc kubenswrapper[5050]: I1123 16:21:11.565043 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dd6ddc23-bc16-45ee-adc2-4e354e707c2d" path="/var/lib/kubelet/pods/dd6ddc23-bc16-45ee-adc2-4e354e707c2d/volumes"
Nov 23 16:21:12 crc kubenswrapper[5050]: I1123 16:21:12.587713 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-zg4kx"]
Nov 23 16:21:13 crc kubenswrapper[5050]: I1123 16:21:13.372961 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-zg4kx" podUID="3c3b1ea4-2747-47b6-8dad-97d13d1070b8" containerName="registry-server" containerID="cri-o://1fb179844c05a1dd01709e6b61e242844ae409ad1285ac83b34f6e28ab4ff246" gracePeriod=2
Nov 23 16:21:14 crc kubenswrapper[5050]: I1123 16:21:14.391967 5050 generic.go:334] "Generic (PLEG): container finished" podID="3c3b1ea4-2747-47b6-8dad-97d13d1070b8" containerID="1fb179844c05a1dd01709e6b61e242844ae409ad1285ac83b34f6e28ab4ff246" exitCode=0
Nov 23 16:21:14 crc kubenswrapper[5050]: I1123 16:21:14.392033 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zg4kx" event={"ID":"3c3b1ea4-2747-47b6-8dad-97d13d1070b8","Type":"ContainerDied","Data":"1fb179844c05a1dd01709e6b61e242844ae409ad1285ac83b34f6e28ab4ff246"}
Nov 23 16:21:15 crc kubenswrapper[5050]: I1123 16:21:15.283395 5050 scope.go:117] "RemoveContainer" containerID="01a7d8a04e112b855020224eb1edacb1527bc653d06ca87278860e7afd6a3cac"
Nov 23 16:21:15 crc kubenswrapper[5050]: I1123 16:21:15.363678 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Nov 23 16:21:15 crc kubenswrapper[5050]: I1123 16:21:15.438394 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/54c7819b-2cfc-4c76-8777-ec8ca37e418e-config-data\") pod \"54c7819b-2cfc-4c76-8777-ec8ca37e418e\" (UID: \"54c7819b-2cfc-4c76-8777-ec8ca37e418e\") "
Nov 23 16:21:15 crc kubenswrapper[5050]: I1123 16:21:15.438507 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/54c7819b-2cfc-4c76-8777-ec8ca37e418e-httpd-run\") pod \"54c7819b-2cfc-4c76-8777-ec8ca37e418e\" (UID: \"54c7819b-2cfc-4c76-8777-ec8ca37e418e\") "
Nov 23 16:21:15 crc kubenswrapper[5050]: I1123 16:21:15.438542 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2bffs\" (UniqueName: \"kubernetes.io/projected/54c7819b-2cfc-4c76-8777-ec8ca37e418e-kube-api-access-2bffs\") pod \"54c7819b-2cfc-4c76-8777-ec8ca37e418e\" (UID: \"54c7819b-2cfc-4c76-8777-ec8ca37e418e\") "
Nov 23 16:21:15 crc kubenswrapper[5050]: I1123 16:21:15.438756 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/54c7819b-2cfc-4c76-8777-ec8ca37e418e-ceph\") pod \"54c7819b-2cfc-4c76-8777-ec8ca37e418e\" (UID: \"54c7819b-2cfc-4c76-8777-ec8ca37e418e\") "
Nov 23 16:21:15 crc kubenswrapper[5050]: I1123 16:21:15.438845 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/54c7819b-2cfc-4c76-8777-ec8ca37e418e-combined-ca-bundle\") pod \"54c7819b-2cfc-4c76-8777-ec8ca37e418e\" (UID: \"54c7819b-2cfc-4c76-8777-ec8ca37e418e\") "
Nov 23 16:21:15 crc kubenswrapper[5050]: I1123 16:21:15.438869 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/54c7819b-2cfc-4c76-8777-ec8ca37e418e-scripts\") pod \"54c7819b-2cfc-4c76-8777-ec8ca37e418e\" (UID: \"54c7819b-2cfc-4c76-8777-ec8ca37e418e\") "
Nov 23 16:21:15 crc kubenswrapper[5050]: I1123 16:21:15.439031 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/54c7819b-2cfc-4c76-8777-ec8ca37e418e-logs\") pod \"54c7819b-2cfc-4c76-8777-ec8ca37e418e\" (UID: \"54c7819b-2cfc-4c76-8777-ec8ca37e418e\") "
Nov 23 16:21:15 crc kubenswrapper[5050]: I1123 16:21:15.442043 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"54c7819b-2cfc-4c76-8777-ec8ca37e418e","Type":"ContainerDied","Data":"e32fb52b7342bd2da4178f8ee9f9b81bddff10c652ab4daea2f45296d2f026cc"}
Nov 23 16:21:15 crc kubenswrapper[5050]: I1123 16:21:15.442340 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Nov 23 16:21:15 crc kubenswrapper[5050]: I1123 16:21:15.445459 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/54c7819b-2cfc-4c76-8777-ec8ca37e418e-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "54c7819b-2cfc-4c76-8777-ec8ca37e418e" (UID: "54c7819b-2cfc-4c76-8777-ec8ca37e418e"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 16:21:15 crc kubenswrapper[5050]: I1123 16:21:15.452673 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/54c7819b-2cfc-4c76-8777-ec8ca37e418e-kube-api-access-2bffs" (OuterVolumeSpecName: "kube-api-access-2bffs") pod "54c7819b-2cfc-4c76-8777-ec8ca37e418e" (UID: "54c7819b-2cfc-4c76-8777-ec8ca37e418e"). InnerVolumeSpecName "kube-api-access-2bffs". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 16:21:15 crc kubenswrapper[5050]: I1123 16:21:15.460649 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/54c7819b-2cfc-4c76-8777-ec8ca37e418e-logs" (OuterVolumeSpecName: "logs") pod "54c7819b-2cfc-4c76-8777-ec8ca37e418e" (UID: "54c7819b-2cfc-4c76-8777-ec8ca37e418e"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 16:21:15 crc kubenswrapper[5050]: I1123 16:21:15.463567 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/54c7819b-2cfc-4c76-8777-ec8ca37e418e-scripts" (OuterVolumeSpecName: "scripts") pod "54c7819b-2cfc-4c76-8777-ec8ca37e418e" (UID: "54c7819b-2cfc-4c76-8777-ec8ca37e418e"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:21:15 crc kubenswrapper[5050]: I1123 16:21:15.463759 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/54c7819b-2cfc-4c76-8777-ec8ca37e418e-ceph" (OuterVolumeSpecName: "ceph") pod "54c7819b-2cfc-4c76-8777-ec8ca37e418e" (UID: "54c7819b-2cfc-4c76-8777-ec8ca37e418e"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 16:21:15 crc kubenswrapper[5050]: E1123 16:21:15.532495 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/54c7819b-2cfc-4c76-8777-ec8ca37e418e-combined-ca-bundle podName:54c7819b-2cfc-4c76-8777-ec8ca37e418e nodeName:}" failed. No retries permitted until 2025-11-23 16:21:16.032417183 +0000 UTC m=+5971.199413668 (durationBeforeRetry 500ms). Error: error cleaning subPath mounts for volume "combined-ca-bundle" (UniqueName: "kubernetes.io/secret/54c7819b-2cfc-4c76-8777-ec8ca37e418e-combined-ca-bundle") pod "54c7819b-2cfc-4c76-8777-ec8ca37e418e" (UID: "54c7819b-2cfc-4c76-8777-ec8ca37e418e") : error deleting /var/lib/kubelet/pods/54c7819b-2cfc-4c76-8777-ec8ca37e418e/volume-subpaths: remove /var/lib/kubelet/pods/54c7819b-2cfc-4c76-8777-ec8ca37e418e/volume-subpaths: no such file or directory Nov 23 16:21:15 crc kubenswrapper[5050]: I1123 16:21:15.536653 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/54c7819b-2cfc-4c76-8777-ec8ca37e418e-config-data" (OuterVolumeSpecName: "config-data") pod "54c7819b-2cfc-4c76-8777-ec8ca37e418e" (UID: "54c7819b-2cfc-4c76-8777-ec8ca37e418e"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:21:15 crc kubenswrapper[5050]: I1123 16:21:15.541835 5050 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/54c7819b-2cfc-4c76-8777-ec8ca37e418e-scripts\") on node \"crc\" DevicePath \"\"" Nov 23 16:21:15 crc kubenswrapper[5050]: I1123 16:21:15.541863 5050 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/54c7819b-2cfc-4c76-8777-ec8ca37e418e-logs\") on node \"crc\" DevicePath \"\"" Nov 23 16:21:15 crc kubenswrapper[5050]: I1123 16:21:15.541886 5050 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/54c7819b-2cfc-4c76-8777-ec8ca37e418e-config-data\") on node \"crc\" DevicePath \"\"" Nov 23 16:21:15 crc kubenswrapper[5050]: I1123 16:21:15.541896 5050 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/54c7819b-2cfc-4c76-8777-ec8ca37e418e-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 23 16:21:15 crc kubenswrapper[5050]: I1123 16:21:15.541906 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2bffs\" (UniqueName: \"kubernetes.io/projected/54c7819b-2cfc-4c76-8777-ec8ca37e418e-kube-api-access-2bffs\") on node \"crc\" DevicePath \"\"" Nov 23 16:21:15 crc kubenswrapper[5050]: I1123 16:21:15.541915 5050 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/54c7819b-2cfc-4c76-8777-ec8ca37e418e-ceph\") on node \"crc\" DevicePath \"\"" Nov 23 16:21:15 crc kubenswrapper[5050]: I1123 16:21:15.930619 5050 scope.go:117] "RemoveContainer" containerID="44bfe94e167510c54c0a0ff25886435bcb8b77e0e097f04e95214b3e4cbc1175" Nov 23 16:21:15 crc kubenswrapper[5050]: I1123 16:21:15.980398 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-zg4kx" Nov 23 16:21:15 crc kubenswrapper[5050]: I1123 16:21:15.996060 5050 scope.go:117] "RemoveContainer" containerID="0123ddf672b0c9cd788e8ae216a52c37b57380502b85282f4f6d904a093eade8" Nov 23 16:21:16 crc kubenswrapper[5050]: I1123 16:21:16.055858 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/54c7819b-2cfc-4c76-8777-ec8ca37e418e-combined-ca-bundle\") pod \"54c7819b-2cfc-4c76-8777-ec8ca37e418e\" (UID: \"54c7819b-2cfc-4c76-8777-ec8ca37e418e\") " Nov 23 16:21:16 crc kubenswrapper[5050]: I1123 16:21:16.061638 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/54c7819b-2cfc-4c76-8777-ec8ca37e418e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "54c7819b-2cfc-4c76-8777-ec8ca37e418e" (UID: "54c7819b-2cfc-4c76-8777-ec8ca37e418e"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:21:16 crc kubenswrapper[5050]: I1123 16:21:16.158239 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3c3b1ea4-2747-47b6-8dad-97d13d1070b8-catalog-content\") pod \"3c3b1ea4-2747-47b6-8dad-97d13d1070b8\" (UID: \"3c3b1ea4-2747-47b6-8dad-97d13d1070b8\") " Nov 23 16:21:16 crc kubenswrapper[5050]: I1123 16:21:16.158298 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3c3b1ea4-2747-47b6-8dad-97d13d1070b8-utilities\") pod \"3c3b1ea4-2747-47b6-8dad-97d13d1070b8\" (UID: \"3c3b1ea4-2747-47b6-8dad-97d13d1070b8\") " Nov 23 16:21:16 crc kubenswrapper[5050]: I1123 16:21:16.158386 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sr7qd\" (UniqueName: \"kubernetes.io/projected/3c3b1ea4-2747-47b6-8dad-97d13d1070b8-kube-api-access-sr7qd\") pod \"3c3b1ea4-2747-47b6-8dad-97d13d1070b8\" (UID: \"3c3b1ea4-2747-47b6-8dad-97d13d1070b8\") " Nov 23 16:21:16 crc kubenswrapper[5050]: I1123 16:21:16.159047 5050 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/54c7819b-2cfc-4c76-8777-ec8ca37e418e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 16:21:16 crc kubenswrapper[5050]: I1123 16:21:16.159365 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3c3b1ea4-2747-47b6-8dad-97d13d1070b8-utilities" (OuterVolumeSpecName: "utilities") pod "3c3b1ea4-2747-47b6-8dad-97d13d1070b8" (UID: "3c3b1ea4-2747-47b6-8dad-97d13d1070b8"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 16:21:16 crc kubenswrapper[5050]: I1123 16:21:16.164798 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3c3b1ea4-2747-47b6-8dad-97d13d1070b8-kube-api-access-sr7qd" (OuterVolumeSpecName: "kube-api-access-sr7qd") pod "3c3b1ea4-2747-47b6-8dad-97d13d1070b8" (UID: "3c3b1ea4-2747-47b6-8dad-97d13d1070b8"). InnerVolumeSpecName "kube-api-access-sr7qd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 16:21:16 crc kubenswrapper[5050]: I1123 16:21:16.260989 5050 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3c3b1ea4-2747-47b6-8dad-97d13d1070b8-utilities\") on node \"crc\" DevicePath \"\"" Nov 23 16:21:16 crc kubenswrapper[5050]: I1123 16:21:16.261635 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sr7qd\" (UniqueName: \"kubernetes.io/projected/3c3b1ea4-2747-47b6-8dad-97d13d1070b8-kube-api-access-sr7qd\") on node \"crc\" DevicePath \"\"" Nov 23 16:21:16 crc kubenswrapper[5050]: I1123 16:21:16.286348 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3c3b1ea4-2747-47b6-8dad-97d13d1070b8-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "3c3b1ea4-2747-47b6-8dad-97d13d1070b8" (UID: "3c3b1ea4-2747-47b6-8dad-97d13d1070b8"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 16:21:16 crc kubenswrapper[5050]: I1123 16:21:16.364325 5050 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3c3b1ea4-2747-47b6-8dad-97d13d1070b8-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 23 16:21:16 crc kubenswrapper[5050]: I1123 16:21:16.429458 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 23 16:21:16 crc kubenswrapper[5050]: I1123 16:21:16.444602 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 23 16:21:16 crc kubenswrapper[5050]: I1123 16:21:16.460203 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Nov 23 16:21:16 crc kubenswrapper[5050]: E1123 16:21:16.461092 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="54c7819b-2cfc-4c76-8777-ec8ca37e418e" containerName="glance-httpd" Nov 23 16:21:16 crc kubenswrapper[5050]: I1123 16:21:16.461123 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="54c7819b-2cfc-4c76-8777-ec8ca37e418e" containerName="glance-httpd" Nov 23 16:21:16 crc kubenswrapper[5050]: E1123 16:21:16.461145 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3c3b1ea4-2747-47b6-8dad-97d13d1070b8" containerName="extract-content" Nov 23 16:21:16 crc kubenswrapper[5050]: I1123 16:21:16.461155 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="3c3b1ea4-2747-47b6-8dad-97d13d1070b8" containerName="extract-content" Nov 23 16:21:16 crc kubenswrapper[5050]: E1123 16:21:16.461189 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3c3b1ea4-2747-47b6-8dad-97d13d1070b8" containerName="registry-server" Nov 23 16:21:16 crc kubenswrapper[5050]: I1123 16:21:16.461197 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="3c3b1ea4-2747-47b6-8dad-97d13d1070b8" containerName="registry-server" Nov 23 16:21:16 crc kubenswrapper[5050]: E1123 16:21:16.461211 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="54c7819b-2cfc-4c76-8777-ec8ca37e418e" containerName="glance-log" Nov 23 16:21:16 crc kubenswrapper[5050]: I1123 16:21:16.461220 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="54c7819b-2cfc-4c76-8777-ec8ca37e418e" containerName="glance-log" Nov 23 16:21:16 crc kubenswrapper[5050]: E1123 16:21:16.461245 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3c3b1ea4-2747-47b6-8dad-97d13d1070b8" containerName="extract-utilities" Nov 23 16:21:16 crc kubenswrapper[5050]: I1123 16:21:16.461256 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="3c3b1ea4-2747-47b6-8dad-97d13d1070b8" containerName="extract-utilities" Nov 23 16:21:16 crc kubenswrapper[5050]: I1123 16:21:16.461633 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="54c7819b-2cfc-4c76-8777-ec8ca37e418e" containerName="glance-log" Nov 23 16:21:16 crc kubenswrapper[5050]: I1123 16:21:16.461676 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="54c7819b-2cfc-4c76-8777-ec8ca37e418e" containerName="glance-httpd" Nov 23 16:21:16 crc kubenswrapper[5050]: I1123 16:21:16.461700 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="3c3b1ea4-2747-47b6-8dad-97d13d1070b8" containerName="registry-server" Nov 23 16:21:16 crc kubenswrapper[5050]: I1123 16:21:16.463584 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 23 16:21:16 crc kubenswrapper[5050]: I1123 16:21:16.474050 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Nov 23 16:21:16 crc kubenswrapper[5050]: I1123 16:21:16.480404 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 23 16:21:16 crc kubenswrapper[5050]: I1123 16:21:16.496707 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-zg4kx" Nov 23 16:21:16 crc kubenswrapper[5050]: I1123 16:21:16.497641 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zg4kx" event={"ID":"3c3b1ea4-2747-47b6-8dad-97d13d1070b8","Type":"ContainerDied","Data":"45fee6e7e130296234f20e924844febf20d8dd0dfabaeeec99d05132b3ae3fd5"} Nov 23 16:21:16 crc kubenswrapper[5050]: I1123 16:21:16.497812 5050 scope.go:117] "RemoveContainer" containerID="1fb179844c05a1dd01709e6b61e242844ae409ad1285ac83b34f6e28ab4ff246" Nov 23 16:21:16 crc kubenswrapper[5050]: I1123 16:21:16.518695 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-bf4fdc45f-c4g79" event={"ID":"de915556-22d5-4ea1-a24e-b872c1fdbdae","Type":"ContainerStarted","Data":"3fb9d120d923a2f08457f57e1b459fa07470dbb98aab5e7227cebe4e0a6b5e14"} Nov 23 16:21:16 crc kubenswrapper[5050]: I1123 16:21:16.525314 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-68c8987657-cthvh" event={"ID":"4e1a52e2-909e-440a-b0ba-e4426cc19355","Type":"ContainerStarted","Data":"f22da02f9c1d7769eca2ddb8261145ac289031daea82f417adb2b7c59a44c44e"} Nov 23 16:21:16 crc kubenswrapper[5050]: I1123 16:21:16.530172 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-69568d7b7c-xt7nq" event={"ID":"9b167679-73f1-44f8-bd64-f8172c3f8b1b","Type":"ContainerStarted","Data":"3f748f53d7f464e00bf2848d3bd1d832edb705f84d97a01c4f1be3e26d3bb99e"} Nov 23 16:21:16 crc kubenswrapper[5050]: I1123 16:21:16.565781 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 23 16:21:16 crc kubenswrapper[5050]: I1123 16:21:16.570600 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6aadf9b9-c5a7-4938-8ba5-85d12ea935f0-scripts\") pod \"glance-default-external-api-0\" (UID: \"6aadf9b9-c5a7-4938-8ba5-85d12ea935f0\") " pod="openstack/glance-default-external-api-0" Nov 23 16:21:16 crc kubenswrapper[5050]: I1123 16:21:16.570770 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6aadf9b9-c5a7-4938-8ba5-85d12ea935f0-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"6aadf9b9-c5a7-4938-8ba5-85d12ea935f0\") " pod="openstack/glance-default-external-api-0" Nov 23 16:21:16 crc kubenswrapper[5050]: I1123 16:21:16.571044 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/6aadf9b9-c5a7-4938-8ba5-85d12ea935f0-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"6aadf9b9-c5a7-4938-8ba5-85d12ea935f0\") " pod="openstack/glance-default-external-api-0" Nov 23 16:21:16 crc kubenswrapper[5050]: I1123 16:21:16.571288 5050 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4tspg\" (UniqueName: \"kubernetes.io/projected/6aadf9b9-c5a7-4938-8ba5-85d12ea935f0-kube-api-access-4tspg\") pod \"glance-default-external-api-0\" (UID: \"6aadf9b9-c5a7-4938-8ba5-85d12ea935f0\") " pod="openstack/glance-default-external-api-0" Nov 23 16:21:16 crc kubenswrapper[5050]: I1123 16:21:16.571339 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6aadf9b9-c5a7-4938-8ba5-85d12ea935f0-config-data\") pod \"glance-default-external-api-0\" (UID: \"6aadf9b9-c5a7-4938-8ba5-85d12ea935f0\") " pod="openstack/glance-default-external-api-0" Nov 23 16:21:16 crc kubenswrapper[5050]: I1123 16:21:16.571559 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6aadf9b9-c5a7-4938-8ba5-85d12ea935f0-logs\") pod \"glance-default-external-api-0\" (UID: \"6aadf9b9-c5a7-4938-8ba5-85d12ea935f0\") " pod="openstack/glance-default-external-api-0" Nov 23 16:21:16 crc kubenswrapper[5050]: I1123 16:21:16.571627 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/6aadf9b9-c5a7-4938-8ba5-85d12ea935f0-ceph\") pod \"glance-default-external-api-0\" (UID: \"6aadf9b9-c5a7-4938-8ba5-85d12ea935f0\") " pod="openstack/glance-default-external-api-0" Nov 23 16:21:16 crc kubenswrapper[5050]: I1123 16:21:16.665433 5050 scope.go:117] "RemoveContainer" containerID="c1842dae52591ac085c42484f16c0751706c94adc76abb0f17a883e14f79d089" Nov 23 16:21:16 crc kubenswrapper[5050]: I1123 16:21:16.673552 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6aadf9b9-c5a7-4938-8ba5-85d12ea935f0-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"6aadf9b9-c5a7-4938-8ba5-85d12ea935f0\") " pod="openstack/glance-default-external-api-0" Nov 23 16:21:16 crc kubenswrapper[5050]: I1123 16:21:16.673662 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/6aadf9b9-c5a7-4938-8ba5-85d12ea935f0-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"6aadf9b9-c5a7-4938-8ba5-85d12ea935f0\") " pod="openstack/glance-default-external-api-0" Nov 23 16:21:16 crc kubenswrapper[5050]: I1123 16:21:16.673741 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4tspg\" (UniqueName: \"kubernetes.io/projected/6aadf9b9-c5a7-4938-8ba5-85d12ea935f0-kube-api-access-4tspg\") pod \"glance-default-external-api-0\" (UID: \"6aadf9b9-c5a7-4938-8ba5-85d12ea935f0\") " pod="openstack/glance-default-external-api-0" Nov 23 16:21:16 crc kubenswrapper[5050]: I1123 16:21:16.673767 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6aadf9b9-c5a7-4938-8ba5-85d12ea935f0-config-data\") pod \"glance-default-external-api-0\" (UID: \"6aadf9b9-c5a7-4938-8ba5-85d12ea935f0\") " pod="openstack/glance-default-external-api-0" Nov 23 16:21:16 crc kubenswrapper[5050]: I1123 16:21:16.673830 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6aadf9b9-c5a7-4938-8ba5-85d12ea935f0-logs\") pod \"glance-default-external-api-0\" (UID: 
\"6aadf9b9-c5a7-4938-8ba5-85d12ea935f0\") " pod="openstack/glance-default-external-api-0" Nov 23 16:21:16 crc kubenswrapper[5050]: I1123 16:21:16.673862 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/6aadf9b9-c5a7-4938-8ba5-85d12ea935f0-ceph\") pod \"glance-default-external-api-0\" (UID: \"6aadf9b9-c5a7-4938-8ba5-85d12ea935f0\") " pod="openstack/glance-default-external-api-0" Nov 23 16:21:16 crc kubenswrapper[5050]: I1123 16:21:16.673930 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6aadf9b9-c5a7-4938-8ba5-85d12ea935f0-scripts\") pod \"glance-default-external-api-0\" (UID: \"6aadf9b9-c5a7-4938-8ba5-85d12ea935f0\") " pod="openstack/glance-default-external-api-0" Nov 23 16:21:16 crc kubenswrapper[5050]: I1123 16:21:16.674669 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/6aadf9b9-c5a7-4938-8ba5-85d12ea935f0-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"6aadf9b9-c5a7-4938-8ba5-85d12ea935f0\") " pod="openstack/glance-default-external-api-0" Nov 23 16:21:16 crc kubenswrapper[5050]: I1123 16:21:16.675673 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6aadf9b9-c5a7-4938-8ba5-85d12ea935f0-logs\") pod \"glance-default-external-api-0\" (UID: \"6aadf9b9-c5a7-4938-8ba5-85d12ea935f0\") " pod="openstack/glance-default-external-api-0" Nov 23 16:21:16 crc kubenswrapper[5050]: I1123 16:21:16.699862 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6aadf9b9-c5a7-4938-8ba5-85d12ea935f0-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"6aadf9b9-c5a7-4938-8ba5-85d12ea935f0\") " pod="openstack/glance-default-external-api-0" Nov 23 16:21:16 crc kubenswrapper[5050]: I1123 16:21:16.705392 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6aadf9b9-c5a7-4938-8ba5-85d12ea935f0-scripts\") pod \"glance-default-external-api-0\" (UID: \"6aadf9b9-c5a7-4938-8ba5-85d12ea935f0\") " pod="openstack/glance-default-external-api-0" Nov 23 16:21:16 crc kubenswrapper[5050]: I1123 16:21:16.705432 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/6aadf9b9-c5a7-4938-8ba5-85d12ea935f0-ceph\") pod \"glance-default-external-api-0\" (UID: \"6aadf9b9-c5a7-4938-8ba5-85d12ea935f0\") " pod="openstack/glance-default-external-api-0" Nov 23 16:21:16 crc kubenswrapper[5050]: I1123 16:21:16.708690 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6aadf9b9-c5a7-4938-8ba5-85d12ea935f0-config-data\") pod \"glance-default-external-api-0\" (UID: \"6aadf9b9-c5a7-4938-8ba5-85d12ea935f0\") " pod="openstack/glance-default-external-api-0" Nov 23 16:21:16 crc kubenswrapper[5050]: I1123 16:21:16.711715 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4tspg\" (UniqueName: \"kubernetes.io/projected/6aadf9b9-c5a7-4938-8ba5-85d12ea935f0-kube-api-access-4tspg\") pod \"glance-default-external-api-0\" (UID: \"6aadf9b9-c5a7-4938-8ba5-85d12ea935f0\") " pod="openstack/glance-default-external-api-0" Nov 23 16:21:16 crc kubenswrapper[5050]: I1123 16:21:16.714550 5050 kubelet.go:2437] "SyncLoop 
DELETE" source="api" pods=["openshift-marketplace/redhat-operators-zg4kx"] Nov 23 16:21:16 crc kubenswrapper[5050]: I1123 16:21:16.727973 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-zg4kx"] Nov 23 16:21:16 crc kubenswrapper[5050]: I1123 16:21:16.737908 5050 scope.go:117] "RemoveContainer" containerID="99d561c3f529f9160eb3dacdb4b9b184168b264f60ab0722233e37d076351462" Nov 23 16:21:16 crc kubenswrapper[5050]: I1123 16:21:16.976679 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 23 16:21:17 crc kubenswrapper[5050]: I1123 16:21:17.039857 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-sync-4gnmq"] Nov 23 16:21:17 crc kubenswrapper[5050]: I1123 16:21:17.060388 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-sync-4gnmq"] Nov 23 16:21:17 crc kubenswrapper[5050]: I1123 16:21:17.564234 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="05d7c58c-c46a-486e-9853-a880b565240c" path="/var/lib/kubelet/pods/05d7c58c-c46a-486e-9853-a880b565240c/volumes" Nov 23 16:21:17 crc kubenswrapper[5050]: I1123 16:21:17.565731 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3c3b1ea4-2747-47b6-8dad-97d13d1070b8" path="/var/lib/kubelet/pods/3c3b1ea4-2747-47b6-8dad-97d13d1070b8/volumes" Nov 23 16:21:17 crc kubenswrapper[5050]: I1123 16:21:17.567031 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="54c7819b-2cfc-4c76-8777-ec8ca37e418e" path="/var/lib/kubelet/pods/54c7819b-2cfc-4c76-8777-ec8ca37e418e/volumes" Nov 23 16:21:17 crc kubenswrapper[5050]: I1123 16:21:17.578183 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-69568d7b7c-xt7nq" event={"ID":"9b167679-73f1-44f8-bd64-f8172c3f8b1b","Type":"ContainerStarted","Data":"50a11349922f6836e32d8a74cf35526a9d967231e29201339170a81e3d564e88"} Nov 23 16:21:17 crc kubenswrapper[5050]: I1123 16:21:17.590576 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"679ef9b6-a937-459d-86e7-125151f37744","Type":"ContainerStarted","Data":"d08de61ea9c42f8edaf0b7eb538b5c22dcab40f339499f9cf66ac10d36acf1fa"} Nov 23 16:21:17 crc kubenswrapper[5050]: I1123 16:21:17.590663 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"679ef9b6-a937-459d-86e7-125151f37744","Type":"ContainerStarted","Data":"accb0df8ad15b9f6b5790b9ca53eb40e96b58ba54cfc0da7239e16c7614d25d9"} Nov 23 16:21:17 crc kubenswrapper[5050]: I1123 16:21:17.594425 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-bf4fdc45f-c4g79" event={"ID":"de915556-22d5-4ea1-a24e-b872c1fdbdae","Type":"ContainerStarted","Data":"9608d064b7f70771a47550972552fb2c792e46fb586df55e5f3770c7cdd92919"} Nov 23 16:21:17 crc kubenswrapper[5050]: I1123 16:21:17.597332 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 23 16:21:17 crc kubenswrapper[5050]: I1123 16:21:17.619285 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-68c8987657-cthvh" event={"ID":"4e1a52e2-909e-440a-b0ba-e4426cc19355","Type":"ContainerStarted","Data":"91bfeb2a43022409b7b2833d25bfce9ef867d4185a8f84f80852ba670651ec7d"} Nov 23 16:21:17 crc kubenswrapper[5050]: I1123 16:21:17.619578 5050 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openstack/horizon-68c8987657-cthvh" podUID="4e1a52e2-909e-440a-b0ba-e4426cc19355" containerName="horizon-log" containerID="cri-o://f22da02f9c1d7769eca2ddb8261145ac289031daea82f417adb2b7c59a44c44e" gracePeriod=30 Nov 23 16:21:17 crc kubenswrapper[5050]: I1123 16:21:17.619881 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-68c8987657-cthvh" podUID="4e1a52e2-909e-440a-b0ba-e4426cc19355" containerName="horizon" containerID="cri-o://91bfeb2a43022409b7b2833d25bfce9ef867d4185a8f84f80852ba670651ec7d" gracePeriod=30 Nov 23 16:21:17 crc kubenswrapper[5050]: I1123 16:21:17.655575 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-69568d7b7c-xt7nq" podStartSLOduration=3.184032186 podStartE2EDuration="12.655541967s" podCreationTimestamp="2025-11-23 16:21:05 +0000 UTC" firstStartedPulling="2025-11-23 16:21:06.501842809 +0000 UTC m=+5961.668839294" lastFinishedPulling="2025-11-23 16:21:15.9733526 +0000 UTC m=+5971.140349075" observedRunningTime="2025-11-23 16:21:17.599967829 +0000 UTC m=+5972.766964334" watchObservedRunningTime="2025-11-23 16:21:17.655541967 +0000 UTC m=+5972.822538452" Nov 23 16:21:17 crc kubenswrapper[5050]: I1123 16:21:17.688289 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-bf4fdc45f-c4g79" podStartSLOduration=3.173528685 podStartE2EDuration="11.688258159s" podCreationTimestamp="2025-11-23 16:21:06 +0000 UTC" firstStartedPulling="2025-11-23 16:21:07.481165991 +0000 UTC m=+5962.648162476" lastFinishedPulling="2025-11-23 16:21:15.995895455 +0000 UTC m=+5971.162891950" observedRunningTime="2025-11-23 16:21:17.637527898 +0000 UTC m=+5972.804524383" watchObservedRunningTime="2025-11-23 16:21:17.688258159 +0000 UTC m=+5972.855254644" Nov 23 16:21:17 crc kubenswrapper[5050]: I1123 16:21:17.692314 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-68c8987657-cthvh" podStartSLOduration=3.479425697 podStartE2EDuration="12.692300043s" podCreationTimestamp="2025-11-23 16:21:05 +0000 UTC" firstStartedPulling="2025-11-23 16:21:06.719082876 +0000 UTC m=+5961.886079361" lastFinishedPulling="2025-11-23 16:21:15.931957212 +0000 UTC m=+5971.098953707" observedRunningTime="2025-11-23 16:21:17.671356462 +0000 UTC m=+5972.838352947" watchObservedRunningTime="2025-11-23 16:21:17.692300043 +0000 UTC m=+5972.859296528" Nov 23 16:21:18 crc kubenswrapper[5050]: I1123 16:21:18.659845 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"6aadf9b9-c5a7-4938-8ba5-85d12ea935f0","Type":"ContainerStarted","Data":"bf90d1e9429a51e9886d5ac1f88bc5251be4afb03ddd4df640ca8e4f3973e786"} Nov 23 16:21:18 crc kubenswrapper[5050]: I1123 16:21:18.660291 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"6aadf9b9-c5a7-4938-8ba5-85d12ea935f0","Type":"ContainerStarted","Data":"7762ad3b0e811c20c6f70b21643ca3661ca8d6863d445f764993840baf75921c"} Nov 23 16:21:18 crc kubenswrapper[5050]: I1123 16:21:18.663012 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"679ef9b6-a937-459d-86e7-125151f37744","Type":"ContainerStarted","Data":"ae08ef3e53a6a6df6621003de0cb265a8c18d95b223333ec59fd1d46c8ca3e18"} Nov 23 16:21:18 crc kubenswrapper[5050]: I1123 16:21:18.696708 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack/glance-default-internal-api-0" podStartSLOduration=8.696673193 podStartE2EDuration="8.696673193s" podCreationTimestamp="2025-11-23 16:21:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 16:21:18.688930954 +0000 UTC m=+5973.855927469" watchObservedRunningTime="2025-11-23 16:21:18.696673193 +0000 UTC m=+5973.863669678" Nov 23 16:21:19 crc kubenswrapper[5050]: I1123 16:21:19.678370 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"6aadf9b9-c5a7-4938-8ba5-85d12ea935f0","Type":"ContainerStarted","Data":"60de9e2dbda900ec5aa6721f07830c61420764a7c51efa9f8fb64a52d3b6517a"} Nov 23 16:21:19 crc kubenswrapper[5050]: I1123 16:21:19.722754 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=3.722722003 podStartE2EDuration="3.722722003s" podCreationTimestamp="2025-11-23 16:21:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 16:21:19.700679121 +0000 UTC m=+5974.867675616" watchObservedRunningTime="2025-11-23 16:21:19.722722003 +0000 UTC m=+5974.889718518" Nov 23 16:21:20 crc kubenswrapper[5050]: I1123 16:21:20.783776 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 23 16:21:20 crc kubenswrapper[5050]: I1123 16:21:20.784346 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 23 16:21:20 crc kubenswrapper[5050]: I1123 16:21:20.829475 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 23 16:21:20 crc kubenswrapper[5050]: I1123 16:21:20.850836 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 23 16:21:21 crc kubenswrapper[5050]: I1123 16:21:21.714065 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 23 16:21:21 crc kubenswrapper[5050]: I1123 16:21:21.714602 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 23 16:21:25 crc kubenswrapper[5050]: I1123 16:21:25.042282 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 23 16:21:25 crc kubenswrapper[5050]: I1123 16:21:25.053241 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 23 16:21:25 crc kubenswrapper[5050]: I1123 16:21:25.972458 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-69568d7b7c-xt7nq" Nov 23 16:21:25 crc kubenswrapper[5050]: I1123 16:21:25.972963 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-69568d7b7c-xt7nq" Nov 23 16:21:26 crc kubenswrapper[5050]: I1123 16:21:26.138585 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-68c8987657-cthvh" Nov 23 16:21:26 crc kubenswrapper[5050]: I1123 16:21:26.895491 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-bf4fdc45f-c4g79" Nov 23 16:21:26 crc kubenswrapper[5050]: I1123 16:21:26.896073 5050 
kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-bf4fdc45f-c4g79" Nov 23 16:21:26 crc kubenswrapper[5050]: I1123 16:21:26.898363 5050 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-bf4fdc45f-c4g79" podUID="de915556-22d5-4ea1-a24e-b872c1fdbdae" containerName="horizon" probeResult="failure" output="Get \"http://10.217.1.114:8080/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.1.114:8080: connect: connection refused" Nov 23 16:21:26 crc kubenswrapper[5050]: I1123 16:21:26.977690 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 23 16:21:26 crc kubenswrapper[5050]: I1123 16:21:26.977795 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 23 16:21:27 crc kubenswrapper[5050]: I1123 16:21:27.038874 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 23 16:21:27 crc kubenswrapper[5050]: I1123 16:21:27.049499 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 23 16:21:27 crc kubenswrapper[5050]: I1123 16:21:27.797613 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 23 16:21:27 crc kubenswrapper[5050]: I1123 16:21:27.797745 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 23 16:21:30 crc kubenswrapper[5050]: I1123 16:21:30.231598 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 23 16:21:30 crc kubenswrapper[5050]: I1123 16:21:30.232898 5050 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 23 16:21:30 crc kubenswrapper[5050]: I1123 16:21:30.244850 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 23 16:21:35 crc kubenswrapper[5050]: I1123 16:21:35.971938 5050 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-69568d7b7c-xt7nq" podUID="9b167679-73f1-44f8-bd64-f8172c3f8b1b" containerName="horizon" probeResult="failure" output="Get \"http://10.217.1.112:8080/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.1.112:8080: connect: connection refused" Nov 23 16:21:36 crc kubenswrapper[5050]: I1123 16:21:36.896340 5050 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-bf4fdc45f-c4g79" podUID="de915556-22d5-4ea1-a24e-b872c1fdbdae" containerName="horizon" probeResult="failure" output="Get \"http://10.217.1.114:8080/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.1.114:8080: connect: connection refused" Nov 23 16:21:44 crc kubenswrapper[5050]: I1123 16:21:44.310195 5050 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/glance-default-external-api-0" podUID="54c7819b-2cfc-4c76-8777-ec8ca37e418e" containerName="glance-log" probeResult="failure" output="Get \"http://10.217.1.43:9292/healthcheck\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 23 16:21:44 crc kubenswrapper[5050]: I1123 16:21:44.310301 5050 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/glance-default-external-api-0" podUID="54c7819b-2cfc-4c76-8777-ec8ca37e418e" containerName="glance-httpd" probeResult="failure" output="Get 
\"http://10.217.1.43:9292/healthcheck\": dial tcp 10.217.1.43:9292: i/o timeout (Client.Timeout exceeded while awaiting headers)" Nov 23 16:21:47 crc kubenswrapper[5050]: I1123 16:21:47.749998 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/horizon-69568d7b7c-xt7nq" Nov 23 16:21:48 crc kubenswrapper[5050]: I1123 16:21:48.079168 5050 generic.go:334] "Generic (PLEG): container finished" podID="4e1a52e2-909e-440a-b0ba-e4426cc19355" containerID="91bfeb2a43022409b7b2833d25bfce9ef867d4185a8f84f80852ba670651ec7d" exitCode=137 Nov 23 16:21:48 crc kubenswrapper[5050]: I1123 16:21:48.079212 5050 generic.go:334] "Generic (PLEG): container finished" podID="4e1a52e2-909e-440a-b0ba-e4426cc19355" containerID="f22da02f9c1d7769eca2ddb8261145ac289031daea82f417adb2b7c59a44c44e" exitCode=137 Nov 23 16:21:48 crc kubenswrapper[5050]: I1123 16:21:48.079242 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-68c8987657-cthvh" event={"ID":"4e1a52e2-909e-440a-b0ba-e4426cc19355","Type":"ContainerDied","Data":"91bfeb2a43022409b7b2833d25bfce9ef867d4185a8f84f80852ba670651ec7d"} Nov 23 16:21:48 crc kubenswrapper[5050]: I1123 16:21:48.079275 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-68c8987657-cthvh" event={"ID":"4e1a52e2-909e-440a-b0ba-e4426cc19355","Type":"ContainerDied","Data":"f22da02f9c1d7769eca2ddb8261145ac289031daea82f417adb2b7c59a44c44e"} Nov 23 16:21:48 crc kubenswrapper[5050]: I1123 16:21:48.596830 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-68c8987657-cthvh" Nov 23 16:21:48 crc kubenswrapper[5050]: I1123 16:21:48.741740 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/4e1a52e2-909e-440a-b0ba-e4426cc19355-config-data\") pod \"4e1a52e2-909e-440a-b0ba-e4426cc19355\" (UID: \"4e1a52e2-909e-440a-b0ba-e4426cc19355\") " Nov 23 16:21:48 crc kubenswrapper[5050]: I1123 16:21:48.741811 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/4e1a52e2-909e-440a-b0ba-e4426cc19355-horizon-secret-key\") pod \"4e1a52e2-909e-440a-b0ba-e4426cc19355\" (UID: \"4e1a52e2-909e-440a-b0ba-e4426cc19355\") " Nov 23 16:21:48 crc kubenswrapper[5050]: I1123 16:21:48.742028 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4e1a52e2-909e-440a-b0ba-e4426cc19355-scripts\") pod \"4e1a52e2-909e-440a-b0ba-e4426cc19355\" (UID: \"4e1a52e2-909e-440a-b0ba-e4426cc19355\") " Nov 23 16:21:48 crc kubenswrapper[5050]: I1123 16:21:48.742152 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xtkl4\" (UniqueName: \"kubernetes.io/projected/4e1a52e2-909e-440a-b0ba-e4426cc19355-kube-api-access-xtkl4\") pod \"4e1a52e2-909e-440a-b0ba-e4426cc19355\" (UID: \"4e1a52e2-909e-440a-b0ba-e4426cc19355\") " Nov 23 16:21:48 crc kubenswrapper[5050]: I1123 16:21:48.742277 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4e1a52e2-909e-440a-b0ba-e4426cc19355-logs\") pod \"4e1a52e2-909e-440a-b0ba-e4426cc19355\" (UID: \"4e1a52e2-909e-440a-b0ba-e4426cc19355\") " Nov 23 16:21:48 crc kubenswrapper[5050]: I1123 16:21:48.743288 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/empty-dir/4e1a52e2-909e-440a-b0ba-e4426cc19355-logs" (OuterVolumeSpecName: "logs") pod "4e1a52e2-909e-440a-b0ba-e4426cc19355" (UID: "4e1a52e2-909e-440a-b0ba-e4426cc19355"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 16:21:48 crc kubenswrapper[5050]: I1123 16:21:48.752581 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4e1a52e2-909e-440a-b0ba-e4426cc19355-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "4e1a52e2-909e-440a-b0ba-e4426cc19355" (UID: "4e1a52e2-909e-440a-b0ba-e4426cc19355"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:21:48 crc kubenswrapper[5050]: I1123 16:21:48.759246 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4e1a52e2-909e-440a-b0ba-e4426cc19355-kube-api-access-xtkl4" (OuterVolumeSpecName: "kube-api-access-xtkl4") pod "4e1a52e2-909e-440a-b0ba-e4426cc19355" (UID: "4e1a52e2-909e-440a-b0ba-e4426cc19355"). InnerVolumeSpecName "kube-api-access-xtkl4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 16:21:48 crc kubenswrapper[5050]: I1123 16:21:48.776334 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4e1a52e2-909e-440a-b0ba-e4426cc19355-config-data" (OuterVolumeSpecName: "config-data") pod "4e1a52e2-909e-440a-b0ba-e4426cc19355" (UID: "4e1a52e2-909e-440a-b0ba-e4426cc19355"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 16:21:48 crc kubenswrapper[5050]: I1123 16:21:48.781912 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4e1a52e2-909e-440a-b0ba-e4426cc19355-scripts" (OuterVolumeSpecName: "scripts") pod "4e1a52e2-909e-440a-b0ba-e4426cc19355" (UID: "4e1a52e2-909e-440a-b0ba-e4426cc19355"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 16:21:48 crc kubenswrapper[5050]: I1123 16:21:48.820272 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/horizon-bf4fdc45f-c4g79" Nov 23 16:21:48 crc kubenswrapper[5050]: I1123 16:21:48.845948 5050 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/4e1a52e2-909e-440a-b0ba-e4426cc19355-config-data\") on node \"crc\" DevicePath \"\"" Nov 23 16:21:48 crc kubenswrapper[5050]: I1123 16:21:48.845981 5050 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/4e1a52e2-909e-440a-b0ba-e4426cc19355-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Nov 23 16:21:48 crc kubenswrapper[5050]: I1123 16:21:48.845992 5050 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4e1a52e2-909e-440a-b0ba-e4426cc19355-scripts\") on node \"crc\" DevicePath \"\"" Nov 23 16:21:48 crc kubenswrapper[5050]: I1123 16:21:48.846002 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xtkl4\" (UniqueName: \"kubernetes.io/projected/4e1a52e2-909e-440a-b0ba-e4426cc19355-kube-api-access-xtkl4\") on node \"crc\" DevicePath \"\"" Nov 23 16:21:48 crc kubenswrapper[5050]: I1123 16:21:48.846011 5050 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4e1a52e2-909e-440a-b0ba-e4426cc19355-logs\") on node \"crc\" DevicePath \"\"" Nov 23 16:21:49 crc kubenswrapper[5050]: I1123 16:21:49.093890 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-68c8987657-cthvh" event={"ID":"4e1a52e2-909e-440a-b0ba-e4426cc19355","Type":"ContainerDied","Data":"551d17a70823ae1137383eeb48609be2d7ff83f9156482b2d8de45f44d3ee4ac"} Nov 23 16:21:49 crc kubenswrapper[5050]: I1123 16:21:49.094373 5050 scope.go:117] "RemoveContainer" containerID="91bfeb2a43022409b7b2833d25bfce9ef867d4185a8f84f80852ba670651ec7d" Nov 23 16:21:49 crc kubenswrapper[5050]: I1123 16:21:49.094011 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-68c8987657-cthvh" Nov 23 16:21:49 crc kubenswrapper[5050]: I1123 16:21:49.141225 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-68c8987657-cthvh"] Nov 23 16:21:49 crc kubenswrapper[5050]: I1123 16:21:49.159212 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-68c8987657-cthvh"] Nov 23 16:21:49 crc kubenswrapper[5050]: I1123 16:21:49.285976 5050 scope.go:117] "RemoveContainer" containerID="f22da02f9c1d7769eca2ddb8261145ac289031daea82f417adb2b7c59a44c44e" Nov 23 16:21:49 crc kubenswrapper[5050]: I1123 16:21:49.567574 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4e1a52e2-909e-440a-b0ba-e4426cc19355" path="/var/lib/kubelet/pods/4e1a52e2-909e-440a-b0ba-e4426cc19355/volumes" Nov 23 16:21:49 crc kubenswrapper[5050]: I1123 16:21:49.572342 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/horizon-69568d7b7c-xt7nq" Nov 23 16:21:50 crc kubenswrapper[5050]: I1123 16:21:50.531690 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/horizon-bf4fdc45f-c4g79" Nov 23 16:21:50 crc kubenswrapper[5050]: I1123 16:21:50.627197 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-69568d7b7c-xt7nq"] Nov 23 16:21:50 crc kubenswrapper[5050]: I1123 16:21:50.628021 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-69568d7b7c-xt7nq" podUID="9b167679-73f1-44f8-bd64-f8172c3f8b1b" containerName="horizon-log" containerID="cri-o://3f748f53d7f464e00bf2848d3bd1d832edb705f84d97a01c4f1be3e26d3bb99e" gracePeriod=30 Nov 23 16:21:50 crc kubenswrapper[5050]: I1123 16:21:50.628149 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-69568d7b7c-xt7nq" podUID="9b167679-73f1-44f8-bd64-f8172c3f8b1b" containerName="horizon" containerID="cri-o://50a11349922f6836e32d8a74cf35526a9d967231e29201339170a81e3d564e88" gracePeriod=30 Nov 23 16:21:54 crc kubenswrapper[5050]: I1123 16:21:54.173901 5050 generic.go:334] "Generic (PLEG): container finished" podID="9b167679-73f1-44f8-bd64-f8172c3f8b1b" containerID="50a11349922f6836e32d8a74cf35526a9d967231e29201339170a81e3d564e88" exitCode=0 Nov 23 16:21:54 crc kubenswrapper[5050]: I1123 16:21:54.174005 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-69568d7b7c-xt7nq" event={"ID":"9b167679-73f1-44f8-bd64-f8172c3f8b1b","Type":"ContainerDied","Data":"50a11349922f6836e32d8a74cf35526a9d967231e29201339170a81e3d564e88"} Nov 23 16:21:55 crc kubenswrapper[5050]: I1123 16:21:55.970000 5050 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-69568d7b7c-xt7nq" podUID="9b167679-73f1-44f8-bd64-f8172c3f8b1b" containerName="horizon" probeResult="failure" output="Get \"http://10.217.1.112:8080/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.1.112:8080: connect: connection refused" Nov 23 16:22:01 crc kubenswrapper[5050]: I1123 16:22:01.084949 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-d123-account-create-vrjwv"] Nov 23 16:22:01 crc kubenswrapper[5050]: I1123 16:22:01.097347 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-create-b5m5p"] Nov 23 16:22:01 crc kubenswrapper[5050]: I1123 16:22:01.109031 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-d123-account-create-vrjwv"] Nov 23 16:22:01 crc kubenswrapper[5050]: I1123 16:22:01.125064 5050 
kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-create-b5m5p"] Nov 23 16:22:01 crc kubenswrapper[5050]: I1123 16:22:01.571166 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0702c0ea-259a-411a-994b-47da1140eea2" path="/var/lib/kubelet/pods/0702c0ea-259a-411a-994b-47da1140eea2/volumes" Nov 23 16:22:01 crc kubenswrapper[5050]: I1123 16:22:01.572432 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="571ac77e-9976-4957-a214-035ad47339d8" path="/var/lib/kubelet/pods/571ac77e-9976-4957-a214-035ad47339d8/volumes" Nov 23 16:22:05 crc kubenswrapper[5050]: I1123 16:22:05.969943 5050 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-69568d7b7c-xt7nq" podUID="9b167679-73f1-44f8-bd64-f8172c3f8b1b" containerName="horizon" probeResult="failure" output="Get \"http://10.217.1.112:8080/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.1.112:8080: connect: connection refused" Nov 23 16:22:09 crc kubenswrapper[5050]: I1123 16:22:09.058698 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-sync-rwxg2"] Nov 23 16:22:09 crc kubenswrapper[5050]: I1123 16:22:09.072453 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-sync-rwxg2"] Nov 23 16:22:09 crc kubenswrapper[5050]: I1123 16:22:09.562807 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="94b5a9ae-4d31-4397-bec3-313a3012f2b8" path="/var/lib/kubelet/pods/94b5a9ae-4d31-4397-bec3-313a3012f2b8/volumes" Nov 23 16:22:15 crc kubenswrapper[5050]: I1123 16:22:15.971812 5050 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-69568d7b7c-xt7nq" podUID="9b167679-73f1-44f8-bd64-f8172c3f8b1b" containerName="horizon" probeResult="failure" output="Get \"http://10.217.1.112:8080/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.1.112:8080: connect: connection refused" Nov 23 16:22:15 crc kubenswrapper[5050]: I1123 16:22:15.972714 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-69568d7b7c-xt7nq" Nov 23 16:22:16 crc kubenswrapper[5050]: I1123 16:22:16.142797 5050 scope.go:117] "RemoveContainer" containerID="bc2658aa9d45357ecc17228dc0b9024a120340b6b5351df41f2c2e9536c261b5" Nov 23 16:22:16 crc kubenswrapper[5050]: I1123 16:22:16.202499 5050 scope.go:117] "RemoveContainer" containerID="f89498a27c03b95319123445e203cf9965a3db21fba29a4926e46a8623aeca3b" Nov 23 16:22:16 crc kubenswrapper[5050]: I1123 16:22:16.240508 5050 scope.go:117] "RemoveContainer" containerID="4eaf80a1bcd5f62c60185ecb37c51df8529829694a56d6dd5702eed631f63f9d" Nov 23 16:22:16 crc kubenswrapper[5050]: I1123 16:22:16.289893 5050 scope.go:117] "RemoveContainer" containerID="6acb5eb612d76d8b9edb1f3a7e88d2fd0a36a42b5a47f8b9f2c6bd3f1cd24b4c" Nov 23 16:22:16 crc kubenswrapper[5050]: I1123 16:22:16.346944 5050 scope.go:117] "RemoveContainer" containerID="c206774ae0b9813bda1fce04e9c5974f141ba16a926981b101f7a40459d9531d" Nov 23 16:22:16 crc kubenswrapper[5050]: I1123 16:22:16.373956 5050 scope.go:117] "RemoveContainer" containerID="4ea39488e0657df8a49260e7ffa852bf3494ba997b3337ca5f7daa319b480709" Nov 23 16:22:21 crc kubenswrapper[5050]: I1123 16:22:21.158017 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-69568d7b7c-xt7nq" Nov 23 16:22:21 crc kubenswrapper[5050]: I1123 16:22:21.243535 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/9b167679-73f1-44f8-bd64-f8172c3f8b1b-horizon-secret-key\") pod \"9b167679-73f1-44f8-bd64-f8172c3f8b1b\" (UID: \"9b167679-73f1-44f8-bd64-f8172c3f8b1b\") " Nov 23 16:22:21 crc kubenswrapper[5050]: I1123 16:22:21.243686 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bmvpx\" (UniqueName: \"kubernetes.io/projected/9b167679-73f1-44f8-bd64-f8172c3f8b1b-kube-api-access-bmvpx\") pod \"9b167679-73f1-44f8-bd64-f8172c3f8b1b\" (UID: \"9b167679-73f1-44f8-bd64-f8172c3f8b1b\") " Nov 23 16:22:21 crc kubenswrapper[5050]: I1123 16:22:21.243750 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9b167679-73f1-44f8-bd64-f8172c3f8b1b-logs\") pod \"9b167679-73f1-44f8-bd64-f8172c3f8b1b\" (UID: \"9b167679-73f1-44f8-bd64-f8172c3f8b1b\") " Nov 23 16:22:21 crc kubenswrapper[5050]: I1123 16:22:21.243833 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/9b167679-73f1-44f8-bd64-f8172c3f8b1b-config-data\") pod \"9b167679-73f1-44f8-bd64-f8172c3f8b1b\" (UID: \"9b167679-73f1-44f8-bd64-f8172c3f8b1b\") " Nov 23 16:22:21 crc kubenswrapper[5050]: I1123 16:22:21.244273 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9b167679-73f1-44f8-bd64-f8172c3f8b1b-logs" (OuterVolumeSpecName: "logs") pod "9b167679-73f1-44f8-bd64-f8172c3f8b1b" (UID: "9b167679-73f1-44f8-bd64-f8172c3f8b1b"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 16:22:21 crc kubenswrapper[5050]: I1123 16:22:21.244658 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9b167679-73f1-44f8-bd64-f8172c3f8b1b-scripts\") pod \"9b167679-73f1-44f8-bd64-f8172c3f8b1b\" (UID: \"9b167679-73f1-44f8-bd64-f8172c3f8b1b\") " Nov 23 16:22:21 crc kubenswrapper[5050]: I1123 16:22:21.245178 5050 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9b167679-73f1-44f8-bd64-f8172c3f8b1b-logs\") on node \"crc\" DevicePath \"\"" Nov 23 16:22:21 crc kubenswrapper[5050]: I1123 16:22:21.263847 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9b167679-73f1-44f8-bd64-f8172c3f8b1b-kube-api-access-bmvpx" (OuterVolumeSpecName: "kube-api-access-bmvpx") pod "9b167679-73f1-44f8-bd64-f8172c3f8b1b" (UID: "9b167679-73f1-44f8-bd64-f8172c3f8b1b"). InnerVolumeSpecName "kube-api-access-bmvpx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 16:22:21 crc kubenswrapper[5050]: I1123 16:22:21.265309 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9b167679-73f1-44f8-bd64-f8172c3f8b1b-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "9b167679-73f1-44f8-bd64-f8172c3f8b1b" (UID: "9b167679-73f1-44f8-bd64-f8172c3f8b1b"). InnerVolumeSpecName "horizon-secret-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:22:21 crc kubenswrapper[5050]: I1123 16:22:21.278854 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9b167679-73f1-44f8-bd64-f8172c3f8b1b-scripts" (OuterVolumeSpecName: "scripts") pod "9b167679-73f1-44f8-bd64-f8172c3f8b1b" (UID: "9b167679-73f1-44f8-bd64-f8172c3f8b1b"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 16:22:21 crc kubenswrapper[5050]: I1123 16:22:21.282705 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9b167679-73f1-44f8-bd64-f8172c3f8b1b-config-data" (OuterVolumeSpecName: "config-data") pod "9b167679-73f1-44f8-bd64-f8172c3f8b1b" (UID: "9b167679-73f1-44f8-bd64-f8172c3f8b1b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 16:22:21 crc kubenswrapper[5050]: I1123 16:22:21.348263 5050 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/9b167679-73f1-44f8-bd64-f8172c3f8b1b-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Nov 23 16:22:21 crc kubenswrapper[5050]: I1123 16:22:21.348666 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bmvpx\" (UniqueName: \"kubernetes.io/projected/9b167679-73f1-44f8-bd64-f8172c3f8b1b-kube-api-access-bmvpx\") on node \"crc\" DevicePath \"\"" Nov 23 16:22:21 crc kubenswrapper[5050]: I1123 16:22:21.348899 5050 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/9b167679-73f1-44f8-bd64-f8172c3f8b1b-config-data\") on node \"crc\" DevicePath \"\"" Nov 23 16:22:21 crc kubenswrapper[5050]: I1123 16:22:21.349085 5050 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9b167679-73f1-44f8-bd64-f8172c3f8b1b-scripts\") on node \"crc\" DevicePath \"\"" Nov 23 16:22:21 crc kubenswrapper[5050]: I1123 16:22:21.564321 5050 generic.go:334] "Generic (PLEG): container finished" podID="9b167679-73f1-44f8-bd64-f8172c3f8b1b" containerID="3f748f53d7f464e00bf2848d3bd1d832edb705f84d97a01c4f1be3e26d3bb99e" exitCode=137 Nov 23 16:22:21 crc kubenswrapper[5050]: I1123 16:22:21.564380 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-69568d7b7c-xt7nq" event={"ID":"9b167679-73f1-44f8-bd64-f8172c3f8b1b","Type":"ContainerDied","Data":"3f748f53d7f464e00bf2848d3bd1d832edb705f84d97a01c4f1be3e26d3bb99e"} Nov 23 16:22:21 crc kubenswrapper[5050]: I1123 16:22:21.564394 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-69568d7b7c-xt7nq" Nov 23 16:22:21 crc kubenswrapper[5050]: I1123 16:22:21.564412 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-69568d7b7c-xt7nq" event={"ID":"9b167679-73f1-44f8-bd64-f8172c3f8b1b","Type":"ContainerDied","Data":"70fbcbe064c69b4e18459ecaae782ff443571e566d8eee4e77d3131383b54b20"} Nov 23 16:22:21 crc kubenswrapper[5050]: I1123 16:22:21.564464 5050 scope.go:117] "RemoveContainer" containerID="50a11349922f6836e32d8a74cf35526a9d967231e29201339170a81e3d564e88" Nov 23 16:22:21 crc kubenswrapper[5050]: I1123 16:22:21.618406 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-69568d7b7c-xt7nq"] Nov 23 16:22:21 crc kubenswrapper[5050]: I1123 16:22:21.628843 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-69568d7b7c-xt7nq"] Nov 23 16:22:21 crc kubenswrapper[5050]: I1123 16:22:21.836980 5050 scope.go:117] "RemoveContainer" containerID="3f748f53d7f464e00bf2848d3bd1d832edb705f84d97a01c4f1be3e26d3bb99e" Nov 23 16:22:21 crc kubenswrapper[5050]: I1123 16:22:21.884564 5050 scope.go:117] "RemoveContainer" containerID="50a11349922f6836e32d8a74cf35526a9d967231e29201339170a81e3d564e88" Nov 23 16:22:21 crc kubenswrapper[5050]: E1123 16:22:21.885060 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"50a11349922f6836e32d8a74cf35526a9d967231e29201339170a81e3d564e88\": container with ID starting with 50a11349922f6836e32d8a74cf35526a9d967231e29201339170a81e3d564e88 not found: ID does not exist" containerID="50a11349922f6836e32d8a74cf35526a9d967231e29201339170a81e3d564e88" Nov 23 16:22:21 crc kubenswrapper[5050]: I1123 16:22:21.885095 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"50a11349922f6836e32d8a74cf35526a9d967231e29201339170a81e3d564e88"} err="failed to get container status \"50a11349922f6836e32d8a74cf35526a9d967231e29201339170a81e3d564e88\": rpc error: code = NotFound desc = could not find container \"50a11349922f6836e32d8a74cf35526a9d967231e29201339170a81e3d564e88\": container with ID starting with 50a11349922f6836e32d8a74cf35526a9d967231e29201339170a81e3d564e88 not found: ID does not exist" Nov 23 16:22:21 crc kubenswrapper[5050]: I1123 16:22:21.885119 5050 scope.go:117] "RemoveContainer" containerID="3f748f53d7f464e00bf2848d3bd1d832edb705f84d97a01c4f1be3e26d3bb99e" Nov 23 16:22:21 crc kubenswrapper[5050]: E1123 16:22:21.885521 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3f748f53d7f464e00bf2848d3bd1d832edb705f84d97a01c4f1be3e26d3bb99e\": container with ID starting with 3f748f53d7f464e00bf2848d3bd1d832edb705f84d97a01c4f1be3e26d3bb99e not found: ID does not exist" containerID="3f748f53d7f464e00bf2848d3bd1d832edb705f84d97a01c4f1be3e26d3bb99e" Nov 23 16:22:21 crc kubenswrapper[5050]: I1123 16:22:21.885544 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3f748f53d7f464e00bf2848d3bd1d832edb705f84d97a01c4f1be3e26d3bb99e"} err="failed to get container status \"3f748f53d7f464e00bf2848d3bd1d832edb705f84d97a01c4f1be3e26d3bb99e\": rpc error: code = NotFound desc = could not find container \"3f748f53d7f464e00bf2848d3bd1d832edb705f84d97a01c4f1be3e26d3bb99e\": container with ID starting with 3f748f53d7f464e00bf2848d3bd1d832edb705f84d97a01c4f1be3e26d3bb99e not found: ID does not exist" Nov 23 16:22:23 crc 
kubenswrapper[5050]: I1123 16:22:23.566396 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9b167679-73f1-44f8-bd64-f8172c3f8b1b" path="/var/lib/kubelet/pods/9b167679-73f1-44f8-bd64-f8172c3f8b1b/volumes" Nov 23 16:22:28 crc kubenswrapper[5050]: I1123 16:22:28.958007 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-58696f4c7c-gzq55"] Nov 23 16:22:28 crc kubenswrapper[5050]: E1123 16:22:28.959120 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4e1a52e2-909e-440a-b0ba-e4426cc19355" containerName="horizon" Nov 23 16:22:28 crc kubenswrapper[5050]: I1123 16:22:28.959135 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="4e1a52e2-909e-440a-b0ba-e4426cc19355" containerName="horizon" Nov 23 16:22:28 crc kubenswrapper[5050]: E1123 16:22:28.959148 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9b167679-73f1-44f8-bd64-f8172c3f8b1b" containerName="horizon" Nov 23 16:22:28 crc kubenswrapper[5050]: I1123 16:22:28.959154 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="9b167679-73f1-44f8-bd64-f8172c3f8b1b" containerName="horizon" Nov 23 16:22:28 crc kubenswrapper[5050]: E1123 16:22:28.959176 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9b167679-73f1-44f8-bd64-f8172c3f8b1b" containerName="horizon-log" Nov 23 16:22:28 crc kubenswrapper[5050]: I1123 16:22:28.959182 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="9b167679-73f1-44f8-bd64-f8172c3f8b1b" containerName="horizon-log" Nov 23 16:22:28 crc kubenswrapper[5050]: E1123 16:22:28.959209 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4e1a52e2-909e-440a-b0ba-e4426cc19355" containerName="horizon-log" Nov 23 16:22:28 crc kubenswrapper[5050]: I1123 16:22:28.959216 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="4e1a52e2-909e-440a-b0ba-e4426cc19355" containerName="horizon-log" Nov 23 16:22:28 crc kubenswrapper[5050]: I1123 16:22:28.959420 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="4e1a52e2-909e-440a-b0ba-e4426cc19355" containerName="horizon" Nov 23 16:22:28 crc kubenswrapper[5050]: I1123 16:22:28.959458 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="9b167679-73f1-44f8-bd64-f8172c3f8b1b" containerName="horizon-log" Nov 23 16:22:28 crc kubenswrapper[5050]: I1123 16:22:28.959468 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="9b167679-73f1-44f8-bd64-f8172c3f8b1b" containerName="horizon" Nov 23 16:22:28 crc kubenswrapper[5050]: I1123 16:22:28.959476 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="4e1a52e2-909e-440a-b0ba-e4426cc19355" containerName="horizon-log" Nov 23 16:22:28 crc kubenswrapper[5050]: I1123 16:22:28.960549 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-58696f4c7c-gzq55" Nov 23 16:22:28 crc kubenswrapper[5050]: I1123 16:22:28.983151 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-58696f4c7c-gzq55"] Nov 23 16:22:29 crc kubenswrapper[5050]: I1123 16:22:29.043962 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/18576c60-9ba2-4cb0-9ae3-9e1d98a71811-config-data\") pod \"horizon-58696f4c7c-gzq55\" (UID: \"18576c60-9ba2-4cb0-9ae3-9e1d98a71811\") " pod="openstack/horizon-58696f4c7c-gzq55" Nov 23 16:22:29 crc kubenswrapper[5050]: I1123 16:22:29.044090 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/18576c60-9ba2-4cb0-9ae3-9e1d98a71811-scripts\") pod \"horizon-58696f4c7c-gzq55\" (UID: \"18576c60-9ba2-4cb0-9ae3-9e1d98a71811\") " pod="openstack/horizon-58696f4c7c-gzq55" Nov 23 16:22:29 crc kubenswrapper[5050]: I1123 16:22:29.044162 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/18576c60-9ba2-4cb0-9ae3-9e1d98a71811-horizon-secret-key\") pod \"horizon-58696f4c7c-gzq55\" (UID: \"18576c60-9ba2-4cb0-9ae3-9e1d98a71811\") " pod="openstack/horizon-58696f4c7c-gzq55" Nov 23 16:22:29 crc kubenswrapper[5050]: I1123 16:22:29.044213 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/18576c60-9ba2-4cb0-9ae3-9e1d98a71811-logs\") pod \"horizon-58696f4c7c-gzq55\" (UID: \"18576c60-9ba2-4cb0-9ae3-9e1d98a71811\") " pod="openstack/horizon-58696f4c7c-gzq55" Nov 23 16:22:29 crc kubenswrapper[5050]: I1123 16:22:29.044305 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x4j7k\" (UniqueName: \"kubernetes.io/projected/18576c60-9ba2-4cb0-9ae3-9e1d98a71811-kube-api-access-x4j7k\") pod \"horizon-58696f4c7c-gzq55\" (UID: \"18576c60-9ba2-4cb0-9ae3-9e1d98a71811\") " pod="openstack/horizon-58696f4c7c-gzq55" Nov 23 16:22:29 crc kubenswrapper[5050]: I1123 16:22:29.146194 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/18576c60-9ba2-4cb0-9ae3-9e1d98a71811-config-data\") pod \"horizon-58696f4c7c-gzq55\" (UID: \"18576c60-9ba2-4cb0-9ae3-9e1d98a71811\") " pod="openstack/horizon-58696f4c7c-gzq55" Nov 23 16:22:29 crc kubenswrapper[5050]: I1123 16:22:29.146306 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/18576c60-9ba2-4cb0-9ae3-9e1d98a71811-scripts\") pod \"horizon-58696f4c7c-gzq55\" (UID: \"18576c60-9ba2-4cb0-9ae3-9e1d98a71811\") " pod="openstack/horizon-58696f4c7c-gzq55" Nov 23 16:22:29 crc kubenswrapper[5050]: I1123 16:22:29.146350 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/18576c60-9ba2-4cb0-9ae3-9e1d98a71811-horizon-secret-key\") pod \"horizon-58696f4c7c-gzq55\" (UID: \"18576c60-9ba2-4cb0-9ae3-9e1d98a71811\") " pod="openstack/horizon-58696f4c7c-gzq55" Nov 23 16:22:29 crc kubenswrapper[5050]: I1123 16:22:29.146380 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: 
\"kubernetes.io/empty-dir/18576c60-9ba2-4cb0-9ae3-9e1d98a71811-logs\") pod \"horizon-58696f4c7c-gzq55\" (UID: \"18576c60-9ba2-4cb0-9ae3-9e1d98a71811\") " pod="openstack/horizon-58696f4c7c-gzq55" Nov 23 16:22:29 crc kubenswrapper[5050]: I1123 16:22:29.146461 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x4j7k\" (UniqueName: \"kubernetes.io/projected/18576c60-9ba2-4cb0-9ae3-9e1d98a71811-kube-api-access-x4j7k\") pod \"horizon-58696f4c7c-gzq55\" (UID: \"18576c60-9ba2-4cb0-9ae3-9e1d98a71811\") " pod="openstack/horizon-58696f4c7c-gzq55" Nov 23 16:22:29 crc kubenswrapper[5050]: I1123 16:22:29.147271 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/18576c60-9ba2-4cb0-9ae3-9e1d98a71811-logs\") pod \"horizon-58696f4c7c-gzq55\" (UID: \"18576c60-9ba2-4cb0-9ae3-9e1d98a71811\") " pod="openstack/horizon-58696f4c7c-gzq55" Nov 23 16:22:29 crc kubenswrapper[5050]: I1123 16:22:29.148079 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/18576c60-9ba2-4cb0-9ae3-9e1d98a71811-config-data\") pod \"horizon-58696f4c7c-gzq55\" (UID: \"18576c60-9ba2-4cb0-9ae3-9e1d98a71811\") " pod="openstack/horizon-58696f4c7c-gzq55" Nov 23 16:22:29 crc kubenswrapper[5050]: I1123 16:22:29.148123 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/18576c60-9ba2-4cb0-9ae3-9e1d98a71811-scripts\") pod \"horizon-58696f4c7c-gzq55\" (UID: \"18576c60-9ba2-4cb0-9ae3-9e1d98a71811\") " pod="openstack/horizon-58696f4c7c-gzq55" Nov 23 16:22:29 crc kubenswrapper[5050]: I1123 16:22:29.152695 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/18576c60-9ba2-4cb0-9ae3-9e1d98a71811-horizon-secret-key\") pod \"horizon-58696f4c7c-gzq55\" (UID: \"18576c60-9ba2-4cb0-9ae3-9e1d98a71811\") " pod="openstack/horizon-58696f4c7c-gzq55" Nov 23 16:22:29 crc kubenswrapper[5050]: I1123 16:22:29.167561 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x4j7k\" (UniqueName: \"kubernetes.io/projected/18576c60-9ba2-4cb0-9ae3-9e1d98a71811-kube-api-access-x4j7k\") pod \"horizon-58696f4c7c-gzq55\" (UID: \"18576c60-9ba2-4cb0-9ae3-9e1d98a71811\") " pod="openstack/horizon-58696f4c7c-gzq55" Nov 23 16:22:29 crc kubenswrapper[5050]: I1123 16:22:29.224465 5050 patch_prober.go:28] interesting pod/machine-config-daemon-hlrlq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 23 16:22:29 crc kubenswrapper[5050]: I1123 16:22:29.224524 5050 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 23 16:22:29 crc kubenswrapper[5050]: I1123 16:22:29.292382 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-58696f4c7c-gzq55" Nov 23 16:22:29 crc kubenswrapper[5050]: I1123 16:22:29.813163 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-58696f4c7c-gzq55"] Nov 23 16:22:30 crc kubenswrapper[5050]: I1123 16:22:30.332519 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-db-create-l664c"] Nov 23 16:22:30 crc kubenswrapper[5050]: I1123 16:22:30.334126 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-create-l664c" Nov 23 16:22:30 crc kubenswrapper[5050]: I1123 16:22:30.351682 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-create-l664c"] Nov 23 16:22:30 crc kubenswrapper[5050]: I1123 16:22:30.432000 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-0d12-account-create-h9wr8"] Nov 23 16:22:30 crc kubenswrapper[5050]: I1123 16:22:30.434381 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-0d12-account-create-h9wr8" Nov 23 16:22:30 crc kubenswrapper[5050]: I1123 16:22:30.436736 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-db-secret" Nov 23 16:22:30 crc kubenswrapper[5050]: I1123 16:22:30.479517 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-0d12-account-create-h9wr8"] Nov 23 16:22:30 crc kubenswrapper[5050]: I1123 16:22:30.483257 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/34b6d8e9-a394-439a-95bd-ed5c9b073581-operator-scripts\") pod \"heat-db-create-l664c\" (UID: \"34b6d8e9-a394-439a-95bd-ed5c9b073581\") " pod="openstack/heat-db-create-l664c" Nov 23 16:22:30 crc kubenswrapper[5050]: I1123 16:22:30.483799 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r6jt6\" (UniqueName: \"kubernetes.io/projected/34b6d8e9-a394-439a-95bd-ed5c9b073581-kube-api-access-r6jt6\") pod \"heat-db-create-l664c\" (UID: \"34b6d8e9-a394-439a-95bd-ed5c9b073581\") " pod="openstack/heat-db-create-l664c" Nov 23 16:22:30 crc kubenswrapper[5050]: I1123 16:22:30.587381 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bf8c01a5-ac54-4ce4-adfd-4f267aae7ef0-operator-scripts\") pod \"heat-0d12-account-create-h9wr8\" (UID: \"bf8c01a5-ac54-4ce4-adfd-4f267aae7ef0\") " pod="openstack/heat-0d12-account-create-h9wr8" Nov 23 16:22:30 crc kubenswrapper[5050]: I1123 16:22:30.587729 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/34b6d8e9-a394-439a-95bd-ed5c9b073581-operator-scripts\") pod \"heat-db-create-l664c\" (UID: \"34b6d8e9-a394-439a-95bd-ed5c9b073581\") " pod="openstack/heat-db-create-l664c" Nov 23 16:22:30 crc kubenswrapper[5050]: I1123 16:22:30.588059 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r6jt6\" (UniqueName: \"kubernetes.io/projected/34b6d8e9-a394-439a-95bd-ed5c9b073581-kube-api-access-r6jt6\") pod \"heat-db-create-l664c\" (UID: \"34b6d8e9-a394-439a-95bd-ed5c9b073581\") " pod="openstack/heat-db-create-l664c" Nov 23 16:22:30 crc kubenswrapper[5050]: I1123 16:22:30.588113 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"kube-api-access-4g2hh\" (UniqueName: \"kubernetes.io/projected/bf8c01a5-ac54-4ce4-adfd-4f267aae7ef0-kube-api-access-4g2hh\") pod \"heat-0d12-account-create-h9wr8\" (UID: \"bf8c01a5-ac54-4ce4-adfd-4f267aae7ef0\") " pod="openstack/heat-0d12-account-create-h9wr8" Nov 23 16:22:30 crc kubenswrapper[5050]: I1123 16:22:30.589750 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/34b6d8e9-a394-439a-95bd-ed5c9b073581-operator-scripts\") pod \"heat-db-create-l664c\" (UID: \"34b6d8e9-a394-439a-95bd-ed5c9b073581\") " pod="openstack/heat-db-create-l664c" Nov 23 16:22:30 crc kubenswrapper[5050]: I1123 16:22:30.616672 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r6jt6\" (UniqueName: \"kubernetes.io/projected/34b6d8e9-a394-439a-95bd-ed5c9b073581-kube-api-access-r6jt6\") pod \"heat-db-create-l664c\" (UID: \"34b6d8e9-a394-439a-95bd-ed5c9b073581\") " pod="openstack/heat-db-create-l664c" Nov 23 16:22:30 crc kubenswrapper[5050]: I1123 16:22:30.653238 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-create-l664c" Nov 23 16:22:30 crc kubenswrapper[5050]: I1123 16:22:30.672747 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-58696f4c7c-gzq55" event={"ID":"18576c60-9ba2-4cb0-9ae3-9e1d98a71811","Type":"ContainerStarted","Data":"e241554ce08cf368762bf3e8c9a139fa71cf43315ccf83edb3cfbb74ce1b64b9"} Nov 23 16:22:30 crc kubenswrapper[5050]: I1123 16:22:30.672816 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-58696f4c7c-gzq55" event={"ID":"18576c60-9ba2-4cb0-9ae3-9e1d98a71811","Type":"ContainerStarted","Data":"4e571aa3b693fab40c7e2ff5e07fdf6c693cef5d2730554ad3138fc6df7ab543"} Nov 23 16:22:30 crc kubenswrapper[5050]: I1123 16:22:30.672828 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-58696f4c7c-gzq55" event={"ID":"18576c60-9ba2-4cb0-9ae3-9e1d98a71811","Type":"ContainerStarted","Data":"177433bf4858ab4c76ffde5c8728c31d33e79399e233c2ad9dc0dce678711539"} Nov 23 16:22:30 crc kubenswrapper[5050]: I1123 16:22:30.696080 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4g2hh\" (UniqueName: \"kubernetes.io/projected/bf8c01a5-ac54-4ce4-adfd-4f267aae7ef0-kube-api-access-4g2hh\") pod \"heat-0d12-account-create-h9wr8\" (UID: \"bf8c01a5-ac54-4ce4-adfd-4f267aae7ef0\") " pod="openstack/heat-0d12-account-create-h9wr8" Nov 23 16:22:30 crc kubenswrapper[5050]: I1123 16:22:30.696213 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bf8c01a5-ac54-4ce4-adfd-4f267aae7ef0-operator-scripts\") pod \"heat-0d12-account-create-h9wr8\" (UID: \"bf8c01a5-ac54-4ce4-adfd-4f267aae7ef0\") " pod="openstack/heat-0d12-account-create-h9wr8" Nov 23 16:22:30 crc kubenswrapper[5050]: I1123 16:22:30.699031 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bf8c01a5-ac54-4ce4-adfd-4f267aae7ef0-operator-scripts\") pod \"heat-0d12-account-create-h9wr8\" (UID: \"bf8c01a5-ac54-4ce4-adfd-4f267aae7ef0\") " pod="openstack/heat-0d12-account-create-h9wr8" Nov 23 16:22:30 crc kubenswrapper[5050]: I1123 16:22:30.715101 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-58696f4c7c-gzq55" podStartSLOduration=2.7150766219999998 
podStartE2EDuration="2.715076622s" podCreationTimestamp="2025-11-23 16:22:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 16:22:30.689307106 +0000 UTC m=+6045.856303611" watchObservedRunningTime="2025-11-23 16:22:30.715076622 +0000 UTC m=+6045.882073117" Nov 23 16:22:30 crc kubenswrapper[5050]: I1123 16:22:30.721015 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4g2hh\" (UniqueName: \"kubernetes.io/projected/bf8c01a5-ac54-4ce4-adfd-4f267aae7ef0-kube-api-access-4g2hh\") pod \"heat-0d12-account-create-h9wr8\" (UID: \"bf8c01a5-ac54-4ce4-adfd-4f267aae7ef0\") " pod="openstack/heat-0d12-account-create-h9wr8" Nov 23 16:22:30 crc kubenswrapper[5050]: I1123 16:22:30.761810 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-0d12-account-create-h9wr8" Nov 23 16:22:31 crc kubenswrapper[5050]: I1123 16:22:31.070119 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-create-l664c"] Nov 23 16:22:31 crc kubenswrapper[5050]: I1123 16:22:31.334384 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-0d12-account-create-h9wr8"] Nov 23 16:22:31 crc kubenswrapper[5050]: I1123 16:22:31.741197 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-create-l664c" event={"ID":"34b6d8e9-a394-439a-95bd-ed5c9b073581","Type":"ContainerStarted","Data":"e7d757ef1206a8f367e725b8dbf9fea21ac64a4d36e84a55f3454a5c3edd0fa3"} Nov 23 16:22:31 crc kubenswrapper[5050]: I1123 16:22:31.741253 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-create-l664c" event={"ID":"34b6d8e9-a394-439a-95bd-ed5c9b073581","Type":"ContainerStarted","Data":"ac106cc72cf67a168151ad5202f2840c981cf0782c93a91146fab95d55610a28"} Nov 23 16:22:31 crc kubenswrapper[5050]: I1123 16:22:31.752774 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-0d12-account-create-h9wr8" event={"ID":"bf8c01a5-ac54-4ce4-adfd-4f267aae7ef0","Type":"ContainerStarted","Data":"6c80117a9e017ec386d83f6ea689ee8b15abcadcc3f45495abd75afe1abc396d"} Nov 23 16:22:31 crc kubenswrapper[5050]: I1123 16:22:31.752825 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-0d12-account-create-h9wr8" event={"ID":"bf8c01a5-ac54-4ce4-adfd-4f267aae7ef0","Type":"ContainerStarted","Data":"c8f757681e13d8d4d3adcd6f0ec875752b9a1fd569554d3fa5b957a819a0d3c9"} Nov 23 16:22:31 crc kubenswrapper[5050]: I1123 16:22:31.785842 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-0d12-account-create-h9wr8" podStartSLOduration=1.7858188940000002 podStartE2EDuration="1.785818894s" podCreationTimestamp="2025-11-23 16:22:30 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 16:22:31.780785172 +0000 UTC m=+6046.947781657" watchObservedRunningTime="2025-11-23 16:22:31.785818894 +0000 UTC m=+6046.952815379" Nov 23 16:22:32 crc kubenswrapper[5050]: I1123 16:22:32.765581 5050 generic.go:334] "Generic (PLEG): container finished" podID="34b6d8e9-a394-439a-95bd-ed5c9b073581" containerID="e7d757ef1206a8f367e725b8dbf9fea21ac64a4d36e84a55f3454a5c3edd0fa3" exitCode=0 Nov 23 16:22:32 crc kubenswrapper[5050]: I1123 16:22:32.766574 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-create-l664c" 
event={"ID":"34b6d8e9-a394-439a-95bd-ed5c9b073581","Type":"ContainerDied","Data":"e7d757ef1206a8f367e725b8dbf9fea21ac64a4d36e84a55f3454a5c3edd0fa3"} Nov 23 16:22:32 crc kubenswrapper[5050]: I1123 16:22:32.769764 5050 generic.go:334] "Generic (PLEG): container finished" podID="bf8c01a5-ac54-4ce4-adfd-4f267aae7ef0" containerID="6c80117a9e017ec386d83f6ea689ee8b15abcadcc3f45495abd75afe1abc396d" exitCode=0 Nov 23 16:22:32 crc kubenswrapper[5050]: I1123 16:22:32.769808 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-0d12-account-create-h9wr8" event={"ID":"bf8c01a5-ac54-4ce4-adfd-4f267aae7ef0","Type":"ContainerDied","Data":"6c80117a9e017ec386d83f6ea689ee8b15abcadcc3f45495abd75afe1abc396d"} Nov 23 16:22:33 crc kubenswrapper[5050]: I1123 16:22:33.227803 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-create-l664c" Nov 23 16:22:33 crc kubenswrapper[5050]: I1123 16:22:33.276472 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/34b6d8e9-a394-439a-95bd-ed5c9b073581-operator-scripts\") pod \"34b6d8e9-a394-439a-95bd-ed5c9b073581\" (UID: \"34b6d8e9-a394-439a-95bd-ed5c9b073581\") " Nov 23 16:22:33 crc kubenswrapper[5050]: I1123 16:22:33.276682 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r6jt6\" (UniqueName: \"kubernetes.io/projected/34b6d8e9-a394-439a-95bd-ed5c9b073581-kube-api-access-r6jt6\") pod \"34b6d8e9-a394-439a-95bd-ed5c9b073581\" (UID: \"34b6d8e9-a394-439a-95bd-ed5c9b073581\") " Nov 23 16:22:33 crc kubenswrapper[5050]: I1123 16:22:33.277941 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/34b6d8e9-a394-439a-95bd-ed5c9b073581-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "34b6d8e9-a394-439a-95bd-ed5c9b073581" (UID: "34b6d8e9-a394-439a-95bd-ed5c9b073581"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 16:22:33 crc kubenswrapper[5050]: I1123 16:22:33.278497 5050 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/34b6d8e9-a394-439a-95bd-ed5c9b073581-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 23 16:22:33 crc kubenswrapper[5050]: I1123 16:22:33.286164 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/34b6d8e9-a394-439a-95bd-ed5c9b073581-kube-api-access-r6jt6" (OuterVolumeSpecName: "kube-api-access-r6jt6") pod "34b6d8e9-a394-439a-95bd-ed5c9b073581" (UID: "34b6d8e9-a394-439a-95bd-ed5c9b073581"). InnerVolumeSpecName "kube-api-access-r6jt6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 16:22:33 crc kubenswrapper[5050]: I1123 16:22:33.380428 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r6jt6\" (UniqueName: \"kubernetes.io/projected/34b6d8e9-a394-439a-95bd-ed5c9b073581-kube-api-access-r6jt6\") on node \"crc\" DevicePath \"\"" Nov 23 16:22:33 crc kubenswrapper[5050]: I1123 16:22:33.780074 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-db-create-l664c" Nov 23 16:22:33 crc kubenswrapper[5050]: I1123 16:22:33.781417 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-create-l664c" event={"ID":"34b6d8e9-a394-439a-95bd-ed5c9b073581","Type":"ContainerDied","Data":"ac106cc72cf67a168151ad5202f2840c981cf0782c93a91146fab95d55610a28"} Nov 23 16:22:33 crc kubenswrapper[5050]: I1123 16:22:33.781487 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ac106cc72cf67a168151ad5202f2840c981cf0782c93a91146fab95d55610a28" Nov 23 16:22:34 crc kubenswrapper[5050]: I1123 16:22:34.251485 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-0d12-account-create-h9wr8" Nov 23 16:22:34 crc kubenswrapper[5050]: I1123 16:22:34.327375 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4g2hh\" (UniqueName: \"kubernetes.io/projected/bf8c01a5-ac54-4ce4-adfd-4f267aae7ef0-kube-api-access-4g2hh\") pod \"bf8c01a5-ac54-4ce4-adfd-4f267aae7ef0\" (UID: \"bf8c01a5-ac54-4ce4-adfd-4f267aae7ef0\") " Nov 23 16:22:34 crc kubenswrapper[5050]: I1123 16:22:34.327468 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bf8c01a5-ac54-4ce4-adfd-4f267aae7ef0-operator-scripts\") pod \"bf8c01a5-ac54-4ce4-adfd-4f267aae7ef0\" (UID: \"bf8c01a5-ac54-4ce4-adfd-4f267aae7ef0\") " Nov 23 16:22:34 crc kubenswrapper[5050]: I1123 16:22:34.328363 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf8c01a5-ac54-4ce4-adfd-4f267aae7ef0-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "bf8c01a5-ac54-4ce4-adfd-4f267aae7ef0" (UID: "bf8c01a5-ac54-4ce4-adfd-4f267aae7ef0"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 16:22:34 crc kubenswrapper[5050]: I1123 16:22:34.340253 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf8c01a5-ac54-4ce4-adfd-4f267aae7ef0-kube-api-access-4g2hh" (OuterVolumeSpecName: "kube-api-access-4g2hh") pod "bf8c01a5-ac54-4ce4-adfd-4f267aae7ef0" (UID: "bf8c01a5-ac54-4ce4-adfd-4f267aae7ef0"). InnerVolumeSpecName "kube-api-access-4g2hh". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 16:22:34 crc kubenswrapper[5050]: I1123 16:22:34.430312 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4g2hh\" (UniqueName: \"kubernetes.io/projected/bf8c01a5-ac54-4ce4-adfd-4f267aae7ef0-kube-api-access-4g2hh\") on node \"crc\" DevicePath \"\"" Nov 23 16:22:34 crc kubenswrapper[5050]: I1123 16:22:34.430757 5050 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bf8c01a5-ac54-4ce4-adfd-4f267aae7ef0-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 23 16:22:34 crc kubenswrapper[5050]: I1123 16:22:34.797950 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-0d12-account-create-h9wr8" event={"ID":"bf8c01a5-ac54-4ce4-adfd-4f267aae7ef0","Type":"ContainerDied","Data":"c8f757681e13d8d4d3adcd6f0ec875752b9a1fd569554d3fa5b957a819a0d3c9"} Nov 23 16:22:34 crc kubenswrapper[5050]: I1123 16:22:34.798012 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c8f757681e13d8d4d3adcd6f0ec875752b9a1fd569554d3fa5b957a819a0d3c9" Nov 23 16:22:34 crc kubenswrapper[5050]: I1123 16:22:34.798096 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-0d12-account-create-h9wr8" Nov 23 16:22:35 crc kubenswrapper[5050]: I1123 16:22:35.641313 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-db-sync-qpgwc"] Nov 23 16:22:35 crc kubenswrapper[5050]: E1123 16:22:35.642509 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bf8c01a5-ac54-4ce4-adfd-4f267aae7ef0" containerName="mariadb-account-create" Nov 23 16:22:35 crc kubenswrapper[5050]: I1123 16:22:35.642534 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="bf8c01a5-ac54-4ce4-adfd-4f267aae7ef0" containerName="mariadb-account-create" Nov 23 16:22:35 crc kubenswrapper[5050]: E1123 16:22:35.642605 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="34b6d8e9-a394-439a-95bd-ed5c9b073581" containerName="mariadb-database-create" Nov 23 16:22:35 crc kubenswrapper[5050]: I1123 16:22:35.642617 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="34b6d8e9-a394-439a-95bd-ed5c9b073581" containerName="mariadb-database-create" Nov 23 16:22:35 crc kubenswrapper[5050]: I1123 16:22:35.642964 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="34b6d8e9-a394-439a-95bd-ed5c9b073581" containerName="mariadb-database-create" Nov 23 16:22:35 crc kubenswrapper[5050]: I1123 16:22:35.643013 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="bf8c01a5-ac54-4ce4-adfd-4f267aae7ef0" containerName="mariadb-account-create" Nov 23 16:22:35 crc kubenswrapper[5050]: I1123 16:22:35.643941 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-db-sync-qpgwc" Nov 23 16:22:35 crc kubenswrapper[5050]: I1123 16:22:35.648935 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-config-data" Nov 23 16:22:35 crc kubenswrapper[5050]: I1123 16:22:35.648930 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-heat-dockercfg-sg7zm" Nov 23 16:22:35 crc kubenswrapper[5050]: I1123 16:22:35.677974 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-sync-qpgwc"] Nov 23 16:22:35 crc kubenswrapper[5050]: I1123 16:22:35.771700 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/caa63d8d-4035-4c91-8a74-64b692dd7838-config-data\") pod \"heat-db-sync-qpgwc\" (UID: \"caa63d8d-4035-4c91-8a74-64b692dd7838\") " pod="openstack/heat-db-sync-qpgwc" Nov 23 16:22:35 crc kubenswrapper[5050]: I1123 16:22:35.771776 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/caa63d8d-4035-4c91-8a74-64b692dd7838-combined-ca-bundle\") pod \"heat-db-sync-qpgwc\" (UID: \"caa63d8d-4035-4c91-8a74-64b692dd7838\") " pod="openstack/heat-db-sync-qpgwc" Nov 23 16:22:35 crc kubenswrapper[5050]: I1123 16:22:35.771804 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-psqzk\" (UniqueName: \"kubernetes.io/projected/caa63d8d-4035-4c91-8a74-64b692dd7838-kube-api-access-psqzk\") pod \"heat-db-sync-qpgwc\" (UID: \"caa63d8d-4035-4c91-8a74-64b692dd7838\") " pod="openstack/heat-db-sync-qpgwc" Nov 23 16:22:35 crc kubenswrapper[5050]: I1123 16:22:35.874687 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/caa63d8d-4035-4c91-8a74-64b692dd7838-config-data\") pod \"heat-db-sync-qpgwc\" (UID: \"caa63d8d-4035-4c91-8a74-64b692dd7838\") " pod="openstack/heat-db-sync-qpgwc" Nov 23 16:22:35 crc kubenswrapper[5050]: I1123 16:22:35.874798 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/caa63d8d-4035-4c91-8a74-64b692dd7838-combined-ca-bundle\") pod \"heat-db-sync-qpgwc\" (UID: \"caa63d8d-4035-4c91-8a74-64b692dd7838\") " pod="openstack/heat-db-sync-qpgwc" Nov 23 16:22:35 crc kubenswrapper[5050]: I1123 16:22:35.874826 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-psqzk\" (UniqueName: \"kubernetes.io/projected/caa63d8d-4035-4c91-8a74-64b692dd7838-kube-api-access-psqzk\") pod \"heat-db-sync-qpgwc\" (UID: \"caa63d8d-4035-4c91-8a74-64b692dd7838\") " pod="openstack/heat-db-sync-qpgwc" Nov 23 16:22:35 crc kubenswrapper[5050]: I1123 16:22:35.891078 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/caa63d8d-4035-4c91-8a74-64b692dd7838-config-data\") pod \"heat-db-sync-qpgwc\" (UID: \"caa63d8d-4035-4c91-8a74-64b692dd7838\") " pod="openstack/heat-db-sync-qpgwc" Nov 23 16:22:35 crc kubenswrapper[5050]: I1123 16:22:35.892967 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/caa63d8d-4035-4c91-8a74-64b692dd7838-combined-ca-bundle\") pod \"heat-db-sync-qpgwc\" (UID: \"caa63d8d-4035-4c91-8a74-64b692dd7838\") " pod="openstack/heat-db-sync-qpgwc" 
Nov 23 16:22:35 crc kubenswrapper[5050]: I1123 16:22:35.896305 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-psqzk\" (UniqueName: \"kubernetes.io/projected/caa63d8d-4035-4c91-8a74-64b692dd7838-kube-api-access-psqzk\") pod \"heat-db-sync-qpgwc\" (UID: \"caa63d8d-4035-4c91-8a74-64b692dd7838\") " pod="openstack/heat-db-sync-qpgwc" Nov 23 16:22:35 crc kubenswrapper[5050]: I1123 16:22:35.980486 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-sync-qpgwc" Nov 23 16:22:36 crc kubenswrapper[5050]: I1123 16:22:36.588911 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-sync-qpgwc"] Nov 23 16:22:36 crc kubenswrapper[5050]: I1123 16:22:36.820202 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-qpgwc" event={"ID":"caa63d8d-4035-4c91-8a74-64b692dd7838","Type":"ContainerStarted","Data":"fda79a6e95d56864b2dff74a01601085249fe0481354a584053d3d8b5592a6a9"} Nov 23 16:22:39 crc kubenswrapper[5050]: I1123 16:22:39.293365 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-58696f4c7c-gzq55" Nov 23 16:22:39 crc kubenswrapper[5050]: I1123 16:22:39.294731 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-58696f4c7c-gzq55" Nov 23 16:22:43 crc kubenswrapper[5050]: I1123 16:22:43.081613 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-28bd-account-create-jrbzz"] Nov 23 16:22:43 crc kubenswrapper[5050]: I1123 16:22:43.087986 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-28bd-account-create-jrbzz"] Nov 23 16:22:43 crc kubenswrapper[5050]: I1123 16:22:43.100617 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-create-4rkt4"] Nov 23 16:22:43 crc kubenswrapper[5050]: I1123 16:22:43.110582 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-create-4rkt4"] Nov 23 16:22:43 crc kubenswrapper[5050]: I1123 16:22:43.561794 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6bfb09d3-730b-4a95-ac71-574889caabe0" path="/var/lib/kubelet/pods/6bfb09d3-730b-4a95-ac71-574889caabe0/volumes" Nov 23 16:22:43 crc kubenswrapper[5050]: I1123 16:22:43.562718 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d1d2c6c7-ea81-4348-a921-8ab8bf07bd55" path="/var/lib/kubelet/pods/d1d2c6c7-ea81-4348-a921-8ab8bf07bd55/volumes" Nov 23 16:22:46 crc kubenswrapper[5050]: I1123 16:22:46.936103 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-qpgwc" event={"ID":"caa63d8d-4035-4c91-8a74-64b692dd7838","Type":"ContainerStarted","Data":"5ee80f98bb9be001a5ed99e866bff219ebe3c0d4127de00bea8006af0b706e9a"} Nov 23 16:22:46 crc kubenswrapper[5050]: I1123 16:22:46.973726 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-db-sync-qpgwc" podStartSLOduration=2.575173307 podStartE2EDuration="11.973694348s" podCreationTimestamp="2025-11-23 16:22:35 +0000 UTC" firstStartedPulling="2025-11-23 16:22:36.578363301 +0000 UTC m=+6051.745359786" lastFinishedPulling="2025-11-23 16:22:45.976884342 +0000 UTC m=+6061.143880827" observedRunningTime="2025-11-23 16:22:46.961612928 +0000 UTC m=+6062.128609433" watchObservedRunningTime="2025-11-23 16:22:46.973694348 +0000 UTC m=+6062.140690843" Nov 23 16:22:48 crc kubenswrapper[5050]: I1123 16:22:48.967100 5050 generic.go:334] "Generic (PLEG): 
container finished" podID="caa63d8d-4035-4c91-8a74-64b692dd7838" containerID="5ee80f98bb9be001a5ed99e866bff219ebe3c0d4127de00bea8006af0b706e9a" exitCode=0 Nov 23 16:22:48 crc kubenswrapper[5050]: I1123 16:22:48.968074 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-qpgwc" event={"ID":"caa63d8d-4035-4c91-8a74-64b692dd7838","Type":"ContainerDied","Data":"5ee80f98bb9be001a5ed99e866bff219ebe3c0d4127de00bea8006af0b706e9a"} Nov 23 16:22:49 crc kubenswrapper[5050]: I1123 16:22:49.053304 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-sync-fz8ng"] Nov 23 16:22:49 crc kubenswrapper[5050]: I1123 16:22:49.065229 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-sync-fz8ng"] Nov 23 16:22:49 crc kubenswrapper[5050]: I1123 16:22:49.295603 5050 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-58696f4c7c-gzq55" podUID="18576c60-9ba2-4cb0-9ae3-9e1d98a71811" containerName="horizon" probeResult="failure" output="Get \"http://10.217.1.117:8080/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.1.117:8080: connect: connection refused" Nov 23 16:22:49 crc kubenswrapper[5050]: I1123 16:22:49.569715 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="248de275-6c68-4a0d-934d-af45b35a9320" path="/var/lib/kubelet/pods/248de275-6c68-4a0d-934d-af45b35a9320/volumes" Nov 23 16:22:50 crc kubenswrapper[5050]: I1123 16:22:50.396219 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-sync-qpgwc" Nov 23 16:22:50 crc kubenswrapper[5050]: I1123 16:22:50.490203 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-psqzk\" (UniqueName: \"kubernetes.io/projected/caa63d8d-4035-4c91-8a74-64b692dd7838-kube-api-access-psqzk\") pod \"caa63d8d-4035-4c91-8a74-64b692dd7838\" (UID: \"caa63d8d-4035-4c91-8a74-64b692dd7838\") " Nov 23 16:22:50 crc kubenswrapper[5050]: I1123 16:22:50.490384 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/caa63d8d-4035-4c91-8a74-64b692dd7838-combined-ca-bundle\") pod \"caa63d8d-4035-4c91-8a74-64b692dd7838\" (UID: \"caa63d8d-4035-4c91-8a74-64b692dd7838\") " Nov 23 16:22:50 crc kubenswrapper[5050]: I1123 16:22:50.490560 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/caa63d8d-4035-4c91-8a74-64b692dd7838-config-data\") pod \"caa63d8d-4035-4c91-8a74-64b692dd7838\" (UID: \"caa63d8d-4035-4c91-8a74-64b692dd7838\") " Nov 23 16:22:50 crc kubenswrapper[5050]: I1123 16:22:50.498759 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/caa63d8d-4035-4c91-8a74-64b692dd7838-kube-api-access-psqzk" (OuterVolumeSpecName: "kube-api-access-psqzk") pod "caa63d8d-4035-4c91-8a74-64b692dd7838" (UID: "caa63d8d-4035-4c91-8a74-64b692dd7838"). InnerVolumeSpecName "kube-api-access-psqzk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 16:22:50 crc kubenswrapper[5050]: I1123 16:22:50.553436 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/caa63d8d-4035-4c91-8a74-64b692dd7838-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "caa63d8d-4035-4c91-8a74-64b692dd7838" (UID: "caa63d8d-4035-4c91-8a74-64b692dd7838"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:22:50 crc kubenswrapper[5050]: I1123 16:22:50.600678 5050 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/caa63d8d-4035-4c91-8a74-64b692dd7838-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 16:22:50 crc kubenswrapper[5050]: I1123 16:22:50.600737 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-psqzk\" (UniqueName: \"kubernetes.io/projected/caa63d8d-4035-4c91-8a74-64b692dd7838-kube-api-access-psqzk\") on node \"crc\" DevicePath \"\"" Nov 23 16:22:50 crc kubenswrapper[5050]: I1123 16:22:50.601200 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/caa63d8d-4035-4c91-8a74-64b692dd7838-config-data" (OuterVolumeSpecName: "config-data") pod "caa63d8d-4035-4c91-8a74-64b692dd7838" (UID: "caa63d8d-4035-4c91-8a74-64b692dd7838"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:22:50 crc kubenswrapper[5050]: I1123 16:22:50.703009 5050 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/caa63d8d-4035-4c91-8a74-64b692dd7838-config-data\") on node \"crc\" DevicePath \"\"" Nov 23 16:22:50 crc kubenswrapper[5050]: I1123 16:22:50.999748 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-qpgwc" event={"ID":"caa63d8d-4035-4c91-8a74-64b692dd7838","Type":"ContainerDied","Data":"fda79a6e95d56864b2dff74a01601085249fe0481354a584053d3d8b5592a6a9"} Nov 23 16:22:50 crc kubenswrapper[5050]: I1123 16:22:50.999852 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fda79a6e95d56864b2dff74a01601085249fe0481354a584053d3d8b5592a6a9" Nov 23 16:22:51 crc kubenswrapper[5050]: I1123 16:22:50.999890 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-sync-qpgwc" Nov 23 16:22:52 crc kubenswrapper[5050]: I1123 16:22:52.237395 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-engine-5cb68c5c69-fjmhq"] Nov 23 16:22:52 crc kubenswrapper[5050]: E1123 16:22:52.237916 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="caa63d8d-4035-4c91-8a74-64b692dd7838" containerName="heat-db-sync" Nov 23 16:22:52 crc kubenswrapper[5050]: I1123 16:22:52.237932 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="caa63d8d-4035-4c91-8a74-64b692dd7838" containerName="heat-db-sync" Nov 23 16:22:52 crc kubenswrapper[5050]: I1123 16:22:52.238127 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="caa63d8d-4035-4c91-8a74-64b692dd7838" containerName="heat-db-sync" Nov 23 16:22:52 crc kubenswrapper[5050]: I1123 16:22:52.238994 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-engine-5cb68c5c69-fjmhq" Nov 23 16:22:52 crc kubenswrapper[5050]: I1123 16:22:52.244773 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-heat-dockercfg-sg7zm" Nov 23 16:22:52 crc kubenswrapper[5050]: I1123 16:22:52.245365 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-config-data" Nov 23 16:22:52 crc kubenswrapper[5050]: I1123 16:22:52.246384 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-engine-config-data" Nov 23 16:22:52 crc kubenswrapper[5050]: I1123 16:22:52.250895 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-5cb68c5c69-fjmhq"] Nov 23 16:22:52 crc kubenswrapper[5050]: I1123 16:22:52.355675 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2c7gw\" (UniqueName: \"kubernetes.io/projected/b63b2bec-ce8e-4140-97cc-697d06fba98c-kube-api-access-2c7gw\") pod \"heat-engine-5cb68c5c69-fjmhq\" (UID: \"b63b2bec-ce8e-4140-97cc-697d06fba98c\") " pod="openstack/heat-engine-5cb68c5c69-fjmhq" Nov 23 16:22:52 crc kubenswrapper[5050]: I1123 16:22:52.356154 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b63b2bec-ce8e-4140-97cc-697d06fba98c-config-data-custom\") pod \"heat-engine-5cb68c5c69-fjmhq\" (UID: \"b63b2bec-ce8e-4140-97cc-697d06fba98c\") " pod="openstack/heat-engine-5cb68c5c69-fjmhq" Nov 23 16:22:52 crc kubenswrapper[5050]: I1123 16:22:52.356221 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b63b2bec-ce8e-4140-97cc-697d06fba98c-config-data\") pod \"heat-engine-5cb68c5c69-fjmhq\" (UID: \"b63b2bec-ce8e-4140-97cc-697d06fba98c\") " pod="openstack/heat-engine-5cb68c5c69-fjmhq" Nov 23 16:22:52 crc kubenswrapper[5050]: I1123 16:22:52.356318 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b63b2bec-ce8e-4140-97cc-697d06fba98c-combined-ca-bundle\") pod \"heat-engine-5cb68c5c69-fjmhq\" (UID: \"b63b2bec-ce8e-4140-97cc-697d06fba98c\") " pod="openstack/heat-engine-5cb68c5c69-fjmhq" Nov 23 16:22:52 crc kubenswrapper[5050]: I1123 16:22:52.458407 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b63b2bec-ce8e-4140-97cc-697d06fba98c-config-data\") pod \"heat-engine-5cb68c5c69-fjmhq\" (UID: \"b63b2bec-ce8e-4140-97cc-697d06fba98c\") " pod="openstack/heat-engine-5cb68c5c69-fjmhq" Nov 23 16:22:52 crc kubenswrapper[5050]: I1123 16:22:52.458648 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b63b2bec-ce8e-4140-97cc-697d06fba98c-combined-ca-bundle\") pod \"heat-engine-5cb68c5c69-fjmhq\" (UID: \"b63b2bec-ce8e-4140-97cc-697d06fba98c\") " pod="openstack/heat-engine-5cb68c5c69-fjmhq" Nov 23 16:22:52 crc kubenswrapper[5050]: I1123 16:22:52.458751 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2c7gw\" (UniqueName: \"kubernetes.io/projected/b63b2bec-ce8e-4140-97cc-697d06fba98c-kube-api-access-2c7gw\") pod \"heat-engine-5cb68c5c69-fjmhq\" (UID: \"b63b2bec-ce8e-4140-97cc-697d06fba98c\") " 
pod="openstack/heat-engine-5cb68c5c69-fjmhq" Nov 23 16:22:52 crc kubenswrapper[5050]: I1123 16:22:52.458785 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b63b2bec-ce8e-4140-97cc-697d06fba98c-config-data-custom\") pod \"heat-engine-5cb68c5c69-fjmhq\" (UID: \"b63b2bec-ce8e-4140-97cc-697d06fba98c\") " pod="openstack/heat-engine-5cb68c5c69-fjmhq" Nov 23 16:22:52 crc kubenswrapper[5050]: I1123 16:22:52.466523 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b63b2bec-ce8e-4140-97cc-697d06fba98c-config-data-custom\") pod \"heat-engine-5cb68c5c69-fjmhq\" (UID: \"b63b2bec-ce8e-4140-97cc-697d06fba98c\") " pod="openstack/heat-engine-5cb68c5c69-fjmhq" Nov 23 16:22:52 crc kubenswrapper[5050]: I1123 16:22:52.470247 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b63b2bec-ce8e-4140-97cc-697d06fba98c-combined-ca-bundle\") pod \"heat-engine-5cb68c5c69-fjmhq\" (UID: \"b63b2bec-ce8e-4140-97cc-697d06fba98c\") " pod="openstack/heat-engine-5cb68c5c69-fjmhq" Nov 23 16:22:52 crc kubenswrapper[5050]: I1123 16:22:52.470781 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b63b2bec-ce8e-4140-97cc-697d06fba98c-config-data\") pod \"heat-engine-5cb68c5c69-fjmhq\" (UID: \"b63b2bec-ce8e-4140-97cc-697d06fba98c\") " pod="openstack/heat-engine-5cb68c5c69-fjmhq" Nov 23 16:22:52 crc kubenswrapper[5050]: I1123 16:22:52.481474 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2c7gw\" (UniqueName: \"kubernetes.io/projected/b63b2bec-ce8e-4140-97cc-697d06fba98c-kube-api-access-2c7gw\") pod \"heat-engine-5cb68c5c69-fjmhq\" (UID: \"b63b2bec-ce8e-4140-97cc-697d06fba98c\") " pod="openstack/heat-engine-5cb68c5c69-fjmhq" Nov 23 16:22:52 crc kubenswrapper[5050]: I1123 16:22:52.538503 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-api-65d549d5cc-6rmg6"] Nov 23 16:22:52 crc kubenswrapper[5050]: I1123 16:22:52.540398 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-65d549d5cc-6rmg6" Nov 23 16:22:52 crc kubenswrapper[5050]: I1123 16:22:52.545542 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-api-config-data" Nov 23 16:22:52 crc kubenswrapper[5050]: I1123 16:22:52.553643 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-cfnapi-7d69779b5c-bjxtz"] Nov 23 16:22:52 crc kubenswrapper[5050]: I1123 16:22:52.555295 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-7d69779b5c-bjxtz" Nov 23 16:22:52 crc kubenswrapper[5050]: I1123 16:22:52.560565 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-cfnapi-config-data" Nov 23 16:22:52 crc kubenswrapper[5050]: I1123 16:22:52.561200 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-engine-5cb68c5c69-fjmhq" Nov 23 16:22:52 crc kubenswrapper[5050]: I1123 16:22:52.573841 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-65d549d5cc-6rmg6"] Nov 23 16:22:52 crc kubenswrapper[5050]: I1123 16:22:52.599008 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-7d69779b5c-bjxtz"] Nov 23 16:22:52 crc kubenswrapper[5050]: I1123 16:22:52.669981 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/55e60cb2-77a0-4ee5-bef3-a3760eb098a7-combined-ca-bundle\") pod \"heat-api-65d549d5cc-6rmg6\" (UID: \"55e60cb2-77a0-4ee5-bef3-a3760eb098a7\") " pod="openstack/heat-api-65d549d5cc-6rmg6" Nov 23 16:22:52 crc kubenswrapper[5050]: I1123 16:22:52.670396 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/03fc802d-a360-46cf-8f71-153e1f2d60f6-config-data\") pod \"heat-cfnapi-7d69779b5c-bjxtz\" (UID: \"03fc802d-a360-46cf-8f71-153e1f2d60f6\") " pod="openstack/heat-cfnapi-7d69779b5c-bjxtz" Nov 23 16:22:52 crc kubenswrapper[5050]: I1123 16:22:52.670486 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/03fc802d-a360-46cf-8f71-153e1f2d60f6-combined-ca-bundle\") pod \"heat-cfnapi-7d69779b5c-bjxtz\" (UID: \"03fc802d-a360-46cf-8f71-153e1f2d60f6\") " pod="openstack/heat-cfnapi-7d69779b5c-bjxtz" Nov 23 16:22:52 crc kubenswrapper[5050]: I1123 16:22:52.670544 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/55e60cb2-77a0-4ee5-bef3-a3760eb098a7-config-data-custom\") pod \"heat-api-65d549d5cc-6rmg6\" (UID: \"55e60cb2-77a0-4ee5-bef3-a3760eb098a7\") " pod="openstack/heat-api-65d549d5cc-6rmg6" Nov 23 16:22:52 crc kubenswrapper[5050]: I1123 16:22:52.670565 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wztrc\" (UniqueName: \"kubernetes.io/projected/55e60cb2-77a0-4ee5-bef3-a3760eb098a7-kube-api-access-wztrc\") pod \"heat-api-65d549d5cc-6rmg6\" (UID: \"55e60cb2-77a0-4ee5-bef3-a3760eb098a7\") " pod="openstack/heat-api-65d549d5cc-6rmg6" Nov 23 16:22:52 crc kubenswrapper[5050]: I1123 16:22:52.670601 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/55e60cb2-77a0-4ee5-bef3-a3760eb098a7-config-data\") pod \"heat-api-65d549d5cc-6rmg6\" (UID: \"55e60cb2-77a0-4ee5-bef3-a3760eb098a7\") " pod="openstack/heat-api-65d549d5cc-6rmg6" Nov 23 16:22:52 crc kubenswrapper[5050]: I1123 16:22:52.670646 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/03fc802d-a360-46cf-8f71-153e1f2d60f6-config-data-custom\") pod \"heat-cfnapi-7d69779b5c-bjxtz\" (UID: \"03fc802d-a360-46cf-8f71-153e1f2d60f6\") " pod="openstack/heat-cfnapi-7d69779b5c-bjxtz" Nov 23 16:22:52 crc kubenswrapper[5050]: I1123 16:22:52.670678 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-929p6\" (UniqueName: \"kubernetes.io/projected/03fc802d-a360-46cf-8f71-153e1f2d60f6-kube-api-access-929p6\") pod 
\"heat-cfnapi-7d69779b5c-bjxtz\" (UID: \"03fc802d-a360-46cf-8f71-153e1f2d60f6\") " pod="openstack/heat-cfnapi-7d69779b5c-bjxtz" Nov 23 16:22:52 crc kubenswrapper[5050]: I1123 16:22:52.773118 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/55e60cb2-77a0-4ee5-bef3-a3760eb098a7-combined-ca-bundle\") pod \"heat-api-65d549d5cc-6rmg6\" (UID: \"55e60cb2-77a0-4ee5-bef3-a3760eb098a7\") " pod="openstack/heat-api-65d549d5cc-6rmg6" Nov 23 16:22:52 crc kubenswrapper[5050]: I1123 16:22:52.773173 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/03fc802d-a360-46cf-8f71-153e1f2d60f6-config-data\") pod \"heat-cfnapi-7d69779b5c-bjxtz\" (UID: \"03fc802d-a360-46cf-8f71-153e1f2d60f6\") " pod="openstack/heat-cfnapi-7d69779b5c-bjxtz" Nov 23 16:22:52 crc kubenswrapper[5050]: I1123 16:22:52.773246 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/03fc802d-a360-46cf-8f71-153e1f2d60f6-combined-ca-bundle\") pod \"heat-cfnapi-7d69779b5c-bjxtz\" (UID: \"03fc802d-a360-46cf-8f71-153e1f2d60f6\") " pod="openstack/heat-cfnapi-7d69779b5c-bjxtz" Nov 23 16:22:52 crc kubenswrapper[5050]: I1123 16:22:52.773288 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/55e60cb2-77a0-4ee5-bef3-a3760eb098a7-config-data-custom\") pod \"heat-api-65d549d5cc-6rmg6\" (UID: \"55e60cb2-77a0-4ee5-bef3-a3760eb098a7\") " pod="openstack/heat-api-65d549d5cc-6rmg6" Nov 23 16:22:52 crc kubenswrapper[5050]: I1123 16:22:52.773305 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wztrc\" (UniqueName: \"kubernetes.io/projected/55e60cb2-77a0-4ee5-bef3-a3760eb098a7-kube-api-access-wztrc\") pod \"heat-api-65d549d5cc-6rmg6\" (UID: \"55e60cb2-77a0-4ee5-bef3-a3760eb098a7\") " pod="openstack/heat-api-65d549d5cc-6rmg6" Nov 23 16:22:52 crc kubenswrapper[5050]: I1123 16:22:52.773326 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/55e60cb2-77a0-4ee5-bef3-a3760eb098a7-config-data\") pod \"heat-api-65d549d5cc-6rmg6\" (UID: \"55e60cb2-77a0-4ee5-bef3-a3760eb098a7\") " pod="openstack/heat-api-65d549d5cc-6rmg6" Nov 23 16:22:52 crc kubenswrapper[5050]: I1123 16:22:52.773356 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/03fc802d-a360-46cf-8f71-153e1f2d60f6-config-data-custom\") pod \"heat-cfnapi-7d69779b5c-bjxtz\" (UID: \"03fc802d-a360-46cf-8f71-153e1f2d60f6\") " pod="openstack/heat-cfnapi-7d69779b5c-bjxtz" Nov 23 16:22:52 crc kubenswrapper[5050]: I1123 16:22:52.773387 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-929p6\" (UniqueName: \"kubernetes.io/projected/03fc802d-a360-46cf-8f71-153e1f2d60f6-kube-api-access-929p6\") pod \"heat-cfnapi-7d69779b5c-bjxtz\" (UID: \"03fc802d-a360-46cf-8f71-153e1f2d60f6\") " pod="openstack/heat-cfnapi-7d69779b5c-bjxtz" Nov 23 16:22:52 crc kubenswrapper[5050]: I1123 16:22:52.783752 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/55e60cb2-77a0-4ee5-bef3-a3760eb098a7-combined-ca-bundle\") pod \"heat-api-65d549d5cc-6rmg6\" (UID: 
\"55e60cb2-77a0-4ee5-bef3-a3760eb098a7\") " pod="openstack/heat-api-65d549d5cc-6rmg6" Nov 23 16:22:52 crc kubenswrapper[5050]: I1123 16:22:52.786503 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/55e60cb2-77a0-4ee5-bef3-a3760eb098a7-config-data-custom\") pod \"heat-api-65d549d5cc-6rmg6\" (UID: \"55e60cb2-77a0-4ee5-bef3-a3760eb098a7\") " pod="openstack/heat-api-65d549d5cc-6rmg6" Nov 23 16:22:52 crc kubenswrapper[5050]: I1123 16:22:52.792288 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/03fc802d-a360-46cf-8f71-153e1f2d60f6-config-data-custom\") pod \"heat-cfnapi-7d69779b5c-bjxtz\" (UID: \"03fc802d-a360-46cf-8f71-153e1f2d60f6\") " pod="openstack/heat-cfnapi-7d69779b5c-bjxtz" Nov 23 16:22:52 crc kubenswrapper[5050]: I1123 16:22:52.794437 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-929p6\" (UniqueName: \"kubernetes.io/projected/03fc802d-a360-46cf-8f71-153e1f2d60f6-kube-api-access-929p6\") pod \"heat-cfnapi-7d69779b5c-bjxtz\" (UID: \"03fc802d-a360-46cf-8f71-153e1f2d60f6\") " pod="openstack/heat-cfnapi-7d69779b5c-bjxtz" Nov 23 16:22:52 crc kubenswrapper[5050]: I1123 16:22:52.795274 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/03fc802d-a360-46cf-8f71-153e1f2d60f6-combined-ca-bundle\") pod \"heat-cfnapi-7d69779b5c-bjxtz\" (UID: \"03fc802d-a360-46cf-8f71-153e1f2d60f6\") " pod="openstack/heat-cfnapi-7d69779b5c-bjxtz" Nov 23 16:22:52 crc kubenswrapper[5050]: I1123 16:22:52.796499 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wztrc\" (UniqueName: \"kubernetes.io/projected/55e60cb2-77a0-4ee5-bef3-a3760eb098a7-kube-api-access-wztrc\") pod \"heat-api-65d549d5cc-6rmg6\" (UID: \"55e60cb2-77a0-4ee5-bef3-a3760eb098a7\") " pod="openstack/heat-api-65d549d5cc-6rmg6" Nov 23 16:22:52 crc kubenswrapper[5050]: I1123 16:22:52.796776 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/03fc802d-a360-46cf-8f71-153e1f2d60f6-config-data\") pod \"heat-cfnapi-7d69779b5c-bjxtz\" (UID: \"03fc802d-a360-46cf-8f71-153e1f2d60f6\") " pod="openstack/heat-cfnapi-7d69779b5c-bjxtz" Nov 23 16:22:52 crc kubenswrapper[5050]: I1123 16:22:52.808820 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/55e60cb2-77a0-4ee5-bef3-a3760eb098a7-config-data\") pod \"heat-api-65d549d5cc-6rmg6\" (UID: \"55e60cb2-77a0-4ee5-bef3-a3760eb098a7\") " pod="openstack/heat-api-65d549d5cc-6rmg6" Nov 23 16:22:52 crc kubenswrapper[5050]: I1123 16:22:52.999158 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-65d549d5cc-6rmg6" Nov 23 16:22:53 crc kubenswrapper[5050]: I1123 16:22:53.017915 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-cfnapi-7d69779b5c-bjxtz" Nov 23 16:22:53 crc kubenswrapper[5050]: I1123 16:22:53.070437 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-5cb68c5c69-fjmhq"] Nov 23 16:22:53 crc kubenswrapper[5050]: W1123 16:22:53.082860 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb63b2bec_ce8e_4140_97cc_697d06fba98c.slice/crio-c84be339db5e2a8bb5bd5ba7097576f7750e10b041676c1d3931d32d3ee44636 WatchSource:0}: Error finding container c84be339db5e2a8bb5bd5ba7097576f7750e10b041676c1d3931d32d3ee44636: Status 404 returned error can't find the container with id c84be339db5e2a8bb5bd5ba7097576f7750e10b041676c1d3931d32d3ee44636 Nov 23 16:22:53 crc kubenswrapper[5050]: I1123 16:22:53.564832 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-65d549d5cc-6rmg6"] Nov 23 16:22:53 crc kubenswrapper[5050]: W1123 16:22:53.572477 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod55e60cb2_77a0_4ee5_bef3_a3760eb098a7.slice/crio-dd3722b0a7a13816c02aedc69803082f745d4f7113de11e64cf1b4d01597e5bd WatchSource:0}: Error finding container dd3722b0a7a13816c02aedc69803082f745d4f7113de11e64cf1b4d01597e5bd: Status 404 returned error can't find the container with id dd3722b0a7a13816c02aedc69803082f745d4f7113de11e64cf1b4d01597e5bd Nov 23 16:22:53 crc kubenswrapper[5050]: I1123 16:22:53.654286 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-7d69779b5c-bjxtz"] Nov 23 16:22:53 crc kubenswrapper[5050]: W1123 16:22:53.695148 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod03fc802d_a360_46cf_8f71_153e1f2d60f6.slice/crio-927cf5353ba076ede0a6997f224ac17a45ea9fc3bdeb2e5176ea0c9eb4531bf3 WatchSource:0}: Error finding container 927cf5353ba076ede0a6997f224ac17a45ea9fc3bdeb2e5176ea0c9eb4531bf3: Status 404 returned error can't find the container with id 927cf5353ba076ede0a6997f224ac17a45ea9fc3bdeb2e5176ea0c9eb4531bf3 Nov 23 16:22:54 crc kubenswrapper[5050]: I1123 16:22:54.035031 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-65d549d5cc-6rmg6" event={"ID":"55e60cb2-77a0-4ee5-bef3-a3760eb098a7","Type":"ContainerStarted","Data":"dd3722b0a7a13816c02aedc69803082f745d4f7113de11e64cf1b4d01597e5bd"} Nov 23 16:22:54 crc kubenswrapper[5050]: I1123 16:22:54.037587 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-5cb68c5c69-fjmhq" event={"ID":"b63b2bec-ce8e-4140-97cc-697d06fba98c","Type":"ContainerStarted","Data":"5e9590b320120994fc5c07b6bb885128a9525a3c157371b54300029a13b2386e"} Nov 23 16:22:54 crc kubenswrapper[5050]: I1123 16:22:54.037626 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-5cb68c5c69-fjmhq" event={"ID":"b63b2bec-ce8e-4140-97cc-697d06fba98c","Type":"ContainerStarted","Data":"c84be339db5e2a8bb5bd5ba7097576f7750e10b041676c1d3931d32d3ee44636"} Nov 23 16:22:54 crc kubenswrapper[5050]: I1123 16:22:54.037770 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-engine-5cb68c5c69-fjmhq" Nov 23 16:22:54 crc kubenswrapper[5050]: I1123 16:22:54.039083 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-7d69779b5c-bjxtz" 
event={"ID":"03fc802d-a360-46cf-8f71-153e1f2d60f6","Type":"ContainerStarted","Data":"927cf5353ba076ede0a6997f224ac17a45ea9fc3bdeb2e5176ea0c9eb4531bf3"} Nov 23 16:22:54 crc kubenswrapper[5050]: I1123 16:22:54.058826 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-engine-5cb68c5c69-fjmhq" podStartSLOduration=2.058805339 podStartE2EDuration="2.058805339s" podCreationTimestamp="2025-11-23 16:22:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 16:22:54.053437018 +0000 UTC m=+6069.220433503" watchObservedRunningTime="2025-11-23 16:22:54.058805339 +0000 UTC m=+6069.225801824" Nov 23 16:22:56 crc kubenswrapper[5050]: I1123 16:22:56.068781 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-65d549d5cc-6rmg6" event={"ID":"55e60cb2-77a0-4ee5-bef3-a3760eb098a7","Type":"ContainerStarted","Data":"cbb3e30c084349effc9335793fa3a7c88ab5fffc59597f51598aac976d47d64e"} Nov 23 16:22:56 crc kubenswrapper[5050]: I1123 16:22:56.069669 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-api-65d549d5cc-6rmg6" Nov 23 16:22:56 crc kubenswrapper[5050]: I1123 16:22:56.077365 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-7d69779b5c-bjxtz" event={"ID":"03fc802d-a360-46cf-8f71-153e1f2d60f6","Type":"ContainerStarted","Data":"3481526b6d49973aad4f32f1e45f18f3c3176214cad24d8cb7d7e5a0a3a27cd5"} Nov 23 16:22:56 crc kubenswrapper[5050]: I1123 16:22:56.078144 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-cfnapi-7d69779b5c-bjxtz" Nov 23 16:22:56 crc kubenswrapper[5050]: I1123 16:22:56.106479 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-api-65d549d5cc-6rmg6" podStartSLOduration=2.574094514 podStartE2EDuration="4.106428004s" podCreationTimestamp="2025-11-23 16:22:52 +0000 UTC" firstStartedPulling="2025-11-23 16:22:53.579382007 +0000 UTC m=+6068.746378502" lastFinishedPulling="2025-11-23 16:22:55.111715507 +0000 UTC m=+6070.278711992" observedRunningTime="2025-11-23 16:22:56.0988429 +0000 UTC m=+6071.265839385" watchObservedRunningTime="2025-11-23 16:22:56.106428004 +0000 UTC m=+6071.273424489" Nov 23 16:22:56 crc kubenswrapper[5050]: I1123 16:22:56.135059 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-cfnapi-7d69779b5c-bjxtz" podStartSLOduration=2.71614225 podStartE2EDuration="4.13502023s" podCreationTimestamp="2025-11-23 16:22:52 +0000 UTC" firstStartedPulling="2025-11-23 16:22:53.699307259 +0000 UTC m=+6068.866303754" lastFinishedPulling="2025-11-23 16:22:55.118185249 +0000 UTC m=+6070.285181734" observedRunningTime="2025-11-23 16:22:56.128813885 +0000 UTC m=+6071.295810370" watchObservedRunningTime="2025-11-23 16:22:56.13502023 +0000 UTC m=+6071.302016715" Nov 23 16:22:59 crc kubenswrapper[5050]: I1123 16:22:59.224080 5050 patch_prober.go:28] interesting pod/machine-config-daemon-hlrlq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 23 16:22:59 crc kubenswrapper[5050]: I1123 16:22:59.224987 5050 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" 
containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 23 16:23:01 crc kubenswrapper[5050]: I1123 16:23:01.130784 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/horizon-58696f4c7c-gzq55" Nov 23 16:23:02 crc kubenswrapper[5050]: I1123 16:23:02.826984 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/horizon-58696f4c7c-gzq55" Nov 23 16:23:02 crc kubenswrapper[5050]: I1123 16:23:02.916624 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-bf4fdc45f-c4g79"] Nov 23 16:23:02 crc kubenswrapper[5050]: I1123 16:23:02.917975 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-bf4fdc45f-c4g79" podUID="de915556-22d5-4ea1-a24e-b872c1fdbdae" containerName="horizon-log" containerID="cri-o://3fb9d120d923a2f08457f57e1b459fa07470dbb98aab5e7227cebe4e0a6b5e14" gracePeriod=30 Nov 23 16:23:02 crc kubenswrapper[5050]: I1123 16:23:02.918197 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-bf4fdc45f-c4g79" podUID="de915556-22d5-4ea1-a24e-b872c1fdbdae" containerName="horizon" containerID="cri-o://9608d064b7f70771a47550972552fb2c792e46fb586df55e5f3770c7cdd92919" gracePeriod=30 Nov 23 16:23:04 crc kubenswrapper[5050]: I1123 16:23:04.301548 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-api-65d549d5cc-6rmg6" Nov 23 16:23:04 crc kubenswrapper[5050]: I1123 16:23:04.484871 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-cfnapi-7d69779b5c-bjxtz" Nov 23 16:23:06 crc kubenswrapper[5050]: I1123 16:23:06.230039 5050 generic.go:334] "Generic (PLEG): container finished" podID="de915556-22d5-4ea1-a24e-b872c1fdbdae" containerID="9608d064b7f70771a47550972552fb2c792e46fb586df55e5f3770c7cdd92919" exitCode=0 Nov 23 16:23:06 crc kubenswrapper[5050]: I1123 16:23:06.230138 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-bf4fdc45f-c4g79" event={"ID":"de915556-22d5-4ea1-a24e-b872c1fdbdae","Type":"ContainerDied","Data":"9608d064b7f70771a47550972552fb2c792e46fb586df55e5f3770c7cdd92919"} Nov 23 16:23:06 crc kubenswrapper[5050]: I1123 16:23:06.897602 5050 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-bf4fdc45f-c4g79" podUID="de915556-22d5-4ea1-a24e-b872c1fdbdae" containerName="horizon" probeResult="failure" output="Get \"http://10.217.1.114:8080/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.1.114:8080: connect: connection refused" Nov 23 16:23:12 crc kubenswrapper[5050]: I1123 16:23:12.623594 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-engine-5cb68c5c69-fjmhq" Nov 23 16:23:16 crc kubenswrapper[5050]: I1123 16:23:16.596618 5050 scope.go:117] "RemoveContainer" containerID="7c8fc80449ca0c19e8661ad6179aa994e213e0322e6fd99d9e8f23105750256b" Nov 23 16:23:16 crc kubenswrapper[5050]: I1123 16:23:16.652253 5050 scope.go:117] "RemoveContainer" containerID="43c8751ce43e53af7dc46687f352a575aa9b444ef0876b79bdf802d51b7ea0d1" Nov 23 16:23:16 crc kubenswrapper[5050]: I1123 16:23:16.725245 5050 scope.go:117] "RemoveContainer" containerID="c8104aa956841f96d01e75b04873b116aeed3e3eed953c633101027759d9fa54" Nov 23 16:23:16 crc kubenswrapper[5050]: I1123 16:23:16.896947 5050 prober.go:107] "Probe failed" probeType="Readiness" 
pod="openstack/horizon-bf4fdc45f-c4g79" podUID="de915556-22d5-4ea1-a24e-b872c1fdbdae" containerName="horizon" probeResult="failure" output="Get \"http://10.217.1.114:8080/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.1.114:8080: connect: connection refused" Nov 23 16:23:22 crc kubenswrapper[5050]: I1123 16:23:22.021632 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210cn9f5"] Nov 23 16:23:22 crc kubenswrapper[5050]: I1123 16:23:22.025941 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210cn9f5" Nov 23 16:23:22 crc kubenswrapper[5050]: I1123 16:23:22.033106 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Nov 23 16:23:22 crc kubenswrapper[5050]: I1123 16:23:22.070157 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210cn9f5"] Nov 23 16:23:22 crc kubenswrapper[5050]: I1123 16:23:22.142805 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/a0d69f72-272a-4871-80ff-09f031c8019d-util\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210cn9f5\" (UID: \"a0d69f72-272a-4871-80ff-09f031c8019d\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210cn9f5" Nov 23 16:23:22 crc kubenswrapper[5050]: I1123 16:23:22.142876 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gm4mr\" (UniqueName: \"kubernetes.io/projected/a0d69f72-272a-4871-80ff-09f031c8019d-kube-api-access-gm4mr\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210cn9f5\" (UID: \"a0d69f72-272a-4871-80ff-09f031c8019d\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210cn9f5" Nov 23 16:23:22 crc kubenswrapper[5050]: I1123 16:23:22.143178 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/a0d69f72-272a-4871-80ff-09f031c8019d-bundle\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210cn9f5\" (UID: \"a0d69f72-272a-4871-80ff-09f031c8019d\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210cn9f5" Nov 23 16:23:22 crc kubenswrapper[5050]: I1123 16:23:22.245696 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/a0d69f72-272a-4871-80ff-09f031c8019d-util\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210cn9f5\" (UID: \"a0d69f72-272a-4871-80ff-09f031c8019d\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210cn9f5" Nov 23 16:23:22 crc kubenswrapper[5050]: I1123 16:23:22.245773 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gm4mr\" (UniqueName: \"kubernetes.io/projected/a0d69f72-272a-4871-80ff-09f031c8019d-kube-api-access-gm4mr\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210cn9f5\" (UID: \"a0d69f72-272a-4871-80ff-09f031c8019d\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210cn9f5" Nov 23 16:23:22 crc kubenswrapper[5050]: I1123 16:23:22.245892 5050 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/a0d69f72-272a-4871-80ff-09f031c8019d-bundle\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210cn9f5\" (UID: \"a0d69f72-272a-4871-80ff-09f031c8019d\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210cn9f5" Nov 23 16:23:22 crc kubenswrapper[5050]: I1123 16:23:22.246300 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/a0d69f72-272a-4871-80ff-09f031c8019d-util\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210cn9f5\" (UID: \"a0d69f72-272a-4871-80ff-09f031c8019d\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210cn9f5" Nov 23 16:23:22 crc kubenswrapper[5050]: I1123 16:23:22.246556 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/a0d69f72-272a-4871-80ff-09f031c8019d-bundle\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210cn9f5\" (UID: \"a0d69f72-272a-4871-80ff-09f031c8019d\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210cn9f5" Nov 23 16:23:22 crc kubenswrapper[5050]: I1123 16:23:22.289608 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gm4mr\" (UniqueName: \"kubernetes.io/projected/a0d69f72-272a-4871-80ff-09f031c8019d-kube-api-access-gm4mr\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210cn9f5\" (UID: \"a0d69f72-272a-4871-80ff-09f031c8019d\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210cn9f5" Nov 23 16:23:22 crc kubenswrapper[5050]: I1123 16:23:22.403022 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210cn9f5" Nov 23 16:23:23 crc kubenswrapper[5050]: I1123 16:23:23.037758 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210cn9f5"] Nov 23 16:23:23 crc kubenswrapper[5050]: W1123 16:23:23.054408 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda0d69f72_272a_4871_80ff_09f031c8019d.slice/crio-fa5cfba4c0c0d0d82ed2b855b824b9c433517410e216129d9a440580becb342b WatchSource:0}: Error finding container fa5cfba4c0c0d0d82ed2b855b824b9c433517410e216129d9a440580becb342b: Status 404 returned error can't find the container with id fa5cfba4c0c0d0d82ed2b855b824b9c433517410e216129d9a440580becb342b Nov 23 16:23:23 crc kubenswrapper[5050]: I1123 16:23:23.504920 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210cn9f5" event={"ID":"a0d69f72-272a-4871-80ff-09f031c8019d","Type":"ContainerStarted","Data":"e07b86aed96b791b510f69d733b82802e29af2cf2e576427f563217f0571b35e"} Nov 23 16:23:23 crc kubenswrapper[5050]: I1123 16:23:23.505690 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210cn9f5" event={"ID":"a0d69f72-272a-4871-80ff-09f031c8019d","Type":"ContainerStarted","Data":"fa5cfba4c0c0d0d82ed2b855b824b9c433517410e216129d9a440580becb342b"} Nov 23 16:23:24 crc kubenswrapper[5050]: I1123 16:23:24.521695 5050 generic.go:334] "Generic (PLEG): container finished" podID="a0d69f72-272a-4871-80ff-09f031c8019d" containerID="e07b86aed96b791b510f69d733b82802e29af2cf2e576427f563217f0571b35e" exitCode=0 Nov 23 16:23:24 crc kubenswrapper[5050]: I1123 16:23:24.521868 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210cn9f5" event={"ID":"a0d69f72-272a-4871-80ff-09f031c8019d","Type":"ContainerDied","Data":"e07b86aed96b791b510f69d733b82802e29af2cf2e576427f563217f0571b35e"} Nov 23 16:23:26 crc kubenswrapper[5050]: I1123 16:23:26.547129 5050 generic.go:334] "Generic (PLEG): container finished" podID="a0d69f72-272a-4871-80ff-09f031c8019d" containerID="cee59edaf43707a889931c6ee283e0211c34e601aa9ea705776e179bc5320288" exitCode=0 Nov 23 16:23:26 crc kubenswrapper[5050]: I1123 16:23:26.547178 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210cn9f5" event={"ID":"a0d69f72-272a-4871-80ff-09f031c8019d","Type":"ContainerDied","Data":"cee59edaf43707a889931c6ee283e0211c34e601aa9ea705776e179bc5320288"} Nov 23 16:23:26 crc kubenswrapper[5050]: I1123 16:23:26.897230 5050 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-bf4fdc45f-c4g79" podUID="de915556-22d5-4ea1-a24e-b872c1fdbdae" containerName="horizon" probeResult="failure" output="Get \"http://10.217.1.114:8080/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.1.114:8080: connect: connection refused" Nov 23 16:23:26 crc kubenswrapper[5050]: I1123 16:23:26.897419 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-bf4fdc45f-c4g79" Nov 23 16:23:27 crc kubenswrapper[5050]: I1123 16:23:27.566578 5050 generic.go:334] "Generic (PLEG): container finished" podID="a0d69f72-272a-4871-80ff-09f031c8019d" 
containerID="76190e5d9217365d2e910d77cc64090c8b8b80a707e5575c459d8590f73bd5bf" exitCode=0 Nov 23 16:23:27 crc kubenswrapper[5050]: I1123 16:23:27.569128 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210cn9f5" event={"ID":"a0d69f72-272a-4871-80ff-09f031c8019d","Type":"ContainerDied","Data":"76190e5d9217365d2e910d77cc64090c8b8b80a707e5575c459d8590f73bd5bf"} Nov 23 16:23:29 crc kubenswrapper[5050]: I1123 16:23:29.105131 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210cn9f5" Nov 23 16:23:29 crc kubenswrapper[5050]: I1123 16:23:29.224794 5050 patch_prober.go:28] interesting pod/machine-config-daemon-hlrlq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 23 16:23:29 crc kubenswrapper[5050]: I1123 16:23:29.224863 5050 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 23 16:23:29 crc kubenswrapper[5050]: I1123 16:23:29.224915 5050 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" Nov 23 16:23:29 crc kubenswrapper[5050]: I1123 16:23:29.225902 5050 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"6c3e7a6f5a62c9666075227e68f8208f00db4be7096dab6e83261908dedc75a8"} pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 23 16:23:29 crc kubenswrapper[5050]: I1123 16:23:29.225995 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" containerID="cri-o://6c3e7a6f5a62c9666075227e68f8208f00db4be7096dab6e83261908dedc75a8" gracePeriod=600 Nov 23 16:23:29 crc kubenswrapper[5050]: I1123 16:23:29.251467 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gm4mr\" (UniqueName: \"kubernetes.io/projected/a0d69f72-272a-4871-80ff-09f031c8019d-kube-api-access-gm4mr\") pod \"a0d69f72-272a-4871-80ff-09f031c8019d\" (UID: \"a0d69f72-272a-4871-80ff-09f031c8019d\") " Nov 23 16:23:29 crc kubenswrapper[5050]: I1123 16:23:29.251666 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/a0d69f72-272a-4871-80ff-09f031c8019d-bundle\") pod \"a0d69f72-272a-4871-80ff-09f031c8019d\" (UID: \"a0d69f72-272a-4871-80ff-09f031c8019d\") " Nov 23 16:23:29 crc kubenswrapper[5050]: I1123 16:23:29.251823 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/a0d69f72-272a-4871-80ff-09f031c8019d-util\") pod \"a0d69f72-272a-4871-80ff-09f031c8019d\" (UID: \"a0d69f72-272a-4871-80ff-09f031c8019d\") " Nov 23 16:23:29 crc kubenswrapper[5050]: I1123 
16:23:29.254208 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a0d69f72-272a-4871-80ff-09f031c8019d-bundle" (OuterVolumeSpecName: "bundle") pod "a0d69f72-272a-4871-80ff-09f031c8019d" (UID: "a0d69f72-272a-4871-80ff-09f031c8019d"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 16:23:29 crc kubenswrapper[5050]: I1123 16:23:29.262948 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a0d69f72-272a-4871-80ff-09f031c8019d-util" (OuterVolumeSpecName: "util") pod "a0d69f72-272a-4871-80ff-09f031c8019d" (UID: "a0d69f72-272a-4871-80ff-09f031c8019d"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 16:23:29 crc kubenswrapper[5050]: I1123 16:23:29.265358 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0d69f72-272a-4871-80ff-09f031c8019d-kube-api-access-gm4mr" (OuterVolumeSpecName: "kube-api-access-gm4mr") pod "a0d69f72-272a-4871-80ff-09f031c8019d" (UID: "a0d69f72-272a-4871-80ff-09f031c8019d"). InnerVolumeSpecName "kube-api-access-gm4mr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 16:23:29 crc kubenswrapper[5050]: I1123 16:23:29.355263 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gm4mr\" (UniqueName: \"kubernetes.io/projected/a0d69f72-272a-4871-80ff-09f031c8019d-kube-api-access-gm4mr\") on node \"crc\" DevicePath \"\"" Nov 23 16:23:29 crc kubenswrapper[5050]: I1123 16:23:29.355302 5050 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/a0d69f72-272a-4871-80ff-09f031c8019d-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 16:23:29 crc kubenswrapper[5050]: I1123 16:23:29.355316 5050 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/a0d69f72-272a-4871-80ff-09f031c8019d-util\") on node \"crc\" DevicePath \"\"" Nov 23 16:23:29 crc kubenswrapper[5050]: I1123 16:23:29.594375 5050 util.go:48] "No ready sandbox for pod can be found. 
Nov 23 16:23:29 crc kubenswrapper[5050]: I1123 16:23:29.594391 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210cn9f5" event={"ID":"a0d69f72-272a-4871-80ff-09f031c8019d","Type":"ContainerDied","Data":"fa5cfba4c0c0d0d82ed2b855b824b9c433517410e216129d9a440580becb342b"}
Nov 23 16:23:29 crc kubenswrapper[5050]: I1123 16:23:29.595490 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fa5cfba4c0c0d0d82ed2b855b824b9c433517410e216129d9a440580becb342b"
Nov 23 16:23:29 crc kubenswrapper[5050]: I1123 16:23:29.599107 5050 generic.go:334] "Generic (PLEG): container finished" podID="1d998909-9470-47ef-87e8-b34f0473682f" containerID="6c3e7a6f5a62c9666075227e68f8208f00db4be7096dab6e83261908dedc75a8" exitCode=0
Nov 23 16:23:29 crc kubenswrapper[5050]: I1123 16:23:29.599144 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" event={"ID":"1d998909-9470-47ef-87e8-b34f0473682f","Type":"ContainerDied","Data":"6c3e7a6f5a62c9666075227e68f8208f00db4be7096dab6e83261908dedc75a8"}
Nov 23 16:23:29 crc kubenswrapper[5050]: I1123 16:23:29.599167 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" event={"ID":"1d998909-9470-47ef-87e8-b34f0473682f","Type":"ContainerStarted","Data":"c66ef3881ddcc9912b6c270cf4a535647442815ea6b0dd1bc5eb1cfcbc78f486"}
Nov 23 16:23:29 crc kubenswrapper[5050]: I1123 16:23:29.599187 5050 scope.go:117] "RemoveContainer" containerID="131b4bb8956deb8c3bd7bd951e055fca3b66c01a0141c3f85cb0b1152065de00"
Nov 23 16:23:33 crc kubenswrapper[5050]: I1123 16:23:33.489538 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-bf4fdc45f-c4g79"
Nov 23 16:23:33 crc kubenswrapper[5050]: I1123 16:23:33.659655 5050 generic.go:334] "Generic (PLEG): container finished" podID="de915556-22d5-4ea1-a24e-b872c1fdbdae" containerID="3fb9d120d923a2f08457f57e1b459fa07470dbb98aab5e7227cebe4e0a6b5e14" exitCode=137
Nov 23 16:23:33 crc kubenswrapper[5050]: I1123 16:23:33.660419 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-bf4fdc45f-c4g79" event={"ID":"de915556-22d5-4ea1-a24e-b872c1fdbdae","Type":"ContainerDied","Data":"3fb9d120d923a2f08457f57e1b459fa07470dbb98aab5e7227cebe4e0a6b5e14"}
Nov 23 16:23:33 crc kubenswrapper[5050]: I1123 16:23:33.660665 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-bf4fdc45f-c4g79" event={"ID":"de915556-22d5-4ea1-a24e-b872c1fdbdae","Type":"ContainerDied","Data":"9e27fa55752d82d6a61c87526fc03615534b80de3a8c4b1bbb287f4f40f1f89a"}
Nov 23 16:23:33 crc kubenswrapper[5050]: I1123 16:23:33.660778 5050 scope.go:117] "RemoveContainer" containerID="9608d064b7f70771a47550972552fb2c792e46fb586df55e5f3770c7cdd92919"
Nov 23 16:23:33 crc kubenswrapper[5050]: I1123 16:23:33.660851 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-bf4fdc45f-c4g79"
Nov 23 16:23:33 crc kubenswrapper[5050]: I1123 16:23:33.686277 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tg24g\" (UniqueName: \"kubernetes.io/projected/de915556-22d5-4ea1-a24e-b872c1fdbdae-kube-api-access-tg24g\") pod \"de915556-22d5-4ea1-a24e-b872c1fdbdae\" (UID: \"de915556-22d5-4ea1-a24e-b872c1fdbdae\") "
Nov 23 16:23:33 crc kubenswrapper[5050]: I1123 16:23:33.686641 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/de915556-22d5-4ea1-a24e-b872c1fdbdae-config-data\") pod \"de915556-22d5-4ea1-a24e-b872c1fdbdae\" (UID: \"de915556-22d5-4ea1-a24e-b872c1fdbdae\") "
Nov 23 16:23:33 crc kubenswrapper[5050]: I1123 16:23:33.686898 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/de915556-22d5-4ea1-a24e-b872c1fdbdae-scripts\") pod \"de915556-22d5-4ea1-a24e-b872c1fdbdae\" (UID: \"de915556-22d5-4ea1-a24e-b872c1fdbdae\") "
Nov 23 16:23:33 crc kubenswrapper[5050]: I1123 16:23:33.686999 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/de915556-22d5-4ea1-a24e-b872c1fdbdae-logs\") pod \"de915556-22d5-4ea1-a24e-b872c1fdbdae\" (UID: \"de915556-22d5-4ea1-a24e-b872c1fdbdae\") "
Nov 23 16:23:33 crc kubenswrapper[5050]: I1123 16:23:33.687400 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/de915556-22d5-4ea1-a24e-b872c1fdbdae-horizon-secret-key\") pod \"de915556-22d5-4ea1-a24e-b872c1fdbdae\" (UID: \"de915556-22d5-4ea1-a24e-b872c1fdbdae\") "
Nov 23 16:23:33 crc kubenswrapper[5050]: I1123 16:23:33.687788 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/de915556-22d5-4ea1-a24e-b872c1fdbdae-logs" (OuterVolumeSpecName: "logs") pod "de915556-22d5-4ea1-a24e-b872c1fdbdae" (UID: "de915556-22d5-4ea1-a24e-b872c1fdbdae"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 23 16:23:33 crc kubenswrapper[5050]: I1123 16:23:33.690904 5050 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/de915556-22d5-4ea1-a24e-b872c1fdbdae-logs\") on node \"crc\" DevicePath \"\""
Nov 23 16:23:33 crc kubenswrapper[5050]: I1123 16:23:33.707089 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/de915556-22d5-4ea1-a24e-b872c1fdbdae-kube-api-access-tg24g" (OuterVolumeSpecName: "kube-api-access-tg24g") pod "de915556-22d5-4ea1-a24e-b872c1fdbdae" (UID: "de915556-22d5-4ea1-a24e-b872c1fdbdae"). InnerVolumeSpecName "kube-api-access-tg24g". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 23 16:23:33 crc kubenswrapper[5050]: I1123 16:23:33.723808 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/de915556-22d5-4ea1-a24e-b872c1fdbdae-scripts" (OuterVolumeSpecName: "scripts") pod "de915556-22d5-4ea1-a24e-b872c1fdbdae" (UID: "de915556-22d5-4ea1-a24e-b872c1fdbdae"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 23 16:23:33 crc kubenswrapper[5050]: I1123 16:23:33.724167 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/de915556-22d5-4ea1-a24e-b872c1fdbdae-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "de915556-22d5-4ea1-a24e-b872c1fdbdae" (UID: "de915556-22d5-4ea1-a24e-b872c1fdbdae"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 23 16:23:33 crc kubenswrapper[5050]: I1123 16:23:33.744810 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/de915556-22d5-4ea1-a24e-b872c1fdbdae-config-data" (OuterVolumeSpecName: "config-data") pod "de915556-22d5-4ea1-a24e-b872c1fdbdae" (UID: "de915556-22d5-4ea1-a24e-b872c1fdbdae"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 23 16:23:33 crc kubenswrapper[5050]: I1123 16:23:33.792894 5050 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/de915556-22d5-4ea1-a24e-b872c1fdbdae-config-data\") on node \"crc\" DevicePath \"\""
Nov 23 16:23:33 crc kubenswrapper[5050]: I1123 16:23:33.792933 5050 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/de915556-22d5-4ea1-a24e-b872c1fdbdae-scripts\") on node \"crc\" DevicePath \"\""
Nov 23 16:23:33 crc kubenswrapper[5050]: I1123 16:23:33.792942 5050 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/de915556-22d5-4ea1-a24e-b872c1fdbdae-horizon-secret-key\") on node \"crc\" DevicePath \"\""
Nov 23 16:23:33 crc kubenswrapper[5050]: I1123 16:23:33.792955 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tg24g\" (UniqueName: \"kubernetes.io/projected/de915556-22d5-4ea1-a24e-b872c1fdbdae-kube-api-access-tg24g\") on node \"crc\" DevicePath \"\""
Nov 23 16:23:33 crc kubenswrapper[5050]: I1123 16:23:33.852117 5050 scope.go:117] "RemoveContainer" containerID="3fb9d120d923a2f08457f57e1b459fa07470dbb98aab5e7227cebe4e0a6b5e14"
Nov 23 16:23:33 crc kubenswrapper[5050]: I1123 16:23:33.907773 5050 scope.go:117] "RemoveContainer" containerID="9608d064b7f70771a47550972552fb2c792e46fb586df55e5f3770c7cdd92919"
Nov 23 16:23:33 crc kubenswrapper[5050]: E1123 16:23:33.908866 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9608d064b7f70771a47550972552fb2c792e46fb586df55e5f3770c7cdd92919\": container with ID starting with 9608d064b7f70771a47550972552fb2c792e46fb586df55e5f3770c7cdd92919 not found: ID does not exist" containerID="9608d064b7f70771a47550972552fb2c792e46fb586df55e5f3770c7cdd92919"
Nov 23 16:23:33 crc kubenswrapper[5050]: I1123 16:23:33.908925 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9608d064b7f70771a47550972552fb2c792e46fb586df55e5f3770c7cdd92919"} err="failed to get container status \"9608d064b7f70771a47550972552fb2c792e46fb586df55e5f3770c7cdd92919\": rpc error: code = NotFound desc = could not find container \"9608d064b7f70771a47550972552fb2c792e46fb586df55e5f3770c7cdd92919\": container with ID starting with 9608d064b7f70771a47550972552fb2c792e46fb586df55e5f3770c7cdd92919 not found: ID does not exist"
Nov 23 16:23:33 crc kubenswrapper[5050]: I1123 16:23:33.908962 5050 scope.go:117] "RemoveContainer" containerID="3fb9d120d923a2f08457f57e1b459fa07470dbb98aab5e7227cebe4e0a6b5e14"
Nov 23 16:23:33 crc kubenswrapper[5050]: E1123 16:23:33.909349 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3fb9d120d923a2f08457f57e1b459fa07470dbb98aab5e7227cebe4e0a6b5e14\": container with ID starting with 3fb9d120d923a2f08457f57e1b459fa07470dbb98aab5e7227cebe4e0a6b5e14 not found: ID does not exist" containerID="3fb9d120d923a2f08457f57e1b459fa07470dbb98aab5e7227cebe4e0a6b5e14"
Nov 23 16:23:33 crc kubenswrapper[5050]: I1123 16:23:33.909398 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3fb9d120d923a2f08457f57e1b459fa07470dbb98aab5e7227cebe4e0a6b5e14"} err="failed to get container status \"3fb9d120d923a2f08457f57e1b459fa07470dbb98aab5e7227cebe4e0a6b5e14\": rpc error: code = NotFound desc = could not find container \"3fb9d120d923a2f08457f57e1b459fa07470dbb98aab5e7227cebe4e0a6b5e14\": container with ID starting with 3fb9d120d923a2f08457f57e1b459fa07470dbb98aab5e7227cebe4e0a6b5e14 not found: ID does not exist"
Nov 23 16:23:34 crc kubenswrapper[5050]: I1123 16:23:34.011993 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-bf4fdc45f-c4g79"]
Nov 23 16:23:34 crc kubenswrapper[5050]: I1123 16:23:34.021202 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-bf4fdc45f-c4g79"]
Nov 23 16:23:35 crc kubenswrapper[5050]: I1123 16:23:35.560488 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="de915556-22d5-4ea1-a24e-b872c1fdbdae" path="/var/lib/kubelet/pods/de915556-22d5-4ea1-a24e-b872c1fdbdae/volumes"
Nov 23 16:23:39 crc kubenswrapper[5050]: I1123 16:23:39.931601 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-668cf9dfbb-tchkm"]
Nov 23 16:23:39 crc kubenswrapper[5050]: E1123 16:23:39.932562 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="de915556-22d5-4ea1-a24e-b872c1fdbdae" containerName="horizon"
Nov 23 16:23:39 crc kubenswrapper[5050]: I1123 16:23:39.932577 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="de915556-22d5-4ea1-a24e-b872c1fdbdae" containerName="horizon"
Nov 23 16:23:39 crc kubenswrapper[5050]: E1123 16:23:39.932590 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="de915556-22d5-4ea1-a24e-b872c1fdbdae" containerName="horizon-log"
Nov 23 16:23:39 crc kubenswrapper[5050]: I1123 16:23:39.932596 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="de915556-22d5-4ea1-a24e-b872c1fdbdae" containerName="horizon-log"
Nov 23 16:23:39 crc kubenswrapper[5050]: E1123 16:23:39.932616 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a0d69f72-272a-4871-80ff-09f031c8019d" containerName="extract"
Nov 23 16:23:39 crc kubenswrapper[5050]: I1123 16:23:39.932622 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="a0d69f72-272a-4871-80ff-09f031c8019d" containerName="extract"
Nov 23 16:23:39 crc kubenswrapper[5050]: E1123 16:23:39.932639 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a0d69f72-272a-4871-80ff-09f031c8019d" containerName="util"
Nov 23 16:23:39 crc kubenswrapper[5050]: I1123 16:23:39.932647 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="a0d69f72-272a-4871-80ff-09f031c8019d" containerName="util"
Nov 23 16:23:39 crc kubenswrapper[5050]: E1123 16:23:39.932666 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a0d69f72-272a-4871-80ff-09f031c8019d" containerName="pull"
podUID="a0d69f72-272a-4871-80ff-09f031c8019d" containerName="pull" Nov 23 16:23:39 crc kubenswrapper[5050]: I1123 16:23:39.932671 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="a0d69f72-272a-4871-80ff-09f031c8019d" containerName="pull" Nov 23 16:23:39 crc kubenswrapper[5050]: I1123 16:23:39.932872 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="de915556-22d5-4ea1-a24e-b872c1fdbdae" containerName="horizon" Nov 23 16:23:39 crc kubenswrapper[5050]: I1123 16:23:39.932898 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="a0d69f72-272a-4871-80ff-09f031c8019d" containerName="extract" Nov 23 16:23:39 crc kubenswrapper[5050]: I1123 16:23:39.932908 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="de915556-22d5-4ea1-a24e-b872c1fdbdae" containerName="horizon-log" Nov 23 16:23:39 crc kubenswrapper[5050]: I1123 16:23:39.933616 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-tchkm" Nov 23 16:23:39 crc kubenswrapper[5050]: I1123 16:23:39.936780 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-dockercfg-44vxr" Nov 23 16:23:39 crc kubenswrapper[5050]: I1123 16:23:39.937040 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators"/"openshift-service-ca.crt" Nov 23 16:23:39 crc kubenswrapper[5050]: I1123 16:23:39.937249 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators"/"kube-root-ca.crt" Nov 23 16:23:39 crc kubenswrapper[5050]: I1123 16:23:39.961799 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-668cf9dfbb-tchkm"] Nov 23 16:23:40 crc kubenswrapper[5050]: I1123 16:23:40.072066 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dkb6g\" (UniqueName: \"kubernetes.io/projected/3832bef6-d18c-426a-85e0-3555819c7d47-kube-api-access-dkb6g\") pod \"obo-prometheus-operator-668cf9dfbb-tchkm\" (UID: \"3832bef6-d18c-426a-85e0-3555819c7d47\") " pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-tchkm" Nov 23 16:23:40 crc kubenswrapper[5050]: I1123 16:23:40.072863 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-c8fbbdfff-vvzzg"] Nov 23 16:23:40 crc kubenswrapper[5050]: I1123 16:23:40.074756 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-c8fbbdfff-vvzzg" Nov 23 16:23:40 crc kubenswrapper[5050]: I1123 16:23:40.078380 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-admission-webhook-dockercfg-n4bcn" Nov 23 16:23:40 crc kubenswrapper[5050]: I1123 16:23:40.088075 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-c8fbbdfff-vvzzg"] Nov 23 16:23:40 crc kubenswrapper[5050]: I1123 16:23:40.090247 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-admission-webhook-service-cert" Nov 23 16:23:40 crc kubenswrapper[5050]: I1123 16:23:40.108628 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-c8fbbdfff-9n7z7"] Nov 23 16:23:40 crc kubenswrapper[5050]: I1123 16:23:40.110696 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-c8fbbdfff-9n7z7" Nov 23 16:23:40 crc kubenswrapper[5050]: I1123 16:23:40.123017 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-c8fbbdfff-9n7z7"] Nov 23 16:23:40 crc kubenswrapper[5050]: I1123 16:23:40.174868 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/c9fb72b9-7a4b-4b97-8098-bdb59bc12f78-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-c8fbbdfff-vvzzg\" (UID: \"c9fb72b9-7a4b-4b97-8098-bdb59bc12f78\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-c8fbbdfff-vvzzg" Nov 23 16:23:40 crc kubenswrapper[5050]: I1123 16:23:40.174946 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/c9fb72b9-7a4b-4b97-8098-bdb59bc12f78-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-c8fbbdfff-vvzzg\" (UID: \"c9fb72b9-7a4b-4b97-8098-bdb59bc12f78\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-c8fbbdfff-vvzzg" Nov 23 16:23:40 crc kubenswrapper[5050]: I1123 16:23:40.175109 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dkb6g\" (UniqueName: \"kubernetes.io/projected/3832bef6-d18c-426a-85e0-3555819c7d47-kube-api-access-dkb6g\") pod \"obo-prometheus-operator-668cf9dfbb-tchkm\" (UID: \"3832bef6-d18c-426a-85e0-3555819c7d47\") " pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-tchkm" Nov 23 16:23:40 crc kubenswrapper[5050]: I1123 16:23:40.214356 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dkb6g\" (UniqueName: \"kubernetes.io/projected/3832bef6-d18c-426a-85e0-3555819c7d47-kube-api-access-dkb6g\") pod \"obo-prometheus-operator-668cf9dfbb-tchkm\" (UID: \"3832bef6-d18c-426a-85e0-3555819c7d47\") " pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-tchkm" Nov 23 16:23:40 crc kubenswrapper[5050]: I1123 16:23:40.265230 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-tchkm" Nov 23 16:23:40 crc kubenswrapper[5050]: I1123 16:23:40.277549 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/c9fb72b9-7a4b-4b97-8098-bdb59bc12f78-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-c8fbbdfff-vvzzg\" (UID: \"c9fb72b9-7a4b-4b97-8098-bdb59bc12f78\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-c8fbbdfff-vvzzg" Nov 23 16:23:40 crc kubenswrapper[5050]: I1123 16:23:40.277618 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/c1edd280-122b-4bc5-ae0e-fe9d8240d2e1-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-c8fbbdfff-9n7z7\" (UID: \"c1edd280-122b-4bc5-ae0e-fe9d8240d2e1\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-c8fbbdfff-9n7z7" Nov 23 16:23:40 crc kubenswrapper[5050]: I1123 16:23:40.277646 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/c9fb72b9-7a4b-4b97-8098-bdb59bc12f78-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-c8fbbdfff-vvzzg\" (UID: \"c9fb72b9-7a4b-4b97-8098-bdb59bc12f78\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-c8fbbdfff-vvzzg" Nov 23 16:23:40 crc kubenswrapper[5050]: I1123 16:23:40.277684 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/c1edd280-122b-4bc5-ae0e-fe9d8240d2e1-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-c8fbbdfff-9n7z7\" (UID: \"c1edd280-122b-4bc5-ae0e-fe9d8240d2e1\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-c8fbbdfff-9n7z7" Nov 23 16:23:40 crc kubenswrapper[5050]: I1123 16:23:40.280022 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/observability-operator-d8bb48f5d-f5xvk"] Nov 23 16:23:40 crc kubenswrapper[5050]: I1123 16:23:40.281659 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/observability-operator-d8bb48f5d-f5xvk" Nov 23 16:23:40 crc kubenswrapper[5050]: I1123 16:23:40.285166 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/c9fb72b9-7a4b-4b97-8098-bdb59bc12f78-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-c8fbbdfff-vvzzg\" (UID: \"c9fb72b9-7a4b-4b97-8098-bdb59bc12f78\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-c8fbbdfff-vvzzg" Nov 23 16:23:40 crc kubenswrapper[5050]: I1123 16:23:40.286937 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-operator-sa-dockercfg-zzwsm" Nov 23 16:23:40 crc kubenswrapper[5050]: I1123 16:23:40.287172 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-operator-tls" Nov 23 16:23:40 crc kubenswrapper[5050]: I1123 16:23:40.287661 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/c9fb72b9-7a4b-4b97-8098-bdb59bc12f78-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-c8fbbdfff-vvzzg\" (UID: \"c9fb72b9-7a4b-4b97-8098-bdb59bc12f78\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-c8fbbdfff-vvzzg" Nov 23 16:23:40 crc kubenswrapper[5050]: I1123 16:23:40.315682 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-operator-d8bb48f5d-f5xvk"] Nov 23 16:23:40 crc kubenswrapper[5050]: I1123 16:23:40.382164 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/c1edd280-122b-4bc5-ae0e-fe9d8240d2e1-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-c8fbbdfff-9n7z7\" (UID: \"c1edd280-122b-4bc5-ae0e-fe9d8240d2e1\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-c8fbbdfff-9n7z7" Nov 23 16:23:40 crc kubenswrapper[5050]: I1123 16:23:40.382749 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-78qgt\" (UniqueName: \"kubernetes.io/projected/0500f9f7-d32f-4925-a791-7ce5cddcad30-kube-api-access-78qgt\") pod \"observability-operator-d8bb48f5d-f5xvk\" (UID: \"0500f9f7-d32f-4925-a791-7ce5cddcad30\") " pod="openshift-operators/observability-operator-d8bb48f5d-f5xvk" Nov 23 16:23:40 crc kubenswrapper[5050]: I1123 16:23:40.383577 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/c1edd280-122b-4bc5-ae0e-fe9d8240d2e1-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-c8fbbdfff-9n7z7\" (UID: \"c1edd280-122b-4bc5-ae0e-fe9d8240d2e1\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-c8fbbdfff-9n7z7" Nov 23 16:23:40 crc kubenswrapper[5050]: I1123 16:23:40.383612 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/0500f9f7-d32f-4925-a791-7ce5cddcad30-observability-operator-tls\") pod \"observability-operator-d8bb48f5d-f5xvk\" (UID: \"0500f9f7-d32f-4925-a791-7ce5cddcad30\") " pod="openshift-operators/observability-operator-d8bb48f5d-f5xvk" Nov 23 16:23:40 crc kubenswrapper[5050]: I1123 16:23:40.392067 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: 
\"kubernetes.io/secret/c1edd280-122b-4bc5-ae0e-fe9d8240d2e1-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-c8fbbdfff-9n7z7\" (UID: \"c1edd280-122b-4bc5-ae0e-fe9d8240d2e1\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-c8fbbdfff-9n7z7" Nov 23 16:23:40 crc kubenswrapper[5050]: I1123 16:23:40.392506 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/c1edd280-122b-4bc5-ae0e-fe9d8240d2e1-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-c8fbbdfff-9n7z7\" (UID: \"c1edd280-122b-4bc5-ae0e-fe9d8240d2e1\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-c8fbbdfff-9n7z7" Nov 23 16:23:40 crc kubenswrapper[5050]: I1123 16:23:40.412964 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-c8fbbdfff-vvzzg" Nov 23 16:23:40 crc kubenswrapper[5050]: I1123 16:23:40.448217 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-c8fbbdfff-9n7z7" Nov 23 16:23:40 crc kubenswrapper[5050]: I1123 16:23:40.485482 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-78qgt\" (UniqueName: \"kubernetes.io/projected/0500f9f7-d32f-4925-a791-7ce5cddcad30-kube-api-access-78qgt\") pod \"observability-operator-d8bb48f5d-f5xvk\" (UID: \"0500f9f7-d32f-4925-a791-7ce5cddcad30\") " pod="openshift-operators/observability-operator-d8bb48f5d-f5xvk" Nov 23 16:23:40 crc kubenswrapper[5050]: I1123 16:23:40.485553 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/0500f9f7-d32f-4925-a791-7ce5cddcad30-observability-operator-tls\") pod \"observability-operator-d8bb48f5d-f5xvk\" (UID: \"0500f9f7-d32f-4925-a791-7ce5cddcad30\") " pod="openshift-operators/observability-operator-d8bb48f5d-f5xvk" Nov 23 16:23:40 crc kubenswrapper[5050]: I1123 16:23:40.495377 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/0500f9f7-d32f-4925-a791-7ce5cddcad30-observability-operator-tls\") pod \"observability-operator-d8bb48f5d-f5xvk\" (UID: \"0500f9f7-d32f-4925-a791-7ce5cddcad30\") " pod="openshift-operators/observability-operator-d8bb48f5d-f5xvk" Nov 23 16:23:40 crc kubenswrapper[5050]: I1123 16:23:40.521090 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-78qgt\" (UniqueName: \"kubernetes.io/projected/0500f9f7-d32f-4925-a791-7ce5cddcad30-kube-api-access-78qgt\") pod \"observability-operator-d8bb48f5d-f5xvk\" (UID: \"0500f9f7-d32f-4925-a791-7ce5cddcad30\") " pod="openshift-operators/observability-operator-d8bb48f5d-f5xvk" Nov 23 16:23:40 crc kubenswrapper[5050]: I1123 16:23:40.534473 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/perses-operator-5446b9c989-ffb9c"] Nov 23 16:23:40 crc kubenswrapper[5050]: I1123 16:23:40.536061 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5446b9c989-ffb9c" Nov 23 16:23:40 crc kubenswrapper[5050]: I1123 16:23:40.538409 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/observability-operator-d8bb48f5d-f5xvk" Nov 23 16:23:40 crc kubenswrapper[5050]: I1123 16:23:40.538655 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"perses-operator-dockercfg-5bt5w" Nov 23 16:23:40 crc kubenswrapper[5050]: I1123 16:23:40.550019 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/perses-operator-5446b9c989-ffb9c"] Nov 23 16:23:40 crc kubenswrapper[5050]: I1123 16:23:40.699238 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gsp8s\" (UniqueName: \"kubernetes.io/projected/52ab3442-8aa0-46c1-b7f6-09d59cf23afc-kube-api-access-gsp8s\") pod \"perses-operator-5446b9c989-ffb9c\" (UID: \"52ab3442-8aa0-46c1-b7f6-09d59cf23afc\") " pod="openshift-operators/perses-operator-5446b9c989-ffb9c" Nov 23 16:23:40 crc kubenswrapper[5050]: I1123 16:23:40.699787 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/52ab3442-8aa0-46c1-b7f6-09d59cf23afc-openshift-service-ca\") pod \"perses-operator-5446b9c989-ffb9c\" (UID: \"52ab3442-8aa0-46c1-b7f6-09d59cf23afc\") " pod="openshift-operators/perses-operator-5446b9c989-ffb9c" Nov 23 16:23:40 crc kubenswrapper[5050]: I1123 16:23:40.813589 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gsp8s\" (UniqueName: \"kubernetes.io/projected/52ab3442-8aa0-46c1-b7f6-09d59cf23afc-kube-api-access-gsp8s\") pod \"perses-operator-5446b9c989-ffb9c\" (UID: \"52ab3442-8aa0-46c1-b7f6-09d59cf23afc\") " pod="openshift-operators/perses-operator-5446b9c989-ffb9c" Nov 23 16:23:40 crc kubenswrapper[5050]: I1123 16:23:40.813779 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/52ab3442-8aa0-46c1-b7f6-09d59cf23afc-openshift-service-ca\") pod \"perses-operator-5446b9c989-ffb9c\" (UID: \"52ab3442-8aa0-46c1-b7f6-09d59cf23afc\") " pod="openshift-operators/perses-operator-5446b9c989-ffb9c" Nov 23 16:23:40 crc kubenswrapper[5050]: I1123 16:23:40.815185 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/52ab3442-8aa0-46c1-b7f6-09d59cf23afc-openshift-service-ca\") pod \"perses-operator-5446b9c989-ffb9c\" (UID: \"52ab3442-8aa0-46c1-b7f6-09d59cf23afc\") " pod="openshift-operators/perses-operator-5446b9c989-ffb9c" Nov 23 16:23:40 crc kubenswrapper[5050]: I1123 16:23:40.832723 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gsp8s\" (UniqueName: \"kubernetes.io/projected/52ab3442-8aa0-46c1-b7f6-09d59cf23afc-kube-api-access-gsp8s\") pod \"perses-operator-5446b9c989-ffb9c\" (UID: \"52ab3442-8aa0-46c1-b7f6-09d59cf23afc\") " pod="openshift-operators/perses-operator-5446b9c989-ffb9c" Nov 23 16:23:40 crc kubenswrapper[5050]: I1123 16:23:40.905293 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/perses-operator-5446b9c989-ffb9c" Nov 23 16:23:41 crc kubenswrapper[5050]: I1123 16:23:41.027069 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-668cf9dfbb-tchkm"] Nov 23 16:23:41 crc kubenswrapper[5050]: I1123 16:23:41.141965 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-c8fbbdfff-vvzzg"] Nov 23 16:23:41 crc kubenswrapper[5050]: W1123 16:23:41.146649 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc9fb72b9_7a4b_4b97_8098_bdb59bc12f78.slice/crio-5bde6ac18d4800bfb7261b025200b0d3607d5f83636fe9c65cfba52c60e53eb5 WatchSource:0}: Error finding container 5bde6ac18d4800bfb7261b025200b0d3607d5f83636fe9c65cfba52c60e53eb5: Status 404 returned error can't find the container with id 5bde6ac18d4800bfb7261b025200b0d3607d5f83636fe9c65cfba52c60e53eb5 Nov 23 16:23:41 crc kubenswrapper[5050]: I1123 16:23:41.150818 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-c8fbbdfff-9n7z7"] Nov 23 16:23:41 crc kubenswrapper[5050]: I1123 16:23:41.298913 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-operator-d8bb48f5d-f5xvk"] Nov 23 16:23:41 crc kubenswrapper[5050]: I1123 16:23:41.809465 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-c8fbbdfff-9n7z7" event={"ID":"c1edd280-122b-4bc5-ae0e-fe9d8240d2e1","Type":"ContainerStarted","Data":"1c956c4b6f7cc447b5939d88fac40b33cf76b118dfa96cb5f787d7c0b61468de"} Nov 23 16:23:41 crc kubenswrapper[5050]: I1123 16:23:41.821585 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-operator-d8bb48f5d-f5xvk" event={"ID":"0500f9f7-d32f-4925-a791-7ce5cddcad30","Type":"ContainerStarted","Data":"f4196221609dd8b2ca5ef09162d5d23c272de9499f5de556c1f5e8e2627de12c"} Nov 23 16:23:41 crc kubenswrapper[5050]: I1123 16:23:41.858505 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-tchkm" event={"ID":"3832bef6-d18c-426a-85e0-3555819c7d47","Type":"ContainerStarted","Data":"2637623bdfa528dffa2d71f31aef9cdaebf883a10b60dca01d1716f52fd32ae2"} Nov 23 16:23:41 crc kubenswrapper[5050]: I1123 16:23:41.859652 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-c8fbbdfff-vvzzg" event={"ID":"c9fb72b9-7a4b-4b97-8098-bdb59bc12f78","Type":"ContainerStarted","Data":"5bde6ac18d4800bfb7261b025200b0d3607d5f83636fe9c65cfba52c60e53eb5"} Nov 23 16:23:42 crc kubenswrapper[5050]: I1123 16:23:42.234110 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/perses-operator-5446b9c989-ffb9c"] Nov 23 16:23:42 crc kubenswrapper[5050]: W1123 16:23:42.256984 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod52ab3442_8aa0_46c1_b7f6_09d59cf23afc.slice/crio-380e36f93e946e3a0cb151e0f92d015f6389b05671ff20678e9dec75061a0ba6 WatchSource:0}: Error finding container 380e36f93e946e3a0cb151e0f92d015f6389b05671ff20678e9dec75061a0ba6: Status 404 returned error can't find the container with id 380e36f93e946e3a0cb151e0f92d015f6389b05671ff20678e9dec75061a0ba6 Nov 23 16:23:42 crc kubenswrapper[5050]: I1123 
Nov 23 16:23:51 crc kubenswrapper[5050]: I1123 16:23:51.056477 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-2b01-account-create-wk29k"]
Nov 23 16:23:51 crc kubenswrapper[5050]: I1123 16:23:51.066591 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-2b01-account-create-wk29k"]
Nov 23 16:23:51 crc kubenswrapper[5050]: I1123 16:23:51.618840 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c91600e9-e5ac-4426-bcbf-42cc78069c6a" path="/var/lib/kubelet/pods/c91600e9-e5ac-4426-bcbf-42cc78069c6a/volumes"
Nov 23 16:23:52 crc kubenswrapper[5050]: I1123 16:23:52.042019 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-db-create-p2fgp"]
Nov 23 16:23:52 crc kubenswrapper[5050]: I1123 16:23:52.069834 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-db-create-rmlzd"]
Nov 23 16:23:52 crc kubenswrapper[5050]: I1123 16:23:52.078645 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-db-create-p2fgp"]
Nov 23 16:23:52 crc kubenswrapper[5050]: I1123 16:23:52.086687 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-db-create-rmlzd"]
Nov 23 16:23:52 crc kubenswrapper[5050]: I1123 16:23:52.093281 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-9cac-account-create-m4bh5"]
Nov 23 16:23:52 crc kubenswrapper[5050]: I1123 16:23:52.101226 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-9cac-account-create-m4bh5"]
Nov 23 16:23:53 crc kubenswrapper[5050]: I1123 16:23:53.037964 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-6645-account-create-4zgnc"]
Nov 23 16:23:53 crc kubenswrapper[5050]: I1123 16:23:53.048508 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-6645-account-create-4zgnc"]
Nov 23 16:23:53 crc kubenswrapper[5050]: I1123 16:23:53.059433 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-db-create-gl7j8"]
Nov 23 16:23:53 crc kubenswrapper[5050]: I1123 16:23:53.070800 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-db-create-gl7j8"]
Nov 23 16:23:53 crc kubenswrapper[5050]: I1123 16:23:53.072797 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-operator-d8bb48f5d-f5xvk" event={"ID":"0500f9f7-d32f-4925-a791-7ce5cddcad30","Type":"ContainerStarted","Data":"2b46ac2d6168d6caef2cc62cc8fc8a7875dc11ab6381885248f0ed52f01494d7"}
Nov 23 16:23:53 crc kubenswrapper[5050]: I1123 16:23:53.073030 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operators/observability-operator-d8bb48f5d-f5xvk"
Nov 23 16:23:53 crc kubenswrapper[5050]: I1123 16:23:53.075608 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-tchkm" event={"ID":"3832bef6-d18c-426a-85e0-3555819c7d47","Type":"ContainerStarted","Data":"88328c6aad9e5c84872d187b3c0befbce45fb9c7ca0170b78fe4f62139a1b705"}
Nov 23 16:23:53 crc kubenswrapper[5050]: I1123 16:23:53.075847 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operators/observability-operator-d8bb48f5d-f5xvk"
Nov 23 16:23:53 crc kubenswrapper[5050]: I1123 16:23:53.077886 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-c8fbbdfff-vvzzg" event={"ID":"c9fb72b9-7a4b-4b97-8098-bdb59bc12f78","Type":"ContainerStarted","Data":"3444dbd2e9ae7e5cdd9277db315954b1240b9e798c2557092c1395d9fe459e99"}
Nov 23 16:23:53 crc kubenswrapper[5050]: I1123 16:23:53.080683 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/perses-operator-5446b9c989-ffb9c" event={"ID":"52ab3442-8aa0-46c1-b7f6-09d59cf23afc","Type":"ContainerStarted","Data":"e60c0b207ae87faec57f1a590c2c2e799a101d9b2acc9d9ee7f415098a1b9704"}
Nov 23 16:23:53 crc kubenswrapper[5050]: I1123 16:23:53.080790 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operators/perses-operator-5446b9c989-ffb9c"
Nov 23 16:23:53 crc kubenswrapper[5050]: I1123 16:23:53.082785 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-c8fbbdfff-9n7z7" event={"ID":"c1edd280-122b-4bc5-ae0e-fe9d8240d2e1","Type":"ContainerStarted","Data":"55dbe1580143c62ecda878f92e3f86bc1699d37fa9ca0f7f358e3860fcb5f40c"}
Nov 23 16:23:53 crc kubenswrapper[5050]: I1123 16:23:53.109743 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/observability-operator-d8bb48f5d-f5xvk" podStartSLOduration=2.566293851 podStartE2EDuration="13.109712585s" podCreationTimestamp="2025-11-23 16:23:40 +0000 UTC" firstStartedPulling="2025-11-23 16:23:41.321127589 +0000 UTC m=+6116.488124064" lastFinishedPulling="2025-11-23 16:23:51.864546313 +0000 UTC m=+6127.031542798" observedRunningTime="2025-11-23 16:23:53.095242597 +0000 UTC m=+6128.262239082" watchObservedRunningTime="2025-11-23 16:23:53.109712585 +0000 UTC m=+6128.276709070"
Nov 23 16:23:53 crc kubenswrapper[5050]: I1123 16:23:53.120094 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-admission-webhook-c8fbbdfff-vvzzg" podStartSLOduration=2.40497965 podStartE2EDuration="13.120070367s" podCreationTimestamp="2025-11-23 16:23:40 +0000 UTC" firstStartedPulling="2025-11-23 16:23:41.149372484 +0000 UTC m=+6116.316368969" lastFinishedPulling="2025-11-23 16:23:51.864463201 +0000 UTC m=+6127.031459686" observedRunningTime="2025-11-23 16:23:53.119037068 +0000 UTC m=+6128.286033553" watchObservedRunningTime="2025-11-23 16:23:53.120070367 +0000 UTC m=+6128.287066852"
Nov 23 16:23:53 crc kubenswrapper[5050]: I1123 16:23:53.145718 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-tchkm" podStartSLOduration=3.355550376 podStartE2EDuration="14.14569217s" podCreationTimestamp="2025-11-23 16:23:39 +0000 UTC" firstStartedPulling="2025-11-23 16:23:41.034789452 +0000 UTC m=+6116.201785937" lastFinishedPulling="2025-11-23 16:23:51.824931246 +0000 UTC m=+6126.991927731" observedRunningTime="2025-11-23 16:23:53.135966375 +0000 UTC m=+6128.302962860" watchObservedRunningTime="2025-11-23 16:23:53.14569217 +0000 UTC m=+6128.312688655"
Nov 23 16:23:53 crc kubenswrapper[5050]: I1123 16:23:53.198798 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/perses-operator-5446b9c989-ffb9c" podStartSLOduration=3.632583436 podStartE2EDuration="13.198768937s" podCreationTimestamp="2025-11-23 16:23:40 +0000 UTC" firstStartedPulling="2025-11-23 16:23:42.259545388 +0000 UTC m=+6117.426541873" lastFinishedPulling="2025-11-23 16:23:51.825730889 +0000 UTC m=+6126.992727374" observedRunningTime="2025-11-23 16:23:53.183218148 +0000 UTC m=+6128.350214643" watchObservedRunningTime="2025-11-23 16:23:53.198768937 +0000 UTC m=+6128.365765422"
Nov 23 16:23:53 crc kubenswrapper[5050]: I1123 16:23:53.277810 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-admission-webhook-c8fbbdfff-9n7z7" podStartSLOduration=2.628059242 podStartE2EDuration="13.277793776s" podCreationTimestamp="2025-11-23 16:23:40 +0000 UTC" firstStartedPulling="2025-11-23 16:23:41.165569921 +0000 UTC m=+6116.332566406" lastFinishedPulling="2025-11-23 16:23:51.815304455 +0000 UTC m=+6126.982300940" observedRunningTime="2025-11-23 16:23:53.275922103 +0000 UTC m=+6128.442918588" watchObservedRunningTime="2025-11-23 16:23:53.277793776 +0000 UTC m=+6128.444790261"
Nov 23 16:23:53 crc kubenswrapper[5050]: I1123 16:23:53.566031 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="18ab378a-a758-455c-9350-8a610f30c66d" path="/var/lib/kubelet/pods/18ab378a-a758-455c-9350-8a610f30c66d/volumes"
Nov 23 16:23:53 crc kubenswrapper[5050]: I1123 16:23:53.566657 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="47528bce-fa2a-4f34-bdb3-cd44a57be5ef" path="/var/lib/kubelet/pods/47528bce-fa2a-4f34-bdb3-cd44a57be5ef/volumes"
Nov 23 16:23:53 crc kubenswrapper[5050]: I1123 16:23:53.567207 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b75904b-a477-4659-9bc3-0f9771466735" path="/var/lib/kubelet/pods/5b75904b-a477-4659-9bc3-0f9771466735/volumes"
Nov 23 16:23:53 crc kubenswrapper[5050]: I1123 16:23:53.569272 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d3f67377-c66e-4af9-b42f-185208ef4cb6" path="/var/lib/kubelet/pods/d3f67377-c66e-4af9-b42f-185208ef4cb6/volumes"
Nov 23 16:23:53 crc kubenswrapper[5050]: I1123 16:23:53.569835 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dff083ef-35fc-44a9-92f2-e884a936cc27" path="/var/lib/kubelet/pods/dff083ef-35fc-44a9-92f2-e884a936cc27/volumes"
Nov 23 16:24:00 crc kubenswrapper[5050]: I1123 16:24:00.910566 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operators/perses-operator-5446b9c989-ffb9c"
Nov 23 16:24:03 crc kubenswrapper[5050]: I1123 16:24:03.632997 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstackclient"]
Nov 23 16:24:03 crc kubenswrapper[5050]: I1123 16:24:03.633851 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/openstackclient" podUID="43f64101-e0e6-46aa-befc-e06a3d420fb0" containerName="openstackclient" containerID="cri-o://eaa940447e814c1c972a68b877469c7877979b581be817e1b9484fcd434c987a" gracePeriod=2
Nov 23 16:24:03 crc kubenswrapper[5050]: I1123 16:24:03.640013 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/openstackclient"]
Nov 23 16:24:03 crc kubenswrapper[5050]: I1123 16:24:03.693342 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"]
Nov 23 16:24:03 crc kubenswrapper[5050]: E1123 16:24:03.694395 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="43f64101-e0e6-46aa-befc-e06a3d420fb0" containerName="openstackclient"
Nov 23 16:24:03 crc kubenswrapper[5050]: I1123 16:24:03.694417 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="43f64101-e0e6-46aa-befc-e06a3d420fb0" containerName="openstackclient"
"Deleted CPUSet assignment" podUID="43f64101-e0e6-46aa-befc-e06a3d420fb0" containerName="openstackclient" Nov 23 16:24:03 crc kubenswrapper[5050]: I1123 16:24:03.694754 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="43f64101-e0e6-46aa-befc-e06a3d420fb0" containerName="openstackclient" Nov 23 16:24:03 crc kubenswrapper[5050]: I1123 16:24:03.695611 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Nov 23 16:24:03 crc kubenswrapper[5050]: I1123 16:24:03.708757 5050 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="43f64101-e0e6-46aa-befc-e06a3d420fb0" podUID="1d6f0041-ac18-43f2-906a-e11585430d00" Nov 23 16:24:03 crc kubenswrapper[5050]: I1123 16:24:03.780511 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Nov 23 16:24:03 crc kubenswrapper[5050]: I1123 16:24:03.783871 5050 status_manager.go:875] "Failed to update status for pod" pod="openstack/openstackclient" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1d6f0041-ac18-43f2-906a-e11585430d00\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T16:24:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T16:24:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T16:24:03Z\\\",\\\"message\\\":\\\"containers with unready status: [openstackclient]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-23T16:24:03Z\\\",\\\"message\\\":\\\"containers with unready status: [openstackclient]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/podified-antelope-centos9/openstack-openstackclient:current-podified\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"openstackclient\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/home/cloud-admin/.config/openstack/clouds.yaml\\\",\\\"name\\\":\\\"openstack-config\\\"},{\\\"mountPath\\\":\\\"/home/cloud-admin/.config/openstack/secure.yaml\\\",\\\"name\\\":\\\"openstack-config-secret\\\"},{\\\"mountPath\\\":\\\"/home/cloud-admin/cloudrc\\\",\\\"name\\\":\\\"openstack-config-secret\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m7czt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-23T16:24:03Z\\\"}}\" for pod \"openstack\"/\"openstackclient\": pods \"openstackclient\" not found" Nov 23 16:24:03 crc kubenswrapper[5050]: I1123 16:24:03.803059 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstackclient"] Nov 23 16:24:03 crc 
kubenswrapper[5050]: E1123 16:24:03.804033 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[kube-api-access-m7czt openstack-config openstack-config-secret], unattached volumes=[], failed to process volumes=[]: context canceled" pod="openstack/openstackclient" podUID="1d6f0041-ac18-43f2-906a-e11585430d00" Nov 23 16:24:03 crc kubenswrapper[5050]: I1123 16:24:03.822498 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/openstackclient"] Nov 23 16:24:03 crc kubenswrapper[5050]: I1123 16:24:03.831085 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Nov 23 16:24:03 crc kubenswrapper[5050]: I1123 16:24:03.838804 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Nov 23 16:24:03 crc kubenswrapper[5050]: I1123 16:24:03.846914 5050 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="1d6f0041-ac18-43f2-906a-e11585430d00" podUID="dd8bde11-d7b2-4bb6-beb2-89f01031730f" Nov 23 16:24:03 crc kubenswrapper[5050]: I1123 16:24:03.858676 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/1d6f0041-ac18-43f2-906a-e11585430d00-openstack-config-secret\") pod \"openstackclient\" (UID: \"1d6f0041-ac18-43f2-906a-e11585430d00\") " pod="openstack/openstackclient" Nov 23 16:24:03 crc kubenswrapper[5050]: I1123 16:24:03.858818 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m7czt\" (UniqueName: \"kubernetes.io/projected/1d6f0041-ac18-43f2-906a-e11585430d00-kube-api-access-m7czt\") pod \"openstackclient\" (UID: \"1d6f0041-ac18-43f2-906a-e11585430d00\") " pod="openstack/openstackclient" Nov 23 16:24:03 crc kubenswrapper[5050]: I1123 16:24:03.858852 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/1d6f0041-ac18-43f2-906a-e11585430d00-openstack-config\") pod \"openstackclient\" (UID: \"1d6f0041-ac18-43f2-906a-e11585430d00\") " pod="openstack/openstackclient" Nov 23 16:24:03 crc kubenswrapper[5050]: I1123 16:24:03.964170 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m7czt\" (UniqueName: \"kubernetes.io/projected/1d6f0041-ac18-43f2-906a-e11585430d00-kube-api-access-m7czt\") pod \"openstackclient\" (UID: \"1d6f0041-ac18-43f2-906a-e11585430d00\") " pod="openstack/openstackclient" Nov 23 16:24:03 crc kubenswrapper[5050]: I1123 16:24:03.964220 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/1d6f0041-ac18-43f2-906a-e11585430d00-openstack-config\") pod \"openstackclient\" (UID: \"1d6f0041-ac18-43f2-906a-e11585430d00\") " pod="openstack/openstackclient" Nov 23 16:24:03 crc kubenswrapper[5050]: I1123 16:24:03.964290 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/1d6f0041-ac18-43f2-906a-e11585430d00-openstack-config-secret\") pod \"openstackclient\" (UID: \"1d6f0041-ac18-43f2-906a-e11585430d00\") " pod="openstack/openstackclient" Nov 23 16:24:03 crc kubenswrapper[5050]: I1123 16:24:03.964346 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"openstack-config\" (UniqueName: \"kubernetes.io/configmap/dd8bde11-d7b2-4bb6-beb2-89f01031730f-openstack-config\") pod \"openstackclient\" (UID: \"dd8bde11-d7b2-4bb6-beb2-89f01031730f\") " pod="openstack/openstackclient" Nov 23 16:24:03 crc kubenswrapper[5050]: I1123 16:24:03.964367 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-km7hc\" (UniqueName: \"kubernetes.io/projected/dd8bde11-d7b2-4bb6-beb2-89f01031730f-kube-api-access-km7hc\") pod \"openstackclient\" (UID: \"dd8bde11-d7b2-4bb6-beb2-89f01031730f\") " pod="openstack/openstackclient" Nov 23 16:24:03 crc kubenswrapper[5050]: I1123 16:24:03.964387 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/dd8bde11-d7b2-4bb6-beb2-89f01031730f-openstack-config-secret\") pod \"openstackclient\" (UID: \"dd8bde11-d7b2-4bb6-beb2-89f01031730f\") " pod="openstack/openstackclient" Nov 23 16:24:03 crc kubenswrapper[5050]: E1123 16:24:03.971835 5050 projected.go:194] Error preparing data for projected volume kube-api-access-m7czt for pod openstack/openstackclient: failed to fetch token: serviceaccounts "openstackclient-openstackclient" is forbidden: the UID in the bound object reference (1d6f0041-ac18-43f2-906a-e11585430d00) does not match the UID in record. The object might have been deleted and then recreated Nov 23 16:24:03 crc kubenswrapper[5050]: E1123 16:24:03.972124 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/1d6f0041-ac18-43f2-906a-e11585430d00-kube-api-access-m7czt podName:1d6f0041-ac18-43f2-906a-e11585430d00 nodeName:}" failed. No retries permitted until 2025-11-23 16:24:04.472104317 +0000 UTC m=+6139.639100802 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-m7czt" (UniqueName: "kubernetes.io/projected/1d6f0041-ac18-43f2-906a-e11585430d00-kube-api-access-m7czt") pod "openstackclient" (UID: "1d6f0041-ac18-43f2-906a-e11585430d00") : failed to fetch token: serviceaccounts "openstackclient-openstackclient" is forbidden: the UID in the bound object reference (1d6f0041-ac18-43f2-906a-e11585430d00) does not match the UID in record. The object might have been deleted and then recreated Nov 23 16:24:03 crc kubenswrapper[5050]: I1123 16:24:03.978552 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Nov 23 16:24:03 crc kubenswrapper[5050]: I1123 16:24:03.981902 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/1d6f0041-ac18-43f2-906a-e11585430d00-openstack-config\") pod \"openstackclient\" (UID: \"1d6f0041-ac18-43f2-906a-e11585430d00\") " pod="openstack/openstackclient" Nov 23 16:24:04 crc kubenswrapper[5050]: I1123 16:24:04.030028 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/1d6f0041-ac18-43f2-906a-e11585430d00-openstack-config-secret\") pod \"openstackclient\" (UID: \"1d6f0041-ac18-43f2-906a-e11585430d00\") " pod="openstack/openstackclient" Nov 23 16:24:04 crc kubenswrapper[5050]: I1123 16:24:04.036089 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Nov 23 16:24:04 crc kubenswrapper[5050]: I1123 16:24:04.037864 5050 util.go:30] "No sandbox for pod can be found. 
Nov 23 16:24:04 crc kubenswrapper[5050]: I1123 16:24:04.051456 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"]
Nov 23 16:24:04 crc kubenswrapper[5050]: I1123 16:24:04.065139 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-ceilometer-dockercfg-k4fr6"
Nov 23 16:24:04 crc kubenswrapper[5050]: I1123 16:24:04.066159 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/dd8bde11-d7b2-4bb6-beb2-89f01031730f-openstack-config\") pod \"openstackclient\" (UID: \"dd8bde11-d7b2-4bb6-beb2-89f01031730f\") " pod="openstack/openstackclient"
Nov 23 16:24:04 crc kubenswrapper[5050]: I1123 16:24:04.066211 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-km7hc\" (UniqueName: \"kubernetes.io/projected/dd8bde11-d7b2-4bb6-beb2-89f01031730f-kube-api-access-km7hc\") pod \"openstackclient\" (UID: \"dd8bde11-d7b2-4bb6-beb2-89f01031730f\") " pod="openstack/openstackclient"
Nov 23 16:24:04 crc kubenswrapper[5050]: I1123 16:24:04.066236 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/dd8bde11-d7b2-4bb6-beb2-89f01031730f-openstack-config-secret\") pod \"openstackclient\" (UID: \"dd8bde11-d7b2-4bb6-beb2-89f01031730f\") " pod="openstack/openstackclient"
Nov 23 16:24:04 crc kubenswrapper[5050]: I1123 16:24:04.067849 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/dd8bde11-d7b2-4bb6-beb2-89f01031730f-openstack-config\") pod \"openstackclient\" (UID: \"dd8bde11-d7b2-4bb6-beb2-89f01031730f\") " pod="openstack/openstackclient"
Nov 23 16:24:04 crc kubenswrapper[5050]: I1123 16:24:04.098573 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/dd8bde11-d7b2-4bb6-beb2-89f01031730f-openstack-config-secret\") pod \"openstackclient\" (UID: \"dd8bde11-d7b2-4bb6-beb2-89f01031730f\") " pod="openstack/openstackclient"
Nov 23 16:24:04 crc kubenswrapper[5050]: I1123 16:24:04.138062 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-km7hc\" (UniqueName: \"kubernetes.io/projected/dd8bde11-d7b2-4bb6-beb2-89f01031730f-kube-api-access-km7hc\") pod \"openstackclient\" (UID: \"dd8bde11-d7b2-4bb6-beb2-89f01031730f\") " pod="openstack/openstackclient"
Nov 23 16:24:04 crc kubenswrapper[5050]: I1123 16:24:04.189194 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5f8nl\" (UniqueName: \"kubernetes.io/projected/c0c46824-94a1-4a97-b6a1-c32179c011de-kube-api-access-5f8nl\") pod \"kube-state-metrics-0\" (UID: \"c0c46824-94a1-4a97-b6a1-c32179c011de\") " pod="openstack/kube-state-metrics-0"
Nov 23 16:24:04 crc kubenswrapper[5050]: I1123 16:24:04.192692 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient"
Nov 23 16:24:04 crc kubenswrapper[5050]: I1123 16:24:04.220102 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient"
Nov 23 16:24:04 crc kubenswrapper[5050]: I1123 16:24:04.277380 5050 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="1d6f0041-ac18-43f2-906a-e11585430d00" podUID="dd8bde11-d7b2-4bb6-beb2-89f01031730f"
Nov 23 16:24:04 crc kubenswrapper[5050]: I1123 16:24:04.293616 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5f8nl\" (UniqueName: \"kubernetes.io/projected/c0c46824-94a1-4a97-b6a1-c32179c011de-kube-api-access-5f8nl\") pod \"kube-state-metrics-0\" (UID: \"c0c46824-94a1-4a97-b6a1-c32179c011de\") " pod="openstack/kube-state-metrics-0"
Nov 23 16:24:04 crc kubenswrapper[5050]: I1123 16:24:04.346607 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5f8nl\" (UniqueName: \"kubernetes.io/projected/c0c46824-94a1-4a97-b6a1-c32179c011de-kube-api-access-5f8nl\") pod \"kube-state-metrics-0\" (UID: \"c0c46824-94a1-4a97-b6a1-c32179c011de\") " pod="openstack/kube-state-metrics-0"
Nov 23 16:24:04 crc kubenswrapper[5050]: I1123 16:24:04.366654 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient"
Nov 23 16:24:04 crc kubenswrapper[5050]: I1123 16:24:04.383856 5050 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="1d6f0041-ac18-43f2-906a-e11585430d00" podUID="dd8bde11-d7b2-4bb6-beb2-89f01031730f"
Nov 23 16:24:04 crc kubenswrapper[5050]: I1123 16:24:04.435120 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0"
Nov 23 16:24:04 crc kubenswrapper[5050]: I1123 16:24:04.505455 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/1d6f0041-ac18-43f2-906a-e11585430d00-openstack-config\") pod \"1d6f0041-ac18-43f2-906a-e11585430d00\" (UID: \"1d6f0041-ac18-43f2-906a-e11585430d00\") "
Nov 23 16:24:04 crc kubenswrapper[5050]: I1123 16:24:04.505637 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/1d6f0041-ac18-43f2-906a-e11585430d00-openstack-config-secret\") pod \"1d6f0041-ac18-43f2-906a-e11585430d00\" (UID: \"1d6f0041-ac18-43f2-906a-e11585430d00\") "
Nov 23 16:24:04 crc kubenswrapper[5050]: I1123 16:24:04.506105 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m7czt\" (UniqueName: \"kubernetes.io/projected/1d6f0041-ac18-43f2-906a-e11585430d00-kube-api-access-m7czt\") on node \"crc\" DevicePath \"\""
Nov 23 16:24:04 crc kubenswrapper[5050]: I1123 16:24:04.506599 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1d6f0041-ac18-43f2-906a-e11585430d00-openstack-config" (OuterVolumeSpecName: "openstack-config") pod "1d6f0041-ac18-43f2-906a-e11585430d00" (UID: "1d6f0041-ac18-43f2-906a-e11585430d00"). InnerVolumeSpecName "openstack-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 23 16:24:04 crc kubenswrapper[5050]: I1123 16:24:04.516021 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1d6f0041-ac18-43f2-906a-e11585430d00-openstack-config-secret" (OuterVolumeSpecName: "openstack-config-secret") pod "1d6f0041-ac18-43f2-906a-e11585430d00" (UID: "1d6f0041-ac18-43f2-906a-e11585430d00"). InnerVolumeSpecName "openstack-config-secret". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 23 16:24:04 crc kubenswrapper[5050]: I1123 16:24:04.614542 5050 reconciler_common.go:293] "Volume detached for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/1d6f0041-ac18-43f2-906a-e11585430d00-openstack-config\") on node \"crc\" DevicePath \"\""
Nov 23 16:24:04 crc kubenswrapper[5050]: I1123 16:24:04.614895 5050 reconciler_common.go:293] "Volume detached for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/1d6f0041-ac18-43f2-906a-e11585430d00-openstack-config-secret\") on node \"crc\" DevicePath \"\""
Nov 23 16:24:05 crc kubenswrapper[5050]: I1123 16:24:05.145204 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"]
Nov 23 16:24:05 crc kubenswrapper[5050]: I1123 16:24:05.158806 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/alertmanager-metric-storage-0"]
Nov 23 16:24:05 crc kubenswrapper[5050]: I1123 16:24:05.162597 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/alertmanager-metric-storage-0"
Nov 23 16:24:05 crc kubenswrapper[5050]: I1123 16:24:05.167829 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"alertmanager-metric-storage-generated"
Nov 23 16:24:05 crc kubenswrapper[5050]: I1123 16:24:05.167989 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"alertmanager-metric-storage-web-config"
Nov 23 16:24:05 crc kubenswrapper[5050]: I1123 16:24:05.168151 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"metric-storage-alertmanager-dockercfg-lv9sp"
Nov 23 16:24:05 crc kubenswrapper[5050]: I1123 16:24:05.168831 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"alertmanager-metric-storage-tls-assets-0"
Nov 23 16:24:05 crc kubenswrapper[5050]: I1123 16:24:05.169192 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"alertmanager-metric-storage-cluster-tls-config"
Nov 23 16:24:05 crc kubenswrapper[5050]: I1123 16:24:05.178591 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/alertmanager-metric-storage-0"]
Nov 23 16:24:05 crc kubenswrapper[5050]: W1123 16:24:05.233437 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddd8bde11_d7b2_4bb6_beb2_89f01031730f.slice/crio-56b48c62ce7305f9754d04d5ce44aaa96b05c20cb82594fd817020ebf51e7105 WatchSource:0}: Error finding container 56b48c62ce7305f9754d04d5ce44aaa96b05c20cb82594fd817020ebf51e7105: Status 404 returned error can't find the container with id 56b48c62ce7305f9754d04d5ce44aaa96b05c20cb82594fd817020ebf51e7105
Nov 23 16:24:05 crc kubenswrapper[5050]: I1123 16:24:05.235930 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Nov 23 16:24:05 crc kubenswrapper[5050]: I1123 16:24:05.257183 5050 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="1d6f0041-ac18-43f2-906a-e11585430d00" podUID="dd8bde11-d7b2-4bb6-beb2-89f01031730f" Nov 23 16:24:05 crc kubenswrapper[5050]: I1123 16:24:05.349033 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/secret/7181c88f-0ea1-4e1f-bda2-dd7cf4be825a-config-volume\") pod \"alertmanager-metric-storage-0\" (UID: \"7181c88f-0ea1-4e1f-bda2-dd7cf4be825a\") " pod="openstack/alertmanager-metric-storage-0" Nov 23 16:24:05 crc kubenswrapper[5050]: I1123 16:24:05.349078 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/7181c88f-0ea1-4e1f-bda2-dd7cf4be825a-config-out\") pod \"alertmanager-metric-storage-0\" (UID: \"7181c88f-0ea1-4e1f-bda2-dd7cf4be825a\") " pod="openstack/alertmanager-metric-storage-0" Nov 23 16:24:05 crc kubenswrapper[5050]: I1123 16:24:05.349156 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/7181c88f-0ea1-4e1f-bda2-dd7cf4be825a-web-config\") pod \"alertmanager-metric-storage-0\" (UID: \"7181c88f-0ea1-4e1f-bda2-dd7cf4be825a\") " pod="openstack/alertmanager-metric-storage-0" Nov 23 16:24:05 crc kubenswrapper[5050]: I1123 16:24:05.349190 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cluster-tls-config\" (UniqueName: \"kubernetes.io/secret/7181c88f-0ea1-4e1f-bda2-dd7cf4be825a-cluster-tls-config\") pod \"alertmanager-metric-storage-0\" (UID: \"7181c88f-0ea1-4e1f-bda2-dd7cf4be825a\") " pod="openstack/alertmanager-metric-storage-0" Nov 23 16:24:05 crc kubenswrapper[5050]: I1123 16:24:05.349218 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/7181c88f-0ea1-4e1f-bda2-dd7cf4be825a-tls-assets\") pod \"alertmanager-metric-storage-0\" (UID: \"7181c88f-0ea1-4e1f-bda2-dd7cf4be825a\") " pod="openstack/alertmanager-metric-storage-0" Nov 23 16:24:05 crc kubenswrapper[5050]: I1123 16:24:05.349288 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vktdb\" (UniqueName: \"kubernetes.io/projected/7181c88f-0ea1-4e1f-bda2-dd7cf4be825a-kube-api-access-vktdb\") pod \"alertmanager-metric-storage-0\" (UID: \"7181c88f-0ea1-4e1f-bda2-dd7cf4be825a\") " pod="openstack/alertmanager-metric-storage-0" Nov 23 16:24:05 crc kubenswrapper[5050]: I1123 16:24:05.349308 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"alertmanager-metric-storage-db\" (UniqueName: \"kubernetes.io/empty-dir/7181c88f-0ea1-4e1f-bda2-dd7cf4be825a-alertmanager-metric-storage-db\") pod \"alertmanager-metric-storage-0\" (UID: \"7181c88f-0ea1-4e1f-bda2-dd7cf4be825a\") " pod="openstack/alertmanager-metric-storage-0" Nov 23 16:24:05 crc kubenswrapper[5050]: I1123 16:24:05.358813 5050 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="1d6f0041-ac18-43f2-906a-e11585430d00" podUID="dd8bde11-d7b2-4bb6-beb2-89f01031730f" Nov 23 16:24:05 crc kubenswrapper[5050]: I1123 
16:24:05.452226 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vktdb\" (UniqueName: \"kubernetes.io/projected/7181c88f-0ea1-4e1f-bda2-dd7cf4be825a-kube-api-access-vktdb\") pod \"alertmanager-metric-storage-0\" (UID: \"7181c88f-0ea1-4e1f-bda2-dd7cf4be825a\") " pod="openstack/alertmanager-metric-storage-0" Nov 23 16:24:05 crc kubenswrapper[5050]: I1123 16:24:05.452288 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"alertmanager-metric-storage-db\" (UniqueName: \"kubernetes.io/empty-dir/7181c88f-0ea1-4e1f-bda2-dd7cf4be825a-alertmanager-metric-storage-db\") pod \"alertmanager-metric-storage-0\" (UID: \"7181c88f-0ea1-4e1f-bda2-dd7cf4be825a\") " pod="openstack/alertmanager-metric-storage-0" Nov 23 16:24:05 crc kubenswrapper[5050]: I1123 16:24:05.452350 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/secret/7181c88f-0ea1-4e1f-bda2-dd7cf4be825a-config-volume\") pod \"alertmanager-metric-storage-0\" (UID: \"7181c88f-0ea1-4e1f-bda2-dd7cf4be825a\") " pod="openstack/alertmanager-metric-storage-0" Nov 23 16:24:05 crc kubenswrapper[5050]: I1123 16:24:05.452389 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/7181c88f-0ea1-4e1f-bda2-dd7cf4be825a-config-out\") pod \"alertmanager-metric-storage-0\" (UID: \"7181c88f-0ea1-4e1f-bda2-dd7cf4be825a\") " pod="openstack/alertmanager-metric-storage-0" Nov 23 16:24:05 crc kubenswrapper[5050]: I1123 16:24:05.452483 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/7181c88f-0ea1-4e1f-bda2-dd7cf4be825a-web-config\") pod \"alertmanager-metric-storage-0\" (UID: \"7181c88f-0ea1-4e1f-bda2-dd7cf4be825a\") " pod="openstack/alertmanager-metric-storage-0" Nov 23 16:24:05 crc kubenswrapper[5050]: I1123 16:24:05.452522 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cluster-tls-config\" (UniqueName: \"kubernetes.io/secret/7181c88f-0ea1-4e1f-bda2-dd7cf4be825a-cluster-tls-config\") pod \"alertmanager-metric-storage-0\" (UID: \"7181c88f-0ea1-4e1f-bda2-dd7cf4be825a\") " pod="openstack/alertmanager-metric-storage-0" Nov 23 16:24:05 crc kubenswrapper[5050]: I1123 16:24:05.452565 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/7181c88f-0ea1-4e1f-bda2-dd7cf4be825a-tls-assets\") pod \"alertmanager-metric-storage-0\" (UID: \"7181c88f-0ea1-4e1f-bda2-dd7cf4be825a\") " pod="openstack/alertmanager-metric-storage-0" Nov 23 16:24:05 crc kubenswrapper[5050]: I1123 16:24:05.453905 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"alertmanager-metric-storage-db\" (UniqueName: \"kubernetes.io/empty-dir/7181c88f-0ea1-4e1f-bda2-dd7cf4be825a-alertmanager-metric-storage-db\") pod \"alertmanager-metric-storage-0\" (UID: \"7181c88f-0ea1-4e1f-bda2-dd7cf4be825a\") " pod="openstack/alertmanager-metric-storage-0" Nov 23 16:24:05 crc kubenswrapper[5050]: I1123 16:24:05.473194 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 23 16:24:05 crc kubenswrapper[5050]: I1123 16:24:05.487668 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/7181c88f-0ea1-4e1f-bda2-dd7cf4be825a-config-out\") pod 
\"alertmanager-metric-storage-0\" (UID: \"7181c88f-0ea1-4e1f-bda2-dd7cf4be825a\") " pod="openstack/alertmanager-metric-storage-0" Nov 23 16:24:05 crc kubenswrapper[5050]: I1123 16:24:05.499953 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/secret/7181c88f-0ea1-4e1f-bda2-dd7cf4be825a-config-volume\") pod \"alertmanager-metric-storage-0\" (UID: \"7181c88f-0ea1-4e1f-bda2-dd7cf4be825a\") " pod="openstack/alertmanager-metric-storage-0" Nov 23 16:24:05 crc kubenswrapper[5050]: I1123 16:24:05.501807 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cluster-tls-config\" (UniqueName: \"kubernetes.io/secret/7181c88f-0ea1-4e1f-bda2-dd7cf4be825a-cluster-tls-config\") pod \"alertmanager-metric-storage-0\" (UID: \"7181c88f-0ea1-4e1f-bda2-dd7cf4be825a\") " pod="openstack/alertmanager-metric-storage-0" Nov 23 16:24:05 crc kubenswrapper[5050]: I1123 16:24:05.503866 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/7181c88f-0ea1-4e1f-bda2-dd7cf4be825a-web-config\") pod \"alertmanager-metric-storage-0\" (UID: \"7181c88f-0ea1-4e1f-bda2-dd7cf4be825a\") " pod="openstack/alertmanager-metric-storage-0" Nov 23 16:24:05 crc kubenswrapper[5050]: I1123 16:24:05.504119 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vktdb\" (UniqueName: \"kubernetes.io/projected/7181c88f-0ea1-4e1f-bda2-dd7cf4be825a-kube-api-access-vktdb\") pod \"alertmanager-metric-storage-0\" (UID: \"7181c88f-0ea1-4e1f-bda2-dd7cf4be825a\") " pod="openstack/alertmanager-metric-storage-0" Nov 23 16:24:05 crc kubenswrapper[5050]: I1123 16:24:05.504709 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/7181c88f-0ea1-4e1f-bda2-dd7cf4be825a-tls-assets\") pod \"alertmanager-metric-storage-0\" (UID: \"7181c88f-0ea1-4e1f-bda2-dd7cf4be825a\") " pod="openstack/alertmanager-metric-storage-0" Nov 23 16:24:05 crc kubenswrapper[5050]: I1123 16:24:05.593129 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1d6f0041-ac18-43f2-906a-e11585430d00" path="/var/lib/kubelet/pods/1d6f0041-ac18-43f2-906a-e11585430d00/volumes" Nov 23 16:24:05 crc kubenswrapper[5050]: I1123 16:24:05.600705 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 23 16:24:05 crc kubenswrapper[5050]: I1123 16:24:05.603132 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/prometheus-metric-storage-0" Nov 23 16:24:05 crc kubenswrapper[5050]: I1123 16:24:05.606628 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-web-config" Nov 23 16:24:05 crc kubenswrapper[5050]: I1123 16:24:05.607024 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-tls-assets-0" Nov 23 16:24:05 crc kubenswrapper[5050]: I1123 16:24:05.607170 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"metric-storage-prometheus-dockercfg-2f6wm" Nov 23 16:24:05 crc kubenswrapper[5050]: I1123 16:24:05.607301 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-0" Nov 23 16:24:05 crc kubenswrapper[5050]: I1123 16:24:05.608761 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage" Nov 23 16:24:05 crc kubenswrapper[5050]: I1123 16:24:05.617627 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-thanos-prometheus-http-client-file" Nov 23 16:24:05 crc kubenswrapper[5050]: I1123 16:24:05.624557 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 23 16:24:05 crc kubenswrapper[5050]: I1123 16:24:05.665973 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/98375fca-7e96-48ce-acc5-3abd00f043a1-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"98375fca-7e96-48ce-acc5-3abd00f043a1\") " pod="openstack/prometheus-metric-storage-0" Nov 23 16:24:05 crc kubenswrapper[5050]: I1123 16:24:05.666211 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/98375fca-7e96-48ce-acc5-3abd00f043a1-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"98375fca-7e96-48ce-acc5-3abd00f043a1\") " pod="openstack/prometheus-metric-storage-0" Nov 23 16:24:05 crc kubenswrapper[5050]: I1123 16:24:05.666323 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w2rbk\" (UniqueName: \"kubernetes.io/projected/98375fca-7e96-48ce-acc5-3abd00f043a1-kube-api-access-w2rbk\") pod \"prometheus-metric-storage-0\" (UID: \"98375fca-7e96-48ce-acc5-3abd00f043a1\") " pod="openstack/prometheus-metric-storage-0" Nov 23 16:24:05 crc kubenswrapper[5050]: I1123 16:24:05.666994 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-ec9bf053-2f07-4f03-8f16-6a6f56d680f6\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-ec9bf053-2f07-4f03-8f16-6a6f56d680f6\") pod \"prometheus-metric-storage-0\" (UID: \"98375fca-7e96-48ce-acc5-3abd00f043a1\") " pod="openstack/prometheus-metric-storage-0" Nov 23 16:24:05 crc kubenswrapper[5050]: I1123 16:24:05.667517 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/98375fca-7e96-48ce-acc5-3abd00f043a1-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"98375fca-7e96-48ce-acc5-3abd00f043a1\") " pod="openstack/prometheus-metric-storage-0" Nov 23 16:24:05 crc kubenswrapper[5050]: I1123 16:24:05.667682 5050 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/98375fca-7e96-48ce-acc5-3abd00f043a1-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"98375fca-7e96-48ce-acc5-3abd00f043a1\") " pod="openstack/prometheus-metric-storage-0" Nov 23 16:24:05 crc kubenswrapper[5050]: I1123 16:24:05.667786 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/98375fca-7e96-48ce-acc5-3abd00f043a1-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"98375fca-7e96-48ce-acc5-3abd00f043a1\") " pod="openstack/prometheus-metric-storage-0" Nov 23 16:24:05 crc kubenswrapper[5050]: I1123 16:24:05.667859 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/98375fca-7e96-48ce-acc5-3abd00f043a1-config\") pod \"prometheus-metric-storage-0\" (UID: \"98375fca-7e96-48ce-acc5-3abd00f043a1\") " pod="openstack/prometheus-metric-storage-0" Nov 23 16:24:05 crc kubenswrapper[5050]: I1123 16:24:05.772373 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/98375fca-7e96-48ce-acc5-3abd00f043a1-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"98375fca-7e96-48ce-acc5-3abd00f043a1\") " pod="openstack/prometheus-metric-storage-0" Nov 23 16:24:05 crc kubenswrapper[5050]: I1123 16:24:05.772894 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/98375fca-7e96-48ce-acc5-3abd00f043a1-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"98375fca-7e96-48ce-acc5-3abd00f043a1\") " pod="openstack/prometheus-metric-storage-0" Nov 23 16:24:05 crc kubenswrapper[5050]: I1123 16:24:05.772937 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/98375fca-7e96-48ce-acc5-3abd00f043a1-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"98375fca-7e96-48ce-acc5-3abd00f043a1\") " pod="openstack/prometheus-metric-storage-0" Nov 23 16:24:05 crc kubenswrapper[5050]: I1123 16:24:05.772974 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/98375fca-7e96-48ce-acc5-3abd00f043a1-config\") pod \"prometheus-metric-storage-0\" (UID: \"98375fca-7e96-48ce-acc5-3abd00f043a1\") " pod="openstack/prometheus-metric-storage-0" Nov 23 16:24:05 crc kubenswrapper[5050]: I1123 16:24:05.773013 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/98375fca-7e96-48ce-acc5-3abd00f043a1-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"98375fca-7e96-48ce-acc5-3abd00f043a1\") " pod="openstack/prometheus-metric-storage-0" Nov 23 16:24:05 crc kubenswrapper[5050]: I1123 16:24:05.773044 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/98375fca-7e96-48ce-acc5-3abd00f043a1-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"98375fca-7e96-48ce-acc5-3abd00f043a1\") " pod="openstack/prometheus-metric-storage-0" Nov 23 16:24:05 crc kubenswrapper[5050]: I1123 
16:24:05.773088 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w2rbk\" (UniqueName: \"kubernetes.io/projected/98375fca-7e96-48ce-acc5-3abd00f043a1-kube-api-access-w2rbk\") pod \"prometheus-metric-storage-0\" (UID: \"98375fca-7e96-48ce-acc5-3abd00f043a1\") " pod="openstack/prometheus-metric-storage-0" Nov 23 16:24:05 crc kubenswrapper[5050]: I1123 16:24:05.773114 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-ec9bf053-2f07-4f03-8f16-6a6f56d680f6\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-ec9bf053-2f07-4f03-8f16-6a6f56d680f6\") pod \"prometheus-metric-storage-0\" (UID: \"98375fca-7e96-48ce-acc5-3abd00f043a1\") " pod="openstack/prometheus-metric-storage-0" Nov 23 16:24:05 crc kubenswrapper[5050]: I1123 16:24:05.777847 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/98375fca-7e96-48ce-acc5-3abd00f043a1-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"98375fca-7e96-48ce-acc5-3abd00f043a1\") " pod="openstack/prometheus-metric-storage-0" Nov 23 16:24:05 crc kubenswrapper[5050]: I1123 16:24:05.778830 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/98375fca-7e96-48ce-acc5-3abd00f043a1-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"98375fca-7e96-48ce-acc5-3abd00f043a1\") " pod="openstack/prometheus-metric-storage-0" Nov 23 16:24:05 crc kubenswrapper[5050]: I1123 16:24:05.779759 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/98375fca-7e96-48ce-acc5-3abd00f043a1-config\") pod \"prometheus-metric-storage-0\" (UID: \"98375fca-7e96-48ce-acc5-3abd00f043a1\") " pod="openstack/prometheus-metric-storage-0" Nov 23 16:24:05 crc kubenswrapper[5050]: I1123 16:24:05.780569 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/98375fca-7e96-48ce-acc5-3abd00f043a1-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"98375fca-7e96-48ce-acc5-3abd00f043a1\") " pod="openstack/prometheus-metric-storage-0" Nov 23 16:24:05 crc kubenswrapper[5050]: I1123 16:24:05.781398 5050 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Nov 23 16:24:05 crc kubenswrapper[5050]: I1123 16:24:05.781441 5050 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-ec9bf053-2f07-4f03-8f16-6a6f56d680f6\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-ec9bf053-2f07-4f03-8f16-6a6f56d680f6\") pod \"prometheus-metric-storage-0\" (UID: \"98375fca-7e96-48ce-acc5-3abd00f043a1\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/81e8531aa5bb07fad3c6422bc6bc53f5a047d2088a9ec17b221c1c6a7a82c615/globalmount\"" pod="openstack/prometheus-metric-storage-0" Nov 23 16:24:05 crc kubenswrapper[5050]: I1123 16:24:05.811177 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/98375fca-7e96-48ce-acc5-3abd00f043a1-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"98375fca-7e96-48ce-acc5-3abd00f043a1\") " pod="openstack/prometheus-metric-storage-0" Nov 23 16:24:05 crc kubenswrapper[5050]: I1123 16:24:05.811527 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/98375fca-7e96-48ce-acc5-3abd00f043a1-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"98375fca-7e96-48ce-acc5-3abd00f043a1\") " pod="openstack/prometheus-metric-storage-0" Nov 23 16:24:05 crc kubenswrapper[5050]: I1123 16:24:05.812066 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w2rbk\" (UniqueName: \"kubernetes.io/projected/98375fca-7e96-48ce-acc5-3abd00f043a1-kube-api-access-w2rbk\") pod \"prometheus-metric-storage-0\" (UID: \"98375fca-7e96-48ce-acc5-3abd00f043a1\") " pod="openstack/prometheus-metric-storage-0" Nov 23 16:24:05 crc kubenswrapper[5050]: I1123 16:24:05.823795 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/alertmanager-metric-storage-0" Nov 23 16:24:05 crc kubenswrapper[5050]: I1123 16:24:05.944469 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-ec9bf053-2f07-4f03-8f16-6a6f56d680f6\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-ec9bf053-2f07-4f03-8f16-6a6f56d680f6\") pod \"prometheus-metric-storage-0\" (UID: \"98375fca-7e96-48ce-acc5-3abd00f043a1\") " pod="openstack/prometheus-metric-storage-0" Nov 23 16:24:06 crc kubenswrapper[5050]: I1123 16:24:06.029289 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/prometheus-metric-storage-0" Nov 23 16:24:06 crc kubenswrapper[5050]: I1123 16:24:06.285576 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"c0c46824-94a1-4a97-b6a1-c32179c011de","Type":"ContainerStarted","Data":"2f730174ccaf6fb1767e89dfe97915feada2022e62f040513c1e7e930c89b076"} Nov 23 16:24:06 crc kubenswrapper[5050]: I1123 16:24:06.289250 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"dd8bde11-d7b2-4bb6-beb2-89f01031730f","Type":"ContainerStarted","Data":"d8756172085c6d57cf4a53ae488839d62573d2e9876d4c6f9c6822e2fa34cd18"} Nov 23 16:24:06 crc kubenswrapper[5050]: I1123 16:24:06.289281 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"dd8bde11-d7b2-4bb6-beb2-89f01031730f","Type":"ContainerStarted","Data":"56b48c62ce7305f9754d04d5ce44aaa96b05c20cb82594fd817020ebf51e7105"} Nov 23 16:24:06 crc kubenswrapper[5050]: I1123 16:24:06.293405 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Nov 23 16:24:06 crc kubenswrapper[5050]: I1123 16:24:06.297202 5050 generic.go:334] "Generic (PLEG): container finished" podID="43f64101-e0e6-46aa-befc-e06a3d420fb0" containerID="eaa940447e814c1c972a68b877469c7877979b581be817e1b9484fcd434c987a" exitCode=137 Nov 23 16:24:06 crc kubenswrapper[5050]: I1123 16:24:06.297253 5050 scope.go:117] "RemoveContainer" containerID="eaa940447e814c1c972a68b877469c7877979b581be817e1b9484fcd434c987a" Nov 23 16:24:06 crc kubenswrapper[5050]: I1123 16:24:06.309088 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstackclient" podStartSLOduration=3.309067582 podStartE2EDuration="3.309067582s" podCreationTimestamp="2025-11-23 16:24:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 16:24:06.304657348 +0000 UTC m=+6141.471653843" watchObservedRunningTime="2025-11-23 16:24:06.309067582 +0000 UTC m=+6141.476064067" Nov 23 16:24:06 crc kubenswrapper[5050]: I1123 16:24:06.310628 5050 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="43f64101-e0e6-46aa-befc-e06a3d420fb0" podUID="dd8bde11-d7b2-4bb6-beb2-89f01031730f" Nov 23 16:24:06 crc kubenswrapper[5050]: I1123 16:24:06.343778 5050 scope.go:117] "RemoveContainer" containerID="eaa940447e814c1c972a68b877469c7877979b581be817e1b9484fcd434c987a" Nov 23 16:24:06 crc kubenswrapper[5050]: E1123 16:24:06.345096 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"eaa940447e814c1c972a68b877469c7877979b581be817e1b9484fcd434c987a\": container with ID starting with eaa940447e814c1c972a68b877469c7877979b581be817e1b9484fcd434c987a not found: ID does not exist" containerID="eaa940447e814c1c972a68b877469c7877979b581be817e1b9484fcd434c987a" Nov 23 16:24:06 crc kubenswrapper[5050]: I1123 16:24:06.345136 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"eaa940447e814c1c972a68b877469c7877979b581be817e1b9484fcd434c987a"} err="failed to get container status \"eaa940447e814c1c972a68b877469c7877979b581be817e1b9484fcd434c987a\": rpc error: code = NotFound desc = could not find container \"eaa940447e814c1c972a68b877469c7877979b581be817e1b9484fcd434c987a\": container with 
ID starting with eaa940447e814c1c972a68b877469c7877979b581be817e1b9484fcd434c987a not found: ID does not exist" Nov 23 16:24:06 crc kubenswrapper[5050]: I1123 16:24:06.424423 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/43f64101-e0e6-46aa-befc-e06a3d420fb0-openstack-config-secret\") pod \"43f64101-e0e6-46aa-befc-e06a3d420fb0\" (UID: \"43f64101-e0e6-46aa-befc-e06a3d420fb0\") " Nov 23 16:24:06 crc kubenswrapper[5050]: I1123 16:24:06.424485 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v2c5l\" (UniqueName: \"kubernetes.io/projected/43f64101-e0e6-46aa-befc-e06a3d420fb0-kube-api-access-v2c5l\") pod \"43f64101-e0e6-46aa-befc-e06a3d420fb0\" (UID: \"43f64101-e0e6-46aa-befc-e06a3d420fb0\") " Nov 23 16:24:06 crc kubenswrapper[5050]: I1123 16:24:06.424757 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/43f64101-e0e6-46aa-befc-e06a3d420fb0-openstack-config\") pod \"43f64101-e0e6-46aa-befc-e06a3d420fb0\" (UID: \"43f64101-e0e6-46aa-befc-e06a3d420fb0\") " Nov 23 16:24:06 crc kubenswrapper[5050]: I1123 16:24:06.432366 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43f64101-e0e6-46aa-befc-e06a3d420fb0-kube-api-access-v2c5l" (OuterVolumeSpecName: "kube-api-access-v2c5l") pod "43f64101-e0e6-46aa-befc-e06a3d420fb0" (UID: "43f64101-e0e6-46aa-befc-e06a3d420fb0"). InnerVolumeSpecName "kube-api-access-v2c5l". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 16:24:06 crc kubenswrapper[5050]: I1123 16:24:06.459922 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43f64101-e0e6-46aa-befc-e06a3d420fb0-openstack-config" (OuterVolumeSpecName: "openstack-config") pod "43f64101-e0e6-46aa-befc-e06a3d420fb0" (UID: "43f64101-e0e6-46aa-befc-e06a3d420fb0"). InnerVolumeSpecName "openstack-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 16:24:06 crc kubenswrapper[5050]: I1123 16:24:06.520918 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43f64101-e0e6-46aa-befc-e06a3d420fb0-openstack-config-secret" (OuterVolumeSpecName: "openstack-config-secret") pod "43f64101-e0e6-46aa-befc-e06a3d420fb0" (UID: "43f64101-e0e6-46aa-befc-e06a3d420fb0"). InnerVolumeSpecName "openstack-config-secret". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:24:06 crc kubenswrapper[5050]: I1123 16:24:06.528524 5050 reconciler_common.go:293] "Volume detached for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/43f64101-e0e6-46aa-befc-e06a3d420fb0-openstack-config\") on node \"crc\" DevicePath \"\"" Nov 23 16:24:06 crc kubenswrapper[5050]: I1123 16:24:06.528568 5050 reconciler_common.go:293] "Volume detached for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/43f64101-e0e6-46aa-befc-e06a3d420fb0-openstack-config-secret\") on node \"crc\" DevicePath \"\"" Nov 23 16:24:06 crc kubenswrapper[5050]: I1123 16:24:06.528582 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v2c5l\" (UniqueName: \"kubernetes.io/projected/43f64101-e0e6-46aa-befc-e06a3d420fb0-kube-api-access-v2c5l\") on node \"crc\" DevicePath \"\"" Nov 23 16:24:06 crc kubenswrapper[5050]: I1123 16:24:06.624823 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/alertmanager-metric-storage-0"] Nov 23 16:24:06 crc kubenswrapper[5050]: I1123 16:24:06.781336 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 23 16:24:06 crc kubenswrapper[5050]: W1123 16:24:06.787655 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod98375fca_7e96_48ce_acc5_3abd00f043a1.slice/crio-dc54d180014ceb836b048a06d109e10a9675ee818315d008c299a05ff062cd59 WatchSource:0}: Error finding container dc54d180014ceb836b048a06d109e10a9675ee818315d008c299a05ff062cd59: Status 404 returned error can't find the container with id dc54d180014ceb836b048a06d109e10a9675ee818315d008c299a05ff062cd59 Nov 23 16:24:07 crc kubenswrapper[5050]: I1123 16:24:07.036168 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-9skqn"] Nov 23 16:24:07 crc kubenswrapper[5050]: I1123 16:24:07.053939 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-9skqn"] Nov 23 16:24:07 crc kubenswrapper[5050]: I1123 16:24:07.321243 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/alertmanager-metric-storage-0" event={"ID":"7181c88f-0ea1-4e1f-bda2-dd7cf4be825a","Type":"ContainerStarted","Data":"249dfd01d5435fe9abd167d8beca963d600a3e5481e4a00feaafdf1673d46300"} Nov 23 16:24:07 crc kubenswrapper[5050]: I1123 16:24:07.327831 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"98375fca-7e96-48ce-acc5-3abd00f043a1","Type":"ContainerStarted","Data":"dc54d180014ceb836b048a06d109e10a9675ee818315d008c299a05ff062cd59"} Nov 23 16:24:07 crc kubenswrapper[5050]: I1123 16:24:07.329673 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Nov 23 16:24:07 crc kubenswrapper[5050]: I1123 16:24:07.331131 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"c0c46824-94a1-4a97-b6a1-c32179c011de","Type":"ContainerStarted","Data":"ec914597bfdc17366d69c8cd1cfe31b63f860d0087460f6de3cdc45009d14a20"} Nov 23 16:24:07 crc kubenswrapper[5050]: I1123 16:24:07.339820 5050 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="43f64101-e0e6-46aa-befc-e06a3d420fb0" podUID="dd8bde11-d7b2-4bb6-beb2-89f01031730f" Nov 23 16:24:07 crc kubenswrapper[5050]: I1123 16:24:07.389296 5050 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="43f64101-e0e6-46aa-befc-e06a3d420fb0" podUID="dd8bde11-d7b2-4bb6-beb2-89f01031730f" Nov 23 16:24:07 crc kubenswrapper[5050]: I1123 16:24:07.395157 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=3.810437554 podStartE2EDuration="4.395137746s" podCreationTimestamp="2025-11-23 16:24:03 +0000 UTC" firstStartedPulling="2025-11-23 16:24:05.545616329 +0000 UTC m=+6140.712612814" lastFinishedPulling="2025-11-23 16:24:06.130316521 +0000 UTC m=+6141.297313006" observedRunningTime="2025-11-23 16:24:07.383848988 +0000 UTC m=+6142.550845473" watchObservedRunningTime="2025-11-23 16:24:07.395137746 +0000 UTC m=+6142.562134231" Nov 23 16:24:07 crc kubenswrapper[5050]: I1123 16:24:07.569855 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="10796220-fe20-4f66-b1bf-08bd71375d85" path="/var/lib/kubelet/pods/10796220-fe20-4f66-b1bf-08bd71375d85/volumes" Nov 23 16:24:07 crc kubenswrapper[5050]: I1123 16:24:07.571251 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="43f64101-e0e6-46aa-befc-e06a3d420fb0" path="/var/lib/kubelet/pods/43f64101-e0e6-46aa-befc-e06a3d420fb0/volumes" Nov 23 16:24:08 crc kubenswrapper[5050]: I1123 16:24:08.343371 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Nov 23 16:24:13 crc kubenswrapper[5050]: I1123 16:24:13.395919 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/alertmanager-metric-storage-0" event={"ID":"7181c88f-0ea1-4e1f-bda2-dd7cf4be825a","Type":"ContainerStarted","Data":"71bf0b859e7a7886a8655013889d23c1380bf181c1dafe84c0eb18ba5ba807b1"} Nov 23 16:24:13 crc kubenswrapper[5050]: I1123 16:24:13.397802 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"98375fca-7e96-48ce-acc5-3abd00f043a1","Type":"ContainerStarted","Data":"8ce6bb9325f499405e5630db20e066a8aee9832290106f6e3ebf61893ec4c7aa"} Nov 23 16:24:14 crc kubenswrapper[5050]: I1123 16:24:14.441913 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Nov 23 16:24:16 crc kubenswrapper[5050]: I1123 16:24:16.882658 5050 scope.go:117] "RemoveContainer" containerID="88fa9adc47e25e304dacc4150a0980a9e11406b7edb82da752f7bdde29111eb2" Nov 23 16:24:16 crc kubenswrapper[5050]: I1123 16:24:16.921681 5050 scope.go:117] "RemoveContainer" containerID="5bb900dd47b5d925af4de3b58077019a8802d7af78107df419ec6c3627896256" Nov 23 16:24:17 crc kubenswrapper[5050]: I1123 16:24:17.009548 5050 scope.go:117] "RemoveContainer" containerID="e34077fda1bb8f1b76d792a2c067477a67654fa7320df8f07c1309e670558ff0" Nov 
23 16:24:17 crc kubenswrapper[5050]: I1123 16:24:17.068403 5050 scope.go:117] "RemoveContainer" containerID="56572cfb16a7a993f68dcbedf5b4a4a8e376cbe720373063fcaab98ff2f182c0" Nov 23 16:24:17 crc kubenswrapper[5050]: I1123 16:24:17.112135 5050 scope.go:117] "RemoveContainer" containerID="08686e8ee77d5026ee88b2bd0fb7f63d84555c48d0740fc16832a37a5ddf94c7" Nov 23 16:24:17 crc kubenswrapper[5050]: I1123 16:24:17.179528 5050 scope.go:117] "RemoveContainer" containerID="063fc696506451a17f5c6024b739ce49418ba8b685a0a1e208cecba6b5c8ed02" Nov 23 16:24:17 crc kubenswrapper[5050]: I1123 16:24:17.211541 5050 scope.go:117] "RemoveContainer" containerID="8debc2c09a4a9d9beba47e046e4496565d9478c2cd6ef9a058b6c604b2132e22" Nov 23 16:24:20 crc kubenswrapper[5050]: I1123 16:24:20.035540 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-pznlh"] Nov 23 16:24:20 crc kubenswrapper[5050]: I1123 16:24:20.052028 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-pznlh"] Nov 23 16:24:21 crc kubenswrapper[5050]: I1123 16:24:21.546392 5050 generic.go:334] "Generic (PLEG): container finished" podID="98375fca-7e96-48ce-acc5-3abd00f043a1" containerID="8ce6bb9325f499405e5630db20e066a8aee9832290106f6e3ebf61893ec4c7aa" exitCode=0 Nov 23 16:24:21 crc kubenswrapper[5050]: I1123 16:24:21.546472 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"98375fca-7e96-48ce-acc5-3abd00f043a1","Type":"ContainerDied","Data":"8ce6bb9325f499405e5630db20e066a8aee9832290106f6e3ebf61893ec4c7aa"} Nov 23 16:24:21 crc kubenswrapper[5050]: I1123 16:24:21.554326 5050 generic.go:334] "Generic (PLEG): container finished" podID="7181c88f-0ea1-4e1f-bda2-dd7cf4be825a" containerID="71bf0b859e7a7886a8655013889d23c1380bf181c1dafe84c0eb18ba5ba807b1" exitCode=0 Nov 23 16:24:21 crc kubenswrapper[5050]: I1123 16:24:21.571660 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ccbcaa1b-3eea-47c1-8622-b881afcdaa7d" path="/var/lib/kubelet/pods/ccbcaa1b-3eea-47c1-8622-b881afcdaa7d/volumes" Nov 23 16:24:21 crc kubenswrapper[5050]: I1123 16:24:21.572921 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/alertmanager-metric-storage-0" event={"ID":"7181c88f-0ea1-4e1f-bda2-dd7cf4be825a","Type":"ContainerDied","Data":"71bf0b859e7a7886a8655013889d23c1380bf181c1dafe84c0eb18ba5ba807b1"} Nov 23 16:24:22 crc kubenswrapper[5050]: I1123 16:24:22.045843 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-cell-mapping-q5mm8"] Nov 23 16:24:22 crc kubenswrapper[5050]: I1123 16:24:22.059011 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-cell-mapping-q5mm8"] Nov 23 16:24:23 crc kubenswrapper[5050]: I1123 16:24:23.565099 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e17c8304-cb0e-4fc5-b97f-17d6b65a4e13" path="/var/lib/kubelet/pods/e17c8304-cb0e-4fc5-b97f-17d6b65a4e13/volumes" Nov 23 16:24:24 crc kubenswrapper[5050]: I1123 16:24:24.598238 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/alertmanager-metric-storage-0" event={"ID":"7181c88f-0ea1-4e1f-bda2-dd7cf4be825a","Type":"ContainerStarted","Data":"7ac1456f9ef2d64b182995a36cc97c9132b783c36cc3014cdcebe083830fb274"} Nov 23 16:24:29 crc kubenswrapper[5050]: I1123 16:24:29.679907 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/alertmanager-metric-storage-0" 
event={"ID":"7181c88f-0ea1-4e1f-bda2-dd7cf4be825a","Type":"ContainerStarted","Data":"07ce057f2d98451bf466cd67568944b870c33b8e83bbdb4f52bac7058c2077f4"} Nov 23 16:24:29 crc kubenswrapper[5050]: I1123 16:24:29.681101 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/alertmanager-metric-storage-0" Nov 23 16:24:29 crc kubenswrapper[5050]: I1123 16:24:29.683373 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/alertmanager-metric-storage-0" Nov 23 16:24:29 crc kubenswrapper[5050]: I1123 16:24:29.713041 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/alertmanager-metric-storage-0" podStartSLOduration=7.407023069 podStartE2EDuration="24.713017858s" podCreationTimestamp="2025-11-23 16:24:05 +0000 UTC" firstStartedPulling="2025-11-23 16:24:06.643246998 +0000 UTC m=+6141.810243483" lastFinishedPulling="2025-11-23 16:24:23.949241787 +0000 UTC m=+6159.116238272" observedRunningTime="2025-11-23 16:24:29.702089569 +0000 UTC m=+6164.869086054" watchObservedRunningTime="2025-11-23 16:24:29.713017858 +0000 UTC m=+6164.880014343" Nov 23 16:24:30 crc kubenswrapper[5050]: I1123 16:24:30.697065 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"98375fca-7e96-48ce-acc5-3abd00f043a1","Type":"ContainerStarted","Data":"c71ebcce2fbd7bf1bb66f69bb8ea23da0526ff9468178a48958d846cb96bd03a"} Nov 23 16:24:35 crc kubenswrapper[5050]: I1123 16:24:35.795630 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"98375fca-7e96-48ce-acc5-3abd00f043a1","Type":"ContainerStarted","Data":"a1823a2ae41a4d9dcb7ff1e3c4d1521a3895f93face60f9b8b73077ae8b1742a"} Nov 23 16:24:38 crc kubenswrapper[5050]: I1123 16:24:38.847529 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"98375fca-7e96-48ce-acc5-3abd00f043a1","Type":"ContainerStarted","Data":"216b7d3f7ce852c8a09b83a952b5be9cfd9d8371887c400de3275578df54ba9e"} Nov 23 16:24:38 crc kubenswrapper[5050]: I1123 16:24:38.887186 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/prometheus-metric-storage-0" podStartSLOduration=3.974031443 podStartE2EDuration="34.88715778s" podCreationTimestamp="2025-11-23 16:24:04 +0000 UTC" firstStartedPulling="2025-11-23 16:24:06.790977325 +0000 UTC m=+6141.957973810" lastFinishedPulling="2025-11-23 16:24:37.704103642 +0000 UTC m=+6172.871100147" observedRunningTime="2025-11-23 16:24:38.878085475 +0000 UTC m=+6174.045081970" watchObservedRunningTime="2025-11-23 16:24:38.88715778 +0000 UTC m=+6174.054154305" Nov 23 16:24:40 crc kubenswrapper[5050]: I1123 16:24:40.048991 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-cell-mapping-5m4kg"] Nov 23 16:24:40 crc kubenswrapper[5050]: I1123 16:24:40.057788 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-cell-mapping-5m4kg"] Nov 23 16:24:41 crc kubenswrapper[5050]: I1123 16:24:41.031023 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/prometheus-metric-storage-0" Nov 23 16:24:41 crc kubenswrapper[5050]: I1123 16:24:41.573577 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a73e0803-4e30-45bd-9006-d229e8108430" path="/var/lib/kubelet/pods/a73e0803-4e30-45bd-9006-d229e8108430/volumes" Nov 23 16:24:45 crc kubenswrapper[5050]: I1123 16:24:45.127829 5050 kubelet.go:2421] 
"SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 23 16:24:45 crc kubenswrapper[5050]: I1123 16:24:45.171303 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 23 16:24:45 crc kubenswrapper[5050]: I1123 16:24:45.171610 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 23 16:24:45 crc kubenswrapper[5050]: I1123 16:24:45.175210 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 23 16:24:45 crc kubenswrapper[5050]: I1123 16:24:45.175435 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 23 16:24:45 crc kubenswrapper[5050]: I1123 16:24:45.301167 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f65b863c-4b1b-4657-9afb-f8bfabff1637-run-httpd\") pod \"ceilometer-0\" (UID: \"f65b863c-4b1b-4657-9afb-f8bfabff1637\") " pod="openstack/ceilometer-0" Nov 23 16:24:45 crc kubenswrapper[5050]: I1123 16:24:45.301290 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f65b863c-4b1b-4657-9afb-f8bfabff1637-log-httpd\") pod \"ceilometer-0\" (UID: \"f65b863c-4b1b-4657-9afb-f8bfabff1637\") " pod="openstack/ceilometer-0" Nov 23 16:24:45 crc kubenswrapper[5050]: I1123 16:24:45.301369 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f65b863c-4b1b-4657-9afb-f8bfabff1637-config-data\") pod \"ceilometer-0\" (UID: \"f65b863c-4b1b-4657-9afb-f8bfabff1637\") " pod="openstack/ceilometer-0" Nov 23 16:24:45 crc kubenswrapper[5050]: I1123 16:24:45.301388 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f65b863c-4b1b-4657-9afb-f8bfabff1637-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"f65b863c-4b1b-4657-9afb-f8bfabff1637\") " pod="openstack/ceilometer-0" Nov 23 16:24:45 crc kubenswrapper[5050]: I1123 16:24:45.301409 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4g9nr\" (UniqueName: \"kubernetes.io/projected/f65b863c-4b1b-4657-9afb-f8bfabff1637-kube-api-access-4g9nr\") pod \"ceilometer-0\" (UID: \"f65b863c-4b1b-4657-9afb-f8bfabff1637\") " pod="openstack/ceilometer-0" Nov 23 16:24:45 crc kubenswrapper[5050]: I1123 16:24:45.301597 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f65b863c-4b1b-4657-9afb-f8bfabff1637-scripts\") pod \"ceilometer-0\" (UID: \"f65b863c-4b1b-4657-9afb-f8bfabff1637\") " pod="openstack/ceilometer-0" Nov 23 16:24:45 crc kubenswrapper[5050]: I1123 16:24:45.301626 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f65b863c-4b1b-4657-9afb-f8bfabff1637-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"f65b863c-4b1b-4657-9afb-f8bfabff1637\") " pod="openstack/ceilometer-0" Nov 23 16:24:45 crc kubenswrapper[5050]: I1123 16:24:45.403764 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/f65b863c-4b1b-4657-9afb-f8bfabff1637-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"f65b863c-4b1b-4657-9afb-f8bfabff1637\") " pod="openstack/ceilometer-0" Nov 23 16:24:45 crc kubenswrapper[5050]: I1123 16:24:45.403814 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f65b863c-4b1b-4657-9afb-f8bfabff1637-scripts\") pod \"ceilometer-0\" (UID: \"f65b863c-4b1b-4657-9afb-f8bfabff1637\") " pod="openstack/ceilometer-0" Nov 23 16:24:45 crc kubenswrapper[5050]: I1123 16:24:45.403889 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f65b863c-4b1b-4657-9afb-f8bfabff1637-run-httpd\") pod \"ceilometer-0\" (UID: \"f65b863c-4b1b-4657-9afb-f8bfabff1637\") " pod="openstack/ceilometer-0" Nov 23 16:24:45 crc kubenswrapper[5050]: I1123 16:24:45.403943 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f65b863c-4b1b-4657-9afb-f8bfabff1637-log-httpd\") pod \"ceilometer-0\" (UID: \"f65b863c-4b1b-4657-9afb-f8bfabff1637\") " pod="openstack/ceilometer-0" Nov 23 16:24:45 crc kubenswrapper[5050]: I1123 16:24:45.404033 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f65b863c-4b1b-4657-9afb-f8bfabff1637-config-data\") pod \"ceilometer-0\" (UID: \"f65b863c-4b1b-4657-9afb-f8bfabff1637\") " pod="openstack/ceilometer-0" Nov 23 16:24:45 crc kubenswrapper[5050]: I1123 16:24:45.404699 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f65b863c-4b1b-4657-9afb-f8bfabff1637-log-httpd\") pod \"ceilometer-0\" (UID: \"f65b863c-4b1b-4657-9afb-f8bfabff1637\") " pod="openstack/ceilometer-0" Nov 23 16:24:45 crc kubenswrapper[5050]: I1123 16:24:45.404877 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f65b863c-4b1b-4657-9afb-f8bfabff1637-run-httpd\") pod \"ceilometer-0\" (UID: \"f65b863c-4b1b-4657-9afb-f8bfabff1637\") " pod="openstack/ceilometer-0" Nov 23 16:24:45 crc kubenswrapper[5050]: I1123 16:24:45.405669 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f65b863c-4b1b-4657-9afb-f8bfabff1637-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"f65b863c-4b1b-4657-9afb-f8bfabff1637\") " pod="openstack/ceilometer-0" Nov 23 16:24:45 crc kubenswrapper[5050]: I1123 16:24:45.405719 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4g9nr\" (UniqueName: \"kubernetes.io/projected/f65b863c-4b1b-4657-9afb-f8bfabff1637-kube-api-access-4g9nr\") pod \"ceilometer-0\" (UID: \"f65b863c-4b1b-4657-9afb-f8bfabff1637\") " pod="openstack/ceilometer-0" Nov 23 16:24:45 crc kubenswrapper[5050]: I1123 16:24:45.412586 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f65b863c-4b1b-4657-9afb-f8bfabff1637-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"f65b863c-4b1b-4657-9afb-f8bfabff1637\") " pod="openstack/ceilometer-0" Nov 23 16:24:45 crc kubenswrapper[5050]: I1123 16:24:45.413695 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/f65b863c-4b1b-4657-9afb-f8bfabff1637-config-data\") pod \"ceilometer-0\" (UID: \"f65b863c-4b1b-4657-9afb-f8bfabff1637\") " pod="openstack/ceilometer-0" Nov 23 16:24:45 crc kubenswrapper[5050]: I1123 16:24:45.413861 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f65b863c-4b1b-4657-9afb-f8bfabff1637-scripts\") pod \"ceilometer-0\" (UID: \"f65b863c-4b1b-4657-9afb-f8bfabff1637\") " pod="openstack/ceilometer-0" Nov 23 16:24:45 crc kubenswrapper[5050]: I1123 16:24:45.419719 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f65b863c-4b1b-4657-9afb-f8bfabff1637-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"f65b863c-4b1b-4657-9afb-f8bfabff1637\") " pod="openstack/ceilometer-0" Nov 23 16:24:45 crc kubenswrapper[5050]: I1123 16:24:45.425584 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4g9nr\" (UniqueName: \"kubernetes.io/projected/f65b863c-4b1b-4657-9afb-f8bfabff1637-kube-api-access-4g9nr\") pod \"ceilometer-0\" (UID: \"f65b863c-4b1b-4657-9afb-f8bfabff1637\") " pod="openstack/ceilometer-0" Nov 23 16:24:45 crc kubenswrapper[5050]: I1123 16:24:45.502530 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 23 16:24:46 crc kubenswrapper[5050]: I1123 16:24:46.023502 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 23 16:24:46 crc kubenswrapper[5050]: W1123 16:24:46.025518 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf65b863c_4b1b_4657_9afb_f8bfabff1637.slice/crio-7b617e1d26e045c77adb43a6c4047b0c634930ee45afb16ea572c53d6cdb222d WatchSource:0}: Error finding container 7b617e1d26e045c77adb43a6c4047b0c634930ee45afb16ea572c53d6cdb222d: Status 404 returned error can't find the container with id 7b617e1d26e045c77adb43a6c4047b0c634930ee45afb16ea572c53d6cdb222d Nov 23 16:24:46 crc kubenswrapper[5050]: I1123 16:24:46.944375 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f65b863c-4b1b-4657-9afb-f8bfabff1637","Type":"ContainerStarted","Data":"9b4d9e477e1fa2b130dd1d3752cb18379373606c990154b082f523db8fd2b09b"} Nov 23 16:24:46 crc kubenswrapper[5050]: I1123 16:24:46.944773 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f65b863c-4b1b-4657-9afb-f8bfabff1637","Type":"ContainerStarted","Data":"7b617e1d26e045c77adb43a6c4047b0c634930ee45afb16ea572c53d6cdb222d"} Nov 23 16:24:47 crc kubenswrapper[5050]: I1123 16:24:47.998952 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f65b863c-4b1b-4657-9afb-f8bfabff1637","Type":"ContainerStarted","Data":"88aae75998f6ad1d2ad9cbfa458d98bef59930a987ed01ddce9935479644b6f6"} Nov 23 16:24:49 crc kubenswrapper[5050]: I1123 16:24:49.019268 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f65b863c-4b1b-4657-9afb-f8bfabff1637","Type":"ContainerStarted","Data":"b4807d27a8fdc167daa67e3e8b4dd8a0468009c404c8447a8699245c64819edc"} Nov 23 16:24:51 crc kubenswrapper[5050]: I1123 16:24:51.030309 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/prometheus-metric-storage-0" Nov 23 16:24:51 crc kubenswrapper[5050]: I1123 16:24:51.033664 5050 kubelet.go:2542] "SyncLoop 
(probe)" probe="startup" status="started" pod="openstack/prometheus-metric-storage-0" Nov 23 16:24:51 crc kubenswrapper[5050]: I1123 16:24:51.048698 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f65b863c-4b1b-4657-9afb-f8bfabff1637","Type":"ContainerStarted","Data":"bfa9bfb545d77f9650f7b75d6cae08b016ed442a9c9edb0e3627f3e1235a79ba"} Nov 23 16:24:51 crc kubenswrapper[5050]: I1123 16:24:51.048786 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 23 16:24:51 crc kubenswrapper[5050]: I1123 16:24:51.050824 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/prometheus-metric-storage-0" Nov 23 16:24:51 crc kubenswrapper[5050]: I1123 16:24:51.091737 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.21947394 podStartE2EDuration="6.09171435s" podCreationTimestamp="2025-11-23 16:24:45 +0000 UTC" firstStartedPulling="2025-11-23 16:24:46.033521969 +0000 UTC m=+6181.200518474" lastFinishedPulling="2025-11-23 16:24:49.905762379 +0000 UTC m=+6185.072758884" observedRunningTime="2025-11-23 16:24:51.088350835 +0000 UTC m=+6186.255347330" watchObservedRunningTime="2025-11-23 16:24:51.09171435 +0000 UTC m=+6186.258710845" Nov 23 16:24:56 crc kubenswrapper[5050]: I1123 16:24:56.785411 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-db-create-87h94"] Nov 23 16:24:56 crc kubenswrapper[5050]: I1123 16:24:56.787863 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-create-87h94" Nov 23 16:24:56 crc kubenswrapper[5050]: I1123 16:24:56.801437 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-ff36-account-create-frdwk"] Nov 23 16:24:56 crc kubenswrapper[5050]: I1123 16:24:56.802887 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-ff36-account-create-frdwk" Nov 23 16:24:56 crc kubenswrapper[5050]: I1123 16:24:56.806541 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-db-secret" Nov 23 16:24:56 crc kubenswrapper[5050]: I1123 16:24:56.811417 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-create-87h94"] Nov 23 16:24:56 crc kubenswrapper[5050]: I1123 16:24:56.821308 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-ff36-account-create-frdwk"] Nov 23 16:24:56 crc kubenswrapper[5050]: I1123 16:24:56.876872 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hv55h\" (UniqueName: \"kubernetes.io/projected/5b9acf01-e7ac-40f4-af07-ff0ba015c3cd-kube-api-access-hv55h\") pod \"aodh-db-create-87h94\" (UID: \"5b9acf01-e7ac-40f4-af07-ff0ba015c3cd\") " pod="openstack/aodh-db-create-87h94" Nov 23 16:24:56 crc kubenswrapper[5050]: I1123 16:24:56.877061 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5b9acf01-e7ac-40f4-af07-ff0ba015c3cd-operator-scripts\") pod \"aodh-db-create-87h94\" (UID: \"5b9acf01-e7ac-40f4-af07-ff0ba015c3cd\") " pod="openstack/aodh-db-create-87h94" Nov 23 16:24:56 crc kubenswrapper[5050]: I1123 16:24:56.979596 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hv55h\" (UniqueName: \"kubernetes.io/projected/5b9acf01-e7ac-40f4-af07-ff0ba015c3cd-kube-api-access-hv55h\") pod \"aodh-db-create-87h94\" (UID: \"5b9acf01-e7ac-40f4-af07-ff0ba015c3cd\") " pod="openstack/aodh-db-create-87h94" Nov 23 16:24:56 crc kubenswrapper[5050]: I1123 16:24:56.979739 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/469f5a48-e8fc-4c0c-b148-3a1275c9e2ae-operator-scripts\") pod \"aodh-ff36-account-create-frdwk\" (UID: \"469f5a48-e8fc-4c0c-b148-3a1275c9e2ae\") " pod="openstack/aodh-ff36-account-create-frdwk" Nov 23 16:24:56 crc kubenswrapper[5050]: I1123 16:24:56.979765 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5b9acf01-e7ac-40f4-af07-ff0ba015c3cd-operator-scripts\") pod \"aodh-db-create-87h94\" (UID: \"5b9acf01-e7ac-40f4-af07-ff0ba015c3cd\") " pod="openstack/aodh-db-create-87h94" Nov 23 16:24:56 crc kubenswrapper[5050]: I1123 16:24:56.979820 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f4l9l\" (UniqueName: \"kubernetes.io/projected/469f5a48-e8fc-4c0c-b148-3a1275c9e2ae-kube-api-access-f4l9l\") pod \"aodh-ff36-account-create-frdwk\" (UID: \"469f5a48-e8fc-4c0c-b148-3a1275c9e2ae\") " pod="openstack/aodh-ff36-account-create-frdwk" Nov 23 16:24:56 crc kubenswrapper[5050]: I1123 16:24:56.980908 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5b9acf01-e7ac-40f4-af07-ff0ba015c3cd-operator-scripts\") pod \"aodh-db-create-87h94\" (UID: \"5b9acf01-e7ac-40f4-af07-ff0ba015c3cd\") " pod="openstack/aodh-db-create-87h94" Nov 23 16:24:56 crc kubenswrapper[5050]: I1123 16:24:56.999917 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hv55h\" (UniqueName: 
\"kubernetes.io/projected/5b9acf01-e7ac-40f4-af07-ff0ba015c3cd-kube-api-access-hv55h\") pod \"aodh-db-create-87h94\" (UID: \"5b9acf01-e7ac-40f4-af07-ff0ba015c3cd\") " pod="openstack/aodh-db-create-87h94" Nov 23 16:24:57 crc kubenswrapper[5050]: I1123 16:24:57.082131 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/469f5a48-e8fc-4c0c-b148-3a1275c9e2ae-operator-scripts\") pod \"aodh-ff36-account-create-frdwk\" (UID: \"469f5a48-e8fc-4c0c-b148-3a1275c9e2ae\") " pod="openstack/aodh-ff36-account-create-frdwk" Nov 23 16:24:57 crc kubenswrapper[5050]: I1123 16:24:57.082721 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f4l9l\" (UniqueName: \"kubernetes.io/projected/469f5a48-e8fc-4c0c-b148-3a1275c9e2ae-kube-api-access-f4l9l\") pod \"aodh-ff36-account-create-frdwk\" (UID: \"469f5a48-e8fc-4c0c-b148-3a1275c9e2ae\") " pod="openstack/aodh-ff36-account-create-frdwk" Nov 23 16:24:57 crc kubenswrapper[5050]: I1123 16:24:57.083932 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/469f5a48-e8fc-4c0c-b148-3a1275c9e2ae-operator-scripts\") pod \"aodh-ff36-account-create-frdwk\" (UID: \"469f5a48-e8fc-4c0c-b148-3a1275c9e2ae\") " pod="openstack/aodh-ff36-account-create-frdwk" Nov 23 16:24:57 crc kubenswrapper[5050]: I1123 16:24:57.106710 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f4l9l\" (UniqueName: \"kubernetes.io/projected/469f5a48-e8fc-4c0c-b148-3a1275c9e2ae-kube-api-access-f4l9l\") pod \"aodh-ff36-account-create-frdwk\" (UID: \"469f5a48-e8fc-4c0c-b148-3a1275c9e2ae\") " pod="openstack/aodh-ff36-account-create-frdwk" Nov 23 16:24:57 crc kubenswrapper[5050]: I1123 16:24:57.121234 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-create-87h94" Nov 23 16:24:57 crc kubenswrapper[5050]: I1123 16:24:57.138742 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-ff36-account-create-frdwk" Nov 23 16:24:57 crc kubenswrapper[5050]: I1123 16:24:57.696837 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-ff36-account-create-frdwk"] Nov 23 16:24:57 crc kubenswrapper[5050]: I1123 16:24:57.785754 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-create-87h94"] Nov 23 16:24:57 crc kubenswrapper[5050]: W1123 16:24:57.806833 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5b9acf01_e7ac_40f4_af07_ff0ba015c3cd.slice/crio-566cd62a80ddf5932c6642d3ec3ceab60034378771e219559b9bcba54387781a WatchSource:0}: Error finding container 566cd62a80ddf5932c6642d3ec3ceab60034378771e219559b9bcba54387781a: Status 404 returned error can't find the container with id 566cd62a80ddf5932c6642d3ec3ceab60034378771e219559b9bcba54387781a Nov 23 16:24:58 crc kubenswrapper[5050]: I1123 16:24:58.145057 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-create-87h94" event={"ID":"5b9acf01-e7ac-40f4-af07-ff0ba015c3cd","Type":"ContainerStarted","Data":"c29b42916bc42cb02696fb4b8de68bc1f915c8fad18fc84402fd7c55cd8b30f8"} Nov 23 16:24:58 crc kubenswrapper[5050]: I1123 16:24:58.145683 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-create-87h94" event={"ID":"5b9acf01-e7ac-40f4-af07-ff0ba015c3cd","Type":"ContainerStarted","Data":"566cd62a80ddf5932c6642d3ec3ceab60034378771e219559b9bcba54387781a"} Nov 23 16:24:58 crc kubenswrapper[5050]: I1123 16:24:58.148766 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-ff36-account-create-frdwk" event={"ID":"469f5a48-e8fc-4c0c-b148-3a1275c9e2ae","Type":"ContainerStarted","Data":"d3872c1032bde051acc94e17cfcc1ec1dc137ddcaf91264b001a389466a7a1a9"} Nov 23 16:24:58 crc kubenswrapper[5050]: I1123 16:24:58.148824 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-ff36-account-create-frdwk" event={"ID":"469f5a48-e8fc-4c0c-b148-3a1275c9e2ae","Type":"ContainerStarted","Data":"f26d74874d7831b8e534b116b018f6468b1823b3dfed534146127e6e9e92488f"} Nov 23 16:24:58 crc kubenswrapper[5050]: I1123 16:24:58.170357 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/aodh-db-create-87h94" podStartSLOduration=2.170328627 podStartE2EDuration="2.170328627s" podCreationTimestamp="2025-11-23 16:24:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 16:24:58.163049882 +0000 UTC m=+6193.330046367" watchObservedRunningTime="2025-11-23 16:24:58.170328627 +0000 UTC m=+6193.337325112" Nov 23 16:24:58 crc kubenswrapper[5050]: I1123 16:24:58.189360 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/aodh-ff36-account-create-frdwk" podStartSLOduration=2.189326773 podStartE2EDuration="2.189326773s" podCreationTimestamp="2025-11-23 16:24:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 16:24:58.18745822 +0000 UTC m=+6193.354454705" watchObservedRunningTime="2025-11-23 16:24:58.189326773 +0000 UTC m=+6193.356323258" Nov 23 16:24:59 crc kubenswrapper[5050]: I1123 16:24:59.165005 5050 generic.go:334] "Generic (PLEG): container finished" podID="469f5a48-e8fc-4c0c-b148-3a1275c9e2ae" 
containerID="d3872c1032bde051acc94e17cfcc1ec1dc137ddcaf91264b001a389466a7a1a9" exitCode=0 Nov 23 16:24:59 crc kubenswrapper[5050]: I1123 16:24:59.165183 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-ff36-account-create-frdwk" event={"ID":"469f5a48-e8fc-4c0c-b148-3a1275c9e2ae","Type":"ContainerDied","Data":"d3872c1032bde051acc94e17cfcc1ec1dc137ddcaf91264b001a389466a7a1a9"} Nov 23 16:24:59 crc kubenswrapper[5050]: I1123 16:24:59.168577 5050 generic.go:334] "Generic (PLEG): container finished" podID="5b9acf01-e7ac-40f4-af07-ff0ba015c3cd" containerID="c29b42916bc42cb02696fb4b8de68bc1f915c8fad18fc84402fd7c55cd8b30f8" exitCode=0 Nov 23 16:24:59 crc kubenswrapper[5050]: I1123 16:24:59.168690 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-create-87h94" event={"ID":"5b9acf01-e7ac-40f4-af07-ff0ba015c3cd","Type":"ContainerDied","Data":"c29b42916bc42cb02696fb4b8de68bc1f915c8fad18fc84402fd7c55cd8b30f8"} Nov 23 16:25:00 crc kubenswrapper[5050]: I1123 16:25:00.853910 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-create-87h94" Nov 23 16:25:00 crc kubenswrapper[5050]: I1123 16:25:00.861911 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-ff36-account-create-frdwk" Nov 23 16:25:01 crc kubenswrapper[5050]: I1123 16:25:01.002594 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f4l9l\" (UniqueName: \"kubernetes.io/projected/469f5a48-e8fc-4c0c-b148-3a1275c9e2ae-kube-api-access-f4l9l\") pod \"469f5a48-e8fc-4c0c-b148-3a1275c9e2ae\" (UID: \"469f5a48-e8fc-4c0c-b148-3a1275c9e2ae\") " Nov 23 16:25:01 crc kubenswrapper[5050]: I1123 16:25:01.002723 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hv55h\" (UniqueName: \"kubernetes.io/projected/5b9acf01-e7ac-40f4-af07-ff0ba015c3cd-kube-api-access-hv55h\") pod \"5b9acf01-e7ac-40f4-af07-ff0ba015c3cd\" (UID: \"5b9acf01-e7ac-40f4-af07-ff0ba015c3cd\") " Nov 23 16:25:01 crc kubenswrapper[5050]: I1123 16:25:01.002764 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/469f5a48-e8fc-4c0c-b148-3a1275c9e2ae-operator-scripts\") pod \"469f5a48-e8fc-4c0c-b148-3a1275c9e2ae\" (UID: \"469f5a48-e8fc-4c0c-b148-3a1275c9e2ae\") " Nov 23 16:25:01 crc kubenswrapper[5050]: I1123 16:25:01.002815 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5b9acf01-e7ac-40f4-af07-ff0ba015c3cd-operator-scripts\") pod \"5b9acf01-e7ac-40f4-af07-ff0ba015c3cd\" (UID: \"5b9acf01-e7ac-40f4-af07-ff0ba015c3cd\") " Nov 23 16:25:01 crc kubenswrapper[5050]: I1123 16:25:01.003646 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5b9acf01-e7ac-40f4-af07-ff0ba015c3cd-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "5b9acf01-e7ac-40f4-af07-ff0ba015c3cd" (UID: "5b9acf01-e7ac-40f4-af07-ff0ba015c3cd"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 16:25:01 crc kubenswrapper[5050]: I1123 16:25:01.003677 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/469f5a48-e8fc-4c0c-b148-3a1275c9e2ae-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "469f5a48-e8fc-4c0c-b148-3a1275c9e2ae" (UID: "469f5a48-e8fc-4c0c-b148-3a1275c9e2ae"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 16:25:01 crc kubenswrapper[5050]: I1123 16:25:01.010233 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b9acf01-e7ac-40f4-af07-ff0ba015c3cd-kube-api-access-hv55h" (OuterVolumeSpecName: "kube-api-access-hv55h") pod "5b9acf01-e7ac-40f4-af07-ff0ba015c3cd" (UID: "5b9acf01-e7ac-40f4-af07-ff0ba015c3cd"). InnerVolumeSpecName "kube-api-access-hv55h". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 16:25:01 crc kubenswrapper[5050]: I1123 16:25:01.013298 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/469f5a48-e8fc-4c0c-b148-3a1275c9e2ae-kube-api-access-f4l9l" (OuterVolumeSpecName: "kube-api-access-f4l9l") pod "469f5a48-e8fc-4c0c-b148-3a1275c9e2ae" (UID: "469f5a48-e8fc-4c0c-b148-3a1275c9e2ae"). InnerVolumeSpecName "kube-api-access-f4l9l". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 16:25:01 crc kubenswrapper[5050]: I1123 16:25:01.105794 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f4l9l\" (UniqueName: \"kubernetes.io/projected/469f5a48-e8fc-4c0c-b148-3a1275c9e2ae-kube-api-access-f4l9l\") on node \"crc\" DevicePath \"\"" Nov 23 16:25:01 crc kubenswrapper[5050]: I1123 16:25:01.106209 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hv55h\" (UniqueName: \"kubernetes.io/projected/5b9acf01-e7ac-40f4-af07-ff0ba015c3cd-kube-api-access-hv55h\") on node \"crc\" DevicePath \"\"" Nov 23 16:25:01 crc kubenswrapper[5050]: I1123 16:25:01.106221 5050 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/469f5a48-e8fc-4c0c-b148-3a1275c9e2ae-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 23 16:25:01 crc kubenswrapper[5050]: I1123 16:25:01.106231 5050 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5b9acf01-e7ac-40f4-af07-ff0ba015c3cd-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 23 16:25:01 crc kubenswrapper[5050]: I1123 16:25:01.201939 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-db-create-87h94" Nov 23 16:25:01 crc kubenswrapper[5050]: I1123 16:25:01.201924 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-create-87h94" event={"ID":"5b9acf01-e7ac-40f4-af07-ff0ba015c3cd","Type":"ContainerDied","Data":"566cd62a80ddf5932c6642d3ec3ceab60034378771e219559b9bcba54387781a"} Nov 23 16:25:01 crc kubenswrapper[5050]: I1123 16:25:01.202143 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="566cd62a80ddf5932c6642d3ec3ceab60034378771e219559b9bcba54387781a" Nov 23 16:25:01 crc kubenswrapper[5050]: I1123 16:25:01.204992 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-ff36-account-create-frdwk" event={"ID":"469f5a48-e8fc-4c0c-b148-3a1275c9e2ae","Type":"ContainerDied","Data":"f26d74874d7831b8e534b116b018f6468b1823b3dfed534146127e6e9e92488f"} Nov 23 16:25:01 crc kubenswrapper[5050]: I1123 16:25:01.205036 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f26d74874d7831b8e534b116b018f6468b1823b3dfed534146127e6e9e92488f" Nov 23 16:25:01 crc kubenswrapper[5050]: I1123 16:25:01.205101 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-ff36-account-create-frdwk" Nov 23 16:25:07 crc kubenswrapper[5050]: I1123 16:25:07.065053 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-db-sync-92vrc"] Nov 23 16:25:07 crc kubenswrapper[5050]: E1123 16:25:07.066525 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5b9acf01-e7ac-40f4-af07-ff0ba015c3cd" containerName="mariadb-database-create" Nov 23 16:25:07 crc kubenswrapper[5050]: I1123 16:25:07.066547 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="5b9acf01-e7ac-40f4-af07-ff0ba015c3cd" containerName="mariadb-database-create" Nov 23 16:25:07 crc kubenswrapper[5050]: E1123 16:25:07.066582 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="469f5a48-e8fc-4c0c-b148-3a1275c9e2ae" containerName="mariadb-account-create" Nov 23 16:25:07 crc kubenswrapper[5050]: I1123 16:25:07.066591 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="469f5a48-e8fc-4c0c-b148-3a1275c9e2ae" containerName="mariadb-account-create" Nov 23 16:25:07 crc kubenswrapper[5050]: I1123 16:25:07.067009 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="469f5a48-e8fc-4c0c-b148-3a1275c9e2ae" containerName="mariadb-account-create" Nov 23 16:25:07 crc kubenswrapper[5050]: I1123 16:25:07.067036 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="5b9acf01-e7ac-40f4-af07-ff0ba015c3cd" containerName="mariadb-database-create" Nov 23 16:25:07 crc kubenswrapper[5050]: I1123 16:25:07.068285 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-db-sync-92vrc" Nov 23 16:25:07 crc kubenswrapper[5050]: I1123 16:25:07.070964 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-scripts" Nov 23 16:25:07 crc kubenswrapper[5050]: I1123 16:25:07.072100 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-autoscaling-dockercfg-n99vt" Nov 23 16:25:07 crc kubenswrapper[5050]: I1123 16:25:07.072532 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-config-data" Nov 23 16:25:07 crc kubenswrapper[5050]: I1123 16:25:07.074202 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Nov 23 16:25:07 crc kubenswrapper[5050]: I1123 16:25:07.081841 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-sync-92vrc"] Nov 23 16:25:07 crc kubenswrapper[5050]: I1123 16:25:07.176494 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/49c0f19a-0135-4be9-9d78-8d8813d9d876-config-data\") pod \"aodh-db-sync-92vrc\" (UID: \"49c0f19a-0135-4be9-9d78-8d8813d9d876\") " pod="openstack/aodh-db-sync-92vrc" Nov 23 16:25:07 crc kubenswrapper[5050]: I1123 16:25:07.177122 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/49c0f19a-0135-4be9-9d78-8d8813d9d876-combined-ca-bundle\") pod \"aodh-db-sync-92vrc\" (UID: \"49c0f19a-0135-4be9-9d78-8d8813d9d876\") " pod="openstack/aodh-db-sync-92vrc" Nov 23 16:25:07 crc kubenswrapper[5050]: I1123 16:25:07.177281 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/49c0f19a-0135-4be9-9d78-8d8813d9d876-scripts\") pod \"aodh-db-sync-92vrc\" (UID: \"49c0f19a-0135-4be9-9d78-8d8813d9d876\") " pod="openstack/aodh-db-sync-92vrc" Nov 23 16:25:07 crc kubenswrapper[5050]: I1123 16:25:07.177504 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mwgds\" (UniqueName: \"kubernetes.io/projected/49c0f19a-0135-4be9-9d78-8d8813d9d876-kube-api-access-mwgds\") pod \"aodh-db-sync-92vrc\" (UID: \"49c0f19a-0135-4be9-9d78-8d8813d9d876\") " pod="openstack/aodh-db-sync-92vrc" Nov 23 16:25:07 crc kubenswrapper[5050]: I1123 16:25:07.280368 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/49c0f19a-0135-4be9-9d78-8d8813d9d876-combined-ca-bundle\") pod \"aodh-db-sync-92vrc\" (UID: \"49c0f19a-0135-4be9-9d78-8d8813d9d876\") " pod="openstack/aodh-db-sync-92vrc" Nov 23 16:25:07 crc kubenswrapper[5050]: I1123 16:25:07.280470 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/49c0f19a-0135-4be9-9d78-8d8813d9d876-scripts\") pod \"aodh-db-sync-92vrc\" (UID: \"49c0f19a-0135-4be9-9d78-8d8813d9d876\") " pod="openstack/aodh-db-sync-92vrc" Nov 23 16:25:07 crc kubenswrapper[5050]: I1123 16:25:07.280540 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mwgds\" (UniqueName: \"kubernetes.io/projected/49c0f19a-0135-4be9-9d78-8d8813d9d876-kube-api-access-mwgds\") pod \"aodh-db-sync-92vrc\" (UID: \"49c0f19a-0135-4be9-9d78-8d8813d9d876\") " pod="openstack/aodh-db-sync-92vrc" Nov 23 16:25:07 crc 
kubenswrapper[5050]: I1123 16:25:07.280707 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/49c0f19a-0135-4be9-9d78-8d8813d9d876-config-data\") pod \"aodh-db-sync-92vrc\" (UID: \"49c0f19a-0135-4be9-9d78-8d8813d9d876\") " pod="openstack/aodh-db-sync-92vrc" Nov 23 16:25:07 crc kubenswrapper[5050]: I1123 16:25:07.291535 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/49c0f19a-0135-4be9-9d78-8d8813d9d876-config-data\") pod \"aodh-db-sync-92vrc\" (UID: \"49c0f19a-0135-4be9-9d78-8d8813d9d876\") " pod="openstack/aodh-db-sync-92vrc" Nov 23 16:25:07 crc kubenswrapper[5050]: I1123 16:25:07.296603 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/49c0f19a-0135-4be9-9d78-8d8813d9d876-scripts\") pod \"aodh-db-sync-92vrc\" (UID: \"49c0f19a-0135-4be9-9d78-8d8813d9d876\") " pod="openstack/aodh-db-sync-92vrc" Nov 23 16:25:07 crc kubenswrapper[5050]: I1123 16:25:07.296831 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/49c0f19a-0135-4be9-9d78-8d8813d9d876-combined-ca-bundle\") pod \"aodh-db-sync-92vrc\" (UID: \"49c0f19a-0135-4be9-9d78-8d8813d9d876\") " pod="openstack/aodh-db-sync-92vrc" Nov 23 16:25:07 crc kubenswrapper[5050]: I1123 16:25:07.301495 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mwgds\" (UniqueName: \"kubernetes.io/projected/49c0f19a-0135-4be9-9d78-8d8813d9d876-kube-api-access-mwgds\") pod \"aodh-db-sync-92vrc\" (UID: \"49c0f19a-0135-4be9-9d78-8d8813d9d876\") " pod="openstack/aodh-db-sync-92vrc" Nov 23 16:25:07 crc kubenswrapper[5050]: I1123 16:25:07.450263 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-db-sync-92vrc" Nov 23 16:25:07 crc kubenswrapper[5050]: I1123 16:25:07.942256 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-sync-92vrc"] Nov 23 16:25:08 crc kubenswrapper[5050]: I1123 16:25:08.321052 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-92vrc" event={"ID":"49c0f19a-0135-4be9-9d78-8d8813d9d876","Type":"ContainerStarted","Data":"ec4675de39009d7cd92297462e234aa8a6d7c9637f1b8a723fe74bb5e3eebe9b"} Nov 23 16:25:13 crc kubenswrapper[5050]: I1123 16:25:13.411934 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-92vrc" event={"ID":"49c0f19a-0135-4be9-9d78-8d8813d9d876","Type":"ContainerStarted","Data":"c96a965778da0f778aaa9aeb6801957eb0f9ed943553ca635bd26a7f1bd2af33"} Nov 23 16:25:13 crc kubenswrapper[5050]: I1123 16:25:13.455364 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/aodh-db-sync-92vrc" podStartSLOduration=1.721122758 podStartE2EDuration="6.45533281s" podCreationTimestamp="2025-11-23 16:25:07 +0000 UTC" firstStartedPulling="2025-11-23 16:25:07.9518593 +0000 UTC m=+6203.118855775" lastFinishedPulling="2025-11-23 16:25:12.686069302 +0000 UTC m=+6207.853065827" observedRunningTime="2025-11-23 16:25:13.441168251 +0000 UTC m=+6208.608164766" watchObservedRunningTime="2025-11-23 16:25:13.45533281 +0000 UTC m=+6208.622329335" Nov 23 16:25:15 crc kubenswrapper[5050]: I1123 16:25:15.437783 5050 generic.go:334] "Generic (PLEG): container finished" podID="49c0f19a-0135-4be9-9d78-8d8813d9d876" containerID="c96a965778da0f778aaa9aeb6801957eb0f9ed943553ca635bd26a7f1bd2af33" exitCode=0 Nov 23 16:25:15 crc kubenswrapper[5050]: I1123 16:25:15.437866 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-92vrc" event={"ID":"49c0f19a-0135-4be9-9d78-8d8813d9d876","Type":"ContainerDied","Data":"c96a965778da0f778aaa9aeb6801957eb0f9ed943553ca635bd26a7f1bd2af33"} Nov 23 16:25:15 crc kubenswrapper[5050]: I1123 16:25:15.517729 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Nov 23 16:25:16 crc kubenswrapper[5050]: I1123 16:25:16.939638 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-db-sync-92vrc" Nov 23 16:25:16 crc kubenswrapper[5050]: I1123 16:25:16.992095 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/49c0f19a-0135-4be9-9d78-8d8813d9d876-scripts\") pod \"49c0f19a-0135-4be9-9d78-8d8813d9d876\" (UID: \"49c0f19a-0135-4be9-9d78-8d8813d9d876\") " Nov 23 16:25:16 crc kubenswrapper[5050]: I1123 16:25:16.992410 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/49c0f19a-0135-4be9-9d78-8d8813d9d876-combined-ca-bundle\") pod \"49c0f19a-0135-4be9-9d78-8d8813d9d876\" (UID: \"49c0f19a-0135-4be9-9d78-8d8813d9d876\") " Nov 23 16:25:16 crc kubenswrapper[5050]: I1123 16:25:16.992544 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/49c0f19a-0135-4be9-9d78-8d8813d9d876-config-data\") pod \"49c0f19a-0135-4be9-9d78-8d8813d9d876\" (UID: \"49c0f19a-0135-4be9-9d78-8d8813d9d876\") " Nov 23 16:25:16 crc kubenswrapper[5050]: I1123 16:25:16.992657 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mwgds\" (UniqueName: \"kubernetes.io/projected/49c0f19a-0135-4be9-9d78-8d8813d9d876-kube-api-access-mwgds\") pod \"49c0f19a-0135-4be9-9d78-8d8813d9d876\" (UID: \"49c0f19a-0135-4be9-9d78-8d8813d9d876\") " Nov 23 16:25:16 crc kubenswrapper[5050]: I1123 16:25:16.999591 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c0f19a-0135-4be9-9d78-8d8813d9d876-scripts" (OuterVolumeSpecName: "scripts") pod "49c0f19a-0135-4be9-9d78-8d8813d9d876" (UID: "49c0f19a-0135-4be9-9d78-8d8813d9d876"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:25:17 crc kubenswrapper[5050]: I1123 16:25:17.024415 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49c0f19a-0135-4be9-9d78-8d8813d9d876-kube-api-access-mwgds" (OuterVolumeSpecName: "kube-api-access-mwgds") pod "49c0f19a-0135-4be9-9d78-8d8813d9d876" (UID: "49c0f19a-0135-4be9-9d78-8d8813d9d876"). InnerVolumeSpecName "kube-api-access-mwgds". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 16:25:17 crc kubenswrapper[5050]: I1123 16:25:17.032959 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c0f19a-0135-4be9-9d78-8d8813d9d876-config-data" (OuterVolumeSpecName: "config-data") pod "49c0f19a-0135-4be9-9d78-8d8813d9d876" (UID: "49c0f19a-0135-4be9-9d78-8d8813d9d876"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:25:17 crc kubenswrapper[5050]: I1123 16:25:17.033986 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c0f19a-0135-4be9-9d78-8d8813d9d876-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "49c0f19a-0135-4be9-9d78-8d8813d9d876" (UID: "49c0f19a-0135-4be9-9d78-8d8813d9d876"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:25:17 crc kubenswrapper[5050]: I1123 16:25:17.094523 5050 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/49c0f19a-0135-4be9-9d78-8d8813d9d876-scripts\") on node \"crc\" DevicePath \"\"" Nov 23 16:25:17 crc kubenswrapper[5050]: I1123 16:25:17.094558 5050 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/49c0f19a-0135-4be9-9d78-8d8813d9d876-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 16:25:17 crc kubenswrapper[5050]: I1123 16:25:17.094571 5050 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/49c0f19a-0135-4be9-9d78-8d8813d9d876-config-data\") on node \"crc\" DevicePath \"\"" Nov 23 16:25:17 crc kubenswrapper[5050]: I1123 16:25:17.094582 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mwgds\" (UniqueName: \"kubernetes.io/projected/49c0f19a-0135-4be9-9d78-8d8813d9d876-kube-api-access-mwgds\") on node \"crc\" DevicePath \"\"" Nov 23 16:25:17 crc kubenswrapper[5050]: I1123 16:25:17.464973 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-92vrc" event={"ID":"49c0f19a-0135-4be9-9d78-8d8813d9d876","Type":"ContainerDied","Data":"ec4675de39009d7cd92297462e234aa8a6d7c9637f1b8a723fe74bb5e3eebe9b"} Nov 23 16:25:17 crc kubenswrapper[5050]: I1123 16:25:17.465032 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ec4675de39009d7cd92297462e234aa8a6d7c9637f1b8a723fe74bb5e3eebe9b" Nov 23 16:25:17 crc kubenswrapper[5050]: I1123 16:25:17.465036 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-sync-92vrc" Nov 23 16:25:17 crc kubenswrapper[5050]: I1123 16:25:17.559596 5050 scope.go:117] "RemoveContainer" containerID="b0bad7799a99735bfbd956d80653de39fdb440cd0c61f2c75de64c0cd74e6481" Nov 23 16:25:17 crc kubenswrapper[5050]: I1123 16:25:17.605190 5050 scope.go:117] "RemoveContainer" containerID="9d6ee5d8dc6993110b46bea1096907946feea512d71596392ccd6a9c01aabe22" Nov 23 16:25:17 crc kubenswrapper[5050]: I1123 16:25:17.691141 5050 scope.go:117] "RemoveContainer" containerID="05fc1817af6846b0b98226a6fe360837816f34f6615eeae2e10dc0b4828f6a8d" Nov 23 16:25:17 crc kubenswrapper[5050]: I1123 16:25:17.753037 5050 scope.go:117] "RemoveContainer" containerID="c8f331d053776c911d8203fe6e1908a05a9f73bcb1ac58ffa155103c0bb0463e" Nov 23 16:25:17 crc kubenswrapper[5050]: I1123 16:25:17.791963 5050 scope.go:117] "RemoveContainer" containerID="73fb1d109e600928c168e991d60d8b4d275f6b0c7b32501b0b32357b0eae60d5" Nov 23 16:25:21 crc kubenswrapper[5050]: I1123 16:25:21.829811 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-0"] Nov 23 16:25:21 crc kubenswrapper[5050]: E1123 16:25:21.835022 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="49c0f19a-0135-4be9-9d78-8d8813d9d876" containerName="aodh-db-sync" Nov 23 16:25:21 crc kubenswrapper[5050]: I1123 16:25:21.836619 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="49c0f19a-0135-4be9-9d78-8d8813d9d876" containerName="aodh-db-sync" Nov 23 16:25:21 crc kubenswrapper[5050]: I1123 16:25:21.837847 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="49c0f19a-0135-4be9-9d78-8d8813d9d876" containerName="aodh-db-sync" Nov 23 16:25:21 crc kubenswrapper[5050]: I1123 16:25:21.845419 5050 util.go:30] "No sandbox for pod can be 
found. Need to start a new one" pod="openstack/aodh-0" Nov 23 16:25:21 crc kubenswrapper[5050]: I1123 16:25:21.848880 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-scripts" Nov 23 16:25:21 crc kubenswrapper[5050]: I1123 16:25:21.849198 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-autoscaling-dockercfg-n99vt" Nov 23 16:25:21 crc kubenswrapper[5050]: I1123 16:25:21.862775 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-config-data" Nov 23 16:25:21 crc kubenswrapper[5050]: I1123 16:25:21.865542 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-0"] Nov 23 16:25:21 crc kubenswrapper[5050]: I1123 16:25:21.940549 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jz4k5\" (UniqueName: \"kubernetes.io/projected/5e79d4b3-0fc0-4521-a8a2-f3416c3f4039-kube-api-access-jz4k5\") pod \"aodh-0\" (UID: \"5e79d4b3-0fc0-4521-a8a2-f3416c3f4039\") " pod="openstack/aodh-0" Nov 23 16:25:21 crc kubenswrapper[5050]: I1123 16:25:21.941058 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5e79d4b3-0fc0-4521-a8a2-f3416c3f4039-config-data\") pod \"aodh-0\" (UID: \"5e79d4b3-0fc0-4521-a8a2-f3416c3f4039\") " pod="openstack/aodh-0" Nov 23 16:25:21 crc kubenswrapper[5050]: I1123 16:25:21.941170 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5e79d4b3-0fc0-4521-a8a2-f3416c3f4039-combined-ca-bundle\") pod \"aodh-0\" (UID: \"5e79d4b3-0fc0-4521-a8a2-f3416c3f4039\") " pod="openstack/aodh-0" Nov 23 16:25:21 crc kubenswrapper[5050]: I1123 16:25:21.941309 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5e79d4b3-0fc0-4521-a8a2-f3416c3f4039-scripts\") pod \"aodh-0\" (UID: \"5e79d4b3-0fc0-4521-a8a2-f3416c3f4039\") " pod="openstack/aodh-0" Nov 23 16:25:22 crc kubenswrapper[5050]: I1123 16:25:22.047776 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5e79d4b3-0fc0-4521-a8a2-f3416c3f4039-config-data\") pod \"aodh-0\" (UID: \"5e79d4b3-0fc0-4521-a8a2-f3416c3f4039\") " pod="openstack/aodh-0" Nov 23 16:25:22 crc kubenswrapper[5050]: I1123 16:25:22.047922 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5e79d4b3-0fc0-4521-a8a2-f3416c3f4039-combined-ca-bundle\") pod \"aodh-0\" (UID: \"5e79d4b3-0fc0-4521-a8a2-f3416c3f4039\") " pod="openstack/aodh-0" Nov 23 16:25:22 crc kubenswrapper[5050]: I1123 16:25:22.048056 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5e79d4b3-0fc0-4521-a8a2-f3416c3f4039-scripts\") pod \"aodh-0\" (UID: \"5e79d4b3-0fc0-4521-a8a2-f3416c3f4039\") " pod="openstack/aodh-0" Nov 23 16:25:22 crc kubenswrapper[5050]: I1123 16:25:22.048398 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jz4k5\" (UniqueName: \"kubernetes.io/projected/5e79d4b3-0fc0-4521-a8a2-f3416c3f4039-kube-api-access-jz4k5\") pod \"aodh-0\" (UID: \"5e79d4b3-0fc0-4521-a8a2-f3416c3f4039\") " pod="openstack/aodh-0" Nov 23 16:25:22 crc 
kubenswrapper[5050]: I1123 16:25:22.055373 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5e79d4b3-0fc0-4521-a8a2-f3416c3f4039-combined-ca-bundle\") pod \"aodh-0\" (UID: \"5e79d4b3-0fc0-4521-a8a2-f3416c3f4039\") " pod="openstack/aodh-0" Nov 23 16:25:22 crc kubenswrapper[5050]: I1123 16:25:22.056277 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5e79d4b3-0fc0-4521-a8a2-f3416c3f4039-config-data\") pod \"aodh-0\" (UID: \"5e79d4b3-0fc0-4521-a8a2-f3416c3f4039\") " pod="openstack/aodh-0" Nov 23 16:25:22 crc kubenswrapper[5050]: I1123 16:25:22.068084 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5e79d4b3-0fc0-4521-a8a2-f3416c3f4039-scripts\") pod \"aodh-0\" (UID: \"5e79d4b3-0fc0-4521-a8a2-f3416c3f4039\") " pod="openstack/aodh-0" Nov 23 16:25:22 crc kubenswrapper[5050]: I1123 16:25:22.070999 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jz4k5\" (UniqueName: \"kubernetes.io/projected/5e79d4b3-0fc0-4521-a8a2-f3416c3f4039-kube-api-access-jz4k5\") pod \"aodh-0\" (UID: \"5e79d4b3-0fc0-4521-a8a2-f3416c3f4039\") " pod="openstack/aodh-0" Nov 23 16:25:22 crc kubenswrapper[5050]: I1123 16:25:22.185997 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-0" Nov 23 16:25:22 crc kubenswrapper[5050]: I1123 16:25:22.903735 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-0"] Nov 23 16:25:23 crc kubenswrapper[5050]: I1123 16:25:23.039576 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-create-6fn7f"] Nov 23 16:25:23 crc kubenswrapper[5050]: I1123 16:25:23.047497 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-8046-account-create-hkqfk"] Nov 23 16:25:23 crc kubenswrapper[5050]: I1123 16:25:23.055318 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-8046-account-create-hkqfk"] Nov 23 16:25:23 crc kubenswrapper[5050]: I1123 16:25:23.062960 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-create-6fn7f"] Nov 23 16:25:23 crc kubenswrapper[5050]: I1123 16:25:23.261526 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 23 16:25:23 crc kubenswrapper[5050]: I1123 16:25:23.261917 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="f65b863c-4b1b-4657-9afb-f8bfabff1637" containerName="sg-core" containerID="cri-o://b4807d27a8fdc167daa67e3e8b4dd8a0468009c404c8447a8699245c64819edc" gracePeriod=30 Nov 23 16:25:23 crc kubenswrapper[5050]: I1123 16:25:23.261992 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="f65b863c-4b1b-4657-9afb-f8bfabff1637" containerName="ceilometer-notification-agent" containerID="cri-o://88aae75998f6ad1d2ad9cbfa458d98bef59930a987ed01ddce9935479644b6f6" gracePeriod=30 Nov 23 16:25:23 crc kubenswrapper[5050]: I1123 16:25:23.261940 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="f65b863c-4b1b-4657-9afb-f8bfabff1637" containerName="proxy-httpd" containerID="cri-o://bfa9bfb545d77f9650f7b75d6cae08b016ed442a9c9edb0e3627f3e1235a79ba" gracePeriod=30 Nov 23 16:25:23 crc kubenswrapper[5050]: I1123 16:25:23.262165 5050 
kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="f65b863c-4b1b-4657-9afb-f8bfabff1637" containerName="ceilometer-central-agent" containerID="cri-o://9b4d9e477e1fa2b130dd1d3752cb18379373606c990154b082f523db8fd2b09b" gracePeriod=30 Nov 23 16:25:23 crc kubenswrapper[5050]: I1123 16:25:23.563484 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1fa8aef4-fb4b-4655-b03e-6707ee37bd48" path="/var/lib/kubelet/pods/1fa8aef4-fb4b-4655-b03e-6707ee37bd48/volumes" Nov 23 16:25:23 crc kubenswrapper[5050]: I1123 16:25:23.565493 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="50036d79-f35b-430c-803b-f41a189aef45" path="/var/lib/kubelet/pods/50036d79-f35b-430c-803b-f41a189aef45/volumes" Nov 23 16:25:23 crc kubenswrapper[5050]: I1123 16:25:23.577609 5050 generic.go:334] "Generic (PLEG): container finished" podID="f65b863c-4b1b-4657-9afb-f8bfabff1637" containerID="bfa9bfb545d77f9650f7b75d6cae08b016ed442a9c9edb0e3627f3e1235a79ba" exitCode=0 Nov 23 16:25:23 crc kubenswrapper[5050]: I1123 16:25:23.577671 5050 generic.go:334] "Generic (PLEG): container finished" podID="f65b863c-4b1b-4657-9afb-f8bfabff1637" containerID="b4807d27a8fdc167daa67e3e8b4dd8a0468009c404c8447a8699245c64819edc" exitCode=2 Nov 23 16:25:23 crc kubenswrapper[5050]: I1123 16:25:23.577706 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f65b863c-4b1b-4657-9afb-f8bfabff1637","Type":"ContainerDied","Data":"bfa9bfb545d77f9650f7b75d6cae08b016ed442a9c9edb0e3627f3e1235a79ba"} Nov 23 16:25:23 crc kubenswrapper[5050]: I1123 16:25:23.577767 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f65b863c-4b1b-4657-9afb-f8bfabff1637","Type":"ContainerDied","Data":"b4807d27a8fdc167daa67e3e8b4dd8a0468009c404c8447a8699245c64819edc"} Nov 23 16:25:23 crc kubenswrapper[5050]: I1123 16:25:23.579829 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"5e79d4b3-0fc0-4521-a8a2-f3416c3f4039","Type":"ContainerStarted","Data":"86e8a0efdb0e8252974c2653ab1e473fb912b662a3bc0cccde39275e81e001f7"} Nov 23 16:25:24 crc kubenswrapper[5050]: I1123 16:25:24.102478 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 23 16:25:24 crc kubenswrapper[5050]: I1123 16:25:24.216686 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4g9nr\" (UniqueName: \"kubernetes.io/projected/f65b863c-4b1b-4657-9afb-f8bfabff1637-kube-api-access-4g9nr\") pod \"f65b863c-4b1b-4657-9afb-f8bfabff1637\" (UID: \"f65b863c-4b1b-4657-9afb-f8bfabff1637\") " Nov 23 16:25:24 crc kubenswrapper[5050]: I1123 16:25:24.216798 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f65b863c-4b1b-4657-9afb-f8bfabff1637-sg-core-conf-yaml\") pod \"f65b863c-4b1b-4657-9afb-f8bfabff1637\" (UID: \"f65b863c-4b1b-4657-9afb-f8bfabff1637\") " Nov 23 16:25:24 crc kubenswrapper[5050]: I1123 16:25:24.217060 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f65b863c-4b1b-4657-9afb-f8bfabff1637-log-httpd\") pod \"f65b863c-4b1b-4657-9afb-f8bfabff1637\" (UID: \"f65b863c-4b1b-4657-9afb-f8bfabff1637\") " Nov 23 16:25:24 crc kubenswrapper[5050]: I1123 16:25:24.217109 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f65b863c-4b1b-4657-9afb-f8bfabff1637-run-httpd\") pod \"f65b863c-4b1b-4657-9afb-f8bfabff1637\" (UID: \"f65b863c-4b1b-4657-9afb-f8bfabff1637\") " Nov 23 16:25:24 crc kubenswrapper[5050]: I1123 16:25:24.217168 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f65b863c-4b1b-4657-9afb-f8bfabff1637-combined-ca-bundle\") pod \"f65b863c-4b1b-4657-9afb-f8bfabff1637\" (UID: \"f65b863c-4b1b-4657-9afb-f8bfabff1637\") " Nov 23 16:25:24 crc kubenswrapper[5050]: I1123 16:25:24.217276 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f65b863c-4b1b-4657-9afb-f8bfabff1637-config-data\") pod \"f65b863c-4b1b-4657-9afb-f8bfabff1637\" (UID: \"f65b863c-4b1b-4657-9afb-f8bfabff1637\") " Nov 23 16:25:24 crc kubenswrapper[5050]: I1123 16:25:24.217361 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f65b863c-4b1b-4657-9afb-f8bfabff1637-scripts\") pod \"f65b863c-4b1b-4657-9afb-f8bfabff1637\" (UID: \"f65b863c-4b1b-4657-9afb-f8bfabff1637\") " Nov 23 16:25:24 crc kubenswrapper[5050]: I1123 16:25:24.217968 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f65b863c-4b1b-4657-9afb-f8bfabff1637-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "f65b863c-4b1b-4657-9afb-f8bfabff1637" (UID: "f65b863c-4b1b-4657-9afb-f8bfabff1637"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 16:25:24 crc kubenswrapper[5050]: I1123 16:25:24.218113 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f65b863c-4b1b-4657-9afb-f8bfabff1637-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "f65b863c-4b1b-4657-9afb-f8bfabff1637" (UID: "f65b863c-4b1b-4657-9afb-f8bfabff1637"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 16:25:24 crc kubenswrapper[5050]: I1123 16:25:24.218949 5050 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f65b863c-4b1b-4657-9afb-f8bfabff1637-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 23 16:25:24 crc kubenswrapper[5050]: I1123 16:25:24.218979 5050 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f65b863c-4b1b-4657-9afb-f8bfabff1637-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 23 16:25:24 crc kubenswrapper[5050]: I1123 16:25:24.223980 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f65b863c-4b1b-4657-9afb-f8bfabff1637-scripts" (OuterVolumeSpecName: "scripts") pod "f65b863c-4b1b-4657-9afb-f8bfabff1637" (UID: "f65b863c-4b1b-4657-9afb-f8bfabff1637"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:25:24 crc kubenswrapper[5050]: I1123 16:25:24.226610 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f65b863c-4b1b-4657-9afb-f8bfabff1637-kube-api-access-4g9nr" (OuterVolumeSpecName: "kube-api-access-4g9nr") pod "f65b863c-4b1b-4657-9afb-f8bfabff1637" (UID: "f65b863c-4b1b-4657-9afb-f8bfabff1637"). InnerVolumeSpecName "kube-api-access-4g9nr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 16:25:24 crc kubenswrapper[5050]: I1123 16:25:24.265543 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f65b863c-4b1b-4657-9afb-f8bfabff1637-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "f65b863c-4b1b-4657-9afb-f8bfabff1637" (UID: "f65b863c-4b1b-4657-9afb-f8bfabff1637"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:25:24 crc kubenswrapper[5050]: I1123 16:25:24.321943 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4g9nr\" (UniqueName: \"kubernetes.io/projected/f65b863c-4b1b-4657-9afb-f8bfabff1637-kube-api-access-4g9nr\") on node \"crc\" DevicePath \"\"" Nov 23 16:25:24 crc kubenswrapper[5050]: I1123 16:25:24.321976 5050 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f65b863c-4b1b-4657-9afb-f8bfabff1637-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 23 16:25:24 crc kubenswrapper[5050]: I1123 16:25:24.321987 5050 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f65b863c-4b1b-4657-9afb-f8bfabff1637-scripts\") on node \"crc\" DevicePath \"\"" Nov 23 16:25:24 crc kubenswrapper[5050]: I1123 16:25:24.336225 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f65b863c-4b1b-4657-9afb-f8bfabff1637-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f65b863c-4b1b-4657-9afb-f8bfabff1637" (UID: "f65b863c-4b1b-4657-9afb-f8bfabff1637"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:25:24 crc kubenswrapper[5050]: I1123 16:25:24.350238 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f65b863c-4b1b-4657-9afb-f8bfabff1637-config-data" (OuterVolumeSpecName: "config-data") pod "f65b863c-4b1b-4657-9afb-f8bfabff1637" (UID: "f65b863c-4b1b-4657-9afb-f8bfabff1637"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:25:24 crc kubenswrapper[5050]: I1123 16:25:24.425389 5050 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f65b863c-4b1b-4657-9afb-f8bfabff1637-config-data\") on node \"crc\" DevicePath \"\"" Nov 23 16:25:24 crc kubenswrapper[5050]: I1123 16:25:24.425424 5050 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f65b863c-4b1b-4657-9afb-f8bfabff1637-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 16:25:24 crc kubenswrapper[5050]: I1123 16:25:24.600954 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"5e79d4b3-0fc0-4521-a8a2-f3416c3f4039","Type":"ContainerStarted","Data":"5b89d8c8b9920f648b34be74fad53cbd34c226fa78b1771b62712d6a423b8888"} Nov 23 16:25:24 crc kubenswrapper[5050]: I1123 16:25:24.605487 5050 generic.go:334] "Generic (PLEG): container finished" podID="f65b863c-4b1b-4657-9afb-f8bfabff1637" containerID="88aae75998f6ad1d2ad9cbfa458d98bef59930a987ed01ddce9935479644b6f6" exitCode=0 Nov 23 16:25:24 crc kubenswrapper[5050]: I1123 16:25:24.605558 5050 generic.go:334] "Generic (PLEG): container finished" podID="f65b863c-4b1b-4657-9afb-f8bfabff1637" containerID="9b4d9e477e1fa2b130dd1d3752cb18379373606c990154b082f523db8fd2b09b" exitCode=0 Nov 23 16:25:24 crc kubenswrapper[5050]: I1123 16:25:24.605578 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f65b863c-4b1b-4657-9afb-f8bfabff1637","Type":"ContainerDied","Data":"88aae75998f6ad1d2ad9cbfa458d98bef59930a987ed01ddce9935479644b6f6"} Nov 23 16:25:24 crc kubenswrapper[5050]: I1123 16:25:24.605651 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f65b863c-4b1b-4657-9afb-f8bfabff1637","Type":"ContainerDied","Data":"9b4d9e477e1fa2b130dd1d3752cb18379373606c990154b082f523db8fd2b09b"} Nov 23 16:25:24 crc kubenswrapper[5050]: I1123 16:25:24.605669 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f65b863c-4b1b-4657-9afb-f8bfabff1637","Type":"ContainerDied","Data":"7b617e1d26e045c77adb43a6c4047b0c634930ee45afb16ea572c53d6cdb222d"} Nov 23 16:25:24 crc kubenswrapper[5050]: I1123 16:25:24.605694 5050 scope.go:117] "RemoveContainer" containerID="bfa9bfb545d77f9650f7b75d6cae08b016ed442a9c9edb0e3627f3e1235a79ba" Nov 23 16:25:24 crc kubenswrapper[5050]: I1123 16:25:24.605964 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 23 16:25:24 crc kubenswrapper[5050]: I1123 16:25:24.656572 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 23 16:25:24 crc kubenswrapper[5050]: I1123 16:25:24.670895 5050 scope.go:117] "RemoveContainer" containerID="b4807d27a8fdc167daa67e3e8b4dd8a0468009c404c8447a8699245c64819edc" Nov 23 16:25:24 crc kubenswrapper[5050]: I1123 16:25:24.672042 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 23 16:25:24 crc kubenswrapper[5050]: I1123 16:25:24.689774 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 23 16:25:24 crc kubenswrapper[5050]: E1123 16:25:24.690350 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f65b863c-4b1b-4657-9afb-f8bfabff1637" containerName="ceilometer-notification-agent" Nov 23 16:25:24 crc kubenswrapper[5050]: I1123 16:25:24.690365 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="f65b863c-4b1b-4657-9afb-f8bfabff1637" containerName="ceilometer-notification-agent" Nov 23 16:25:24 crc kubenswrapper[5050]: E1123 16:25:24.690390 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f65b863c-4b1b-4657-9afb-f8bfabff1637" containerName="proxy-httpd" Nov 23 16:25:24 crc kubenswrapper[5050]: I1123 16:25:24.690396 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="f65b863c-4b1b-4657-9afb-f8bfabff1637" containerName="proxy-httpd" Nov 23 16:25:24 crc kubenswrapper[5050]: E1123 16:25:24.690422 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f65b863c-4b1b-4657-9afb-f8bfabff1637" containerName="sg-core" Nov 23 16:25:24 crc kubenswrapper[5050]: I1123 16:25:24.690429 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="f65b863c-4b1b-4657-9afb-f8bfabff1637" containerName="sg-core" Nov 23 16:25:24 crc kubenswrapper[5050]: E1123 16:25:24.690590 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f65b863c-4b1b-4657-9afb-f8bfabff1637" containerName="ceilometer-central-agent" Nov 23 16:25:24 crc kubenswrapper[5050]: I1123 16:25:24.690601 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="f65b863c-4b1b-4657-9afb-f8bfabff1637" containerName="ceilometer-central-agent" Nov 23 16:25:24 crc kubenswrapper[5050]: I1123 16:25:24.690856 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="f65b863c-4b1b-4657-9afb-f8bfabff1637" containerName="ceilometer-notification-agent" Nov 23 16:25:24 crc kubenswrapper[5050]: I1123 16:25:24.690877 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="f65b863c-4b1b-4657-9afb-f8bfabff1637" containerName="sg-core" Nov 23 16:25:24 crc kubenswrapper[5050]: I1123 16:25:24.690895 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="f65b863c-4b1b-4657-9afb-f8bfabff1637" containerName="ceilometer-central-agent" Nov 23 16:25:24 crc kubenswrapper[5050]: I1123 16:25:24.690911 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="f65b863c-4b1b-4657-9afb-f8bfabff1637" containerName="proxy-httpd" Nov 23 16:25:24 crc kubenswrapper[5050]: I1123 16:25:24.696498 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 23 16:25:24 crc kubenswrapper[5050]: I1123 16:25:24.698718 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 23 16:25:24 crc kubenswrapper[5050]: I1123 16:25:24.698894 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 23 16:25:24 crc kubenswrapper[5050]: I1123 16:25:24.704122 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 23 16:25:24 crc kubenswrapper[5050]: I1123 16:25:24.711574 5050 scope.go:117] "RemoveContainer" containerID="88aae75998f6ad1d2ad9cbfa458d98bef59930a987ed01ddce9935479644b6f6" Nov 23 16:25:24 crc kubenswrapper[5050]: I1123 16:25:24.732135 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7e03c7f2-9820-4ee9-888a-0f83da39a907-config-data\") pod \"ceilometer-0\" (UID: \"7e03c7f2-9820-4ee9-888a-0f83da39a907\") " pod="openstack/ceilometer-0" Nov 23 16:25:24 crc kubenswrapper[5050]: I1123 16:25:24.732184 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7e03c7f2-9820-4ee9-888a-0f83da39a907-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"7e03c7f2-9820-4ee9-888a-0f83da39a907\") " pod="openstack/ceilometer-0" Nov 23 16:25:24 crc kubenswrapper[5050]: I1123 16:25:24.732243 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e03c7f2-9820-4ee9-888a-0f83da39a907-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"7e03c7f2-9820-4ee9-888a-0f83da39a907\") " pod="openstack/ceilometer-0" Nov 23 16:25:24 crc kubenswrapper[5050]: I1123 16:25:24.732681 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7e03c7f2-9820-4ee9-888a-0f83da39a907-run-httpd\") pod \"ceilometer-0\" (UID: \"7e03c7f2-9820-4ee9-888a-0f83da39a907\") " pod="openstack/ceilometer-0" Nov 23 16:25:24 crc kubenswrapper[5050]: I1123 16:25:24.732762 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tw56z\" (UniqueName: \"kubernetes.io/projected/7e03c7f2-9820-4ee9-888a-0f83da39a907-kube-api-access-tw56z\") pod \"ceilometer-0\" (UID: \"7e03c7f2-9820-4ee9-888a-0f83da39a907\") " pod="openstack/ceilometer-0" Nov 23 16:25:24 crc kubenswrapper[5050]: I1123 16:25:24.732835 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7e03c7f2-9820-4ee9-888a-0f83da39a907-scripts\") pod \"ceilometer-0\" (UID: \"7e03c7f2-9820-4ee9-888a-0f83da39a907\") " pod="openstack/ceilometer-0" Nov 23 16:25:24 crc kubenswrapper[5050]: I1123 16:25:24.733203 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7e03c7f2-9820-4ee9-888a-0f83da39a907-log-httpd\") pod \"ceilometer-0\" (UID: \"7e03c7f2-9820-4ee9-888a-0f83da39a907\") " pod="openstack/ceilometer-0" Nov 23 16:25:24 crc kubenswrapper[5050]: I1123 16:25:24.751711 5050 scope.go:117] "RemoveContainer" containerID="9b4d9e477e1fa2b130dd1d3752cb18379373606c990154b082f523db8fd2b09b" Nov 23 16:25:24 crc kubenswrapper[5050]: I1123 
16:25:24.776931 5050 scope.go:117] "RemoveContainer" containerID="bfa9bfb545d77f9650f7b75d6cae08b016ed442a9c9edb0e3627f3e1235a79ba" Nov 23 16:25:24 crc kubenswrapper[5050]: E1123 16:25:24.777314 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bfa9bfb545d77f9650f7b75d6cae08b016ed442a9c9edb0e3627f3e1235a79ba\": container with ID starting with bfa9bfb545d77f9650f7b75d6cae08b016ed442a9c9edb0e3627f3e1235a79ba not found: ID does not exist" containerID="bfa9bfb545d77f9650f7b75d6cae08b016ed442a9c9edb0e3627f3e1235a79ba" Nov 23 16:25:24 crc kubenswrapper[5050]: I1123 16:25:24.777346 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bfa9bfb545d77f9650f7b75d6cae08b016ed442a9c9edb0e3627f3e1235a79ba"} err="failed to get container status \"bfa9bfb545d77f9650f7b75d6cae08b016ed442a9c9edb0e3627f3e1235a79ba\": rpc error: code = NotFound desc = could not find container \"bfa9bfb545d77f9650f7b75d6cae08b016ed442a9c9edb0e3627f3e1235a79ba\": container with ID starting with bfa9bfb545d77f9650f7b75d6cae08b016ed442a9c9edb0e3627f3e1235a79ba not found: ID does not exist" Nov 23 16:25:24 crc kubenswrapper[5050]: I1123 16:25:24.777369 5050 scope.go:117] "RemoveContainer" containerID="b4807d27a8fdc167daa67e3e8b4dd8a0468009c404c8447a8699245c64819edc" Nov 23 16:25:24 crc kubenswrapper[5050]: E1123 16:25:24.777846 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b4807d27a8fdc167daa67e3e8b4dd8a0468009c404c8447a8699245c64819edc\": container with ID starting with b4807d27a8fdc167daa67e3e8b4dd8a0468009c404c8447a8699245c64819edc not found: ID does not exist" containerID="b4807d27a8fdc167daa67e3e8b4dd8a0468009c404c8447a8699245c64819edc" Nov 23 16:25:24 crc kubenswrapper[5050]: I1123 16:25:24.777898 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b4807d27a8fdc167daa67e3e8b4dd8a0468009c404c8447a8699245c64819edc"} err="failed to get container status \"b4807d27a8fdc167daa67e3e8b4dd8a0468009c404c8447a8699245c64819edc\": rpc error: code = NotFound desc = could not find container \"b4807d27a8fdc167daa67e3e8b4dd8a0468009c404c8447a8699245c64819edc\": container with ID starting with b4807d27a8fdc167daa67e3e8b4dd8a0468009c404c8447a8699245c64819edc not found: ID does not exist" Nov 23 16:25:24 crc kubenswrapper[5050]: I1123 16:25:24.777930 5050 scope.go:117] "RemoveContainer" containerID="88aae75998f6ad1d2ad9cbfa458d98bef59930a987ed01ddce9935479644b6f6" Nov 23 16:25:24 crc kubenswrapper[5050]: E1123 16:25:24.778388 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"88aae75998f6ad1d2ad9cbfa458d98bef59930a987ed01ddce9935479644b6f6\": container with ID starting with 88aae75998f6ad1d2ad9cbfa458d98bef59930a987ed01ddce9935479644b6f6 not found: ID does not exist" containerID="88aae75998f6ad1d2ad9cbfa458d98bef59930a987ed01ddce9935479644b6f6" Nov 23 16:25:24 crc kubenswrapper[5050]: I1123 16:25:24.778419 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"88aae75998f6ad1d2ad9cbfa458d98bef59930a987ed01ddce9935479644b6f6"} err="failed to get container status \"88aae75998f6ad1d2ad9cbfa458d98bef59930a987ed01ddce9935479644b6f6\": rpc error: code = NotFound desc = could not find container \"88aae75998f6ad1d2ad9cbfa458d98bef59930a987ed01ddce9935479644b6f6\": container with ID 
starting with 88aae75998f6ad1d2ad9cbfa458d98bef59930a987ed01ddce9935479644b6f6 not found: ID does not exist" Nov 23 16:25:24 crc kubenswrapper[5050]: I1123 16:25:24.778465 5050 scope.go:117] "RemoveContainer" containerID="9b4d9e477e1fa2b130dd1d3752cb18379373606c990154b082f523db8fd2b09b" Nov 23 16:25:24 crc kubenswrapper[5050]: E1123 16:25:24.778816 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9b4d9e477e1fa2b130dd1d3752cb18379373606c990154b082f523db8fd2b09b\": container with ID starting with 9b4d9e477e1fa2b130dd1d3752cb18379373606c990154b082f523db8fd2b09b not found: ID does not exist" containerID="9b4d9e477e1fa2b130dd1d3752cb18379373606c990154b082f523db8fd2b09b" Nov 23 16:25:24 crc kubenswrapper[5050]: I1123 16:25:24.778844 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9b4d9e477e1fa2b130dd1d3752cb18379373606c990154b082f523db8fd2b09b"} err="failed to get container status \"9b4d9e477e1fa2b130dd1d3752cb18379373606c990154b082f523db8fd2b09b\": rpc error: code = NotFound desc = could not find container \"9b4d9e477e1fa2b130dd1d3752cb18379373606c990154b082f523db8fd2b09b\": container with ID starting with 9b4d9e477e1fa2b130dd1d3752cb18379373606c990154b082f523db8fd2b09b not found: ID does not exist" Nov 23 16:25:24 crc kubenswrapper[5050]: I1123 16:25:24.778861 5050 scope.go:117] "RemoveContainer" containerID="bfa9bfb545d77f9650f7b75d6cae08b016ed442a9c9edb0e3627f3e1235a79ba" Nov 23 16:25:24 crc kubenswrapper[5050]: I1123 16:25:24.779234 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bfa9bfb545d77f9650f7b75d6cae08b016ed442a9c9edb0e3627f3e1235a79ba"} err="failed to get container status \"bfa9bfb545d77f9650f7b75d6cae08b016ed442a9c9edb0e3627f3e1235a79ba\": rpc error: code = NotFound desc = could not find container \"bfa9bfb545d77f9650f7b75d6cae08b016ed442a9c9edb0e3627f3e1235a79ba\": container with ID starting with bfa9bfb545d77f9650f7b75d6cae08b016ed442a9c9edb0e3627f3e1235a79ba not found: ID does not exist" Nov 23 16:25:24 crc kubenswrapper[5050]: I1123 16:25:24.779265 5050 scope.go:117] "RemoveContainer" containerID="b4807d27a8fdc167daa67e3e8b4dd8a0468009c404c8447a8699245c64819edc" Nov 23 16:25:24 crc kubenswrapper[5050]: I1123 16:25:24.779768 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b4807d27a8fdc167daa67e3e8b4dd8a0468009c404c8447a8699245c64819edc"} err="failed to get container status \"b4807d27a8fdc167daa67e3e8b4dd8a0468009c404c8447a8699245c64819edc\": rpc error: code = NotFound desc = could not find container \"b4807d27a8fdc167daa67e3e8b4dd8a0468009c404c8447a8699245c64819edc\": container with ID starting with b4807d27a8fdc167daa67e3e8b4dd8a0468009c404c8447a8699245c64819edc not found: ID does not exist" Nov 23 16:25:24 crc kubenswrapper[5050]: I1123 16:25:24.779794 5050 scope.go:117] "RemoveContainer" containerID="88aae75998f6ad1d2ad9cbfa458d98bef59930a987ed01ddce9935479644b6f6" Nov 23 16:25:24 crc kubenswrapper[5050]: I1123 16:25:24.780261 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"88aae75998f6ad1d2ad9cbfa458d98bef59930a987ed01ddce9935479644b6f6"} err="failed to get container status \"88aae75998f6ad1d2ad9cbfa458d98bef59930a987ed01ddce9935479644b6f6\": rpc error: code = NotFound desc = could not find container \"88aae75998f6ad1d2ad9cbfa458d98bef59930a987ed01ddce9935479644b6f6\": container with ID 
starting with 88aae75998f6ad1d2ad9cbfa458d98bef59930a987ed01ddce9935479644b6f6 not found: ID does not exist" Nov 23 16:25:24 crc kubenswrapper[5050]: I1123 16:25:24.780337 5050 scope.go:117] "RemoveContainer" containerID="9b4d9e477e1fa2b130dd1d3752cb18379373606c990154b082f523db8fd2b09b" Nov 23 16:25:24 crc kubenswrapper[5050]: I1123 16:25:24.781052 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9b4d9e477e1fa2b130dd1d3752cb18379373606c990154b082f523db8fd2b09b"} err="failed to get container status \"9b4d9e477e1fa2b130dd1d3752cb18379373606c990154b082f523db8fd2b09b\": rpc error: code = NotFound desc = could not find container \"9b4d9e477e1fa2b130dd1d3752cb18379373606c990154b082f523db8fd2b09b\": container with ID starting with 9b4d9e477e1fa2b130dd1d3752cb18379373606c990154b082f523db8fd2b09b not found: ID does not exist" Nov 23 16:25:24 crc kubenswrapper[5050]: I1123 16:25:24.836458 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7e03c7f2-9820-4ee9-888a-0f83da39a907-config-data\") pod \"ceilometer-0\" (UID: \"7e03c7f2-9820-4ee9-888a-0f83da39a907\") " pod="openstack/ceilometer-0" Nov 23 16:25:24 crc kubenswrapper[5050]: I1123 16:25:24.836518 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7e03c7f2-9820-4ee9-888a-0f83da39a907-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"7e03c7f2-9820-4ee9-888a-0f83da39a907\") " pod="openstack/ceilometer-0" Nov 23 16:25:24 crc kubenswrapper[5050]: I1123 16:25:24.836641 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e03c7f2-9820-4ee9-888a-0f83da39a907-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"7e03c7f2-9820-4ee9-888a-0f83da39a907\") " pod="openstack/ceilometer-0" Nov 23 16:25:24 crc kubenswrapper[5050]: I1123 16:25:24.836862 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7e03c7f2-9820-4ee9-888a-0f83da39a907-run-httpd\") pod \"ceilometer-0\" (UID: \"7e03c7f2-9820-4ee9-888a-0f83da39a907\") " pod="openstack/ceilometer-0" Nov 23 16:25:24 crc kubenswrapper[5050]: I1123 16:25:24.836908 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tw56z\" (UniqueName: \"kubernetes.io/projected/7e03c7f2-9820-4ee9-888a-0f83da39a907-kube-api-access-tw56z\") pod \"ceilometer-0\" (UID: \"7e03c7f2-9820-4ee9-888a-0f83da39a907\") " pod="openstack/ceilometer-0" Nov 23 16:25:24 crc kubenswrapper[5050]: I1123 16:25:24.836958 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7e03c7f2-9820-4ee9-888a-0f83da39a907-scripts\") pod \"ceilometer-0\" (UID: \"7e03c7f2-9820-4ee9-888a-0f83da39a907\") " pod="openstack/ceilometer-0" Nov 23 16:25:24 crc kubenswrapper[5050]: I1123 16:25:24.837062 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7e03c7f2-9820-4ee9-888a-0f83da39a907-log-httpd\") pod \"ceilometer-0\" (UID: \"7e03c7f2-9820-4ee9-888a-0f83da39a907\") " pod="openstack/ceilometer-0" Nov 23 16:25:24 crc kubenswrapper[5050]: I1123 16:25:24.838003 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: 
\"kubernetes.io/empty-dir/7e03c7f2-9820-4ee9-888a-0f83da39a907-log-httpd\") pod \"ceilometer-0\" (UID: \"7e03c7f2-9820-4ee9-888a-0f83da39a907\") " pod="openstack/ceilometer-0" Nov 23 16:25:24 crc kubenswrapper[5050]: I1123 16:25:24.838413 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7e03c7f2-9820-4ee9-888a-0f83da39a907-run-httpd\") pod \"ceilometer-0\" (UID: \"7e03c7f2-9820-4ee9-888a-0f83da39a907\") " pod="openstack/ceilometer-0" Nov 23 16:25:24 crc kubenswrapper[5050]: I1123 16:25:24.842703 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7e03c7f2-9820-4ee9-888a-0f83da39a907-config-data\") pod \"ceilometer-0\" (UID: \"7e03c7f2-9820-4ee9-888a-0f83da39a907\") " pod="openstack/ceilometer-0" Nov 23 16:25:24 crc kubenswrapper[5050]: I1123 16:25:24.849215 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7e03c7f2-9820-4ee9-888a-0f83da39a907-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"7e03c7f2-9820-4ee9-888a-0f83da39a907\") " pod="openstack/ceilometer-0" Nov 23 16:25:24 crc kubenswrapper[5050]: I1123 16:25:24.863067 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7e03c7f2-9820-4ee9-888a-0f83da39a907-scripts\") pod \"ceilometer-0\" (UID: \"7e03c7f2-9820-4ee9-888a-0f83da39a907\") " pod="openstack/ceilometer-0" Nov 23 16:25:24 crc kubenswrapper[5050]: I1123 16:25:24.863906 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e03c7f2-9820-4ee9-888a-0f83da39a907-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"7e03c7f2-9820-4ee9-888a-0f83da39a907\") " pod="openstack/ceilometer-0" Nov 23 16:25:24 crc kubenswrapper[5050]: I1123 16:25:24.864539 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tw56z\" (UniqueName: \"kubernetes.io/projected/7e03c7f2-9820-4ee9-888a-0f83da39a907-kube-api-access-tw56z\") pod \"ceilometer-0\" (UID: \"7e03c7f2-9820-4ee9-888a-0f83da39a907\") " pod="openstack/ceilometer-0" Nov 23 16:25:25 crc kubenswrapper[5050]: I1123 16:25:25.018061 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 23 16:25:25 crc kubenswrapper[5050]: I1123 16:25:25.574338 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f65b863c-4b1b-4657-9afb-f8bfabff1637" path="/var/lib/kubelet/pods/f65b863c-4b1b-4657-9afb-f8bfabff1637/volumes" Nov 23 16:25:25 crc kubenswrapper[5050]: I1123 16:25:25.630047 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"5e79d4b3-0fc0-4521-a8a2-f3416c3f4039","Type":"ContainerStarted","Data":"1a22e3cb7593c76509c2fcdaf533e0066ce5361c297282854e96e277eaf0f488"} Nov 23 16:25:25 crc kubenswrapper[5050]: I1123 16:25:25.661993 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 23 16:25:25 crc kubenswrapper[5050]: W1123 16:25:25.669574 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7e03c7f2_9820_4ee9_888a_0f83da39a907.slice/crio-8821bb8136194be45e482920c0d23d73a514925eec4e65e81551a9cc1185422d WatchSource:0}: Error finding container 8821bb8136194be45e482920c0d23d73a514925eec4e65e81551a9cc1185422d: Status 404 returned error can't find the container with id 8821bb8136194be45e482920c0d23d73a514925eec4e65e81551a9cc1185422d Nov 23 16:25:26 crc kubenswrapper[5050]: I1123 16:25:26.645430 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7e03c7f2-9820-4ee9-888a-0f83da39a907","Type":"ContainerStarted","Data":"8821bb8136194be45e482920c0d23d73a514925eec4e65e81551a9cc1185422d"} Nov 23 16:25:27 crc kubenswrapper[5050]: I1123 16:25:27.659807 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7e03c7f2-9820-4ee9-888a-0f83da39a907","Type":"ContainerStarted","Data":"3bf4dc0c80c1225c27f61741c11a77dea9e56c2f54c97205ed331bece7c9479e"} Nov 23 16:25:27 crc kubenswrapper[5050]: I1123 16:25:27.660718 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7e03c7f2-9820-4ee9-888a-0f83da39a907","Type":"ContainerStarted","Data":"183f93eecf7b7a18879f441f6a6844c071c241cb3c28e0948a4aa6874bd89d27"} Nov 23 16:25:27 crc kubenswrapper[5050]: I1123 16:25:27.663643 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"5e79d4b3-0fc0-4521-a8a2-f3416c3f4039","Type":"ContainerStarted","Data":"14494e7c5cf0fb814c899cddc43cfa6e66ad599bab69ca00326d7facf5704b41"} Nov 23 16:25:28 crc kubenswrapper[5050]: I1123 16:25:28.677059 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7e03c7f2-9820-4ee9-888a-0f83da39a907","Type":"ContainerStarted","Data":"0b865f85483f63792762c864df26ec7b0dde4d779255e7bdb3013351a922a7e4"} Nov 23 16:25:28 crc kubenswrapper[5050]: I1123 16:25:28.681866 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"5e79d4b3-0fc0-4521-a8a2-f3416c3f4039","Type":"ContainerStarted","Data":"1fbe90f073aad8ce4e5057831dee01a4f7afd4a7fb31969a20c629c46e64c7e1"} Nov 23 16:25:28 crc kubenswrapper[5050]: I1123 16:25:28.783139 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/aodh-0" podStartSLOduration=2.735437888 podStartE2EDuration="7.783100661s" podCreationTimestamp="2025-11-23 16:25:21 +0000 UTC" firstStartedPulling="2025-11-23 16:25:22.922865739 +0000 UTC m=+6218.089862244" lastFinishedPulling="2025-11-23 16:25:27.970528532 +0000 UTC m=+6223.137525017" observedRunningTime="2025-11-23 
16:25:28.707958701 +0000 UTC m=+6223.874955186" watchObservedRunningTime="2025-11-23 16:25:28.783100661 +0000 UTC m=+6223.950097146" Nov 23 16:25:29 crc kubenswrapper[5050]: I1123 16:25:29.223872 5050 patch_prober.go:28] interesting pod/machine-config-daemon-hlrlq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 23 16:25:29 crc kubenswrapper[5050]: I1123 16:25:29.223923 5050 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 23 16:25:30 crc kubenswrapper[5050]: I1123 16:25:30.707882 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7e03c7f2-9820-4ee9-888a-0f83da39a907","Type":"ContainerStarted","Data":"29b113b82fe51bb2f0ee6472a8d5fc6fbda628ae49972afce4d024951075fcca"} Nov 23 16:25:30 crc kubenswrapper[5050]: I1123 16:25:30.708751 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 23 16:25:30 crc kubenswrapper[5050]: I1123 16:25:30.740043 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.808018333 podStartE2EDuration="6.740018638s" podCreationTimestamp="2025-11-23 16:25:24 +0000 UTC" firstStartedPulling="2025-11-23 16:25:25.675281273 +0000 UTC m=+6220.842277758" lastFinishedPulling="2025-11-23 16:25:29.607281588 +0000 UTC m=+6224.774278063" observedRunningTime="2025-11-23 16:25:30.731916189 +0000 UTC m=+6225.898912684" watchObservedRunningTime="2025-11-23 16:25:30.740018638 +0000 UTC m=+6225.907015123" Nov 23 16:25:31 crc kubenswrapper[5050]: I1123 16:25:31.037057 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-sync-nvm9x"] Nov 23 16:25:31 crc kubenswrapper[5050]: I1123 16:25:31.066015 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-sync-nvm9x"] Nov 23 16:25:31 crc kubenswrapper[5050]: I1123 16:25:31.580365 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f35435e8-7b27-4b47-83e1-cb469f7bd7b0" path="/var/lib/kubelet/pods/f35435e8-7b27-4b47-83e1-cb469f7bd7b0/volumes" Nov 23 16:25:34 crc kubenswrapper[5050]: I1123 16:25:34.602713 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/manila-db-create-67qdh"] Nov 23 16:25:34 crc kubenswrapper[5050]: I1123 16:25:34.605095 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-db-create-67qdh" Nov 23 16:25:34 crc kubenswrapper[5050]: I1123 16:25:34.620773 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-db-create-67qdh"] Nov 23 16:25:34 crc kubenswrapper[5050]: I1123 16:25:34.620948 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/dbfbd4d0-c340-4fdc-ac31-152b141957eb-operator-scripts\") pod \"manila-db-create-67qdh\" (UID: \"dbfbd4d0-c340-4fdc-ac31-152b141957eb\") " pod="openstack/manila-db-create-67qdh" Nov 23 16:25:34 crc kubenswrapper[5050]: I1123 16:25:34.621308 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vx47m\" (UniqueName: \"kubernetes.io/projected/dbfbd4d0-c340-4fdc-ac31-152b141957eb-kube-api-access-vx47m\") pod \"manila-db-create-67qdh\" (UID: \"dbfbd4d0-c340-4fdc-ac31-152b141957eb\") " pod="openstack/manila-db-create-67qdh" Nov 23 16:25:34 crc kubenswrapper[5050]: I1123 16:25:34.704980 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/manila-f8df-account-create-7w2hg"] Nov 23 16:25:34 crc kubenswrapper[5050]: I1123 16:25:34.706485 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-f8df-account-create-7w2hg" Nov 23 16:25:34 crc kubenswrapper[5050]: I1123 16:25:34.710419 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-db-secret" Nov 23 16:25:34 crc kubenswrapper[5050]: I1123 16:25:34.716765 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-f8df-account-create-7w2hg"] Nov 23 16:25:34 crc kubenswrapper[5050]: I1123 16:25:34.725621 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mpsqj\" (UniqueName: \"kubernetes.io/projected/fae4322c-6472-4a04-8bd6-b5703fd6dbd5-kube-api-access-mpsqj\") pod \"manila-f8df-account-create-7w2hg\" (UID: \"fae4322c-6472-4a04-8bd6-b5703fd6dbd5\") " pod="openstack/manila-f8df-account-create-7w2hg" Nov 23 16:25:34 crc kubenswrapper[5050]: I1123 16:25:34.725769 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/dbfbd4d0-c340-4fdc-ac31-152b141957eb-operator-scripts\") pod \"manila-db-create-67qdh\" (UID: \"dbfbd4d0-c340-4fdc-ac31-152b141957eb\") " pod="openstack/manila-db-create-67qdh" Nov 23 16:25:34 crc kubenswrapper[5050]: I1123 16:25:34.726002 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fae4322c-6472-4a04-8bd6-b5703fd6dbd5-operator-scripts\") pod \"manila-f8df-account-create-7w2hg\" (UID: \"fae4322c-6472-4a04-8bd6-b5703fd6dbd5\") " pod="openstack/manila-f8df-account-create-7w2hg" Nov 23 16:25:34 crc kubenswrapper[5050]: I1123 16:25:34.726088 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vx47m\" (UniqueName: \"kubernetes.io/projected/dbfbd4d0-c340-4fdc-ac31-152b141957eb-kube-api-access-vx47m\") pod \"manila-db-create-67qdh\" (UID: \"dbfbd4d0-c340-4fdc-ac31-152b141957eb\") " pod="openstack/manila-db-create-67qdh" Nov 23 16:25:34 crc kubenswrapper[5050]: I1123 16:25:34.726837 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: 
\"kubernetes.io/configmap/dbfbd4d0-c340-4fdc-ac31-152b141957eb-operator-scripts\") pod \"manila-db-create-67qdh\" (UID: \"dbfbd4d0-c340-4fdc-ac31-152b141957eb\") " pod="openstack/manila-db-create-67qdh" Nov 23 16:25:34 crc kubenswrapper[5050]: I1123 16:25:34.757557 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vx47m\" (UniqueName: \"kubernetes.io/projected/dbfbd4d0-c340-4fdc-ac31-152b141957eb-kube-api-access-vx47m\") pod \"manila-db-create-67qdh\" (UID: \"dbfbd4d0-c340-4fdc-ac31-152b141957eb\") " pod="openstack/manila-db-create-67qdh" Nov 23 16:25:34 crc kubenswrapper[5050]: I1123 16:25:34.829636 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mpsqj\" (UniqueName: \"kubernetes.io/projected/fae4322c-6472-4a04-8bd6-b5703fd6dbd5-kube-api-access-mpsqj\") pod \"manila-f8df-account-create-7w2hg\" (UID: \"fae4322c-6472-4a04-8bd6-b5703fd6dbd5\") " pod="openstack/manila-f8df-account-create-7w2hg" Nov 23 16:25:34 crc kubenswrapper[5050]: I1123 16:25:34.830099 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fae4322c-6472-4a04-8bd6-b5703fd6dbd5-operator-scripts\") pod \"manila-f8df-account-create-7w2hg\" (UID: \"fae4322c-6472-4a04-8bd6-b5703fd6dbd5\") " pod="openstack/manila-f8df-account-create-7w2hg" Nov 23 16:25:34 crc kubenswrapper[5050]: I1123 16:25:34.831015 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fae4322c-6472-4a04-8bd6-b5703fd6dbd5-operator-scripts\") pod \"manila-f8df-account-create-7w2hg\" (UID: \"fae4322c-6472-4a04-8bd6-b5703fd6dbd5\") " pod="openstack/manila-f8df-account-create-7w2hg" Nov 23 16:25:34 crc kubenswrapper[5050]: I1123 16:25:34.856059 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mpsqj\" (UniqueName: \"kubernetes.io/projected/fae4322c-6472-4a04-8bd6-b5703fd6dbd5-kube-api-access-mpsqj\") pod \"manila-f8df-account-create-7w2hg\" (UID: \"fae4322c-6472-4a04-8bd6-b5703fd6dbd5\") " pod="openstack/manila-f8df-account-create-7w2hg" Nov 23 16:25:34 crc kubenswrapper[5050]: I1123 16:25:34.924424 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-db-create-67qdh" Nov 23 16:25:35 crc kubenswrapper[5050]: I1123 16:25:35.024286 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-f8df-account-create-7w2hg" Nov 23 16:25:35 crc kubenswrapper[5050]: I1123 16:25:35.544487 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-db-create-67qdh"] Nov 23 16:25:35 crc kubenswrapper[5050]: I1123 16:25:35.616362 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-f8df-account-create-7w2hg"] Nov 23 16:25:35 crc kubenswrapper[5050]: I1123 16:25:35.866564 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-f8df-account-create-7w2hg" event={"ID":"fae4322c-6472-4a04-8bd6-b5703fd6dbd5","Type":"ContainerStarted","Data":"36fb8bd57ee3df3add68180b07efb8a6ae66c865d4d6464a1f40896b71cd6050"} Nov 23 16:25:35 crc kubenswrapper[5050]: I1123 16:25:35.869502 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-db-create-67qdh" event={"ID":"dbfbd4d0-c340-4fdc-ac31-152b141957eb","Type":"ContainerStarted","Data":"101e900c816f095f6a234fbc3d346d71c6c7b6d60a1efb9919ddba37f960bac4"} Nov 23 16:25:35 crc kubenswrapper[5050]: I1123 16:25:35.869546 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-db-create-67qdh" event={"ID":"dbfbd4d0-c340-4fdc-ac31-152b141957eb","Type":"ContainerStarted","Data":"a29a09088f654dad35cdef0acd4e066244513e2eaf2dc3d19826d685b7696c87"} Nov 23 16:25:35 crc kubenswrapper[5050]: I1123 16:25:35.889768 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/manila-f8df-account-create-7w2hg" podStartSLOduration=1.88974696 podStartE2EDuration="1.88974696s" podCreationTimestamp="2025-11-23 16:25:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 16:25:35.88762826 +0000 UTC m=+6231.054624785" watchObservedRunningTime="2025-11-23 16:25:35.88974696 +0000 UTC m=+6231.056743445" Nov 23 16:25:35 crc kubenswrapper[5050]: I1123 16:25:35.914561 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/manila-db-create-67qdh" podStartSLOduration=1.914541029 podStartE2EDuration="1.914541029s" podCreationTimestamp="2025-11-23 16:25:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 16:25:35.904868746 +0000 UTC m=+6231.071865251" watchObservedRunningTime="2025-11-23 16:25:35.914541029 +0000 UTC m=+6231.081537524" Nov 23 16:25:36 crc kubenswrapper[5050]: I1123 16:25:36.888177 5050 generic.go:334] "Generic (PLEG): container finished" podID="fae4322c-6472-4a04-8bd6-b5703fd6dbd5" containerID="a9b1bb09aa85270bce6ca938ebecc057d46e5ff528a30d43750354c836390916" exitCode=0 Nov 23 16:25:36 crc kubenswrapper[5050]: I1123 16:25:36.889200 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-f8df-account-create-7w2hg" event={"ID":"fae4322c-6472-4a04-8bd6-b5703fd6dbd5","Type":"ContainerDied","Data":"a9b1bb09aa85270bce6ca938ebecc057d46e5ff528a30d43750354c836390916"} Nov 23 16:25:36 crc kubenswrapper[5050]: I1123 16:25:36.893309 5050 generic.go:334] "Generic (PLEG): container finished" podID="dbfbd4d0-c340-4fdc-ac31-152b141957eb" containerID="101e900c816f095f6a234fbc3d346d71c6c7b6d60a1efb9919ddba37f960bac4" exitCode=0 Nov 23 16:25:36 crc kubenswrapper[5050]: I1123 16:25:36.893362 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-db-create-67qdh" 
event={"ID":"dbfbd4d0-c340-4fdc-ac31-152b141957eb","Type":"ContainerDied","Data":"101e900c816f095f6a234fbc3d346d71c6c7b6d60a1efb9919ddba37f960bac4"} Nov 23 16:25:38 crc kubenswrapper[5050]: I1123 16:25:38.519755 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/manila-f8df-account-create-7w2hg" Nov 23 16:25:38 crc kubenswrapper[5050]: I1123 16:25:38.522360 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/manila-db-create-67qdh" Nov 23 16:25:38 crc kubenswrapper[5050]: I1123 16:25:38.630259 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fae4322c-6472-4a04-8bd6-b5703fd6dbd5-operator-scripts\") pod \"fae4322c-6472-4a04-8bd6-b5703fd6dbd5\" (UID: \"fae4322c-6472-4a04-8bd6-b5703fd6dbd5\") " Nov 23 16:25:38 crc kubenswrapper[5050]: I1123 16:25:38.630539 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mpsqj\" (UniqueName: \"kubernetes.io/projected/fae4322c-6472-4a04-8bd6-b5703fd6dbd5-kube-api-access-mpsqj\") pod \"fae4322c-6472-4a04-8bd6-b5703fd6dbd5\" (UID: \"fae4322c-6472-4a04-8bd6-b5703fd6dbd5\") " Nov 23 16:25:38 crc kubenswrapper[5050]: I1123 16:25:38.630655 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/dbfbd4d0-c340-4fdc-ac31-152b141957eb-operator-scripts\") pod \"dbfbd4d0-c340-4fdc-ac31-152b141957eb\" (UID: \"dbfbd4d0-c340-4fdc-ac31-152b141957eb\") " Nov 23 16:25:38 crc kubenswrapper[5050]: I1123 16:25:38.630869 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vx47m\" (UniqueName: \"kubernetes.io/projected/dbfbd4d0-c340-4fdc-ac31-152b141957eb-kube-api-access-vx47m\") pod \"dbfbd4d0-c340-4fdc-ac31-152b141957eb\" (UID: \"dbfbd4d0-c340-4fdc-ac31-152b141957eb\") " Nov 23 16:25:38 crc kubenswrapper[5050]: I1123 16:25:38.631833 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dbfbd4d0-c340-4fdc-ac31-152b141957eb-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "dbfbd4d0-c340-4fdc-ac31-152b141957eb" (UID: "dbfbd4d0-c340-4fdc-ac31-152b141957eb"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 16:25:38 crc kubenswrapper[5050]: I1123 16:25:38.632592 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fae4322c-6472-4a04-8bd6-b5703fd6dbd5-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "fae4322c-6472-4a04-8bd6-b5703fd6dbd5" (UID: "fae4322c-6472-4a04-8bd6-b5703fd6dbd5"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 16:25:38 crc kubenswrapper[5050]: I1123 16:25:38.632914 5050 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/dbfbd4d0-c340-4fdc-ac31-152b141957eb-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 23 16:25:38 crc kubenswrapper[5050]: I1123 16:25:38.644658 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dbfbd4d0-c340-4fdc-ac31-152b141957eb-kube-api-access-vx47m" (OuterVolumeSpecName: "kube-api-access-vx47m") pod "dbfbd4d0-c340-4fdc-ac31-152b141957eb" (UID: "dbfbd4d0-c340-4fdc-ac31-152b141957eb"). InnerVolumeSpecName "kube-api-access-vx47m". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 16:25:38 crc kubenswrapper[5050]: I1123 16:25:38.644778 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fae4322c-6472-4a04-8bd6-b5703fd6dbd5-kube-api-access-mpsqj" (OuterVolumeSpecName: "kube-api-access-mpsqj") pod "fae4322c-6472-4a04-8bd6-b5703fd6dbd5" (UID: "fae4322c-6472-4a04-8bd6-b5703fd6dbd5"). InnerVolumeSpecName "kube-api-access-mpsqj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 16:25:38 crc kubenswrapper[5050]: I1123 16:25:38.736997 5050 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fae4322c-6472-4a04-8bd6-b5703fd6dbd5-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 23 16:25:38 crc kubenswrapper[5050]: I1123 16:25:38.737332 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mpsqj\" (UniqueName: \"kubernetes.io/projected/fae4322c-6472-4a04-8bd6-b5703fd6dbd5-kube-api-access-mpsqj\") on node \"crc\" DevicePath \"\"" Nov 23 16:25:38 crc kubenswrapper[5050]: I1123 16:25:38.737394 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vx47m\" (UniqueName: \"kubernetes.io/projected/dbfbd4d0-c340-4fdc-ac31-152b141957eb-kube-api-access-vx47m\") on node \"crc\" DevicePath \"\"" Nov 23 16:25:38 crc kubenswrapper[5050]: I1123 16:25:38.953855 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-db-create-67qdh" event={"ID":"dbfbd4d0-c340-4fdc-ac31-152b141957eb","Type":"ContainerDied","Data":"a29a09088f654dad35cdef0acd4e066244513e2eaf2dc3d19826d685b7696c87"} Nov 23 16:25:38 crc kubenswrapper[5050]: I1123 16:25:38.953923 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a29a09088f654dad35cdef0acd4e066244513e2eaf2dc3d19826d685b7696c87" Nov 23 16:25:38 crc kubenswrapper[5050]: I1123 16:25:38.953898 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/manila-db-create-67qdh" Nov 23 16:25:38 crc kubenswrapper[5050]: I1123 16:25:38.956974 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-f8df-account-create-7w2hg" event={"ID":"fae4322c-6472-4a04-8bd6-b5703fd6dbd5","Type":"ContainerDied","Data":"36fb8bd57ee3df3add68180b07efb8a6ae66c865d4d6464a1f40896b71cd6050"} Nov 23 16:25:38 crc kubenswrapper[5050]: I1123 16:25:38.957026 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="36fb8bd57ee3df3add68180b07efb8a6ae66c865d4d6464a1f40896b71cd6050" Nov 23 16:25:38 crc kubenswrapper[5050]: I1123 16:25:38.957104 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-f8df-account-create-7w2hg" Nov 23 16:25:40 crc kubenswrapper[5050]: I1123 16:25:40.039654 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/manila-db-sync-jjgfp"] Nov 23 16:25:40 crc kubenswrapper[5050]: E1123 16:25:40.050432 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fae4322c-6472-4a04-8bd6-b5703fd6dbd5" containerName="mariadb-account-create" Nov 23 16:25:40 crc kubenswrapper[5050]: I1123 16:25:40.050515 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="fae4322c-6472-4a04-8bd6-b5703fd6dbd5" containerName="mariadb-account-create" Nov 23 16:25:40 crc kubenswrapper[5050]: E1123 16:25:40.050553 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dbfbd4d0-c340-4fdc-ac31-152b141957eb" containerName="mariadb-database-create" Nov 23 16:25:40 crc kubenswrapper[5050]: I1123 16:25:40.050571 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="dbfbd4d0-c340-4fdc-ac31-152b141957eb" containerName="mariadb-database-create" Nov 23 16:25:40 crc kubenswrapper[5050]: I1123 16:25:40.051076 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="dbfbd4d0-c340-4fdc-ac31-152b141957eb" containerName="mariadb-database-create" Nov 23 16:25:40 crc kubenswrapper[5050]: I1123 16:25:40.051118 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="fae4322c-6472-4a04-8bd6-b5703fd6dbd5" containerName="mariadb-account-create" Nov 23 16:25:40 crc kubenswrapper[5050]: I1123 16:25:40.052736 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-db-sync-jjgfp" Nov 23 16:25:40 crc kubenswrapper[5050]: I1123 16:25:40.059137 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-manila-dockercfg-b6t67" Nov 23 16:25:40 crc kubenswrapper[5050]: I1123 16:25:40.059479 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-config-data" Nov 23 16:25:40 crc kubenswrapper[5050]: I1123 16:25:40.071602 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"job-config-data\" (UniqueName: \"kubernetes.io/secret/55f77b8e-9262-4c72-a63b-7a71959fa156-job-config-data\") pod \"manila-db-sync-jjgfp\" (UID: \"55f77b8e-9262-4c72-a63b-7a71959fa156\") " pod="openstack/manila-db-sync-jjgfp" Nov 23 16:25:40 crc kubenswrapper[5050]: I1123 16:25:40.071718 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/55f77b8e-9262-4c72-a63b-7a71959fa156-config-data\") pod \"manila-db-sync-jjgfp\" (UID: \"55f77b8e-9262-4c72-a63b-7a71959fa156\") " pod="openstack/manila-db-sync-jjgfp" Nov 23 16:25:40 crc kubenswrapper[5050]: I1123 16:25:40.071747 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/55f77b8e-9262-4c72-a63b-7a71959fa156-combined-ca-bundle\") pod \"manila-db-sync-jjgfp\" (UID: \"55f77b8e-9262-4c72-a63b-7a71959fa156\") " pod="openstack/manila-db-sync-jjgfp" Nov 23 16:25:40 crc kubenswrapper[5050]: I1123 16:25:40.071781 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2sq7j\" (UniqueName: \"kubernetes.io/projected/55f77b8e-9262-4c72-a63b-7a71959fa156-kube-api-access-2sq7j\") pod \"manila-db-sync-jjgfp\" (UID: \"55f77b8e-9262-4c72-a63b-7a71959fa156\") " 
pod="openstack/manila-db-sync-jjgfp" Nov 23 16:25:40 crc kubenswrapper[5050]: I1123 16:25:40.072000 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-db-sync-jjgfp"] Nov 23 16:25:40 crc kubenswrapper[5050]: I1123 16:25:40.175014 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"job-config-data\" (UniqueName: \"kubernetes.io/secret/55f77b8e-9262-4c72-a63b-7a71959fa156-job-config-data\") pod \"manila-db-sync-jjgfp\" (UID: \"55f77b8e-9262-4c72-a63b-7a71959fa156\") " pod="openstack/manila-db-sync-jjgfp" Nov 23 16:25:40 crc kubenswrapper[5050]: I1123 16:25:40.175190 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/55f77b8e-9262-4c72-a63b-7a71959fa156-config-data\") pod \"manila-db-sync-jjgfp\" (UID: \"55f77b8e-9262-4c72-a63b-7a71959fa156\") " pod="openstack/manila-db-sync-jjgfp" Nov 23 16:25:40 crc kubenswrapper[5050]: I1123 16:25:40.175233 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/55f77b8e-9262-4c72-a63b-7a71959fa156-combined-ca-bundle\") pod \"manila-db-sync-jjgfp\" (UID: \"55f77b8e-9262-4c72-a63b-7a71959fa156\") " pod="openstack/manila-db-sync-jjgfp" Nov 23 16:25:40 crc kubenswrapper[5050]: I1123 16:25:40.175283 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2sq7j\" (UniqueName: \"kubernetes.io/projected/55f77b8e-9262-4c72-a63b-7a71959fa156-kube-api-access-2sq7j\") pod \"manila-db-sync-jjgfp\" (UID: \"55f77b8e-9262-4c72-a63b-7a71959fa156\") " pod="openstack/manila-db-sync-jjgfp" Nov 23 16:25:40 crc kubenswrapper[5050]: I1123 16:25:40.180317 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"job-config-data\" (UniqueName: \"kubernetes.io/secret/55f77b8e-9262-4c72-a63b-7a71959fa156-job-config-data\") pod \"manila-db-sync-jjgfp\" (UID: \"55f77b8e-9262-4c72-a63b-7a71959fa156\") " pod="openstack/manila-db-sync-jjgfp" Nov 23 16:25:40 crc kubenswrapper[5050]: I1123 16:25:40.181258 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/55f77b8e-9262-4c72-a63b-7a71959fa156-config-data\") pod \"manila-db-sync-jjgfp\" (UID: \"55f77b8e-9262-4c72-a63b-7a71959fa156\") " pod="openstack/manila-db-sync-jjgfp" Nov 23 16:25:40 crc kubenswrapper[5050]: I1123 16:25:40.183579 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/55f77b8e-9262-4c72-a63b-7a71959fa156-combined-ca-bundle\") pod \"manila-db-sync-jjgfp\" (UID: \"55f77b8e-9262-4c72-a63b-7a71959fa156\") " pod="openstack/manila-db-sync-jjgfp" Nov 23 16:25:40 crc kubenswrapper[5050]: I1123 16:25:40.198475 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2sq7j\" (UniqueName: \"kubernetes.io/projected/55f77b8e-9262-4c72-a63b-7a71959fa156-kube-api-access-2sq7j\") pod \"manila-db-sync-jjgfp\" (UID: \"55f77b8e-9262-4c72-a63b-7a71959fa156\") " pod="openstack/manila-db-sync-jjgfp" Nov 23 16:25:40 crc kubenswrapper[5050]: I1123 16:25:40.425793 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-db-sync-jjgfp" Nov 23 16:25:41 crc kubenswrapper[5050]: W1123 16:25:41.134637 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod55f77b8e_9262_4c72_a63b_7a71959fa156.slice/crio-2bf0854c879b55d4b4c6e26c49b40d924d08c7e05f2d41c16c4a107baf6b8899 WatchSource:0}: Error finding container 2bf0854c879b55d4b4c6e26c49b40d924d08c7e05f2d41c16c4a107baf6b8899: Status 404 returned error can't find the container with id 2bf0854c879b55d4b4c6e26c49b40d924d08c7e05f2d41c16c4a107baf6b8899 Nov 23 16:25:41 crc kubenswrapper[5050]: I1123 16:25:41.134844 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-db-sync-jjgfp"] Nov 23 16:25:41 crc kubenswrapper[5050]: I1123 16:25:41.995106 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-db-sync-jjgfp" event={"ID":"55f77b8e-9262-4c72-a63b-7a71959fa156","Type":"ContainerStarted","Data":"2bf0854c879b55d4b4c6e26c49b40d924d08c7e05f2d41c16c4a107baf6b8899"} Nov 23 16:25:47 crc kubenswrapper[5050]: I1123 16:25:47.056278 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-db-sync-jjgfp" event={"ID":"55f77b8e-9262-4c72-a63b-7a71959fa156","Type":"ContainerStarted","Data":"4f3b8970c5734a3ffb958357facf1c25e5b926a2c2faaa5589697f06c1fa3966"} Nov 23 16:25:47 crc kubenswrapper[5050]: I1123 16:25:47.090806 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/manila-db-sync-jjgfp" podStartSLOduration=2.854418563 podStartE2EDuration="7.090781003s" podCreationTimestamp="2025-11-23 16:25:40 +0000 UTC" firstStartedPulling="2025-11-23 16:25:41.137811375 +0000 UTC m=+6236.304807870" lastFinishedPulling="2025-11-23 16:25:45.374173795 +0000 UTC m=+6240.541170310" observedRunningTime="2025-11-23 16:25:47.082199061 +0000 UTC m=+6242.249195546" watchObservedRunningTime="2025-11-23 16:25:47.090781003 +0000 UTC m=+6242.257777478" Nov 23 16:25:49 crc kubenswrapper[5050]: I1123 16:25:49.084142 5050 generic.go:334] "Generic (PLEG): container finished" podID="55f77b8e-9262-4c72-a63b-7a71959fa156" containerID="4f3b8970c5734a3ffb958357facf1c25e5b926a2c2faaa5589697f06c1fa3966" exitCode=0 Nov 23 16:25:49 crc kubenswrapper[5050]: I1123 16:25:49.084266 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-db-sync-jjgfp" event={"ID":"55f77b8e-9262-4c72-a63b-7a71959fa156","Type":"ContainerDied","Data":"4f3b8970c5734a3ffb958357facf1c25e5b926a2c2faaa5589697f06c1fa3966"} Nov 23 16:25:50 crc kubenswrapper[5050]: I1123 16:25:50.678678 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-db-sync-jjgfp" Nov 23 16:25:50 crc kubenswrapper[5050]: I1123 16:25:50.848153 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/55f77b8e-9262-4c72-a63b-7a71959fa156-combined-ca-bundle\") pod \"55f77b8e-9262-4c72-a63b-7a71959fa156\" (UID: \"55f77b8e-9262-4c72-a63b-7a71959fa156\") " Nov 23 16:25:50 crc kubenswrapper[5050]: I1123 16:25:50.848226 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2sq7j\" (UniqueName: \"kubernetes.io/projected/55f77b8e-9262-4c72-a63b-7a71959fa156-kube-api-access-2sq7j\") pod \"55f77b8e-9262-4c72-a63b-7a71959fa156\" (UID: \"55f77b8e-9262-4c72-a63b-7a71959fa156\") " Nov 23 16:25:50 crc kubenswrapper[5050]: I1123 16:25:50.848561 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/55f77b8e-9262-4c72-a63b-7a71959fa156-config-data\") pod \"55f77b8e-9262-4c72-a63b-7a71959fa156\" (UID: \"55f77b8e-9262-4c72-a63b-7a71959fa156\") " Nov 23 16:25:50 crc kubenswrapper[5050]: I1123 16:25:50.848665 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"job-config-data\" (UniqueName: \"kubernetes.io/secret/55f77b8e-9262-4c72-a63b-7a71959fa156-job-config-data\") pod \"55f77b8e-9262-4c72-a63b-7a71959fa156\" (UID: \"55f77b8e-9262-4c72-a63b-7a71959fa156\") " Nov 23 16:25:50 crc kubenswrapper[5050]: I1123 16:25:50.856370 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/55f77b8e-9262-4c72-a63b-7a71959fa156-job-config-data" (OuterVolumeSpecName: "job-config-data") pod "55f77b8e-9262-4c72-a63b-7a71959fa156" (UID: "55f77b8e-9262-4c72-a63b-7a71959fa156"). InnerVolumeSpecName "job-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:25:50 crc kubenswrapper[5050]: I1123 16:25:50.856633 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/55f77b8e-9262-4c72-a63b-7a71959fa156-kube-api-access-2sq7j" (OuterVolumeSpecName: "kube-api-access-2sq7j") pod "55f77b8e-9262-4c72-a63b-7a71959fa156" (UID: "55f77b8e-9262-4c72-a63b-7a71959fa156"). InnerVolumeSpecName "kube-api-access-2sq7j". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 16:25:50 crc kubenswrapper[5050]: I1123 16:25:50.875729 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/55f77b8e-9262-4c72-a63b-7a71959fa156-config-data" (OuterVolumeSpecName: "config-data") pod "55f77b8e-9262-4c72-a63b-7a71959fa156" (UID: "55f77b8e-9262-4c72-a63b-7a71959fa156"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:25:50 crc kubenswrapper[5050]: I1123 16:25:50.908594 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/55f77b8e-9262-4c72-a63b-7a71959fa156-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "55f77b8e-9262-4c72-a63b-7a71959fa156" (UID: "55f77b8e-9262-4c72-a63b-7a71959fa156"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:25:50 crc kubenswrapper[5050]: I1123 16:25:50.952470 5050 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/55f77b8e-9262-4c72-a63b-7a71959fa156-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 16:25:50 crc kubenswrapper[5050]: I1123 16:25:50.952518 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2sq7j\" (UniqueName: \"kubernetes.io/projected/55f77b8e-9262-4c72-a63b-7a71959fa156-kube-api-access-2sq7j\") on node \"crc\" DevicePath \"\"" Nov 23 16:25:50 crc kubenswrapper[5050]: I1123 16:25:50.952538 5050 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/55f77b8e-9262-4c72-a63b-7a71959fa156-config-data\") on node \"crc\" DevicePath \"\"" Nov 23 16:25:50 crc kubenswrapper[5050]: I1123 16:25:50.952550 5050 reconciler_common.go:293] "Volume detached for volume \"job-config-data\" (UniqueName: \"kubernetes.io/secret/55f77b8e-9262-4c72-a63b-7a71959fa156-job-config-data\") on node \"crc\" DevicePath \"\"" Nov 23 16:25:51 crc kubenswrapper[5050]: I1123 16:25:51.108013 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-db-sync-jjgfp" event={"ID":"55f77b8e-9262-4c72-a63b-7a71959fa156","Type":"ContainerDied","Data":"2bf0854c879b55d4b4c6e26c49b40d924d08c7e05f2d41c16c4a107baf6b8899"} Nov 23 16:25:51 crc kubenswrapper[5050]: I1123 16:25:51.108333 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2bf0854c879b55d4b4c6e26c49b40d924d08c7e05f2d41c16c4a107baf6b8899" Nov 23 16:25:51 crc kubenswrapper[5050]: I1123 16:25:51.108176 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/manila-db-sync-jjgfp" Nov 23 16:25:51 crc kubenswrapper[5050]: I1123 16:25:51.637801 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/manila-scheduler-0"] Nov 23 16:25:51 crc kubenswrapper[5050]: E1123 16:25:51.638293 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="55f77b8e-9262-4c72-a63b-7a71959fa156" containerName="manila-db-sync" Nov 23 16:25:51 crc kubenswrapper[5050]: I1123 16:25:51.638313 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="55f77b8e-9262-4c72-a63b-7a71959fa156" containerName="manila-db-sync" Nov 23 16:25:51 crc kubenswrapper[5050]: I1123 16:25:51.638567 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="55f77b8e-9262-4c72-a63b-7a71959fa156" containerName="manila-db-sync" Nov 23 16:25:51 crc kubenswrapper[5050]: I1123 16:25:51.639742 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-scheduler-0" Nov 23 16:25:51 crc kubenswrapper[5050]: I1123 16:25:51.644071 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-scripts" Nov 23 16:25:51 crc kubenswrapper[5050]: I1123 16:25:51.644348 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-config-data" Nov 23 16:25:51 crc kubenswrapper[5050]: I1123 16:25:51.664816 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-scheduler-config-data" Nov 23 16:25:51 crc kubenswrapper[5050]: I1123 16:25:51.665141 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-manila-dockercfg-b6t67" Nov 23 16:25:51 crc kubenswrapper[5050]: I1123 16:25:51.680096 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/manila-share-share1-0"] Nov 23 16:25:51 crc kubenswrapper[5050]: I1123 16:25:51.682688 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-share-share1-0" Nov 23 16:25:51 crc kubenswrapper[5050]: I1123 16:25:51.684022 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/42ae5f1a-5bf5-480e-b387-f45f211beb62-scripts\") pod \"manila-scheduler-0\" (UID: \"42ae5f1a-5bf5-480e-b387-f45f211beb62\") " pod="openstack/manila-scheduler-0" Nov 23 16:25:51 crc kubenswrapper[5050]: I1123 16:25:51.684101 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/42ae5f1a-5bf5-480e-b387-f45f211beb62-combined-ca-bundle\") pod \"manila-scheduler-0\" (UID: \"42ae5f1a-5bf5-480e-b387-f45f211beb62\") " pod="openstack/manila-scheduler-0" Nov 23 16:25:51 crc kubenswrapper[5050]: I1123 16:25:51.684123 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/42ae5f1a-5bf5-480e-b387-f45f211beb62-config-data-custom\") pod \"manila-scheduler-0\" (UID: \"42ae5f1a-5bf5-480e-b387-f45f211beb62\") " pod="openstack/manila-scheduler-0" Nov 23 16:25:51 crc kubenswrapper[5050]: I1123 16:25:51.684248 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/42ae5f1a-5bf5-480e-b387-f45f211beb62-etc-machine-id\") pod \"manila-scheduler-0\" (UID: \"42ae5f1a-5bf5-480e-b387-f45f211beb62\") " pod="openstack/manila-scheduler-0" Nov 23 16:25:51 crc kubenswrapper[5050]: I1123 16:25:51.684346 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/42ae5f1a-5bf5-480e-b387-f45f211beb62-config-data\") pod \"manila-scheduler-0\" (UID: \"42ae5f1a-5bf5-480e-b387-f45f211beb62\") " pod="openstack/manila-scheduler-0" Nov 23 16:25:51 crc kubenswrapper[5050]: I1123 16:25:51.684386 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xl6jn\" (UniqueName: \"kubernetes.io/projected/42ae5f1a-5bf5-480e-b387-f45f211beb62-kube-api-access-xl6jn\") pod \"manila-scheduler-0\" (UID: \"42ae5f1a-5bf5-480e-b387-f45f211beb62\") " pod="openstack/manila-scheduler-0" Nov 23 16:25:51 crc kubenswrapper[5050]: I1123 16:25:51.687702 5050 reflector.go:368] Caches populated for *v1.Secret from 
object-"openstack"/"manila-share-share1-config-data" Nov 23 16:25:51 crc kubenswrapper[5050]: I1123 16:25:51.699565 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-scheduler-0"] Nov 23 16:25:51 crc kubenswrapper[5050]: I1123 16:25:51.710594 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-share-share1-0"] Nov 23 16:25:51 crc kubenswrapper[5050]: I1123 16:25:51.787617 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0a92636c-670e-44d8-ba06-c0ad4276da6d-combined-ca-bundle\") pod \"manila-share-share1-0\" (UID: \"0a92636c-670e-44d8-ba06-c0ad4276da6d\") " pod="openstack/manila-share-share1-0" Nov 23 16:25:51 crc kubenswrapper[5050]: I1123 16:25:51.794611 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/42ae5f1a-5bf5-480e-b387-f45f211beb62-scripts\") pod \"manila-scheduler-0\" (UID: \"42ae5f1a-5bf5-480e-b387-f45f211beb62\") " pod="openstack/manila-scheduler-0" Nov 23 16:25:51 crc kubenswrapper[5050]: I1123 16:25:51.794793 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/42ae5f1a-5bf5-480e-b387-f45f211beb62-combined-ca-bundle\") pod \"manila-scheduler-0\" (UID: \"42ae5f1a-5bf5-480e-b387-f45f211beb62\") " pod="openstack/manila-scheduler-0" Nov 23 16:25:51 crc kubenswrapper[5050]: I1123 16:25:51.794813 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/42ae5f1a-5bf5-480e-b387-f45f211beb62-config-data-custom\") pod \"manila-scheduler-0\" (UID: \"42ae5f1a-5bf5-480e-b387-f45f211beb62\") " pod="openstack/manila-scheduler-0" Nov 23 16:25:51 crc kubenswrapper[5050]: I1123 16:25:51.794844 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/0a92636c-670e-44d8-ba06-c0ad4276da6d-ceph\") pod \"manila-share-share1-0\" (UID: \"0a92636c-670e-44d8-ba06-c0ad4276da6d\") " pod="openstack/manila-share-share1-0" Nov 23 16:25:51 crc kubenswrapper[5050]: I1123 16:25:51.795095 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0a92636c-670e-44d8-ba06-c0ad4276da6d-config-data\") pod \"manila-share-share1-0\" (UID: \"0a92636c-670e-44d8-ba06-c0ad4276da6d\") " pod="openstack/manila-share-share1-0" Nov 23 16:25:51 crc kubenswrapper[5050]: I1123 16:25:51.795180 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pdj2j\" (UniqueName: \"kubernetes.io/projected/0a92636c-670e-44d8-ba06-c0ad4276da6d-kube-api-access-pdj2j\") pod \"manila-share-share1-0\" (UID: \"0a92636c-670e-44d8-ba06-c0ad4276da6d\") " pod="openstack/manila-share-share1-0" Nov 23 16:25:51 crc kubenswrapper[5050]: I1123 16:25:51.795249 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-manila\" (UniqueName: \"kubernetes.io/host-path/0a92636c-670e-44d8-ba06-c0ad4276da6d-var-lib-manila\") pod \"manila-share-share1-0\" (UID: \"0a92636c-670e-44d8-ba06-c0ad4276da6d\") " pod="openstack/manila-share-share1-0" Nov 23 16:25:51 crc kubenswrapper[5050]: I1123 16:25:51.795276 5050 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0a92636c-670e-44d8-ba06-c0ad4276da6d-config-data-custom\") pod \"manila-share-share1-0\" (UID: \"0a92636c-670e-44d8-ba06-c0ad4276da6d\") " pod="openstack/manila-share-share1-0" Nov 23 16:25:51 crc kubenswrapper[5050]: I1123 16:25:51.795384 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/42ae5f1a-5bf5-480e-b387-f45f211beb62-etc-machine-id\") pod \"manila-scheduler-0\" (UID: \"42ae5f1a-5bf5-480e-b387-f45f211beb62\") " pod="openstack/manila-scheduler-0" Nov 23 16:25:51 crc kubenswrapper[5050]: I1123 16:25:51.795515 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/0a92636c-670e-44d8-ba06-c0ad4276da6d-etc-machine-id\") pod \"manila-share-share1-0\" (UID: \"0a92636c-670e-44d8-ba06-c0ad4276da6d\") " pod="openstack/manila-share-share1-0" Nov 23 16:25:51 crc kubenswrapper[5050]: I1123 16:25:51.795626 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/42ae5f1a-5bf5-480e-b387-f45f211beb62-config-data\") pod \"manila-scheduler-0\" (UID: \"42ae5f1a-5bf5-480e-b387-f45f211beb62\") " pod="openstack/manila-scheduler-0" Nov 23 16:25:51 crc kubenswrapper[5050]: I1123 16:25:51.795707 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xl6jn\" (UniqueName: \"kubernetes.io/projected/42ae5f1a-5bf5-480e-b387-f45f211beb62-kube-api-access-xl6jn\") pod \"manila-scheduler-0\" (UID: \"42ae5f1a-5bf5-480e-b387-f45f211beb62\") " pod="openstack/manila-scheduler-0" Nov 23 16:25:51 crc kubenswrapper[5050]: I1123 16:25:51.795830 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0a92636c-670e-44d8-ba06-c0ad4276da6d-scripts\") pod \"manila-share-share1-0\" (UID: \"0a92636c-670e-44d8-ba06-c0ad4276da6d\") " pod="openstack/manila-share-share1-0" Nov 23 16:25:51 crc kubenswrapper[5050]: I1123 16:25:51.797158 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/42ae5f1a-5bf5-480e-b387-f45f211beb62-etc-machine-id\") pod \"manila-scheduler-0\" (UID: \"42ae5f1a-5bf5-480e-b387-f45f211beb62\") " pod="openstack/manila-scheduler-0" Nov 23 16:25:51 crc kubenswrapper[5050]: I1123 16:25:51.798042 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-84fbdc6f57-fwcg7"] Nov 23 16:25:51 crc kubenswrapper[5050]: I1123 16:25:51.800580 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-84fbdc6f57-fwcg7" Nov 23 16:25:51 crc kubenswrapper[5050]: I1123 16:25:51.820930 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/42ae5f1a-5bf5-480e-b387-f45f211beb62-scripts\") pod \"manila-scheduler-0\" (UID: \"42ae5f1a-5bf5-480e-b387-f45f211beb62\") " pod="openstack/manila-scheduler-0" Nov 23 16:25:51 crc kubenswrapper[5050]: I1123 16:25:51.827164 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/42ae5f1a-5bf5-480e-b387-f45f211beb62-combined-ca-bundle\") pod \"manila-scheduler-0\" (UID: \"42ae5f1a-5bf5-480e-b387-f45f211beb62\") " pod="openstack/manila-scheduler-0" Nov 23 16:25:51 crc kubenswrapper[5050]: I1123 16:25:51.827889 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/42ae5f1a-5bf5-480e-b387-f45f211beb62-config-data\") pod \"manila-scheduler-0\" (UID: \"42ae5f1a-5bf5-480e-b387-f45f211beb62\") " pod="openstack/manila-scheduler-0" Nov 23 16:25:51 crc kubenswrapper[5050]: I1123 16:25:51.835535 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-84fbdc6f57-fwcg7"] Nov 23 16:25:51 crc kubenswrapper[5050]: I1123 16:25:51.836970 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xl6jn\" (UniqueName: \"kubernetes.io/projected/42ae5f1a-5bf5-480e-b387-f45f211beb62-kube-api-access-xl6jn\") pod \"manila-scheduler-0\" (UID: \"42ae5f1a-5bf5-480e-b387-f45f211beb62\") " pod="openstack/manila-scheduler-0" Nov 23 16:25:51 crc kubenswrapper[5050]: I1123 16:25:51.846481 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/42ae5f1a-5bf5-480e-b387-f45f211beb62-config-data-custom\") pod \"manila-scheduler-0\" (UID: \"42ae5f1a-5bf5-480e-b387-f45f211beb62\") " pod="openstack/manila-scheduler-0" Nov 23 16:25:51 crc kubenswrapper[5050]: I1123 16:25:51.899029 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pdj2j\" (UniqueName: \"kubernetes.io/projected/0a92636c-670e-44d8-ba06-c0ad4276da6d-kube-api-access-pdj2j\") pod \"manila-share-share1-0\" (UID: \"0a92636c-670e-44d8-ba06-c0ad4276da6d\") " pod="openstack/manila-share-share1-0" Nov 23 16:25:51 crc kubenswrapper[5050]: I1123 16:25:51.899097 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-manila\" (UniqueName: \"kubernetes.io/host-path/0a92636c-670e-44d8-ba06-c0ad4276da6d-var-lib-manila\") pod \"manila-share-share1-0\" (UID: \"0a92636c-670e-44d8-ba06-c0ad4276da6d\") " pod="openstack/manila-share-share1-0" Nov 23 16:25:51 crc kubenswrapper[5050]: I1123 16:25:51.899127 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0a92636c-670e-44d8-ba06-c0ad4276da6d-config-data-custom\") pod \"manila-share-share1-0\" (UID: \"0a92636c-670e-44d8-ba06-c0ad4276da6d\") " pod="openstack/manila-share-share1-0" Nov 23 16:25:51 crc kubenswrapper[5050]: I1123 16:25:51.899153 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e4be80a9-30d1-47ad-9dff-6aa826295fdc-dns-svc\") pod \"dnsmasq-dns-84fbdc6f57-fwcg7\" (UID: \"e4be80a9-30d1-47ad-9dff-6aa826295fdc\") " 
pod="openstack/dnsmasq-dns-84fbdc6f57-fwcg7" Nov 23 16:25:51 crc kubenswrapper[5050]: I1123 16:25:51.899209 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/0a92636c-670e-44d8-ba06-c0ad4276da6d-etc-machine-id\") pod \"manila-share-share1-0\" (UID: \"0a92636c-670e-44d8-ba06-c0ad4276da6d\") " pod="openstack/manila-share-share1-0" Nov 23 16:25:51 crc kubenswrapper[5050]: I1123 16:25:51.899247 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e4be80a9-30d1-47ad-9dff-6aa826295fdc-ovsdbserver-sb\") pod \"dnsmasq-dns-84fbdc6f57-fwcg7\" (UID: \"e4be80a9-30d1-47ad-9dff-6aa826295fdc\") " pod="openstack/dnsmasq-dns-84fbdc6f57-fwcg7" Nov 23 16:25:51 crc kubenswrapper[5050]: I1123 16:25:51.899282 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e4be80a9-30d1-47ad-9dff-6aa826295fdc-ovsdbserver-nb\") pod \"dnsmasq-dns-84fbdc6f57-fwcg7\" (UID: \"e4be80a9-30d1-47ad-9dff-6aa826295fdc\") " pod="openstack/dnsmasq-dns-84fbdc6f57-fwcg7" Nov 23 16:25:51 crc kubenswrapper[5050]: I1123 16:25:51.899322 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0a92636c-670e-44d8-ba06-c0ad4276da6d-scripts\") pod \"manila-share-share1-0\" (UID: \"0a92636c-670e-44d8-ba06-c0ad4276da6d\") " pod="openstack/manila-share-share1-0" Nov 23 16:25:51 crc kubenswrapper[5050]: I1123 16:25:51.899348 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0a92636c-670e-44d8-ba06-c0ad4276da6d-combined-ca-bundle\") pod \"manila-share-share1-0\" (UID: \"0a92636c-670e-44d8-ba06-c0ad4276da6d\") " pod="openstack/manila-share-share1-0" Nov 23 16:25:51 crc kubenswrapper[5050]: I1123 16:25:51.899379 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/0a92636c-670e-44d8-ba06-c0ad4276da6d-ceph\") pod \"manila-share-share1-0\" (UID: \"0a92636c-670e-44d8-ba06-c0ad4276da6d\") " pod="openstack/manila-share-share1-0" Nov 23 16:25:51 crc kubenswrapper[5050]: I1123 16:25:51.899405 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ckxh5\" (UniqueName: \"kubernetes.io/projected/e4be80a9-30d1-47ad-9dff-6aa826295fdc-kube-api-access-ckxh5\") pod \"dnsmasq-dns-84fbdc6f57-fwcg7\" (UID: \"e4be80a9-30d1-47ad-9dff-6aa826295fdc\") " pod="openstack/dnsmasq-dns-84fbdc6f57-fwcg7" Nov 23 16:25:51 crc kubenswrapper[5050]: I1123 16:25:51.899429 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e4be80a9-30d1-47ad-9dff-6aa826295fdc-config\") pod \"dnsmasq-dns-84fbdc6f57-fwcg7\" (UID: \"e4be80a9-30d1-47ad-9dff-6aa826295fdc\") " pod="openstack/dnsmasq-dns-84fbdc6f57-fwcg7" Nov 23 16:25:51 crc kubenswrapper[5050]: I1123 16:25:51.899494 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0a92636c-670e-44d8-ba06-c0ad4276da6d-config-data\") pod \"manila-share-share1-0\" (UID: \"0a92636c-670e-44d8-ba06-c0ad4276da6d\") " pod="openstack/manila-share-share1-0" Nov 23 16:25:51 crc kubenswrapper[5050]: 
I1123 16:25:51.901351 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-manila\" (UniqueName: \"kubernetes.io/host-path/0a92636c-670e-44d8-ba06-c0ad4276da6d-var-lib-manila\") pod \"manila-share-share1-0\" (UID: \"0a92636c-670e-44d8-ba06-c0ad4276da6d\") " pod="openstack/manila-share-share1-0" Nov 23 16:25:51 crc kubenswrapper[5050]: I1123 16:25:51.901439 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/0a92636c-670e-44d8-ba06-c0ad4276da6d-etc-machine-id\") pod \"manila-share-share1-0\" (UID: \"0a92636c-670e-44d8-ba06-c0ad4276da6d\") " pod="openstack/manila-share-share1-0" Nov 23 16:25:51 crc kubenswrapper[5050]: I1123 16:25:51.906858 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0a92636c-670e-44d8-ba06-c0ad4276da6d-scripts\") pod \"manila-share-share1-0\" (UID: \"0a92636c-670e-44d8-ba06-c0ad4276da6d\") " pod="openstack/manila-share-share1-0" Nov 23 16:25:51 crc kubenswrapper[5050]: I1123 16:25:51.911558 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/0a92636c-670e-44d8-ba06-c0ad4276da6d-ceph\") pod \"manila-share-share1-0\" (UID: \"0a92636c-670e-44d8-ba06-c0ad4276da6d\") " pod="openstack/manila-share-share1-0" Nov 23 16:25:51 crc kubenswrapper[5050]: I1123 16:25:51.912498 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0a92636c-670e-44d8-ba06-c0ad4276da6d-config-data-custom\") pod \"manila-share-share1-0\" (UID: \"0a92636c-670e-44d8-ba06-c0ad4276da6d\") " pod="openstack/manila-share-share1-0" Nov 23 16:25:51 crc kubenswrapper[5050]: I1123 16:25:51.919160 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0a92636c-670e-44d8-ba06-c0ad4276da6d-combined-ca-bundle\") pod \"manila-share-share1-0\" (UID: \"0a92636c-670e-44d8-ba06-c0ad4276da6d\") " pod="openstack/manila-share-share1-0" Nov 23 16:25:51 crc kubenswrapper[5050]: I1123 16:25:51.923883 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0a92636c-670e-44d8-ba06-c0ad4276da6d-config-data\") pod \"manila-share-share1-0\" (UID: \"0a92636c-670e-44d8-ba06-c0ad4276da6d\") " pod="openstack/manila-share-share1-0" Nov 23 16:25:51 crc kubenswrapper[5050]: I1123 16:25:51.931122 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pdj2j\" (UniqueName: \"kubernetes.io/projected/0a92636c-670e-44d8-ba06-c0ad4276da6d-kube-api-access-pdj2j\") pod \"manila-share-share1-0\" (UID: \"0a92636c-670e-44d8-ba06-c0ad4276da6d\") " pod="openstack/manila-share-share1-0" Nov 23 16:25:52 crc kubenswrapper[5050]: I1123 16:25:52.002183 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e4be80a9-30d1-47ad-9dff-6aa826295fdc-ovsdbserver-sb\") pod \"dnsmasq-dns-84fbdc6f57-fwcg7\" (UID: \"e4be80a9-30d1-47ad-9dff-6aa826295fdc\") " pod="openstack/dnsmasq-dns-84fbdc6f57-fwcg7" Nov 23 16:25:52 crc kubenswrapper[5050]: I1123 16:25:52.003075 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e4be80a9-30d1-47ad-9dff-6aa826295fdc-ovsdbserver-nb\") pod 
\"dnsmasq-dns-84fbdc6f57-fwcg7\" (UID: \"e4be80a9-30d1-47ad-9dff-6aa826295fdc\") " pod="openstack/dnsmasq-dns-84fbdc6f57-fwcg7" Nov 23 16:25:52 crc kubenswrapper[5050]: I1123 16:25:52.003213 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ckxh5\" (UniqueName: \"kubernetes.io/projected/e4be80a9-30d1-47ad-9dff-6aa826295fdc-kube-api-access-ckxh5\") pod \"dnsmasq-dns-84fbdc6f57-fwcg7\" (UID: \"e4be80a9-30d1-47ad-9dff-6aa826295fdc\") " pod="openstack/dnsmasq-dns-84fbdc6f57-fwcg7" Nov 23 16:25:52 crc kubenswrapper[5050]: I1123 16:25:52.003292 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e4be80a9-30d1-47ad-9dff-6aa826295fdc-config\") pod \"dnsmasq-dns-84fbdc6f57-fwcg7\" (UID: \"e4be80a9-30d1-47ad-9dff-6aa826295fdc\") " pod="openstack/dnsmasq-dns-84fbdc6f57-fwcg7" Nov 23 16:25:52 crc kubenswrapper[5050]: I1123 16:25:52.003492 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e4be80a9-30d1-47ad-9dff-6aa826295fdc-dns-svc\") pod \"dnsmasq-dns-84fbdc6f57-fwcg7\" (UID: \"e4be80a9-30d1-47ad-9dff-6aa826295fdc\") " pod="openstack/dnsmasq-dns-84fbdc6f57-fwcg7" Nov 23 16:25:52 crc kubenswrapper[5050]: I1123 16:25:52.011177 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-scheduler-0" Nov 23 16:25:52 crc kubenswrapper[5050]: I1123 16:25:52.013556 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e4be80a9-30d1-47ad-9dff-6aa826295fdc-ovsdbserver-sb\") pod \"dnsmasq-dns-84fbdc6f57-fwcg7\" (UID: \"e4be80a9-30d1-47ad-9dff-6aa826295fdc\") " pod="openstack/dnsmasq-dns-84fbdc6f57-fwcg7" Nov 23 16:25:52 crc kubenswrapper[5050]: I1123 16:25:52.018138 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e4be80a9-30d1-47ad-9dff-6aa826295fdc-dns-svc\") pod \"dnsmasq-dns-84fbdc6f57-fwcg7\" (UID: \"e4be80a9-30d1-47ad-9dff-6aa826295fdc\") " pod="openstack/dnsmasq-dns-84fbdc6f57-fwcg7" Nov 23 16:25:52 crc kubenswrapper[5050]: I1123 16:25:52.019041 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e4be80a9-30d1-47ad-9dff-6aa826295fdc-ovsdbserver-nb\") pod \"dnsmasq-dns-84fbdc6f57-fwcg7\" (UID: \"e4be80a9-30d1-47ad-9dff-6aa826295fdc\") " pod="openstack/dnsmasq-dns-84fbdc6f57-fwcg7" Nov 23 16:25:52 crc kubenswrapper[5050]: I1123 16:25:52.021795 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e4be80a9-30d1-47ad-9dff-6aa826295fdc-config\") pod \"dnsmasq-dns-84fbdc6f57-fwcg7\" (UID: \"e4be80a9-30d1-47ad-9dff-6aa826295fdc\") " pod="openstack/dnsmasq-dns-84fbdc6f57-fwcg7" Nov 23 16:25:52 crc kubenswrapper[5050]: I1123 16:25:52.025115 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ckxh5\" (UniqueName: \"kubernetes.io/projected/e4be80a9-30d1-47ad-9dff-6aa826295fdc-kube-api-access-ckxh5\") pod \"dnsmasq-dns-84fbdc6f57-fwcg7\" (UID: \"e4be80a9-30d1-47ad-9dff-6aa826295fdc\") " pod="openstack/dnsmasq-dns-84fbdc6f57-fwcg7" Nov 23 16:25:52 crc kubenswrapper[5050]: I1123 16:25:52.025379 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/manila-api-0"] Nov 23 16:25:52 crc kubenswrapper[5050]: I1123 16:25:52.027692 5050 
util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-api-0" Nov 23 16:25:52 crc kubenswrapper[5050]: I1123 16:25:52.032294 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-api-config-data" Nov 23 16:25:52 crc kubenswrapper[5050]: I1123 16:25:52.034288 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-share-share1-0" Nov 23 16:25:52 crc kubenswrapper[5050]: I1123 16:25:52.056085 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-api-0"] Nov 23 16:25:52 crc kubenswrapper[5050]: I1123 16:25:52.108307 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b6068f40-9934-40eb-b77b-c497f58e6667-combined-ca-bundle\") pod \"manila-api-0\" (UID: \"b6068f40-9934-40eb-b77b-c497f58e6667\") " pod="openstack/manila-api-0" Nov 23 16:25:52 crc kubenswrapper[5050]: I1123 16:25:52.108900 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b6068f40-9934-40eb-b77b-c497f58e6667-logs\") pod \"manila-api-0\" (UID: \"b6068f40-9934-40eb-b77b-c497f58e6667\") " pod="openstack/manila-api-0" Nov 23 16:25:52 crc kubenswrapper[5050]: I1123 16:25:52.108944 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b6068f40-9934-40eb-b77b-c497f58e6667-config-data\") pod \"manila-api-0\" (UID: \"b6068f40-9934-40eb-b77b-c497f58e6667\") " pod="openstack/manila-api-0" Nov 23 16:25:52 crc kubenswrapper[5050]: I1123 16:25:52.109095 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/b6068f40-9934-40eb-b77b-c497f58e6667-etc-machine-id\") pod \"manila-api-0\" (UID: \"b6068f40-9934-40eb-b77b-c497f58e6667\") " pod="openstack/manila-api-0" Nov 23 16:25:52 crc kubenswrapper[5050]: I1123 16:25:52.109134 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b6068f40-9934-40eb-b77b-c497f58e6667-scripts\") pod \"manila-api-0\" (UID: \"b6068f40-9934-40eb-b77b-c497f58e6667\") " pod="openstack/manila-api-0" Nov 23 16:25:52 crc kubenswrapper[5050]: I1123 16:25:52.109315 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nsqb7\" (UniqueName: \"kubernetes.io/projected/b6068f40-9934-40eb-b77b-c497f58e6667-kube-api-access-nsqb7\") pod \"manila-api-0\" (UID: \"b6068f40-9934-40eb-b77b-c497f58e6667\") " pod="openstack/manila-api-0" Nov 23 16:25:52 crc kubenswrapper[5050]: I1123 16:25:52.109438 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b6068f40-9934-40eb-b77b-c497f58e6667-config-data-custom\") pod \"manila-api-0\" (UID: \"b6068f40-9934-40eb-b77b-c497f58e6667\") " pod="openstack/manila-api-0" Nov 23 16:25:52 crc kubenswrapper[5050]: I1123 16:25:52.211436 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b6068f40-9934-40eb-b77b-c497f58e6667-config-data\") pod \"manila-api-0\" (UID: \"b6068f40-9934-40eb-b77b-c497f58e6667\") " pod="openstack/manila-api-0" Nov 
23 16:25:52 crc kubenswrapper[5050]: I1123 16:25:52.211909 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b6068f40-9934-40eb-b77b-c497f58e6667-logs\") pod \"manila-api-0\" (UID: \"b6068f40-9934-40eb-b77b-c497f58e6667\") " pod="openstack/manila-api-0" Nov 23 16:25:52 crc kubenswrapper[5050]: I1123 16:25:52.211957 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/b6068f40-9934-40eb-b77b-c497f58e6667-etc-machine-id\") pod \"manila-api-0\" (UID: \"b6068f40-9934-40eb-b77b-c497f58e6667\") " pod="openstack/manila-api-0" Nov 23 16:25:52 crc kubenswrapper[5050]: I1123 16:25:52.211979 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b6068f40-9934-40eb-b77b-c497f58e6667-scripts\") pod \"manila-api-0\" (UID: \"b6068f40-9934-40eb-b77b-c497f58e6667\") " pod="openstack/manila-api-0" Nov 23 16:25:52 crc kubenswrapper[5050]: I1123 16:25:52.212031 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nsqb7\" (UniqueName: \"kubernetes.io/projected/b6068f40-9934-40eb-b77b-c497f58e6667-kube-api-access-nsqb7\") pod \"manila-api-0\" (UID: \"b6068f40-9934-40eb-b77b-c497f58e6667\") " pod="openstack/manila-api-0" Nov 23 16:25:52 crc kubenswrapper[5050]: I1123 16:25:52.212063 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b6068f40-9934-40eb-b77b-c497f58e6667-config-data-custom\") pod \"manila-api-0\" (UID: \"b6068f40-9934-40eb-b77b-c497f58e6667\") " pod="openstack/manila-api-0" Nov 23 16:25:52 crc kubenswrapper[5050]: I1123 16:25:52.212149 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b6068f40-9934-40eb-b77b-c497f58e6667-combined-ca-bundle\") pod \"manila-api-0\" (UID: \"b6068f40-9934-40eb-b77b-c497f58e6667\") " pod="openstack/manila-api-0" Nov 23 16:25:52 crc kubenswrapper[5050]: I1123 16:25:52.213380 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b6068f40-9934-40eb-b77b-c497f58e6667-logs\") pod \"manila-api-0\" (UID: \"b6068f40-9934-40eb-b77b-c497f58e6667\") " pod="openstack/manila-api-0" Nov 23 16:25:52 crc kubenswrapper[5050]: I1123 16:25:52.214157 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/b6068f40-9934-40eb-b77b-c497f58e6667-etc-machine-id\") pod \"manila-api-0\" (UID: \"b6068f40-9934-40eb-b77b-c497f58e6667\") " pod="openstack/manila-api-0" Nov 23 16:25:52 crc kubenswrapper[5050]: I1123 16:25:52.221205 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b6068f40-9934-40eb-b77b-c497f58e6667-config-data-custom\") pod \"manila-api-0\" (UID: \"b6068f40-9934-40eb-b77b-c497f58e6667\") " pod="openstack/manila-api-0" Nov 23 16:25:52 crc kubenswrapper[5050]: I1123 16:25:52.224764 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b6068f40-9934-40eb-b77b-c497f58e6667-scripts\") pod \"manila-api-0\" (UID: \"b6068f40-9934-40eb-b77b-c497f58e6667\") " pod="openstack/manila-api-0" Nov 23 16:25:52 crc kubenswrapper[5050]: I1123 16:25:52.226064 
5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b6068f40-9934-40eb-b77b-c497f58e6667-config-data\") pod \"manila-api-0\" (UID: \"b6068f40-9934-40eb-b77b-c497f58e6667\") " pod="openstack/manila-api-0" Nov 23 16:25:52 crc kubenswrapper[5050]: I1123 16:25:52.229513 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b6068f40-9934-40eb-b77b-c497f58e6667-combined-ca-bundle\") pod \"manila-api-0\" (UID: \"b6068f40-9934-40eb-b77b-c497f58e6667\") " pod="openstack/manila-api-0" Nov 23 16:25:52 crc kubenswrapper[5050]: I1123 16:25:52.231777 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nsqb7\" (UniqueName: \"kubernetes.io/projected/b6068f40-9934-40eb-b77b-c497f58e6667-kube-api-access-nsqb7\") pod \"manila-api-0\" (UID: \"b6068f40-9934-40eb-b77b-c497f58e6667\") " pod="openstack/manila-api-0" Nov 23 16:25:52 crc kubenswrapper[5050]: I1123 16:25:52.314568 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-84fbdc6f57-fwcg7" Nov 23 16:25:52 crc kubenswrapper[5050]: I1123 16:25:52.403967 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-api-0" Nov 23 16:25:52 crc kubenswrapper[5050]: I1123 16:25:52.623126 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-scheduler-0"] Nov 23 16:25:52 crc kubenswrapper[5050]: W1123 16:25:52.853769 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0a92636c_670e_44d8_ba06_c0ad4276da6d.slice/crio-0fd96e50bdeeb43728d45af5b119ece0fcbb8615bed2e95bb413d7648a598759 WatchSource:0}: Error finding container 0fd96e50bdeeb43728d45af5b119ece0fcbb8615bed2e95bb413d7648a598759: Status 404 returned error can't find the container with id 0fd96e50bdeeb43728d45af5b119ece0fcbb8615bed2e95bb413d7648a598759 Nov 23 16:25:52 crc kubenswrapper[5050]: I1123 16:25:52.856597 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-share-share1-0"] Nov 23 16:25:52 crc kubenswrapper[5050]: I1123 16:25:52.960586 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-84fbdc6f57-fwcg7"] Nov 23 16:25:53 crc kubenswrapper[5050]: I1123 16:25:53.138518 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-share-share1-0" event={"ID":"0a92636c-670e-44d8-ba06-c0ad4276da6d","Type":"ContainerStarted","Data":"0fd96e50bdeeb43728d45af5b119ece0fcbb8615bed2e95bb413d7648a598759"} Nov 23 16:25:53 crc kubenswrapper[5050]: I1123 16:25:53.142037 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-84fbdc6f57-fwcg7" event={"ID":"e4be80a9-30d1-47ad-9dff-6aa826295fdc","Type":"ContainerStarted","Data":"194a74c832e7a84a20e8bc074aee85f2fb48731e10b114314d0ea53bba3356a8"} Nov 23 16:25:53 crc kubenswrapper[5050]: I1123 16:25:53.147283 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-scheduler-0" event={"ID":"42ae5f1a-5bf5-480e-b387-f45f211beb62","Type":"ContainerStarted","Data":"c9392b83a94e13042e7be9dc8cb16986a3953baebe8945aaf7b7d4726a07a89b"} Nov 23 16:25:53 crc kubenswrapper[5050]: I1123 16:25:53.226609 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-api-0"] Nov 23 16:25:54 crc kubenswrapper[5050]: I1123 16:25:54.176581 5050 kubelet.go:2453] "SyncLoop (PLEG): event for 
pod" pod="openstack/manila-scheduler-0" event={"ID":"42ae5f1a-5bf5-480e-b387-f45f211beb62","Type":"ContainerStarted","Data":"a99126532f56320e98b86b430483229b017716dcb6c97ca8ff802381e0b95c70"} Nov 23 16:25:54 crc kubenswrapper[5050]: I1123 16:25:54.182041 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" event={"ID":"b6068f40-9934-40eb-b77b-c497f58e6667","Type":"ContainerStarted","Data":"959792c347cab75f2719b9883fa1fc4340a1d1435927ac49d616baa9f760a37c"} Nov 23 16:25:54 crc kubenswrapper[5050]: I1123 16:25:54.182128 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" event={"ID":"b6068f40-9934-40eb-b77b-c497f58e6667","Type":"ContainerStarted","Data":"33eeea22139c5270a3abbe5d945e66ec8da5a4cb83c37ea305d260b55be6299f"} Nov 23 16:25:54 crc kubenswrapper[5050]: I1123 16:25:54.190842 5050 generic.go:334] "Generic (PLEG): container finished" podID="e4be80a9-30d1-47ad-9dff-6aa826295fdc" containerID="406e2bb5ddb5b42f86cfe26b66a31f0a4fa605fa008ae956539b492e7265db6f" exitCode=0 Nov 23 16:25:54 crc kubenswrapper[5050]: I1123 16:25:54.190898 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-84fbdc6f57-fwcg7" event={"ID":"e4be80a9-30d1-47ad-9dff-6aa826295fdc","Type":"ContainerDied","Data":"406e2bb5ddb5b42f86cfe26b66a31f0a4fa605fa008ae956539b492e7265db6f"} Nov 23 16:25:55 crc kubenswrapper[5050]: I1123 16:25:55.026988 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Nov 23 16:25:55 crc kubenswrapper[5050]: I1123 16:25:55.206122 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-scheduler-0" event={"ID":"42ae5f1a-5bf5-480e-b387-f45f211beb62","Type":"ContainerStarted","Data":"976a59bacdffcfc2a321a5aef7d9dd44898883587334a83d6b164ede292a1e7c"} Nov 23 16:25:55 crc kubenswrapper[5050]: I1123 16:25:55.214531 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" event={"ID":"b6068f40-9934-40eb-b77b-c497f58e6667","Type":"ContainerStarted","Data":"28551ee7f39ab868f71cce911a5ff448ebd531695d786816e477fb673ce39581"} Nov 23 16:25:55 crc kubenswrapper[5050]: I1123 16:25:55.216427 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/manila-api-0" Nov 23 16:25:55 crc kubenswrapper[5050]: I1123 16:25:55.222693 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-84fbdc6f57-fwcg7" event={"ID":"e4be80a9-30d1-47ad-9dff-6aa826295fdc","Type":"ContainerStarted","Data":"f42ada5c5ac1f32abf97d461a1bade5e4dd703559b214fd3ecfbd1dd589ddee3"} Nov 23 16:25:55 crc kubenswrapper[5050]: I1123 16:25:55.224070 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-84fbdc6f57-fwcg7" Nov 23 16:25:55 crc kubenswrapper[5050]: I1123 16:25:55.239891 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/manila-scheduler-0" podStartSLOduration=3.5516907140000002 podStartE2EDuration="4.239857593s" podCreationTimestamp="2025-11-23 16:25:51 +0000 UTC" firstStartedPulling="2025-11-23 16:25:52.626346428 +0000 UTC m=+6247.793342903" lastFinishedPulling="2025-11-23 16:25:53.314513297 +0000 UTC m=+6248.481509782" observedRunningTime="2025-11-23 16:25:55.230665464 +0000 UTC m=+6250.397661959" watchObservedRunningTime="2025-11-23 16:25:55.239857593 +0000 UTC m=+6250.406854078" Nov 23 16:25:55 crc kubenswrapper[5050]: I1123 16:25:55.277979 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack/dnsmasq-dns-84fbdc6f57-fwcg7" podStartSLOduration=4.277953148 podStartE2EDuration="4.277953148s" podCreationTimestamp="2025-11-23 16:25:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 16:25:55.268918423 +0000 UTC m=+6250.435914898" watchObservedRunningTime="2025-11-23 16:25:55.277953148 +0000 UTC m=+6250.444949633" Nov 23 16:25:55 crc kubenswrapper[5050]: I1123 16:25:55.298704 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/manila-api-0" podStartSLOduration=4.298681473 podStartE2EDuration="4.298681473s" podCreationTimestamp="2025-11-23 16:25:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 16:25:55.286219481 +0000 UTC m=+6250.453215966" watchObservedRunningTime="2025-11-23 16:25:55.298681473 +0000 UTC m=+6250.465677958" Nov 23 16:25:56 crc kubenswrapper[5050]: I1123 16:25:56.747509 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 23 16:25:56 crc kubenswrapper[5050]: I1123 16:25:56.749064 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="7e03c7f2-9820-4ee9-888a-0f83da39a907" containerName="ceilometer-central-agent" containerID="cri-o://183f93eecf7b7a18879f441f6a6844c071c241cb3c28e0948a4aa6874bd89d27" gracePeriod=30 Nov 23 16:25:56 crc kubenswrapper[5050]: I1123 16:25:56.749109 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="7e03c7f2-9820-4ee9-888a-0f83da39a907" containerName="proxy-httpd" containerID="cri-o://29b113b82fe51bb2f0ee6472a8d5fc6fbda628ae49972afce4d024951075fcca" gracePeriod=30 Nov 23 16:25:56 crc kubenswrapper[5050]: I1123 16:25:56.749140 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="7e03c7f2-9820-4ee9-888a-0f83da39a907" containerName="sg-core" containerID="cri-o://0b865f85483f63792762c864df26ec7b0dde4d779255e7bdb3013351a922a7e4" gracePeriod=30 Nov 23 16:25:56 crc kubenswrapper[5050]: I1123 16:25:56.749151 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="7e03c7f2-9820-4ee9-888a-0f83da39a907" containerName="ceilometer-notification-agent" containerID="cri-o://3bf4dc0c80c1225c27f61741c11a77dea9e56c2f54c97205ed331bece7c9479e" gracePeriod=30 Nov 23 16:25:57 crc kubenswrapper[5050]: I1123 16:25:57.259502 5050 generic.go:334] "Generic (PLEG): container finished" podID="7e03c7f2-9820-4ee9-888a-0f83da39a907" containerID="29b113b82fe51bb2f0ee6472a8d5fc6fbda628ae49972afce4d024951075fcca" exitCode=0 Nov 23 16:25:57 crc kubenswrapper[5050]: I1123 16:25:57.260009 5050 generic.go:334] "Generic (PLEG): container finished" podID="7e03c7f2-9820-4ee9-888a-0f83da39a907" containerID="0b865f85483f63792762c864df26ec7b0dde4d779255e7bdb3013351a922a7e4" exitCode=2 Nov 23 16:25:57 crc kubenswrapper[5050]: I1123 16:25:57.260028 5050 generic.go:334] "Generic (PLEG): container finished" podID="7e03c7f2-9820-4ee9-888a-0f83da39a907" containerID="183f93eecf7b7a18879f441f6a6844c071c241cb3c28e0948a4aa6874bd89d27" exitCode=0 Nov 23 16:25:57 crc kubenswrapper[5050]: I1123 16:25:57.259580 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"7e03c7f2-9820-4ee9-888a-0f83da39a907","Type":"ContainerDied","Data":"29b113b82fe51bb2f0ee6472a8d5fc6fbda628ae49972afce4d024951075fcca"} Nov 23 16:25:57 crc kubenswrapper[5050]: I1123 16:25:57.260131 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7e03c7f2-9820-4ee9-888a-0f83da39a907","Type":"ContainerDied","Data":"0b865f85483f63792762c864df26ec7b0dde4d779255e7bdb3013351a922a7e4"} Nov 23 16:25:57 crc kubenswrapper[5050]: I1123 16:25:57.260155 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7e03c7f2-9820-4ee9-888a-0f83da39a907","Type":"ContainerDied","Data":"183f93eecf7b7a18879f441f6a6844c071c241cb3c28e0948a4aa6874bd89d27"} Nov 23 16:25:59 crc kubenswrapper[5050]: I1123 16:25:59.224314 5050 patch_prober.go:28] interesting pod/machine-config-daemon-hlrlq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 23 16:25:59 crc kubenswrapper[5050]: I1123 16:25:59.224386 5050 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 23 16:25:59 crc kubenswrapper[5050]: I1123 16:25:59.285689 5050 generic.go:334] "Generic (PLEG): container finished" podID="7e03c7f2-9820-4ee9-888a-0f83da39a907" containerID="3bf4dc0c80c1225c27f61741c11a77dea9e56c2f54c97205ed331bece7c9479e" exitCode=0 Nov 23 16:25:59 crc kubenswrapper[5050]: I1123 16:25:59.285801 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7e03c7f2-9820-4ee9-888a-0f83da39a907","Type":"ContainerDied","Data":"3bf4dc0c80c1225c27f61741c11a77dea9e56c2f54c97205ed331bece7c9479e"} Nov 23 16:26:00 crc kubenswrapper[5050]: I1123 16:26:00.125827 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 23 16:26:00 crc kubenswrapper[5050]: I1123 16:26:00.223034 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7e03c7f2-9820-4ee9-888a-0f83da39a907-config-data\") pod \"7e03c7f2-9820-4ee9-888a-0f83da39a907\" (UID: \"7e03c7f2-9820-4ee9-888a-0f83da39a907\") " Nov 23 16:26:00 crc kubenswrapper[5050]: I1123 16:26:00.224052 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7e03c7f2-9820-4ee9-888a-0f83da39a907-sg-core-conf-yaml\") pod \"7e03c7f2-9820-4ee9-888a-0f83da39a907\" (UID: \"7e03c7f2-9820-4ee9-888a-0f83da39a907\") " Nov 23 16:26:00 crc kubenswrapper[5050]: I1123 16:26:00.224102 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7e03c7f2-9820-4ee9-888a-0f83da39a907-run-httpd\") pod \"7e03c7f2-9820-4ee9-888a-0f83da39a907\" (UID: \"7e03c7f2-9820-4ee9-888a-0f83da39a907\") " Nov 23 16:26:00 crc kubenswrapper[5050]: I1123 16:26:00.224165 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tw56z\" (UniqueName: \"kubernetes.io/projected/7e03c7f2-9820-4ee9-888a-0f83da39a907-kube-api-access-tw56z\") pod \"7e03c7f2-9820-4ee9-888a-0f83da39a907\" (UID: \"7e03c7f2-9820-4ee9-888a-0f83da39a907\") " Nov 23 16:26:00 crc kubenswrapper[5050]: I1123 16:26:00.224191 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7e03c7f2-9820-4ee9-888a-0f83da39a907-scripts\") pod \"7e03c7f2-9820-4ee9-888a-0f83da39a907\" (UID: \"7e03c7f2-9820-4ee9-888a-0f83da39a907\") " Nov 23 16:26:00 crc kubenswrapper[5050]: I1123 16:26:00.224209 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7e03c7f2-9820-4ee9-888a-0f83da39a907-log-httpd\") pod \"7e03c7f2-9820-4ee9-888a-0f83da39a907\" (UID: \"7e03c7f2-9820-4ee9-888a-0f83da39a907\") " Nov 23 16:26:00 crc kubenswrapper[5050]: I1123 16:26:00.224254 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e03c7f2-9820-4ee9-888a-0f83da39a907-combined-ca-bundle\") pod \"7e03c7f2-9820-4ee9-888a-0f83da39a907\" (UID: \"7e03c7f2-9820-4ee9-888a-0f83da39a907\") " Nov 23 16:26:00 crc kubenswrapper[5050]: I1123 16:26:00.225328 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7e03c7f2-9820-4ee9-888a-0f83da39a907-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "7e03c7f2-9820-4ee9-888a-0f83da39a907" (UID: "7e03c7f2-9820-4ee9-888a-0f83da39a907"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 16:26:00 crc kubenswrapper[5050]: I1123 16:26:00.226217 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7e03c7f2-9820-4ee9-888a-0f83da39a907-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "7e03c7f2-9820-4ee9-888a-0f83da39a907" (UID: "7e03c7f2-9820-4ee9-888a-0f83da39a907"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 16:26:00 crc kubenswrapper[5050]: I1123 16:26:00.230568 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7e03c7f2-9820-4ee9-888a-0f83da39a907-kube-api-access-tw56z" (OuterVolumeSpecName: "kube-api-access-tw56z") pod "7e03c7f2-9820-4ee9-888a-0f83da39a907" (UID: "7e03c7f2-9820-4ee9-888a-0f83da39a907"). InnerVolumeSpecName "kube-api-access-tw56z". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 16:26:00 crc kubenswrapper[5050]: I1123 16:26:00.232220 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7e03c7f2-9820-4ee9-888a-0f83da39a907-scripts" (OuterVolumeSpecName: "scripts") pod "7e03c7f2-9820-4ee9-888a-0f83da39a907" (UID: "7e03c7f2-9820-4ee9-888a-0f83da39a907"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:26:00 crc kubenswrapper[5050]: I1123 16:26:00.270937 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7e03c7f2-9820-4ee9-888a-0f83da39a907-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "7e03c7f2-9820-4ee9-888a-0f83da39a907" (UID: "7e03c7f2-9820-4ee9-888a-0f83da39a907"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:26:00 crc kubenswrapper[5050]: I1123 16:26:00.303296 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7e03c7f2-9820-4ee9-888a-0f83da39a907","Type":"ContainerDied","Data":"8821bb8136194be45e482920c0d23d73a514925eec4e65e81551a9cc1185422d"} Nov 23 16:26:00 crc kubenswrapper[5050]: I1123 16:26:00.303390 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 23 16:26:00 crc kubenswrapper[5050]: I1123 16:26:00.303394 5050 scope.go:117] "RemoveContainer" containerID="29b113b82fe51bb2f0ee6472a8d5fc6fbda628ae49972afce4d024951075fcca" Nov 23 16:26:00 crc kubenswrapper[5050]: I1123 16:26:00.328387 5050 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7e03c7f2-9820-4ee9-888a-0f83da39a907-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 23 16:26:00 crc kubenswrapper[5050]: I1123 16:26:00.328431 5050 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7e03c7f2-9820-4ee9-888a-0f83da39a907-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 23 16:26:00 crc kubenswrapper[5050]: I1123 16:26:00.328913 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tw56z\" (UniqueName: \"kubernetes.io/projected/7e03c7f2-9820-4ee9-888a-0f83da39a907-kube-api-access-tw56z\") on node \"crc\" DevicePath \"\"" Nov 23 16:26:00 crc kubenswrapper[5050]: I1123 16:26:00.328941 5050 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7e03c7f2-9820-4ee9-888a-0f83da39a907-scripts\") on node \"crc\" DevicePath \"\"" Nov 23 16:26:00 crc kubenswrapper[5050]: I1123 16:26:00.328955 5050 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7e03c7f2-9820-4ee9-888a-0f83da39a907-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 23 16:26:00 crc kubenswrapper[5050]: I1123 16:26:00.337302 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7e03c7f2-9820-4ee9-888a-0f83da39a907-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7e03c7f2-9820-4ee9-888a-0f83da39a907" (UID: "7e03c7f2-9820-4ee9-888a-0f83da39a907"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:26:00 crc kubenswrapper[5050]: I1123 16:26:00.345137 5050 scope.go:117] "RemoveContainer" containerID="0b865f85483f63792762c864df26ec7b0dde4d779255e7bdb3013351a922a7e4" Nov 23 16:26:00 crc kubenswrapper[5050]: I1123 16:26:00.373804 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7e03c7f2-9820-4ee9-888a-0f83da39a907-config-data" (OuterVolumeSpecName: "config-data") pod "7e03c7f2-9820-4ee9-888a-0f83da39a907" (UID: "7e03c7f2-9820-4ee9-888a-0f83da39a907"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:26:00 crc kubenswrapper[5050]: I1123 16:26:00.376580 5050 scope.go:117] "RemoveContainer" containerID="3bf4dc0c80c1225c27f61741c11a77dea9e56c2f54c97205ed331bece7c9479e" Nov 23 16:26:00 crc kubenswrapper[5050]: I1123 16:26:00.427603 5050 scope.go:117] "RemoveContainer" containerID="183f93eecf7b7a18879f441f6a6844c071c241cb3c28e0948a4aa6874bd89d27" Nov 23 16:26:00 crc kubenswrapper[5050]: I1123 16:26:00.431389 5050 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e03c7f2-9820-4ee9-888a-0f83da39a907-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 16:26:00 crc kubenswrapper[5050]: I1123 16:26:00.431431 5050 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7e03c7f2-9820-4ee9-888a-0f83da39a907-config-data\") on node \"crc\" DevicePath \"\"" Nov 23 16:26:00 crc kubenswrapper[5050]: I1123 16:26:00.683628 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 23 16:26:00 crc kubenswrapper[5050]: I1123 16:26:00.711265 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 23 16:26:00 crc kubenswrapper[5050]: I1123 16:26:00.724870 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 23 16:26:00 crc kubenswrapper[5050]: E1123 16:26:00.725633 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7e03c7f2-9820-4ee9-888a-0f83da39a907" containerName="sg-core" Nov 23 16:26:00 crc kubenswrapper[5050]: I1123 16:26:00.725668 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="7e03c7f2-9820-4ee9-888a-0f83da39a907" containerName="sg-core" Nov 23 16:26:00 crc kubenswrapper[5050]: E1123 16:26:00.725705 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7e03c7f2-9820-4ee9-888a-0f83da39a907" containerName="ceilometer-central-agent" Nov 23 16:26:00 crc kubenswrapper[5050]: I1123 16:26:00.725716 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="7e03c7f2-9820-4ee9-888a-0f83da39a907" containerName="ceilometer-central-agent" Nov 23 16:26:00 crc kubenswrapper[5050]: E1123 16:26:00.725736 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7e03c7f2-9820-4ee9-888a-0f83da39a907" containerName="proxy-httpd" Nov 23 16:26:00 crc kubenswrapper[5050]: I1123 16:26:00.725761 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="7e03c7f2-9820-4ee9-888a-0f83da39a907" containerName="proxy-httpd" Nov 23 16:26:00 crc kubenswrapper[5050]: E1123 16:26:00.725793 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7e03c7f2-9820-4ee9-888a-0f83da39a907" containerName="ceilometer-notification-agent" Nov 23 16:26:00 crc kubenswrapper[5050]: I1123 16:26:00.725802 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="7e03c7f2-9820-4ee9-888a-0f83da39a907" containerName="ceilometer-notification-agent" Nov 23 16:26:00 crc kubenswrapper[5050]: I1123 16:26:00.726027 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="7e03c7f2-9820-4ee9-888a-0f83da39a907" containerName="proxy-httpd" Nov 23 16:26:00 crc kubenswrapper[5050]: I1123 16:26:00.726038 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="7e03c7f2-9820-4ee9-888a-0f83da39a907" containerName="ceilometer-notification-agent" Nov 23 16:26:00 crc kubenswrapper[5050]: I1123 16:26:00.726055 5050 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="7e03c7f2-9820-4ee9-888a-0f83da39a907" containerName="sg-core" Nov 23 16:26:00 crc kubenswrapper[5050]: I1123 16:26:00.726077 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="7e03c7f2-9820-4ee9-888a-0f83da39a907" containerName="ceilometer-central-agent" Nov 23 16:26:00 crc kubenswrapper[5050]: I1123 16:26:00.728274 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 23 16:26:00 crc kubenswrapper[5050]: I1123 16:26:00.732760 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 23 16:26:00 crc kubenswrapper[5050]: I1123 16:26:00.732843 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 23 16:26:00 crc kubenswrapper[5050]: I1123 16:26:00.734620 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 23 16:26:00 crc kubenswrapper[5050]: I1123 16:26:00.773221 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c8292c7e-9f14-4f99-9a13-65472ed4a9e0-run-httpd\") pod \"ceilometer-0\" (UID: \"c8292c7e-9f14-4f99-9a13-65472ed4a9e0\") " pod="openstack/ceilometer-0" Nov 23 16:26:00 crc kubenswrapper[5050]: I1123 16:26:00.773406 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c8292c7e-9f14-4f99-9a13-65472ed4a9e0-log-httpd\") pod \"ceilometer-0\" (UID: \"c8292c7e-9f14-4f99-9a13-65472ed4a9e0\") " pod="openstack/ceilometer-0" Nov 23 16:26:00 crc kubenswrapper[5050]: I1123 16:26:00.773563 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xqjm5\" (UniqueName: \"kubernetes.io/projected/c8292c7e-9f14-4f99-9a13-65472ed4a9e0-kube-api-access-xqjm5\") pod \"ceilometer-0\" (UID: \"c8292c7e-9f14-4f99-9a13-65472ed4a9e0\") " pod="openstack/ceilometer-0" Nov 23 16:26:00 crc kubenswrapper[5050]: I1123 16:26:00.773629 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c8292c7e-9f14-4f99-9a13-65472ed4a9e0-scripts\") pod \"ceilometer-0\" (UID: \"c8292c7e-9f14-4f99-9a13-65472ed4a9e0\") " pod="openstack/ceilometer-0" Nov 23 16:26:00 crc kubenswrapper[5050]: I1123 16:26:00.773689 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c8292c7e-9f14-4f99-9a13-65472ed4a9e0-config-data\") pod \"ceilometer-0\" (UID: \"c8292c7e-9f14-4f99-9a13-65472ed4a9e0\") " pod="openstack/ceilometer-0" Nov 23 16:26:00 crc kubenswrapper[5050]: I1123 16:26:00.773755 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c8292c7e-9f14-4f99-9a13-65472ed4a9e0-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"c8292c7e-9f14-4f99-9a13-65472ed4a9e0\") " pod="openstack/ceilometer-0" Nov 23 16:26:00 crc kubenswrapper[5050]: I1123 16:26:00.773787 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c8292c7e-9f14-4f99-9a13-65472ed4a9e0-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"c8292c7e-9f14-4f99-9a13-65472ed4a9e0\") " pod="openstack/ceilometer-0" Nov 23 16:26:00 
crc kubenswrapper[5050]: I1123 16:26:00.876477 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c8292c7e-9f14-4f99-9a13-65472ed4a9e0-log-httpd\") pod \"ceilometer-0\" (UID: \"c8292c7e-9f14-4f99-9a13-65472ed4a9e0\") " pod="openstack/ceilometer-0" Nov 23 16:26:00 crc kubenswrapper[5050]: I1123 16:26:00.876550 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xqjm5\" (UniqueName: \"kubernetes.io/projected/c8292c7e-9f14-4f99-9a13-65472ed4a9e0-kube-api-access-xqjm5\") pod \"ceilometer-0\" (UID: \"c8292c7e-9f14-4f99-9a13-65472ed4a9e0\") " pod="openstack/ceilometer-0" Nov 23 16:26:00 crc kubenswrapper[5050]: I1123 16:26:00.876593 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c8292c7e-9f14-4f99-9a13-65472ed4a9e0-scripts\") pod \"ceilometer-0\" (UID: \"c8292c7e-9f14-4f99-9a13-65472ed4a9e0\") " pod="openstack/ceilometer-0" Nov 23 16:26:00 crc kubenswrapper[5050]: I1123 16:26:00.876616 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c8292c7e-9f14-4f99-9a13-65472ed4a9e0-config-data\") pod \"ceilometer-0\" (UID: \"c8292c7e-9f14-4f99-9a13-65472ed4a9e0\") " pod="openstack/ceilometer-0" Nov 23 16:26:00 crc kubenswrapper[5050]: I1123 16:26:00.876664 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c8292c7e-9f14-4f99-9a13-65472ed4a9e0-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"c8292c7e-9f14-4f99-9a13-65472ed4a9e0\") " pod="openstack/ceilometer-0" Nov 23 16:26:00 crc kubenswrapper[5050]: I1123 16:26:00.876682 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c8292c7e-9f14-4f99-9a13-65472ed4a9e0-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"c8292c7e-9f14-4f99-9a13-65472ed4a9e0\") " pod="openstack/ceilometer-0" Nov 23 16:26:00 crc kubenswrapper[5050]: I1123 16:26:00.876874 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c8292c7e-9f14-4f99-9a13-65472ed4a9e0-run-httpd\") pod \"ceilometer-0\" (UID: \"c8292c7e-9f14-4f99-9a13-65472ed4a9e0\") " pod="openstack/ceilometer-0" Nov 23 16:26:00 crc kubenswrapper[5050]: I1123 16:26:00.877188 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c8292c7e-9f14-4f99-9a13-65472ed4a9e0-log-httpd\") pod \"ceilometer-0\" (UID: \"c8292c7e-9f14-4f99-9a13-65472ed4a9e0\") " pod="openstack/ceilometer-0" Nov 23 16:26:00 crc kubenswrapper[5050]: I1123 16:26:00.877469 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c8292c7e-9f14-4f99-9a13-65472ed4a9e0-run-httpd\") pod \"ceilometer-0\" (UID: \"c8292c7e-9f14-4f99-9a13-65472ed4a9e0\") " pod="openstack/ceilometer-0" Nov 23 16:26:00 crc kubenswrapper[5050]: I1123 16:26:00.883160 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c8292c7e-9f14-4f99-9a13-65472ed4a9e0-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"c8292c7e-9f14-4f99-9a13-65472ed4a9e0\") " pod="openstack/ceilometer-0" Nov 23 16:26:00 crc kubenswrapper[5050]: I1123 16:26:00.883312 
5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c8292c7e-9f14-4f99-9a13-65472ed4a9e0-config-data\") pod \"ceilometer-0\" (UID: \"c8292c7e-9f14-4f99-9a13-65472ed4a9e0\") " pod="openstack/ceilometer-0" Nov 23 16:26:00 crc kubenswrapper[5050]: I1123 16:26:00.892626 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c8292c7e-9f14-4f99-9a13-65472ed4a9e0-scripts\") pod \"ceilometer-0\" (UID: \"c8292c7e-9f14-4f99-9a13-65472ed4a9e0\") " pod="openstack/ceilometer-0" Nov 23 16:26:00 crc kubenswrapper[5050]: I1123 16:26:00.895355 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c8292c7e-9f14-4f99-9a13-65472ed4a9e0-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"c8292c7e-9f14-4f99-9a13-65472ed4a9e0\") " pod="openstack/ceilometer-0" Nov 23 16:26:00 crc kubenswrapper[5050]: I1123 16:26:00.898388 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xqjm5\" (UniqueName: \"kubernetes.io/projected/c8292c7e-9f14-4f99-9a13-65472ed4a9e0-kube-api-access-xqjm5\") pod \"ceilometer-0\" (UID: \"c8292c7e-9f14-4f99-9a13-65472ed4a9e0\") " pod="openstack/ceilometer-0" Nov 23 16:26:01 crc kubenswrapper[5050]: I1123 16:26:01.062428 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 23 16:26:01 crc kubenswrapper[5050]: I1123 16:26:01.330162 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-share-share1-0" event={"ID":"0a92636c-670e-44d8-ba06-c0ad4276da6d","Type":"ContainerStarted","Data":"ca3174107b017ad6d92a71b6f4da0558551a6b8ff8198e272dcac2f308c0f96b"} Nov 23 16:26:01 crc kubenswrapper[5050]: I1123 16:26:01.330599 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-share-share1-0" event={"ID":"0a92636c-670e-44d8-ba06-c0ad4276da6d","Type":"ContainerStarted","Data":"4228604c5364ba06b144728b0a197be2fdd1169717a48506386d457ff2a6db31"} Nov 23 16:26:01 crc kubenswrapper[5050]: I1123 16:26:01.362052 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/manila-share-share1-0" podStartSLOduration=3.48565037 podStartE2EDuration="10.362022533s" podCreationTimestamp="2025-11-23 16:25:51 +0000 UTC" firstStartedPulling="2025-11-23 16:25:52.857798816 +0000 UTC m=+6248.024795301" lastFinishedPulling="2025-11-23 16:25:59.734170979 +0000 UTC m=+6254.901167464" observedRunningTime="2025-11-23 16:26:01.359821351 +0000 UTC m=+6256.526817876" watchObservedRunningTime="2025-11-23 16:26:01.362022533 +0000 UTC m=+6256.529019028" Nov 23 16:26:01 crc kubenswrapper[5050]: W1123 16:26:01.562674 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc8292c7e_9f14_4f99_9a13_65472ed4a9e0.slice/crio-264b7f4afeb39b0644fd58814ceb8c29b541a0b6ff07268f5c2c8ff1d4fe06f9 WatchSource:0}: Error finding container 264b7f4afeb39b0644fd58814ceb8c29b541a0b6ff07268f5c2c8ff1d4fe06f9: Status 404 returned error can't find the container with id 264b7f4afeb39b0644fd58814ceb8c29b541a0b6ff07268f5c2c8ff1d4fe06f9 Nov 23 16:26:01 crc kubenswrapper[5050]: I1123 16:26:01.569552 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7e03c7f2-9820-4ee9-888a-0f83da39a907" path="/var/lib/kubelet/pods/7e03c7f2-9820-4ee9-888a-0f83da39a907/volumes" Nov 23 16:26:01 crc 
kubenswrapper[5050]: I1123 16:26:01.570623 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 23 16:26:02 crc kubenswrapper[5050]: I1123 16:26:02.015241 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/manila-scheduler-0" Nov 23 16:26:02 crc kubenswrapper[5050]: I1123 16:26:02.036764 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/manila-share-share1-0" Nov 23 16:26:02 crc kubenswrapper[5050]: I1123 16:26:02.317703 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-84fbdc6f57-fwcg7" Nov 23 16:26:02 crc kubenswrapper[5050]: I1123 16:26:02.403182 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-95c9fdf89-5ptdz"] Nov 23 16:26:02 crc kubenswrapper[5050]: I1123 16:26:02.403428 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-95c9fdf89-5ptdz" podUID="bca35152-4426-4f06-8268-ad2f82241949" containerName="dnsmasq-dns" containerID="cri-o://4cf53a2831965d9d530323389471986b18ffdb18110228100065f2d45a3b4123" gracePeriod=10 Nov 23 16:26:02 crc kubenswrapper[5050]: I1123 16:26:02.457080 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c8292c7e-9f14-4f99-9a13-65472ed4a9e0","Type":"ContainerStarted","Data":"264b7f4afeb39b0644fd58814ceb8c29b541a0b6ff07268f5c2c8ff1d4fe06f9"} Nov 23 16:26:03 crc kubenswrapper[5050]: I1123 16:26:03.476883 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c8292c7e-9f14-4f99-9a13-65472ed4a9e0","Type":"ContainerStarted","Data":"fa04512575e5732109b824b6e19e9b5b7bfc1e699a5eba87ed9a8cb599df6ec6"} Nov 23 16:26:03 crc kubenswrapper[5050]: I1123 16:26:03.481223 5050 generic.go:334] "Generic (PLEG): container finished" podID="bca35152-4426-4f06-8268-ad2f82241949" containerID="4cf53a2831965d9d530323389471986b18ffdb18110228100065f2d45a3b4123" exitCode=0 Nov 23 16:26:03 crc kubenswrapper[5050]: I1123 16:26:03.483063 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-95c9fdf89-5ptdz" event={"ID":"bca35152-4426-4f06-8268-ad2f82241949","Type":"ContainerDied","Data":"4cf53a2831965d9d530323389471986b18ffdb18110228100065f2d45a3b4123"} Nov 23 16:26:03 crc kubenswrapper[5050]: I1123 16:26:03.634824 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-95c9fdf89-5ptdz" Nov 23 16:26:03 crc kubenswrapper[5050]: I1123 16:26:03.703646 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bca35152-4426-4f06-8268-ad2f82241949-ovsdbserver-sb\") pod \"bca35152-4426-4f06-8268-ad2f82241949\" (UID: \"bca35152-4426-4f06-8268-ad2f82241949\") " Nov 23 16:26:03 crc kubenswrapper[5050]: I1123 16:26:03.703860 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bca35152-4426-4f06-8268-ad2f82241949-dns-svc\") pod \"bca35152-4426-4f06-8268-ad2f82241949\" (UID: \"bca35152-4426-4f06-8268-ad2f82241949\") " Nov 23 16:26:03 crc kubenswrapper[5050]: I1123 16:26:03.703997 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-228j8\" (UniqueName: \"kubernetes.io/projected/bca35152-4426-4f06-8268-ad2f82241949-kube-api-access-228j8\") pod \"bca35152-4426-4f06-8268-ad2f82241949\" (UID: \"bca35152-4426-4f06-8268-ad2f82241949\") " Nov 23 16:26:03 crc kubenswrapper[5050]: I1123 16:26:03.704084 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bca35152-4426-4f06-8268-ad2f82241949-config\") pod \"bca35152-4426-4f06-8268-ad2f82241949\" (UID: \"bca35152-4426-4f06-8268-ad2f82241949\") " Nov 23 16:26:03 crc kubenswrapper[5050]: I1123 16:26:03.704197 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bca35152-4426-4f06-8268-ad2f82241949-ovsdbserver-nb\") pod \"bca35152-4426-4f06-8268-ad2f82241949\" (UID: \"bca35152-4426-4f06-8268-ad2f82241949\") " Nov 23 16:26:03 crc kubenswrapper[5050]: I1123 16:26:03.710977 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bca35152-4426-4f06-8268-ad2f82241949-kube-api-access-228j8" (OuterVolumeSpecName: "kube-api-access-228j8") pod "bca35152-4426-4f06-8268-ad2f82241949" (UID: "bca35152-4426-4f06-8268-ad2f82241949"). InnerVolumeSpecName "kube-api-access-228j8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 16:26:03 crc kubenswrapper[5050]: I1123 16:26:03.810685 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bca35152-4426-4f06-8268-ad2f82241949-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "bca35152-4426-4f06-8268-ad2f82241949" (UID: "bca35152-4426-4f06-8268-ad2f82241949"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 16:26:03 crc kubenswrapper[5050]: I1123 16:26:03.811419 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-228j8\" (UniqueName: \"kubernetes.io/projected/bca35152-4426-4f06-8268-ad2f82241949-kube-api-access-228j8\") on node \"crc\" DevicePath \"\"" Nov 23 16:26:03 crc kubenswrapper[5050]: I1123 16:26:03.811471 5050 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bca35152-4426-4f06-8268-ad2f82241949-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 23 16:26:03 crc kubenswrapper[5050]: I1123 16:26:03.826305 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bca35152-4426-4f06-8268-ad2f82241949-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "bca35152-4426-4f06-8268-ad2f82241949" (UID: "bca35152-4426-4f06-8268-ad2f82241949"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 16:26:03 crc kubenswrapper[5050]: E1123 16:26:03.831343 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/bca35152-4426-4f06-8268-ad2f82241949-config podName:bca35152-4426-4f06-8268-ad2f82241949 nodeName:}" failed. No retries permitted until 2025-11-23 16:26:04.331319192 +0000 UTC m=+6259.498315677 (durationBeforeRetry 500ms). Error: error cleaning subPath mounts for volume "config" (UniqueName: "kubernetes.io/configmap/bca35152-4426-4f06-8268-ad2f82241949-config") pod "bca35152-4426-4f06-8268-ad2f82241949" (UID: "bca35152-4426-4f06-8268-ad2f82241949") : error deleting /var/lib/kubelet/pods/bca35152-4426-4f06-8268-ad2f82241949/volume-subpaths: remove /var/lib/kubelet/pods/bca35152-4426-4f06-8268-ad2f82241949/volume-subpaths: no such file or directory Nov 23 16:26:03 crc kubenswrapper[5050]: I1123 16:26:03.833640 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bca35152-4426-4f06-8268-ad2f82241949-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "bca35152-4426-4f06-8268-ad2f82241949" (UID: "bca35152-4426-4f06-8268-ad2f82241949"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 16:26:03 crc kubenswrapper[5050]: I1123 16:26:03.914561 5050 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bca35152-4426-4f06-8268-ad2f82241949-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 23 16:26:03 crc kubenswrapper[5050]: I1123 16:26:03.914611 5050 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bca35152-4426-4f06-8268-ad2f82241949-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 23 16:26:04 crc kubenswrapper[5050]: I1123 16:26:04.333895 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bca35152-4426-4f06-8268-ad2f82241949-config\") pod \"bca35152-4426-4f06-8268-ad2f82241949\" (UID: \"bca35152-4426-4f06-8268-ad2f82241949\") " Nov 23 16:26:04 crc kubenswrapper[5050]: I1123 16:26:04.334437 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bca35152-4426-4f06-8268-ad2f82241949-config" (OuterVolumeSpecName: "config") pod "bca35152-4426-4f06-8268-ad2f82241949" (UID: "bca35152-4426-4f06-8268-ad2f82241949"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 16:26:04 crc kubenswrapper[5050]: I1123 16:26:04.335182 5050 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bca35152-4426-4f06-8268-ad2f82241949-config\") on node \"crc\" DevicePath \"\"" Nov 23 16:26:04 crc kubenswrapper[5050]: I1123 16:26:04.511711 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c8292c7e-9f14-4f99-9a13-65472ed4a9e0","Type":"ContainerStarted","Data":"13f4727b57659715c52e96bd687f24171cd77d70bf1911e1275af62913f01e3e"} Nov 23 16:26:04 crc kubenswrapper[5050]: I1123 16:26:04.511771 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c8292c7e-9f14-4f99-9a13-65472ed4a9e0","Type":"ContainerStarted","Data":"3accdbaab4c4f1cd995bc3f0bc17b05e9eb962c80300b3eb7a32c124e70d93f7"} Nov 23 16:26:04 crc kubenswrapper[5050]: I1123 16:26:04.514549 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-95c9fdf89-5ptdz" event={"ID":"bca35152-4426-4f06-8268-ad2f82241949","Type":"ContainerDied","Data":"b932666f1e8a763012b55798f43dbf28d215814218fcaf5feb8cc830b31270af"} Nov 23 16:26:04 crc kubenswrapper[5050]: I1123 16:26:04.514679 5050 scope.go:117] "RemoveContainer" containerID="4cf53a2831965d9d530323389471986b18ffdb18110228100065f2d45a3b4123" Nov 23 16:26:04 crc kubenswrapper[5050]: I1123 16:26:04.514685 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-95c9fdf89-5ptdz" Nov 23 16:26:04 crc kubenswrapper[5050]: I1123 16:26:04.568414 5050 scope.go:117] "RemoveContainer" containerID="154251a98360974e93141cbc8f1fbfd995d7e086316dbb6417ba7a86069fb28d" Nov 23 16:26:04 crc kubenswrapper[5050]: I1123 16:26:04.580241 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-95c9fdf89-5ptdz"] Nov 23 16:26:04 crc kubenswrapper[5050]: I1123 16:26:04.590015 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-95c9fdf89-5ptdz"] Nov 23 16:26:04 crc kubenswrapper[5050]: I1123 16:26:04.926399 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 23 16:26:05 crc kubenswrapper[5050]: I1123 16:26:05.572122 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bca35152-4426-4f06-8268-ad2f82241949" path="/var/lib/kubelet/pods/bca35152-4426-4f06-8268-ad2f82241949/volumes" Nov 23 16:26:06 crc kubenswrapper[5050]: I1123 16:26:06.552438 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c8292c7e-9f14-4f99-9a13-65472ed4a9e0","Type":"ContainerStarted","Data":"3d9db83f3c939a3c3b52e0e8f5c4d4e0a83f9438bb102c9a1d34d7dd10b8e7c1"} Nov 23 16:26:06 crc kubenswrapper[5050]: I1123 16:26:06.553382 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 23 16:26:06 crc kubenswrapper[5050]: I1123 16:26:06.552754 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c8292c7e-9f14-4f99-9a13-65472ed4a9e0" containerName="sg-core" containerID="cri-o://13f4727b57659715c52e96bd687f24171cd77d70bf1911e1275af62913f01e3e" gracePeriod=30 Nov 23 16:26:06 crc kubenswrapper[5050]: I1123 16:26:06.552637 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c8292c7e-9f14-4f99-9a13-65472ed4a9e0" containerName="ceilometer-central-agent" 
containerID="cri-o://fa04512575e5732109b824b6e19e9b5b7bfc1e699a5eba87ed9a8cb599df6ec6" gracePeriod=30 Nov 23 16:26:06 crc kubenswrapper[5050]: I1123 16:26:06.552836 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c8292c7e-9f14-4f99-9a13-65472ed4a9e0" containerName="ceilometer-notification-agent" containerID="cri-o://3accdbaab4c4f1cd995bc3f0bc17b05e9eb962c80300b3eb7a32c124e70d93f7" gracePeriod=30 Nov 23 16:26:06 crc kubenswrapper[5050]: I1123 16:26:06.552836 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c8292c7e-9f14-4f99-9a13-65472ed4a9e0" containerName="proxy-httpd" containerID="cri-o://3d9db83f3c939a3c3b52e0e8f5c4d4e0a83f9438bb102c9a1d34d7dd10b8e7c1" gracePeriod=30 Nov 23 16:26:06 crc kubenswrapper[5050]: I1123 16:26:06.590917 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.172896175 podStartE2EDuration="6.590885508s" podCreationTimestamp="2025-11-23 16:26:00 +0000 UTC" firstStartedPulling="2025-11-23 16:26:01.568026624 +0000 UTC m=+6256.735023109" lastFinishedPulling="2025-11-23 16:26:05.986015947 +0000 UTC m=+6261.153012442" observedRunningTime="2025-11-23 16:26:06.583427197 +0000 UTC m=+6261.750423722" watchObservedRunningTime="2025-11-23 16:26:06.590885508 +0000 UTC m=+6261.757881993" Nov 23 16:26:07 crc kubenswrapper[5050]: I1123 16:26:07.163105 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-dlwfc"] Nov 23 16:26:07 crc kubenswrapper[5050]: E1123 16:26:07.163853 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bca35152-4426-4f06-8268-ad2f82241949" containerName="init" Nov 23 16:26:07 crc kubenswrapper[5050]: I1123 16:26:07.163879 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="bca35152-4426-4f06-8268-ad2f82241949" containerName="init" Nov 23 16:26:07 crc kubenswrapper[5050]: E1123 16:26:07.163898 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bca35152-4426-4f06-8268-ad2f82241949" containerName="dnsmasq-dns" Nov 23 16:26:07 crc kubenswrapper[5050]: I1123 16:26:07.163907 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="bca35152-4426-4f06-8268-ad2f82241949" containerName="dnsmasq-dns" Nov 23 16:26:07 crc kubenswrapper[5050]: I1123 16:26:07.164196 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="bca35152-4426-4f06-8268-ad2f82241949" containerName="dnsmasq-dns" Nov 23 16:26:07 crc kubenswrapper[5050]: I1123 16:26:07.166248 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dlwfc" Nov 23 16:26:07 crc kubenswrapper[5050]: I1123 16:26:07.189982 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-dlwfc"] Nov 23 16:26:07 crc kubenswrapper[5050]: I1123 16:26:07.215158 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cgm7j\" (UniqueName: \"kubernetes.io/projected/1eef8db6-6ab9-40d1-aefa-8b92e42693f3-kube-api-access-cgm7j\") pod \"redhat-marketplace-dlwfc\" (UID: \"1eef8db6-6ab9-40d1-aefa-8b92e42693f3\") " pod="openshift-marketplace/redhat-marketplace-dlwfc" Nov 23 16:26:07 crc kubenswrapper[5050]: I1123 16:26:07.215225 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1eef8db6-6ab9-40d1-aefa-8b92e42693f3-utilities\") pod \"redhat-marketplace-dlwfc\" (UID: \"1eef8db6-6ab9-40d1-aefa-8b92e42693f3\") " pod="openshift-marketplace/redhat-marketplace-dlwfc" Nov 23 16:26:07 crc kubenswrapper[5050]: I1123 16:26:07.215348 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1eef8db6-6ab9-40d1-aefa-8b92e42693f3-catalog-content\") pod \"redhat-marketplace-dlwfc\" (UID: \"1eef8db6-6ab9-40d1-aefa-8b92e42693f3\") " pod="openshift-marketplace/redhat-marketplace-dlwfc" Nov 23 16:26:07 crc kubenswrapper[5050]: I1123 16:26:07.317910 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cgm7j\" (UniqueName: \"kubernetes.io/projected/1eef8db6-6ab9-40d1-aefa-8b92e42693f3-kube-api-access-cgm7j\") pod \"redhat-marketplace-dlwfc\" (UID: \"1eef8db6-6ab9-40d1-aefa-8b92e42693f3\") " pod="openshift-marketplace/redhat-marketplace-dlwfc" Nov 23 16:26:07 crc kubenswrapper[5050]: I1123 16:26:07.317982 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1eef8db6-6ab9-40d1-aefa-8b92e42693f3-utilities\") pod \"redhat-marketplace-dlwfc\" (UID: \"1eef8db6-6ab9-40d1-aefa-8b92e42693f3\") " pod="openshift-marketplace/redhat-marketplace-dlwfc" Nov 23 16:26:07 crc kubenswrapper[5050]: I1123 16:26:07.318141 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1eef8db6-6ab9-40d1-aefa-8b92e42693f3-catalog-content\") pod \"redhat-marketplace-dlwfc\" (UID: \"1eef8db6-6ab9-40d1-aefa-8b92e42693f3\") " pod="openshift-marketplace/redhat-marketplace-dlwfc" Nov 23 16:26:07 crc kubenswrapper[5050]: I1123 16:26:07.318947 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1eef8db6-6ab9-40d1-aefa-8b92e42693f3-utilities\") pod \"redhat-marketplace-dlwfc\" (UID: \"1eef8db6-6ab9-40d1-aefa-8b92e42693f3\") " pod="openshift-marketplace/redhat-marketplace-dlwfc" Nov 23 16:26:07 crc kubenswrapper[5050]: I1123 16:26:07.319189 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1eef8db6-6ab9-40d1-aefa-8b92e42693f3-catalog-content\") pod \"redhat-marketplace-dlwfc\" (UID: \"1eef8db6-6ab9-40d1-aefa-8b92e42693f3\") " pod="openshift-marketplace/redhat-marketplace-dlwfc" Nov 23 16:26:07 crc kubenswrapper[5050]: I1123 16:26:07.344412 5050 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-cgm7j\" (UniqueName: \"kubernetes.io/projected/1eef8db6-6ab9-40d1-aefa-8b92e42693f3-kube-api-access-cgm7j\") pod \"redhat-marketplace-dlwfc\" (UID: \"1eef8db6-6ab9-40d1-aefa-8b92e42693f3\") " pod="openshift-marketplace/redhat-marketplace-dlwfc" Nov 23 16:26:07 crc kubenswrapper[5050]: I1123 16:26:07.495971 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dlwfc" Nov 23 16:26:07 crc kubenswrapper[5050]: I1123 16:26:07.588103 5050 generic.go:334] "Generic (PLEG): container finished" podID="c8292c7e-9f14-4f99-9a13-65472ed4a9e0" containerID="3d9db83f3c939a3c3b52e0e8f5c4d4e0a83f9438bb102c9a1d34d7dd10b8e7c1" exitCode=0 Nov 23 16:26:07 crc kubenswrapper[5050]: I1123 16:26:07.588153 5050 generic.go:334] "Generic (PLEG): container finished" podID="c8292c7e-9f14-4f99-9a13-65472ed4a9e0" containerID="13f4727b57659715c52e96bd687f24171cd77d70bf1911e1275af62913f01e3e" exitCode=2 Nov 23 16:26:07 crc kubenswrapper[5050]: I1123 16:26:07.588162 5050 generic.go:334] "Generic (PLEG): container finished" podID="c8292c7e-9f14-4f99-9a13-65472ed4a9e0" containerID="3accdbaab4c4f1cd995bc3f0bc17b05e9eb962c80300b3eb7a32c124e70d93f7" exitCode=0 Nov 23 16:26:07 crc kubenswrapper[5050]: I1123 16:26:07.588162 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c8292c7e-9f14-4f99-9a13-65472ed4a9e0","Type":"ContainerDied","Data":"3d9db83f3c939a3c3b52e0e8f5c4d4e0a83f9438bb102c9a1d34d7dd10b8e7c1"} Nov 23 16:26:07 crc kubenswrapper[5050]: I1123 16:26:07.588209 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c8292c7e-9f14-4f99-9a13-65472ed4a9e0","Type":"ContainerDied","Data":"13f4727b57659715c52e96bd687f24171cd77d70bf1911e1275af62913f01e3e"} Nov 23 16:26:07 crc kubenswrapper[5050]: I1123 16:26:07.588226 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c8292c7e-9f14-4f99-9a13-65472ed4a9e0","Type":"ContainerDied","Data":"3accdbaab4c4f1cd995bc3f0bc17b05e9eb962c80300b3eb7a32c124e70d93f7"} Nov 23 16:26:08 crc kubenswrapper[5050]: W1123 16:26:08.012469 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1eef8db6_6ab9_40d1_aefa_8b92e42693f3.slice/crio-4d0452906cc0b00017f58ebf220e57321f044c69b4a57dfcfa290b8ae000de59 WatchSource:0}: Error finding container 4d0452906cc0b00017f58ebf220e57321f044c69b4a57dfcfa290b8ae000de59: Status 404 returned error can't find the container with id 4d0452906cc0b00017f58ebf220e57321f044c69b4a57dfcfa290b8ae000de59 Nov 23 16:26:08 crc kubenswrapper[5050]: I1123 16:26:08.013264 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-dlwfc"] Nov 23 16:26:08 crc kubenswrapper[5050]: I1123 16:26:08.598543 5050 generic.go:334] "Generic (PLEG): container finished" podID="1eef8db6-6ab9-40d1-aefa-8b92e42693f3" containerID="8125de89bfeadffd22aa7f0c60f98ebfe8cdb16c7aaaf32817ab74a3f19c2a3c" exitCode=0 Nov 23 16:26:08 crc kubenswrapper[5050]: I1123 16:26:08.598737 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dlwfc" event={"ID":"1eef8db6-6ab9-40d1-aefa-8b92e42693f3","Type":"ContainerDied","Data":"8125de89bfeadffd22aa7f0c60f98ebfe8cdb16c7aaaf32817ab74a3f19c2a3c"} Nov 23 16:26:08 crc kubenswrapper[5050]: I1123 16:26:08.598867 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/redhat-marketplace-dlwfc" event={"ID":"1eef8db6-6ab9-40d1-aefa-8b92e42693f3","Type":"ContainerStarted","Data":"4d0452906cc0b00017f58ebf220e57321f044c69b4a57dfcfa290b8ae000de59"} Nov 23 16:26:08 crc kubenswrapper[5050]: I1123 16:26:08.600614 5050 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 23 16:26:09 crc kubenswrapper[5050]: I1123 16:26:09.611817 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 23 16:26:09 crc kubenswrapper[5050]: I1123 16:26:09.614511 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dlwfc" event={"ID":"1eef8db6-6ab9-40d1-aefa-8b92e42693f3","Type":"ContainerStarted","Data":"828027face524e81d832522aca98f90407da6b5af139785f9cddbb0d7379a595"} Nov 23 16:26:09 crc kubenswrapper[5050]: I1123 16:26:09.628836 5050 generic.go:334] "Generic (PLEG): container finished" podID="c8292c7e-9f14-4f99-9a13-65472ed4a9e0" containerID="fa04512575e5732109b824b6e19e9b5b7bfc1e699a5eba87ed9a8cb599df6ec6" exitCode=0 Nov 23 16:26:09 crc kubenswrapper[5050]: I1123 16:26:09.628895 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c8292c7e-9f14-4f99-9a13-65472ed4a9e0","Type":"ContainerDied","Data":"fa04512575e5732109b824b6e19e9b5b7bfc1e699a5eba87ed9a8cb599df6ec6"} Nov 23 16:26:09 crc kubenswrapper[5050]: I1123 16:26:09.628937 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c8292c7e-9f14-4f99-9a13-65472ed4a9e0","Type":"ContainerDied","Data":"264b7f4afeb39b0644fd58814ceb8c29b541a0b6ff07268f5c2c8ff1d4fe06f9"} Nov 23 16:26:09 crc kubenswrapper[5050]: I1123 16:26:09.628960 5050 scope.go:117] "RemoveContainer" containerID="3d9db83f3c939a3c3b52e0e8f5c4d4e0a83f9438bb102c9a1d34d7dd10b8e7c1" Nov 23 16:26:09 crc kubenswrapper[5050]: I1123 16:26:09.629022 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 23 16:26:09 crc kubenswrapper[5050]: I1123 16:26:09.663786 5050 scope.go:117] "RemoveContainer" containerID="13f4727b57659715c52e96bd687f24171cd77d70bf1911e1275af62913f01e3e" Nov 23 16:26:09 crc kubenswrapper[5050]: I1123 16:26:09.716227 5050 scope.go:117] "RemoveContainer" containerID="3accdbaab4c4f1cd995bc3f0bc17b05e9eb962c80300b3eb7a32c124e70d93f7" Nov 23 16:26:09 crc kubenswrapper[5050]: I1123 16:26:09.744161 5050 scope.go:117] "RemoveContainer" containerID="fa04512575e5732109b824b6e19e9b5b7bfc1e699a5eba87ed9a8cb599df6ec6" Nov 23 16:26:09 crc kubenswrapper[5050]: I1123 16:26:09.767379 5050 scope.go:117] "RemoveContainer" containerID="3d9db83f3c939a3c3b52e0e8f5c4d4e0a83f9438bb102c9a1d34d7dd10b8e7c1" Nov 23 16:26:09 crc kubenswrapper[5050]: E1123 16:26:09.768022 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3d9db83f3c939a3c3b52e0e8f5c4d4e0a83f9438bb102c9a1d34d7dd10b8e7c1\": container with ID starting with 3d9db83f3c939a3c3b52e0e8f5c4d4e0a83f9438bb102c9a1d34d7dd10b8e7c1 not found: ID does not exist" containerID="3d9db83f3c939a3c3b52e0e8f5c4d4e0a83f9438bb102c9a1d34d7dd10b8e7c1" Nov 23 16:26:09 crc kubenswrapper[5050]: I1123 16:26:09.768060 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3d9db83f3c939a3c3b52e0e8f5c4d4e0a83f9438bb102c9a1d34d7dd10b8e7c1"} err="failed to get container status \"3d9db83f3c939a3c3b52e0e8f5c4d4e0a83f9438bb102c9a1d34d7dd10b8e7c1\": rpc error: code = NotFound desc = could not find container \"3d9db83f3c939a3c3b52e0e8f5c4d4e0a83f9438bb102c9a1d34d7dd10b8e7c1\": container with ID starting with 3d9db83f3c939a3c3b52e0e8f5c4d4e0a83f9438bb102c9a1d34d7dd10b8e7c1 not found: ID does not exist" Nov 23 16:26:09 crc kubenswrapper[5050]: I1123 16:26:09.768089 5050 scope.go:117] "RemoveContainer" containerID="13f4727b57659715c52e96bd687f24171cd77d70bf1911e1275af62913f01e3e" Nov 23 16:26:09 crc kubenswrapper[5050]: E1123 16:26:09.768506 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"13f4727b57659715c52e96bd687f24171cd77d70bf1911e1275af62913f01e3e\": container with ID starting with 13f4727b57659715c52e96bd687f24171cd77d70bf1911e1275af62913f01e3e not found: ID does not exist" containerID="13f4727b57659715c52e96bd687f24171cd77d70bf1911e1275af62913f01e3e" Nov 23 16:26:09 crc kubenswrapper[5050]: I1123 16:26:09.768530 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"13f4727b57659715c52e96bd687f24171cd77d70bf1911e1275af62913f01e3e"} err="failed to get container status \"13f4727b57659715c52e96bd687f24171cd77d70bf1911e1275af62913f01e3e\": rpc error: code = NotFound desc = could not find container \"13f4727b57659715c52e96bd687f24171cd77d70bf1911e1275af62913f01e3e\": container with ID starting with 13f4727b57659715c52e96bd687f24171cd77d70bf1911e1275af62913f01e3e not found: ID does not exist" Nov 23 16:26:09 crc kubenswrapper[5050]: I1123 16:26:09.768546 5050 scope.go:117] "RemoveContainer" containerID="3accdbaab4c4f1cd995bc3f0bc17b05e9eb962c80300b3eb7a32c124e70d93f7" Nov 23 16:26:09 crc kubenswrapper[5050]: E1123 16:26:09.768973 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3accdbaab4c4f1cd995bc3f0bc17b05e9eb962c80300b3eb7a32c124e70d93f7\": container with ID starting with 
3accdbaab4c4f1cd995bc3f0bc17b05e9eb962c80300b3eb7a32c124e70d93f7 not found: ID does not exist" containerID="3accdbaab4c4f1cd995bc3f0bc17b05e9eb962c80300b3eb7a32c124e70d93f7" Nov 23 16:26:09 crc kubenswrapper[5050]: I1123 16:26:09.768993 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3accdbaab4c4f1cd995bc3f0bc17b05e9eb962c80300b3eb7a32c124e70d93f7"} err="failed to get container status \"3accdbaab4c4f1cd995bc3f0bc17b05e9eb962c80300b3eb7a32c124e70d93f7\": rpc error: code = NotFound desc = could not find container \"3accdbaab4c4f1cd995bc3f0bc17b05e9eb962c80300b3eb7a32c124e70d93f7\": container with ID starting with 3accdbaab4c4f1cd995bc3f0bc17b05e9eb962c80300b3eb7a32c124e70d93f7 not found: ID does not exist" Nov 23 16:26:09 crc kubenswrapper[5050]: I1123 16:26:09.769016 5050 scope.go:117] "RemoveContainer" containerID="fa04512575e5732109b824b6e19e9b5b7bfc1e699a5eba87ed9a8cb599df6ec6" Nov 23 16:26:09 crc kubenswrapper[5050]: E1123 16:26:09.769309 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fa04512575e5732109b824b6e19e9b5b7bfc1e699a5eba87ed9a8cb599df6ec6\": container with ID starting with fa04512575e5732109b824b6e19e9b5b7bfc1e699a5eba87ed9a8cb599df6ec6 not found: ID does not exist" containerID="fa04512575e5732109b824b6e19e9b5b7bfc1e699a5eba87ed9a8cb599df6ec6" Nov 23 16:26:09 crc kubenswrapper[5050]: I1123 16:26:09.769332 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fa04512575e5732109b824b6e19e9b5b7bfc1e699a5eba87ed9a8cb599df6ec6"} err="failed to get container status \"fa04512575e5732109b824b6e19e9b5b7bfc1e699a5eba87ed9a8cb599df6ec6\": rpc error: code = NotFound desc = could not find container \"fa04512575e5732109b824b6e19e9b5b7bfc1e699a5eba87ed9a8cb599df6ec6\": container with ID starting with fa04512575e5732109b824b6e19e9b5b7bfc1e699a5eba87ed9a8cb599df6ec6 not found: ID does not exist" Nov 23 16:26:09 crc kubenswrapper[5050]: I1123 16:26:09.808247 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c8292c7e-9f14-4f99-9a13-65472ed4a9e0-config-data\") pod \"c8292c7e-9f14-4f99-9a13-65472ed4a9e0\" (UID: \"c8292c7e-9f14-4f99-9a13-65472ed4a9e0\") " Nov 23 16:26:09 crc kubenswrapper[5050]: I1123 16:26:09.808495 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c8292c7e-9f14-4f99-9a13-65472ed4a9e0-sg-core-conf-yaml\") pod \"c8292c7e-9f14-4f99-9a13-65472ed4a9e0\" (UID: \"c8292c7e-9f14-4f99-9a13-65472ed4a9e0\") " Nov 23 16:26:09 crc kubenswrapper[5050]: I1123 16:26:09.808606 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c8292c7e-9f14-4f99-9a13-65472ed4a9e0-log-httpd\") pod \"c8292c7e-9f14-4f99-9a13-65472ed4a9e0\" (UID: \"c8292c7e-9f14-4f99-9a13-65472ed4a9e0\") " Nov 23 16:26:09 crc kubenswrapper[5050]: I1123 16:26:09.808756 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xqjm5\" (UniqueName: \"kubernetes.io/projected/c8292c7e-9f14-4f99-9a13-65472ed4a9e0-kube-api-access-xqjm5\") pod \"c8292c7e-9f14-4f99-9a13-65472ed4a9e0\" (UID: \"c8292c7e-9f14-4f99-9a13-65472ed4a9e0\") " Nov 23 16:26:09 crc kubenswrapper[5050]: I1123 16:26:09.808795 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started 
for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c8292c7e-9f14-4f99-9a13-65472ed4a9e0-scripts\") pod \"c8292c7e-9f14-4f99-9a13-65472ed4a9e0\" (UID: \"c8292c7e-9f14-4f99-9a13-65472ed4a9e0\") " Nov 23 16:26:09 crc kubenswrapper[5050]: I1123 16:26:09.808878 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c8292c7e-9f14-4f99-9a13-65472ed4a9e0-run-httpd\") pod \"c8292c7e-9f14-4f99-9a13-65472ed4a9e0\" (UID: \"c8292c7e-9f14-4f99-9a13-65472ed4a9e0\") " Nov 23 16:26:09 crc kubenswrapper[5050]: I1123 16:26:09.809277 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c8292c7e-9f14-4f99-9a13-65472ed4a9e0-combined-ca-bundle\") pod \"c8292c7e-9f14-4f99-9a13-65472ed4a9e0\" (UID: \"c8292c7e-9f14-4f99-9a13-65472ed4a9e0\") " Nov 23 16:26:09 crc kubenswrapper[5050]: I1123 16:26:09.809339 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c8292c7e-9f14-4f99-9a13-65472ed4a9e0-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "c8292c7e-9f14-4f99-9a13-65472ed4a9e0" (UID: "c8292c7e-9f14-4f99-9a13-65472ed4a9e0"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 16:26:09 crc kubenswrapper[5050]: I1123 16:26:09.810001 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c8292c7e-9f14-4f99-9a13-65472ed4a9e0-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "c8292c7e-9f14-4f99-9a13-65472ed4a9e0" (UID: "c8292c7e-9f14-4f99-9a13-65472ed4a9e0"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 16:26:09 crc kubenswrapper[5050]: I1123 16:26:09.812421 5050 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c8292c7e-9f14-4f99-9a13-65472ed4a9e0-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 23 16:26:09 crc kubenswrapper[5050]: I1123 16:26:09.812511 5050 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c8292c7e-9f14-4f99-9a13-65472ed4a9e0-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 23 16:26:09 crc kubenswrapper[5050]: I1123 16:26:09.825743 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c8292c7e-9f14-4f99-9a13-65472ed4a9e0-scripts" (OuterVolumeSpecName: "scripts") pod "c8292c7e-9f14-4f99-9a13-65472ed4a9e0" (UID: "c8292c7e-9f14-4f99-9a13-65472ed4a9e0"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:26:09 crc kubenswrapper[5050]: I1123 16:26:09.825895 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c8292c7e-9f14-4f99-9a13-65472ed4a9e0-kube-api-access-xqjm5" (OuterVolumeSpecName: "kube-api-access-xqjm5") pod "c8292c7e-9f14-4f99-9a13-65472ed4a9e0" (UID: "c8292c7e-9f14-4f99-9a13-65472ed4a9e0"). InnerVolumeSpecName "kube-api-access-xqjm5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 16:26:09 crc kubenswrapper[5050]: I1123 16:26:09.858522 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c8292c7e-9f14-4f99-9a13-65472ed4a9e0-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "c8292c7e-9f14-4f99-9a13-65472ed4a9e0" (UID: "c8292c7e-9f14-4f99-9a13-65472ed4a9e0"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:26:09 crc kubenswrapper[5050]: I1123 16:26:09.915795 5050 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c8292c7e-9f14-4f99-9a13-65472ed4a9e0-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 23 16:26:09 crc kubenswrapper[5050]: I1123 16:26:09.915844 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xqjm5\" (UniqueName: \"kubernetes.io/projected/c8292c7e-9f14-4f99-9a13-65472ed4a9e0-kube-api-access-xqjm5\") on node \"crc\" DevicePath \"\"" Nov 23 16:26:09 crc kubenswrapper[5050]: I1123 16:26:09.915855 5050 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c8292c7e-9f14-4f99-9a13-65472ed4a9e0-scripts\") on node \"crc\" DevicePath \"\"" Nov 23 16:26:09 crc kubenswrapper[5050]: I1123 16:26:09.954615 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c8292c7e-9f14-4f99-9a13-65472ed4a9e0-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c8292c7e-9f14-4f99-9a13-65472ed4a9e0" (UID: "c8292c7e-9f14-4f99-9a13-65472ed4a9e0"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:26:09 crc kubenswrapper[5050]: I1123 16:26:09.968200 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c8292c7e-9f14-4f99-9a13-65472ed4a9e0-config-data" (OuterVolumeSpecName: "config-data") pod "c8292c7e-9f14-4f99-9a13-65472ed4a9e0" (UID: "c8292c7e-9f14-4f99-9a13-65472ed4a9e0"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:26:10 crc kubenswrapper[5050]: I1123 16:26:10.018750 5050 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c8292c7e-9f14-4f99-9a13-65472ed4a9e0-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 16:26:10 crc kubenswrapper[5050]: I1123 16:26:10.019110 5050 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c8292c7e-9f14-4f99-9a13-65472ed4a9e0-config-data\") on node \"crc\" DevicePath \"\"" Nov 23 16:26:10 crc kubenswrapper[5050]: I1123 16:26:10.275711 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 23 16:26:10 crc kubenswrapper[5050]: I1123 16:26:10.286131 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 23 16:26:10 crc kubenswrapper[5050]: I1123 16:26:10.321557 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 23 16:26:10 crc kubenswrapper[5050]: E1123 16:26:10.322430 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c8292c7e-9f14-4f99-9a13-65472ed4a9e0" containerName="sg-core" Nov 23 16:26:10 crc kubenswrapper[5050]: I1123 16:26:10.322514 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="c8292c7e-9f14-4f99-9a13-65472ed4a9e0" containerName="sg-core" Nov 23 16:26:10 crc kubenswrapper[5050]: E1123 16:26:10.322581 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c8292c7e-9f14-4f99-9a13-65472ed4a9e0" containerName="ceilometer-central-agent" Nov 23 16:26:10 crc kubenswrapper[5050]: I1123 16:26:10.322631 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="c8292c7e-9f14-4f99-9a13-65472ed4a9e0" containerName="ceilometer-central-agent" Nov 23 16:26:10 crc kubenswrapper[5050]: E1123 
16:26:10.322725 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c8292c7e-9f14-4f99-9a13-65472ed4a9e0" containerName="ceilometer-notification-agent" Nov 23 16:26:10 crc kubenswrapper[5050]: I1123 16:26:10.322821 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="c8292c7e-9f14-4f99-9a13-65472ed4a9e0" containerName="ceilometer-notification-agent" Nov 23 16:26:10 crc kubenswrapper[5050]: E1123 16:26:10.322911 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c8292c7e-9f14-4f99-9a13-65472ed4a9e0" containerName="proxy-httpd" Nov 23 16:26:10 crc kubenswrapper[5050]: I1123 16:26:10.322960 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="c8292c7e-9f14-4f99-9a13-65472ed4a9e0" containerName="proxy-httpd" Nov 23 16:26:10 crc kubenswrapper[5050]: I1123 16:26:10.323320 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="c8292c7e-9f14-4f99-9a13-65472ed4a9e0" containerName="proxy-httpd" Nov 23 16:26:10 crc kubenswrapper[5050]: I1123 16:26:10.323429 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="c8292c7e-9f14-4f99-9a13-65472ed4a9e0" containerName="ceilometer-notification-agent" Nov 23 16:26:10 crc kubenswrapper[5050]: I1123 16:26:10.323539 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="c8292c7e-9f14-4f99-9a13-65472ed4a9e0" containerName="ceilometer-central-agent" Nov 23 16:26:10 crc kubenswrapper[5050]: I1123 16:26:10.323614 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="c8292c7e-9f14-4f99-9a13-65472ed4a9e0" containerName="sg-core" Nov 23 16:26:10 crc kubenswrapper[5050]: I1123 16:26:10.326178 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 23 16:26:10 crc kubenswrapper[5050]: I1123 16:26:10.345364 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 23 16:26:10 crc kubenswrapper[5050]: I1123 16:26:10.350496 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 23 16:26:10 crc kubenswrapper[5050]: I1123 16:26:10.350789 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 23 16:26:10 crc kubenswrapper[5050]: I1123 16:26:10.431947 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dfb247e9-89fe-4eb2-a573-3797ccad68f8-config-data\") pod \"ceilometer-0\" (UID: \"dfb247e9-89fe-4eb2-a573-3797ccad68f8\") " pod="openstack/ceilometer-0" Nov 23 16:26:10 crc kubenswrapper[5050]: I1123 16:26:10.432082 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/dfb247e9-89fe-4eb2-a573-3797ccad68f8-run-httpd\") pod \"ceilometer-0\" (UID: \"dfb247e9-89fe-4eb2-a573-3797ccad68f8\") " pod="openstack/ceilometer-0" Nov 23 16:26:10 crc kubenswrapper[5050]: I1123 16:26:10.432348 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wcpj6\" (UniqueName: \"kubernetes.io/projected/dfb247e9-89fe-4eb2-a573-3797ccad68f8-kube-api-access-wcpj6\") pod \"ceilometer-0\" (UID: \"dfb247e9-89fe-4eb2-a573-3797ccad68f8\") " pod="openstack/ceilometer-0" Nov 23 16:26:10 crc kubenswrapper[5050]: I1123 16:26:10.432562 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" 
(UniqueName: \"kubernetes.io/secret/dfb247e9-89fe-4eb2-a573-3797ccad68f8-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"dfb247e9-89fe-4eb2-a573-3797ccad68f8\") " pod="openstack/ceilometer-0" Nov 23 16:26:10 crc kubenswrapper[5050]: I1123 16:26:10.432627 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/dfb247e9-89fe-4eb2-a573-3797ccad68f8-log-httpd\") pod \"ceilometer-0\" (UID: \"dfb247e9-89fe-4eb2-a573-3797ccad68f8\") " pod="openstack/ceilometer-0" Nov 23 16:26:10 crc kubenswrapper[5050]: I1123 16:26:10.432810 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dfb247e9-89fe-4eb2-a573-3797ccad68f8-scripts\") pod \"ceilometer-0\" (UID: \"dfb247e9-89fe-4eb2-a573-3797ccad68f8\") " pod="openstack/ceilometer-0" Nov 23 16:26:10 crc kubenswrapper[5050]: I1123 16:26:10.432932 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/dfb247e9-89fe-4eb2-a573-3797ccad68f8-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"dfb247e9-89fe-4eb2-a573-3797ccad68f8\") " pod="openstack/ceilometer-0" Nov 23 16:26:10 crc kubenswrapper[5050]: E1123 16:26:10.512792 5050 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc8292c7e_9f14_4f99_9a13_65472ed4a9e0.slice\": RecentStats: unable to find data in memory cache]" Nov 23 16:26:10 crc kubenswrapper[5050]: I1123 16:26:10.535824 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dfb247e9-89fe-4eb2-a573-3797ccad68f8-config-data\") pod \"ceilometer-0\" (UID: \"dfb247e9-89fe-4eb2-a573-3797ccad68f8\") " pod="openstack/ceilometer-0" Nov 23 16:26:10 crc kubenswrapper[5050]: I1123 16:26:10.536359 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/dfb247e9-89fe-4eb2-a573-3797ccad68f8-run-httpd\") pod \"ceilometer-0\" (UID: \"dfb247e9-89fe-4eb2-a573-3797ccad68f8\") " pod="openstack/ceilometer-0" Nov 23 16:26:10 crc kubenswrapper[5050]: I1123 16:26:10.536466 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wcpj6\" (UniqueName: \"kubernetes.io/projected/dfb247e9-89fe-4eb2-a573-3797ccad68f8-kube-api-access-wcpj6\") pod \"ceilometer-0\" (UID: \"dfb247e9-89fe-4eb2-a573-3797ccad68f8\") " pod="openstack/ceilometer-0" Nov 23 16:26:10 crc kubenswrapper[5050]: I1123 16:26:10.536524 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dfb247e9-89fe-4eb2-a573-3797ccad68f8-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"dfb247e9-89fe-4eb2-a573-3797ccad68f8\") " pod="openstack/ceilometer-0" Nov 23 16:26:10 crc kubenswrapper[5050]: I1123 16:26:10.536551 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/dfb247e9-89fe-4eb2-a573-3797ccad68f8-log-httpd\") pod \"ceilometer-0\" (UID: \"dfb247e9-89fe-4eb2-a573-3797ccad68f8\") " pod="openstack/ceilometer-0" Nov 23 16:26:10 crc kubenswrapper[5050]: I1123 16:26:10.536591 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dfb247e9-89fe-4eb2-a573-3797ccad68f8-scripts\") pod \"ceilometer-0\" (UID: \"dfb247e9-89fe-4eb2-a573-3797ccad68f8\") " pod="openstack/ceilometer-0" Nov 23 16:26:10 crc kubenswrapper[5050]: I1123 16:26:10.537067 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/dfb247e9-89fe-4eb2-a573-3797ccad68f8-run-httpd\") pod \"ceilometer-0\" (UID: \"dfb247e9-89fe-4eb2-a573-3797ccad68f8\") " pod="openstack/ceilometer-0" Nov 23 16:26:10 crc kubenswrapper[5050]: I1123 16:26:10.537346 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/dfb247e9-89fe-4eb2-a573-3797ccad68f8-log-httpd\") pod \"ceilometer-0\" (UID: \"dfb247e9-89fe-4eb2-a573-3797ccad68f8\") " pod="openstack/ceilometer-0" Nov 23 16:26:10 crc kubenswrapper[5050]: I1123 16:26:10.537405 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/dfb247e9-89fe-4eb2-a573-3797ccad68f8-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"dfb247e9-89fe-4eb2-a573-3797ccad68f8\") " pod="openstack/ceilometer-0" Nov 23 16:26:10 crc kubenswrapper[5050]: I1123 16:26:10.544019 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/dfb247e9-89fe-4eb2-a573-3797ccad68f8-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"dfb247e9-89fe-4eb2-a573-3797ccad68f8\") " pod="openstack/ceilometer-0" Nov 23 16:26:10 crc kubenswrapper[5050]: I1123 16:26:10.544637 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dfb247e9-89fe-4eb2-a573-3797ccad68f8-config-data\") pod \"ceilometer-0\" (UID: \"dfb247e9-89fe-4eb2-a573-3797ccad68f8\") " pod="openstack/ceilometer-0" Nov 23 16:26:10 crc kubenswrapper[5050]: I1123 16:26:10.544936 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dfb247e9-89fe-4eb2-a573-3797ccad68f8-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"dfb247e9-89fe-4eb2-a573-3797ccad68f8\") " pod="openstack/ceilometer-0" Nov 23 16:26:10 crc kubenswrapper[5050]: I1123 16:26:10.547650 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dfb247e9-89fe-4eb2-a573-3797ccad68f8-scripts\") pod \"ceilometer-0\" (UID: \"dfb247e9-89fe-4eb2-a573-3797ccad68f8\") " pod="openstack/ceilometer-0" Nov 23 16:26:10 crc kubenswrapper[5050]: I1123 16:26:10.560315 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wcpj6\" (UniqueName: \"kubernetes.io/projected/dfb247e9-89fe-4eb2-a573-3797ccad68f8-kube-api-access-wcpj6\") pod \"ceilometer-0\" (UID: \"dfb247e9-89fe-4eb2-a573-3797ccad68f8\") " pod="openstack/ceilometer-0" Nov 23 16:26:10 crc kubenswrapper[5050]: I1123 16:26:10.648976 5050 generic.go:334] "Generic (PLEG): container finished" podID="1eef8db6-6ab9-40d1-aefa-8b92e42693f3" containerID="828027face524e81d832522aca98f90407da6b5af139785f9cddbb0d7379a595" exitCode=0 Nov 23 16:26:10 crc kubenswrapper[5050]: I1123 16:26:10.649041 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dlwfc" 
event={"ID":"1eef8db6-6ab9-40d1-aefa-8b92e42693f3","Type":"ContainerDied","Data":"828027face524e81d832522aca98f90407da6b5af139785f9cddbb0d7379a595"} Nov 23 16:26:10 crc kubenswrapper[5050]: I1123 16:26:10.672937 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 23 16:26:11 crc kubenswrapper[5050]: I1123 16:26:11.227469 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 23 16:26:11 crc kubenswrapper[5050]: W1123 16:26:11.227434 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddfb247e9_89fe_4eb2_a573_3797ccad68f8.slice/crio-bd2929d75853086ca64d249ff9a147231273f672b51ed7414927126a178741cc WatchSource:0}: Error finding container bd2929d75853086ca64d249ff9a147231273f672b51ed7414927126a178741cc: Status 404 returned error can't find the container with id bd2929d75853086ca64d249ff9a147231273f672b51ed7414927126a178741cc Nov 23 16:26:11 crc kubenswrapper[5050]: I1123 16:26:11.566255 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c8292c7e-9f14-4f99-9a13-65472ed4a9e0" path="/var/lib/kubelet/pods/c8292c7e-9f14-4f99-9a13-65472ed4a9e0/volumes" Nov 23 16:26:11 crc kubenswrapper[5050]: I1123 16:26:11.704727 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dlwfc" event={"ID":"1eef8db6-6ab9-40d1-aefa-8b92e42693f3","Type":"ContainerStarted","Data":"dd616c15c1a7d698eaa65a76f9260d88ccb5cc0ca0db320e3250753102c39874"} Nov 23 16:26:11 crc kubenswrapper[5050]: I1123 16:26:11.710493 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"dfb247e9-89fe-4eb2-a573-3797ccad68f8","Type":"ContainerStarted","Data":"bd2929d75853086ca64d249ff9a147231273f672b51ed7414927126a178741cc"} Nov 23 16:26:11 crc kubenswrapper[5050]: I1123 16:26:11.727308 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-dlwfc" podStartSLOduration=2.21245557 podStartE2EDuration="4.727288862s" podCreationTimestamp="2025-11-23 16:26:07 +0000 UTC" firstStartedPulling="2025-11-23 16:26:08.600405767 +0000 UTC m=+6263.767402252" lastFinishedPulling="2025-11-23 16:26:11.115239059 +0000 UTC m=+6266.282235544" observedRunningTime="2025-11-23 16:26:11.722769105 +0000 UTC m=+6266.889765600" watchObservedRunningTime="2025-11-23 16:26:11.727288862 +0000 UTC m=+6266.894285347" Nov 23 16:26:12 crc kubenswrapper[5050]: I1123 16:26:12.726804 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"dfb247e9-89fe-4eb2-a573-3797ccad68f8","Type":"ContainerStarted","Data":"006f10f0eb2d6fe48b3d9270b7359dba1ffc33dec17f0b43ea6c335041ad8b34"} Nov 23 16:26:13 crc kubenswrapper[5050]: I1123 16:26:13.744891 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"dfb247e9-89fe-4eb2-a573-3797ccad68f8","Type":"ContainerStarted","Data":"d7e6483cd40f09fc60b0b5b7dd72c290fa2addb34e46f4472ae9dfdf6fa8cfbe"} Nov 23 16:26:13 crc kubenswrapper[5050]: I1123 16:26:13.916226 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/manila-api-0" Nov 23 16:26:14 crc kubenswrapper[5050]: I1123 16:26:14.088163 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/manila-share-share1-0" Nov 23 16:26:14 crc kubenswrapper[5050]: I1123 16:26:14.098657 5050 kubelet.go:2542] "SyncLoop (probe)" 
probe="startup" status="started" pod="openstack/manila-scheduler-0" Nov 23 16:26:14 crc kubenswrapper[5050]: I1123 16:26:14.795319 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"dfb247e9-89fe-4eb2-a573-3797ccad68f8","Type":"ContainerStarted","Data":"91a78e70f90d41843ea8ff2b81b639749ac89aaaba6177e468ece9fab33f3902"} Nov 23 16:26:16 crc kubenswrapper[5050]: I1123 16:26:16.822032 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"dfb247e9-89fe-4eb2-a573-3797ccad68f8","Type":"ContainerStarted","Data":"55770dbc395a832e7f3f5bb6749185a588c701239ef704d23b74bb537fac8174"} Nov 23 16:26:16 crc kubenswrapper[5050]: I1123 16:26:16.824179 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 23 16:26:17 crc kubenswrapper[5050]: I1123 16:26:17.496508 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-dlwfc" Nov 23 16:26:17 crc kubenswrapper[5050]: I1123 16:26:17.497403 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-dlwfc" Nov 23 16:26:17 crc kubenswrapper[5050]: I1123 16:26:17.569090 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-dlwfc" Nov 23 16:26:17 crc kubenswrapper[5050]: I1123 16:26:17.610177 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=3.064456949 podStartE2EDuration="7.610144643s" podCreationTimestamp="2025-11-23 16:26:10 +0000 UTC" firstStartedPulling="2025-11-23 16:26:11.231819907 +0000 UTC m=+6266.398816392" lastFinishedPulling="2025-11-23 16:26:15.777507591 +0000 UTC m=+6270.944504086" observedRunningTime="2025-11-23 16:26:16.848826069 +0000 UTC m=+6272.015822554" watchObservedRunningTime="2025-11-23 16:26:17.610144643 +0000 UTC m=+6272.777141128" Nov 23 16:26:17 crc kubenswrapper[5050]: I1123 16:26:17.911577 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-dlwfc" Nov 23 16:26:17 crc kubenswrapper[5050]: I1123 16:26:17.973745 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-dlwfc"] Nov 23 16:26:18 crc kubenswrapper[5050]: I1123 16:26:18.067818 5050 scope.go:117] "RemoveContainer" containerID="7f5573ffd97138d8eb1f60674b7d15cedd559f14cca884b678f03d8db9ffddd9" Nov 23 16:26:18 crc kubenswrapper[5050]: I1123 16:26:18.105257 5050 scope.go:117] "RemoveContainer" containerID="eae5b93fb507e149054a38ea712d10d2b101134a7996786579ad0798b07d71d2" Nov 23 16:26:18 crc kubenswrapper[5050]: I1123 16:26:18.168651 5050 scope.go:117] "RemoveContainer" containerID="f3997e49e5ca1823e5a10cfd87f46f57645ed93ae81ab4e356df1d9c093d5206" Nov 23 16:26:19 crc kubenswrapper[5050]: I1123 16:26:19.864209 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-dlwfc" podUID="1eef8db6-6ab9-40d1-aefa-8b92e42693f3" containerName="registry-server" containerID="cri-o://dd616c15c1a7d698eaa65a76f9260d88ccb5cc0ca0db320e3250753102c39874" gracePeriod=2 Nov 23 16:26:20 crc kubenswrapper[5050]: I1123 16:26:20.446199 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dlwfc" Nov 23 16:26:20 crc kubenswrapper[5050]: I1123 16:26:20.522189 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cgm7j\" (UniqueName: \"kubernetes.io/projected/1eef8db6-6ab9-40d1-aefa-8b92e42693f3-kube-api-access-cgm7j\") pod \"1eef8db6-6ab9-40d1-aefa-8b92e42693f3\" (UID: \"1eef8db6-6ab9-40d1-aefa-8b92e42693f3\") " Nov 23 16:26:20 crc kubenswrapper[5050]: I1123 16:26:20.522770 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1eef8db6-6ab9-40d1-aefa-8b92e42693f3-utilities\") pod \"1eef8db6-6ab9-40d1-aefa-8b92e42693f3\" (UID: \"1eef8db6-6ab9-40d1-aefa-8b92e42693f3\") " Nov 23 16:26:20 crc kubenswrapper[5050]: I1123 16:26:20.523006 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1eef8db6-6ab9-40d1-aefa-8b92e42693f3-catalog-content\") pod \"1eef8db6-6ab9-40d1-aefa-8b92e42693f3\" (UID: \"1eef8db6-6ab9-40d1-aefa-8b92e42693f3\") " Nov 23 16:26:20 crc kubenswrapper[5050]: I1123 16:26:20.523877 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1eef8db6-6ab9-40d1-aefa-8b92e42693f3-utilities" (OuterVolumeSpecName: "utilities") pod "1eef8db6-6ab9-40d1-aefa-8b92e42693f3" (UID: "1eef8db6-6ab9-40d1-aefa-8b92e42693f3"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 16:26:20 crc kubenswrapper[5050]: I1123 16:26:20.526458 5050 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1eef8db6-6ab9-40d1-aefa-8b92e42693f3-utilities\") on node \"crc\" DevicePath \"\"" Nov 23 16:26:20 crc kubenswrapper[5050]: I1123 16:26:20.538497 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1eef8db6-6ab9-40d1-aefa-8b92e42693f3-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1eef8db6-6ab9-40d1-aefa-8b92e42693f3" (UID: "1eef8db6-6ab9-40d1-aefa-8b92e42693f3"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 16:26:20 crc kubenswrapper[5050]: I1123 16:26:20.541784 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1eef8db6-6ab9-40d1-aefa-8b92e42693f3-kube-api-access-cgm7j" (OuterVolumeSpecName: "kube-api-access-cgm7j") pod "1eef8db6-6ab9-40d1-aefa-8b92e42693f3" (UID: "1eef8db6-6ab9-40d1-aefa-8b92e42693f3"). InnerVolumeSpecName "kube-api-access-cgm7j". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 16:26:20 crc kubenswrapper[5050]: I1123 16:26:20.629322 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cgm7j\" (UniqueName: \"kubernetes.io/projected/1eef8db6-6ab9-40d1-aefa-8b92e42693f3-kube-api-access-cgm7j\") on node \"crc\" DevicePath \"\"" Nov 23 16:26:20 crc kubenswrapper[5050]: I1123 16:26:20.629496 5050 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1eef8db6-6ab9-40d1-aefa-8b92e42693f3-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 23 16:26:20 crc kubenswrapper[5050]: I1123 16:26:20.879369 5050 generic.go:334] "Generic (PLEG): container finished" podID="1eef8db6-6ab9-40d1-aefa-8b92e42693f3" containerID="dd616c15c1a7d698eaa65a76f9260d88ccb5cc0ca0db320e3250753102c39874" exitCode=0 Nov 23 16:26:20 crc kubenswrapper[5050]: I1123 16:26:20.879466 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dlwfc" Nov 23 16:26:20 crc kubenswrapper[5050]: I1123 16:26:20.879469 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dlwfc" event={"ID":"1eef8db6-6ab9-40d1-aefa-8b92e42693f3","Type":"ContainerDied","Data":"dd616c15c1a7d698eaa65a76f9260d88ccb5cc0ca0db320e3250753102c39874"} Nov 23 16:26:20 crc kubenswrapper[5050]: I1123 16:26:20.879928 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dlwfc" event={"ID":"1eef8db6-6ab9-40d1-aefa-8b92e42693f3","Type":"ContainerDied","Data":"4d0452906cc0b00017f58ebf220e57321f044c69b4a57dfcfa290b8ae000de59"} Nov 23 16:26:20 crc kubenswrapper[5050]: I1123 16:26:20.879957 5050 scope.go:117] "RemoveContainer" containerID="dd616c15c1a7d698eaa65a76f9260d88ccb5cc0ca0db320e3250753102c39874" Nov 23 16:26:20 crc kubenswrapper[5050]: I1123 16:26:20.931482 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-dlwfc"] Nov 23 16:26:20 crc kubenswrapper[5050]: I1123 16:26:20.938606 5050 scope.go:117] "RemoveContainer" containerID="828027face524e81d832522aca98f90407da6b5af139785f9cddbb0d7379a595" Nov 23 16:26:20 crc kubenswrapper[5050]: I1123 16:26:20.940584 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-dlwfc"] Nov 23 16:26:20 crc kubenswrapper[5050]: I1123 16:26:20.968662 5050 scope.go:117] "RemoveContainer" containerID="8125de89bfeadffd22aa7f0c60f98ebfe8cdb16c7aaaf32817ab74a3f19c2a3c" Nov 23 16:26:21 crc kubenswrapper[5050]: I1123 16:26:21.019718 5050 scope.go:117] "RemoveContainer" containerID="dd616c15c1a7d698eaa65a76f9260d88ccb5cc0ca0db320e3250753102c39874" Nov 23 16:26:21 crc kubenswrapper[5050]: E1123 16:26:21.020293 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dd616c15c1a7d698eaa65a76f9260d88ccb5cc0ca0db320e3250753102c39874\": container with ID starting with dd616c15c1a7d698eaa65a76f9260d88ccb5cc0ca0db320e3250753102c39874 not found: ID does not exist" containerID="dd616c15c1a7d698eaa65a76f9260d88ccb5cc0ca0db320e3250753102c39874" Nov 23 16:26:21 crc kubenswrapper[5050]: I1123 16:26:21.020372 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dd616c15c1a7d698eaa65a76f9260d88ccb5cc0ca0db320e3250753102c39874"} err="failed to get container status 
\"dd616c15c1a7d698eaa65a76f9260d88ccb5cc0ca0db320e3250753102c39874\": rpc error: code = NotFound desc = could not find container \"dd616c15c1a7d698eaa65a76f9260d88ccb5cc0ca0db320e3250753102c39874\": container with ID starting with dd616c15c1a7d698eaa65a76f9260d88ccb5cc0ca0db320e3250753102c39874 not found: ID does not exist" Nov 23 16:26:21 crc kubenswrapper[5050]: I1123 16:26:21.020406 5050 scope.go:117] "RemoveContainer" containerID="828027face524e81d832522aca98f90407da6b5af139785f9cddbb0d7379a595" Nov 23 16:26:21 crc kubenswrapper[5050]: E1123 16:26:21.021076 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"828027face524e81d832522aca98f90407da6b5af139785f9cddbb0d7379a595\": container with ID starting with 828027face524e81d832522aca98f90407da6b5af139785f9cddbb0d7379a595 not found: ID does not exist" containerID="828027face524e81d832522aca98f90407da6b5af139785f9cddbb0d7379a595" Nov 23 16:26:21 crc kubenswrapper[5050]: I1123 16:26:21.021120 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"828027face524e81d832522aca98f90407da6b5af139785f9cddbb0d7379a595"} err="failed to get container status \"828027face524e81d832522aca98f90407da6b5af139785f9cddbb0d7379a595\": rpc error: code = NotFound desc = could not find container \"828027face524e81d832522aca98f90407da6b5af139785f9cddbb0d7379a595\": container with ID starting with 828027face524e81d832522aca98f90407da6b5af139785f9cddbb0d7379a595 not found: ID does not exist" Nov 23 16:26:21 crc kubenswrapper[5050]: I1123 16:26:21.021148 5050 scope.go:117] "RemoveContainer" containerID="8125de89bfeadffd22aa7f0c60f98ebfe8cdb16c7aaaf32817ab74a3f19c2a3c" Nov 23 16:26:21 crc kubenswrapper[5050]: E1123 16:26:21.021478 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8125de89bfeadffd22aa7f0c60f98ebfe8cdb16c7aaaf32817ab74a3f19c2a3c\": container with ID starting with 8125de89bfeadffd22aa7f0c60f98ebfe8cdb16c7aaaf32817ab74a3f19c2a3c not found: ID does not exist" containerID="8125de89bfeadffd22aa7f0c60f98ebfe8cdb16c7aaaf32817ab74a3f19c2a3c" Nov 23 16:26:21 crc kubenswrapper[5050]: I1123 16:26:21.021520 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8125de89bfeadffd22aa7f0c60f98ebfe8cdb16c7aaaf32817ab74a3f19c2a3c"} err="failed to get container status \"8125de89bfeadffd22aa7f0c60f98ebfe8cdb16c7aaaf32817ab74a3f19c2a3c\": rpc error: code = NotFound desc = could not find container \"8125de89bfeadffd22aa7f0c60f98ebfe8cdb16c7aaaf32817ab74a3f19c2a3c\": container with ID starting with 8125de89bfeadffd22aa7f0c60f98ebfe8cdb16c7aaaf32817ab74a3f19c2a3c not found: ID does not exist" Nov 23 16:26:21 crc kubenswrapper[5050]: I1123 16:26:21.574055 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1eef8db6-6ab9-40d1-aefa-8b92e42693f3" path="/var/lib/kubelet/pods/1eef8db6-6ab9-40d1-aefa-8b92e42693f3/volumes" Nov 23 16:26:29 crc kubenswrapper[5050]: I1123 16:26:29.225016 5050 patch_prober.go:28] interesting pod/machine-config-daemon-hlrlq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 23 16:26:29 crc kubenswrapper[5050]: I1123 16:26:29.226000 5050 prober.go:107] "Probe failed" probeType="Liveness" 
pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 23 16:26:29 crc kubenswrapper[5050]: I1123 16:26:29.226060 5050 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" Nov 23 16:26:29 crc kubenswrapper[5050]: I1123 16:26:29.227181 5050 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"c66ef3881ddcc9912b6c270cf4a535647442815ea6b0dd1bc5eb1cfcbc78f486"} pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 23 16:26:29 crc kubenswrapper[5050]: I1123 16:26:29.227248 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" containerID="cri-o://c66ef3881ddcc9912b6c270cf4a535647442815ea6b0dd1bc5eb1cfcbc78f486" gracePeriod=600 Nov 23 16:26:29 crc kubenswrapper[5050]: E1123 16:26:29.861533 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 16:26:30 crc kubenswrapper[5050]: I1123 16:26:30.011844 5050 generic.go:334] "Generic (PLEG): container finished" podID="1d998909-9470-47ef-87e8-b34f0473682f" containerID="c66ef3881ddcc9912b6c270cf4a535647442815ea6b0dd1bc5eb1cfcbc78f486" exitCode=0 Nov 23 16:26:30 crc kubenswrapper[5050]: I1123 16:26:30.011904 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" event={"ID":"1d998909-9470-47ef-87e8-b34f0473682f","Type":"ContainerDied","Data":"c66ef3881ddcc9912b6c270cf4a535647442815ea6b0dd1bc5eb1cfcbc78f486"} Nov 23 16:26:30 crc kubenswrapper[5050]: I1123 16:26:30.011952 5050 scope.go:117] "RemoveContainer" containerID="6c3e7a6f5a62c9666075227e68f8208f00db4be7096dab6e83261908dedc75a8" Nov 23 16:26:30 crc kubenswrapper[5050]: I1123 16:26:30.013401 5050 scope.go:117] "RemoveContainer" containerID="c66ef3881ddcc9912b6c270cf4a535647442815ea6b0dd1bc5eb1cfcbc78f486" Nov 23 16:26:30 crc kubenswrapper[5050]: E1123 16:26:30.014341 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 16:26:40 crc kubenswrapper[5050]: I1123 16:26:40.681922 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Nov 23 16:26:41 crc kubenswrapper[5050]: I1123 16:26:41.553142 5050 scope.go:117] "RemoveContainer" 
containerID="c66ef3881ddcc9912b6c270cf4a535647442815ea6b0dd1bc5eb1cfcbc78f486" Nov 23 16:26:41 crc kubenswrapper[5050]: E1123 16:26:41.554308 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 16:26:52 crc kubenswrapper[5050]: I1123 16:26:52.549537 5050 scope.go:117] "RemoveContainer" containerID="c66ef3881ddcc9912b6c270cf4a535647442815ea6b0dd1bc5eb1cfcbc78f486" Nov 23 16:26:52 crc kubenswrapper[5050]: E1123 16:26:52.550404 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 16:27:01 crc kubenswrapper[5050]: I1123 16:27:01.871722 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5466f6b6b5-b8dfn"] Nov 23 16:27:01 crc kubenswrapper[5050]: E1123 16:27:01.872780 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1eef8db6-6ab9-40d1-aefa-8b92e42693f3" containerName="registry-server" Nov 23 16:27:01 crc kubenswrapper[5050]: I1123 16:27:01.872795 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="1eef8db6-6ab9-40d1-aefa-8b92e42693f3" containerName="registry-server" Nov 23 16:27:01 crc kubenswrapper[5050]: E1123 16:27:01.872822 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1eef8db6-6ab9-40d1-aefa-8b92e42693f3" containerName="extract-utilities" Nov 23 16:27:01 crc kubenswrapper[5050]: I1123 16:27:01.872829 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="1eef8db6-6ab9-40d1-aefa-8b92e42693f3" containerName="extract-utilities" Nov 23 16:27:01 crc kubenswrapper[5050]: E1123 16:27:01.872838 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1eef8db6-6ab9-40d1-aefa-8b92e42693f3" containerName="extract-content" Nov 23 16:27:01 crc kubenswrapper[5050]: I1123 16:27:01.872844 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="1eef8db6-6ab9-40d1-aefa-8b92e42693f3" containerName="extract-content" Nov 23 16:27:01 crc kubenswrapper[5050]: I1123 16:27:01.873069 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="1eef8db6-6ab9-40d1-aefa-8b92e42693f3" containerName="registry-server" Nov 23 16:27:01 crc kubenswrapper[5050]: I1123 16:27:01.874266 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5466f6b6b5-b8dfn" Nov 23 16:27:01 crc kubenswrapper[5050]: I1123 16:27:01.877483 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1" Nov 23 16:27:01 crc kubenswrapper[5050]: I1123 16:27:01.890928 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5466f6b6b5-b8dfn"] Nov 23 16:27:01 crc kubenswrapper[5050]: I1123 16:27:01.907579 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-cell1\" (UniqueName: \"kubernetes.io/configmap/bda20f95-955e-4158-addd-e65de95f42c7-openstack-cell1\") pod \"dnsmasq-dns-5466f6b6b5-b8dfn\" (UID: \"bda20f95-955e-4158-addd-e65de95f42c7\") " pod="openstack/dnsmasq-dns-5466f6b6b5-b8dfn" Nov 23 16:27:01 crc kubenswrapper[5050]: I1123 16:27:01.907995 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sb6x2\" (UniqueName: \"kubernetes.io/projected/bda20f95-955e-4158-addd-e65de95f42c7-kube-api-access-sb6x2\") pod \"dnsmasq-dns-5466f6b6b5-b8dfn\" (UID: \"bda20f95-955e-4158-addd-e65de95f42c7\") " pod="openstack/dnsmasq-dns-5466f6b6b5-b8dfn" Nov 23 16:27:01 crc kubenswrapper[5050]: I1123 16:27:01.908071 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bda20f95-955e-4158-addd-e65de95f42c7-ovsdbserver-nb\") pod \"dnsmasq-dns-5466f6b6b5-b8dfn\" (UID: \"bda20f95-955e-4158-addd-e65de95f42c7\") " pod="openstack/dnsmasq-dns-5466f6b6b5-b8dfn" Nov 23 16:27:01 crc kubenswrapper[5050]: I1123 16:27:01.908100 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bda20f95-955e-4158-addd-e65de95f42c7-dns-svc\") pod \"dnsmasq-dns-5466f6b6b5-b8dfn\" (UID: \"bda20f95-955e-4158-addd-e65de95f42c7\") " pod="openstack/dnsmasq-dns-5466f6b6b5-b8dfn" Nov 23 16:27:01 crc kubenswrapper[5050]: I1123 16:27:01.908207 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bda20f95-955e-4158-addd-e65de95f42c7-config\") pod \"dnsmasq-dns-5466f6b6b5-b8dfn\" (UID: \"bda20f95-955e-4158-addd-e65de95f42c7\") " pod="openstack/dnsmasq-dns-5466f6b6b5-b8dfn" Nov 23 16:27:01 crc kubenswrapper[5050]: I1123 16:27:01.908241 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bda20f95-955e-4158-addd-e65de95f42c7-ovsdbserver-sb\") pod \"dnsmasq-dns-5466f6b6b5-b8dfn\" (UID: \"bda20f95-955e-4158-addd-e65de95f42c7\") " pod="openstack/dnsmasq-dns-5466f6b6b5-b8dfn" Nov 23 16:27:02 crc kubenswrapper[5050]: I1123 16:27:02.011330 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bda20f95-955e-4158-addd-e65de95f42c7-config\") pod \"dnsmasq-dns-5466f6b6b5-b8dfn\" (UID: \"bda20f95-955e-4158-addd-e65de95f42c7\") " pod="openstack/dnsmasq-dns-5466f6b6b5-b8dfn" Nov 23 16:27:02 crc kubenswrapper[5050]: I1123 16:27:02.011465 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bda20f95-955e-4158-addd-e65de95f42c7-ovsdbserver-sb\") pod \"dnsmasq-dns-5466f6b6b5-b8dfn\" (UID: \"bda20f95-955e-4158-addd-e65de95f42c7\") " 
pod="openstack/dnsmasq-dns-5466f6b6b5-b8dfn" Nov 23 16:27:02 crc kubenswrapper[5050]: I1123 16:27:02.011595 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-cell1\" (UniqueName: \"kubernetes.io/configmap/bda20f95-955e-4158-addd-e65de95f42c7-openstack-cell1\") pod \"dnsmasq-dns-5466f6b6b5-b8dfn\" (UID: \"bda20f95-955e-4158-addd-e65de95f42c7\") " pod="openstack/dnsmasq-dns-5466f6b6b5-b8dfn" Nov 23 16:27:02 crc kubenswrapper[5050]: I1123 16:27:02.011658 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sb6x2\" (UniqueName: \"kubernetes.io/projected/bda20f95-955e-4158-addd-e65de95f42c7-kube-api-access-sb6x2\") pod \"dnsmasq-dns-5466f6b6b5-b8dfn\" (UID: \"bda20f95-955e-4158-addd-e65de95f42c7\") " pod="openstack/dnsmasq-dns-5466f6b6b5-b8dfn" Nov 23 16:27:02 crc kubenswrapper[5050]: I1123 16:27:02.011807 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bda20f95-955e-4158-addd-e65de95f42c7-ovsdbserver-nb\") pod \"dnsmasq-dns-5466f6b6b5-b8dfn\" (UID: \"bda20f95-955e-4158-addd-e65de95f42c7\") " pod="openstack/dnsmasq-dns-5466f6b6b5-b8dfn" Nov 23 16:27:02 crc kubenswrapper[5050]: I1123 16:27:02.011865 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bda20f95-955e-4158-addd-e65de95f42c7-dns-svc\") pod \"dnsmasq-dns-5466f6b6b5-b8dfn\" (UID: \"bda20f95-955e-4158-addd-e65de95f42c7\") " pod="openstack/dnsmasq-dns-5466f6b6b5-b8dfn" Nov 23 16:27:02 crc kubenswrapper[5050]: I1123 16:27:02.012731 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bda20f95-955e-4158-addd-e65de95f42c7-config\") pod \"dnsmasq-dns-5466f6b6b5-b8dfn\" (UID: \"bda20f95-955e-4158-addd-e65de95f42c7\") " pod="openstack/dnsmasq-dns-5466f6b6b5-b8dfn" Nov 23 16:27:02 crc kubenswrapper[5050]: I1123 16:27:02.012829 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bda20f95-955e-4158-addd-e65de95f42c7-ovsdbserver-sb\") pod \"dnsmasq-dns-5466f6b6b5-b8dfn\" (UID: \"bda20f95-955e-4158-addd-e65de95f42c7\") " pod="openstack/dnsmasq-dns-5466f6b6b5-b8dfn" Nov 23 16:27:02 crc kubenswrapper[5050]: I1123 16:27:02.013418 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-cell1\" (UniqueName: \"kubernetes.io/configmap/bda20f95-955e-4158-addd-e65de95f42c7-openstack-cell1\") pod \"dnsmasq-dns-5466f6b6b5-b8dfn\" (UID: \"bda20f95-955e-4158-addd-e65de95f42c7\") " pod="openstack/dnsmasq-dns-5466f6b6b5-b8dfn" Nov 23 16:27:02 crc kubenswrapper[5050]: I1123 16:27:02.013660 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bda20f95-955e-4158-addd-e65de95f42c7-ovsdbserver-nb\") pod \"dnsmasq-dns-5466f6b6b5-b8dfn\" (UID: \"bda20f95-955e-4158-addd-e65de95f42c7\") " pod="openstack/dnsmasq-dns-5466f6b6b5-b8dfn" Nov 23 16:27:02 crc kubenswrapper[5050]: I1123 16:27:02.013718 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bda20f95-955e-4158-addd-e65de95f42c7-dns-svc\") pod \"dnsmasq-dns-5466f6b6b5-b8dfn\" (UID: \"bda20f95-955e-4158-addd-e65de95f42c7\") " pod="openstack/dnsmasq-dns-5466f6b6b5-b8dfn" Nov 23 16:27:02 crc kubenswrapper[5050]: I1123 16:27:02.046815 5050 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sb6x2\" (UniqueName: \"kubernetes.io/projected/bda20f95-955e-4158-addd-e65de95f42c7-kube-api-access-sb6x2\") pod \"dnsmasq-dns-5466f6b6b5-b8dfn\" (UID: \"bda20f95-955e-4158-addd-e65de95f42c7\") " pod="openstack/dnsmasq-dns-5466f6b6b5-b8dfn" Nov 23 16:27:02 crc kubenswrapper[5050]: I1123 16:27:02.204220 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5466f6b6b5-b8dfn" Nov 23 16:27:02 crc kubenswrapper[5050]: I1123 16:27:02.720053 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5466f6b6b5-b8dfn"] Nov 23 16:27:03 crc kubenswrapper[5050]: I1123 16:27:03.404727 5050 generic.go:334] "Generic (PLEG): container finished" podID="bda20f95-955e-4158-addd-e65de95f42c7" containerID="7f84b9e7a7844dbf3be3de8040062250f3bd1f5cdb696bd4cdd5bcd82d5a26da" exitCode=0 Nov 23 16:27:03 crc kubenswrapper[5050]: I1123 16:27:03.404800 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5466f6b6b5-b8dfn" event={"ID":"bda20f95-955e-4158-addd-e65de95f42c7","Type":"ContainerDied","Data":"7f84b9e7a7844dbf3be3de8040062250f3bd1f5cdb696bd4cdd5bcd82d5a26da"} Nov 23 16:27:03 crc kubenswrapper[5050]: I1123 16:27:03.406664 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5466f6b6b5-b8dfn" event={"ID":"bda20f95-955e-4158-addd-e65de95f42c7","Type":"ContainerStarted","Data":"a6281af4474a537aabe5d8271d590e9128c579191b6137e03adb1821fc33f852"} Nov 23 16:27:04 crc kubenswrapper[5050]: I1123 16:27:04.427113 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5466f6b6b5-b8dfn" event={"ID":"bda20f95-955e-4158-addd-e65de95f42c7","Type":"ContainerStarted","Data":"0ef2eca042512bb2380c6be3c24f903987db6b2e419152d60cc9d08c860fc1a5"} Nov 23 16:27:04 crc kubenswrapper[5050]: I1123 16:27:04.427638 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5466f6b6b5-b8dfn" Nov 23 16:27:04 crc kubenswrapper[5050]: I1123 16:27:04.450928 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5466f6b6b5-b8dfn" podStartSLOduration=3.45090494 podStartE2EDuration="3.45090494s" podCreationTimestamp="2025-11-23 16:27:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 16:27:04.445065226 +0000 UTC m=+6319.612061711" watchObservedRunningTime="2025-11-23 16:27:04.45090494 +0000 UTC m=+6319.617901415" Nov 23 16:27:06 crc kubenswrapper[5050]: I1123 16:27:06.553003 5050 scope.go:117] "RemoveContainer" containerID="c66ef3881ddcc9912b6c270cf4a535647442815ea6b0dd1bc5eb1cfcbc78f486" Nov 23 16:27:06 crc kubenswrapper[5050]: E1123 16:27:06.555231 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 16:27:12 crc kubenswrapper[5050]: I1123 16:27:12.205689 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5466f6b6b5-b8dfn" Nov 23 16:27:12 crc kubenswrapper[5050]: I1123 16:27:12.323477 5050 
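In the 16:27:04.450928 startup record above, firstStartedPulling and lastFinishedPulling are the zero time (0001-01-01 00:00:00 +0000 UTC): no image had to be pulled. With an empty pull window the two metrics coincide, which is why podStartSLOduration=3.45090494 and podStartE2EDuration="3.45090494s" are identical.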
kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-84fbdc6f57-fwcg7"] Nov 23 16:27:12 crc kubenswrapper[5050]: I1123 16:27:12.330211 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-84fbdc6f57-fwcg7" podUID="e4be80a9-30d1-47ad-9dff-6aa826295fdc" containerName="dnsmasq-dns" containerID="cri-o://f42ada5c5ac1f32abf97d461a1bade5e4dd703559b214fd3ecfbd1dd589ddee3" gracePeriod=10 Nov 23 16:27:12 crc kubenswrapper[5050]: I1123 16:27:12.542232 5050 generic.go:334] "Generic (PLEG): container finished" podID="e4be80a9-30d1-47ad-9dff-6aa826295fdc" containerID="f42ada5c5ac1f32abf97d461a1bade5e4dd703559b214fd3ecfbd1dd589ddee3" exitCode=0 Nov 23 16:27:12 crc kubenswrapper[5050]: I1123 16:27:12.542286 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-84fbdc6f57-fwcg7" event={"ID":"e4be80a9-30d1-47ad-9dff-6aa826295fdc","Type":"ContainerDied","Data":"f42ada5c5ac1f32abf97d461a1bade5e4dd703559b214fd3ecfbd1dd589ddee3"} Nov 23 16:27:12 crc kubenswrapper[5050]: E1123 16:27:12.553300 5050 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode4be80a9_30d1_47ad_9dff_6aa826295fdc.slice/crio-f42ada5c5ac1f32abf97d461a1bade5e4dd703559b214fd3ecfbd1dd589ddee3.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode4be80a9_30d1_47ad_9dff_6aa826295fdc.slice/crio-conmon-f42ada5c5ac1f32abf97d461a1bade5e4dd703559b214fd3ecfbd1dd589ddee3.scope\": RecentStats: unable to find data in memory cache]" Nov 23 16:27:12 crc kubenswrapper[5050]: I1123 16:27:12.586639 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-848d65fcdc-kdfrp"] Nov 23 16:27:12 crc kubenswrapper[5050]: I1123 16:27:12.589241 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-848d65fcdc-kdfrp" Nov 23 16:27:12 crc kubenswrapper[5050]: I1123 16:27:12.668662 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-848d65fcdc-kdfrp"] Nov 23 16:27:12 crc kubenswrapper[5050]: I1123 16:27:12.749996 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/72f188fc-a3d6-4b46-81d0-9f1ed3985d10-config\") pod \"dnsmasq-dns-848d65fcdc-kdfrp\" (UID: \"72f188fc-a3d6-4b46-81d0-9f1ed3985d10\") " pod="openstack/dnsmasq-dns-848d65fcdc-kdfrp" Nov 23 16:27:12 crc kubenswrapper[5050]: I1123 16:27:12.750120 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-cell1\" (UniqueName: \"kubernetes.io/configmap/72f188fc-a3d6-4b46-81d0-9f1ed3985d10-openstack-cell1\") pod \"dnsmasq-dns-848d65fcdc-kdfrp\" (UID: \"72f188fc-a3d6-4b46-81d0-9f1ed3985d10\") " pod="openstack/dnsmasq-dns-848d65fcdc-kdfrp" Nov 23 16:27:12 crc kubenswrapper[5050]: I1123 16:27:12.750180 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/72f188fc-a3d6-4b46-81d0-9f1ed3985d10-ovsdbserver-sb\") pod \"dnsmasq-dns-848d65fcdc-kdfrp\" (UID: \"72f188fc-a3d6-4b46-81d0-9f1ed3985d10\") " pod="openstack/dnsmasq-dns-848d65fcdc-kdfrp" Nov 23 16:27:12 crc kubenswrapper[5050]: I1123 16:27:12.750202 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p8ccn\" (UniqueName: \"kubernetes.io/projected/72f188fc-a3d6-4b46-81d0-9f1ed3985d10-kube-api-access-p8ccn\") pod \"dnsmasq-dns-848d65fcdc-kdfrp\" (UID: \"72f188fc-a3d6-4b46-81d0-9f1ed3985d10\") " pod="openstack/dnsmasq-dns-848d65fcdc-kdfrp" Nov 23 16:27:12 crc kubenswrapper[5050]: I1123 16:27:12.750246 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/72f188fc-a3d6-4b46-81d0-9f1ed3985d10-dns-svc\") pod \"dnsmasq-dns-848d65fcdc-kdfrp\" (UID: \"72f188fc-a3d6-4b46-81d0-9f1ed3985d10\") " pod="openstack/dnsmasq-dns-848d65fcdc-kdfrp" Nov 23 16:27:12 crc kubenswrapper[5050]: I1123 16:27:12.750323 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/72f188fc-a3d6-4b46-81d0-9f1ed3985d10-ovsdbserver-nb\") pod \"dnsmasq-dns-848d65fcdc-kdfrp\" (UID: \"72f188fc-a3d6-4b46-81d0-9f1ed3985d10\") " pod="openstack/dnsmasq-dns-848d65fcdc-kdfrp" Nov 23 16:27:12 crc kubenswrapper[5050]: I1123 16:27:12.852929 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/72f188fc-a3d6-4b46-81d0-9f1ed3985d10-config\") pod \"dnsmasq-dns-848d65fcdc-kdfrp\" (UID: \"72f188fc-a3d6-4b46-81d0-9f1ed3985d10\") " pod="openstack/dnsmasq-dns-848d65fcdc-kdfrp" Nov 23 16:27:12 crc kubenswrapper[5050]: I1123 16:27:12.853001 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-cell1\" (UniqueName: \"kubernetes.io/configmap/72f188fc-a3d6-4b46-81d0-9f1ed3985d10-openstack-cell1\") pod \"dnsmasq-dns-848d65fcdc-kdfrp\" (UID: \"72f188fc-a3d6-4b46-81d0-9f1ed3985d10\") " pod="openstack/dnsmasq-dns-848d65fcdc-kdfrp" Nov 23 16:27:12 crc kubenswrapper[5050]: I1123 16:27:12.853032 5050 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/72f188fc-a3d6-4b46-81d0-9f1ed3985d10-ovsdbserver-sb\") pod \"dnsmasq-dns-848d65fcdc-kdfrp\" (UID: \"72f188fc-a3d6-4b46-81d0-9f1ed3985d10\") " pod="openstack/dnsmasq-dns-848d65fcdc-kdfrp" Nov 23 16:27:12 crc kubenswrapper[5050]: I1123 16:27:12.853056 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p8ccn\" (UniqueName: \"kubernetes.io/projected/72f188fc-a3d6-4b46-81d0-9f1ed3985d10-kube-api-access-p8ccn\") pod \"dnsmasq-dns-848d65fcdc-kdfrp\" (UID: \"72f188fc-a3d6-4b46-81d0-9f1ed3985d10\") " pod="openstack/dnsmasq-dns-848d65fcdc-kdfrp" Nov 23 16:27:12 crc kubenswrapper[5050]: I1123 16:27:12.853084 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/72f188fc-a3d6-4b46-81d0-9f1ed3985d10-dns-svc\") pod \"dnsmasq-dns-848d65fcdc-kdfrp\" (UID: \"72f188fc-a3d6-4b46-81d0-9f1ed3985d10\") " pod="openstack/dnsmasq-dns-848d65fcdc-kdfrp" Nov 23 16:27:12 crc kubenswrapper[5050]: I1123 16:27:12.853125 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/72f188fc-a3d6-4b46-81d0-9f1ed3985d10-ovsdbserver-nb\") pod \"dnsmasq-dns-848d65fcdc-kdfrp\" (UID: \"72f188fc-a3d6-4b46-81d0-9f1ed3985d10\") " pod="openstack/dnsmasq-dns-848d65fcdc-kdfrp" Nov 23 16:27:12 crc kubenswrapper[5050]: I1123 16:27:12.855364 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/72f188fc-a3d6-4b46-81d0-9f1ed3985d10-ovsdbserver-sb\") pod \"dnsmasq-dns-848d65fcdc-kdfrp\" (UID: \"72f188fc-a3d6-4b46-81d0-9f1ed3985d10\") " pod="openstack/dnsmasq-dns-848d65fcdc-kdfrp" Nov 23 16:27:12 crc kubenswrapper[5050]: I1123 16:27:12.856796 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/72f188fc-a3d6-4b46-81d0-9f1ed3985d10-config\") pod \"dnsmasq-dns-848d65fcdc-kdfrp\" (UID: \"72f188fc-a3d6-4b46-81d0-9f1ed3985d10\") " pod="openstack/dnsmasq-dns-848d65fcdc-kdfrp" Nov 23 16:27:12 crc kubenswrapper[5050]: I1123 16:27:12.857655 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-cell1\" (UniqueName: \"kubernetes.io/configmap/72f188fc-a3d6-4b46-81d0-9f1ed3985d10-openstack-cell1\") pod \"dnsmasq-dns-848d65fcdc-kdfrp\" (UID: \"72f188fc-a3d6-4b46-81d0-9f1ed3985d10\") " pod="openstack/dnsmasq-dns-848d65fcdc-kdfrp" Nov 23 16:27:12 crc kubenswrapper[5050]: I1123 16:27:12.858374 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/72f188fc-a3d6-4b46-81d0-9f1ed3985d10-dns-svc\") pod \"dnsmasq-dns-848d65fcdc-kdfrp\" (UID: \"72f188fc-a3d6-4b46-81d0-9f1ed3985d10\") " pod="openstack/dnsmasq-dns-848d65fcdc-kdfrp" Nov 23 16:27:12 crc kubenswrapper[5050]: I1123 16:27:12.860544 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/72f188fc-a3d6-4b46-81d0-9f1ed3985d10-ovsdbserver-nb\") pod \"dnsmasq-dns-848d65fcdc-kdfrp\" (UID: \"72f188fc-a3d6-4b46-81d0-9f1ed3985d10\") " pod="openstack/dnsmasq-dns-848d65fcdc-kdfrp" Nov 23 16:27:12 crc kubenswrapper[5050]: I1123 16:27:12.882199 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p8ccn\" (UniqueName: 
\"kubernetes.io/projected/72f188fc-a3d6-4b46-81d0-9f1ed3985d10-kube-api-access-p8ccn\") pod \"dnsmasq-dns-848d65fcdc-kdfrp\" (UID: \"72f188fc-a3d6-4b46-81d0-9f1ed3985d10\") " pod="openstack/dnsmasq-dns-848d65fcdc-kdfrp" Nov 23 16:27:12 crc kubenswrapper[5050]: I1123 16:27:12.953528 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-848d65fcdc-kdfrp" Nov 23 16:27:13 crc kubenswrapper[5050]: I1123 16:27:13.101026 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-84fbdc6f57-fwcg7" Nov 23 16:27:13 crc kubenswrapper[5050]: I1123 16:27:13.159351 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ckxh5\" (UniqueName: \"kubernetes.io/projected/e4be80a9-30d1-47ad-9dff-6aa826295fdc-kube-api-access-ckxh5\") pod \"e4be80a9-30d1-47ad-9dff-6aa826295fdc\" (UID: \"e4be80a9-30d1-47ad-9dff-6aa826295fdc\") " Nov 23 16:27:13 crc kubenswrapper[5050]: I1123 16:27:13.161334 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e4be80a9-30d1-47ad-9dff-6aa826295fdc-ovsdbserver-nb\") pod \"e4be80a9-30d1-47ad-9dff-6aa826295fdc\" (UID: \"e4be80a9-30d1-47ad-9dff-6aa826295fdc\") " Nov 23 16:27:13 crc kubenswrapper[5050]: I1123 16:27:13.162101 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e4be80a9-30d1-47ad-9dff-6aa826295fdc-config\") pod \"e4be80a9-30d1-47ad-9dff-6aa826295fdc\" (UID: \"e4be80a9-30d1-47ad-9dff-6aa826295fdc\") " Nov 23 16:27:13 crc kubenswrapper[5050]: I1123 16:27:13.162159 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e4be80a9-30d1-47ad-9dff-6aa826295fdc-dns-svc\") pod \"e4be80a9-30d1-47ad-9dff-6aa826295fdc\" (UID: \"e4be80a9-30d1-47ad-9dff-6aa826295fdc\") " Nov 23 16:27:13 crc kubenswrapper[5050]: I1123 16:27:13.162244 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e4be80a9-30d1-47ad-9dff-6aa826295fdc-ovsdbserver-sb\") pod \"e4be80a9-30d1-47ad-9dff-6aa826295fdc\" (UID: \"e4be80a9-30d1-47ad-9dff-6aa826295fdc\") " Nov 23 16:27:13 crc kubenswrapper[5050]: I1123 16:27:13.176906 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e4be80a9-30d1-47ad-9dff-6aa826295fdc-kube-api-access-ckxh5" (OuterVolumeSpecName: "kube-api-access-ckxh5") pod "e4be80a9-30d1-47ad-9dff-6aa826295fdc" (UID: "e4be80a9-30d1-47ad-9dff-6aa826295fdc"). InnerVolumeSpecName "kube-api-access-ckxh5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 16:27:13 crc kubenswrapper[5050]: I1123 16:27:13.224706 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e4be80a9-30d1-47ad-9dff-6aa826295fdc-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "e4be80a9-30d1-47ad-9dff-6aa826295fdc" (UID: "e4be80a9-30d1-47ad-9dff-6aa826295fdc"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 16:27:13 crc kubenswrapper[5050]: I1123 16:27:13.228945 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e4be80a9-30d1-47ad-9dff-6aa826295fdc-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "e4be80a9-30d1-47ad-9dff-6aa826295fdc" (UID: "e4be80a9-30d1-47ad-9dff-6aa826295fdc"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 16:27:13 crc kubenswrapper[5050]: I1123 16:27:13.243202 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e4be80a9-30d1-47ad-9dff-6aa826295fdc-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "e4be80a9-30d1-47ad-9dff-6aa826295fdc" (UID: "e4be80a9-30d1-47ad-9dff-6aa826295fdc"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 16:27:13 crc kubenswrapper[5050]: I1123 16:27:13.271876 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e4be80a9-30d1-47ad-9dff-6aa826295fdc-config" (OuterVolumeSpecName: "config") pod "e4be80a9-30d1-47ad-9dff-6aa826295fdc" (UID: "e4be80a9-30d1-47ad-9dff-6aa826295fdc"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 16:27:13 crc kubenswrapper[5050]: I1123 16:27:13.275291 5050 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e4be80a9-30d1-47ad-9dff-6aa826295fdc-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 23 16:27:13 crc kubenswrapper[5050]: I1123 16:27:13.275325 5050 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e4be80a9-30d1-47ad-9dff-6aa826295fdc-config\") on node \"crc\" DevicePath \"\"" Nov 23 16:27:13 crc kubenswrapper[5050]: I1123 16:27:13.275338 5050 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e4be80a9-30d1-47ad-9dff-6aa826295fdc-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 23 16:27:13 crc kubenswrapper[5050]: I1123 16:27:13.275349 5050 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e4be80a9-30d1-47ad-9dff-6aa826295fdc-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 23 16:27:13 crc kubenswrapper[5050]: I1123 16:27:13.275362 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ckxh5\" (UniqueName: \"kubernetes.io/projected/e4be80a9-30d1-47ad-9dff-6aa826295fdc-kube-api-access-ckxh5\") on node \"crc\" DevicePath \"\"" Nov 23 16:27:13 crc kubenswrapper[5050]: I1123 16:27:13.555685 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-84fbdc6f57-fwcg7" Nov 23 16:27:13 crc kubenswrapper[5050]: I1123 16:27:13.565520 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-84fbdc6f57-fwcg7" event={"ID":"e4be80a9-30d1-47ad-9dff-6aa826295fdc","Type":"ContainerDied","Data":"194a74c832e7a84a20e8bc074aee85f2fb48731e10b114314d0ea53bba3356a8"} Nov 23 16:27:13 crc kubenswrapper[5050]: I1123 16:27:13.565596 5050 scope.go:117] "RemoveContainer" containerID="f42ada5c5ac1f32abf97d461a1bade5e4dd703559b214fd3ecfbd1dd589ddee3" Nov 23 16:27:13 crc kubenswrapper[5050]: I1123 16:27:13.605724 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-84fbdc6f57-fwcg7"] Nov 23 16:27:13 crc kubenswrapper[5050]: I1123 16:27:13.611694 5050 scope.go:117] "RemoveContainer" containerID="406e2bb5ddb5b42f86cfe26b66a31f0a4fa605fa008ae956539b492e7265db6f" Nov 23 16:27:13 crc kubenswrapper[5050]: I1123 16:27:13.624312 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-84fbdc6f57-fwcg7"] Nov 23 16:27:13 crc kubenswrapper[5050]: I1123 16:27:13.638623 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-848d65fcdc-kdfrp"] Nov 23 16:27:14 crc kubenswrapper[5050]: I1123 16:27:14.574685 5050 generic.go:334] "Generic (PLEG): container finished" podID="72f188fc-a3d6-4b46-81d0-9f1ed3985d10" containerID="0133d1da1328bf987f1f9f921a34fc3383fa7110a7e1711b264a214366476963" exitCode=0 Nov 23 16:27:14 crc kubenswrapper[5050]: I1123 16:27:14.574790 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-848d65fcdc-kdfrp" event={"ID":"72f188fc-a3d6-4b46-81d0-9f1ed3985d10","Type":"ContainerDied","Data":"0133d1da1328bf987f1f9f921a34fc3383fa7110a7e1711b264a214366476963"} Nov 23 16:27:14 crc kubenswrapper[5050]: I1123 16:27:14.576940 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-848d65fcdc-kdfrp" event={"ID":"72f188fc-a3d6-4b46-81d0-9f1ed3985d10","Type":"ContainerStarted","Data":"41c414253c401f2e30650ec2d5765729c157a47c26063bdfa3915acb8326d800"} Nov 23 16:27:15 crc kubenswrapper[5050]: I1123 16:27:15.563970 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e4be80a9-30d1-47ad-9dff-6aa826295fdc" path="/var/lib/kubelet/pods/e4be80a9-30d1-47ad-9dff-6aa826295fdc/volumes" Nov 23 16:27:15 crc kubenswrapper[5050]: I1123 16:27:15.594074 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-848d65fcdc-kdfrp" event={"ID":"72f188fc-a3d6-4b46-81d0-9f1ed3985d10","Type":"ContainerStarted","Data":"5f9790d1a97366632d7c495e1f85c5b1f4b1c16824a442bf4285d8255a3c5a06"} Nov 23 16:27:15 crc kubenswrapper[5050]: I1123 16:27:15.594821 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-848d65fcdc-kdfrp" Nov 23 16:27:15 crc kubenswrapper[5050]: I1123 16:27:15.628707 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-848d65fcdc-kdfrp" podStartSLOduration=3.628675667 podStartE2EDuration="3.628675667s" podCreationTimestamp="2025-11-23 16:27:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 16:27:15.615542187 +0000 UTC m=+6330.782538702" watchObservedRunningTime="2025-11-23 16:27:15.628675667 +0000 UTC m=+6330.795672192" Nov 23 16:27:21 crc kubenswrapper[5050]: I1123 16:27:21.549737 5050 scope.go:117] "RemoveContainer" 
containerID="c66ef3881ddcc9912b6c270cf4a535647442815ea6b0dd1bc5eb1cfcbc78f486" Nov 23 16:27:21 crc kubenswrapper[5050]: E1123 16:27:21.550590 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 16:27:22 crc kubenswrapper[5050]: I1123 16:27:22.955953 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-848d65fcdc-kdfrp" Nov 23 16:27:23 crc kubenswrapper[5050]: I1123 16:27:23.031563 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5466f6b6b5-b8dfn"] Nov 23 16:27:23 crc kubenswrapper[5050]: I1123 16:27:23.035482 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5466f6b6b5-b8dfn" podUID="bda20f95-955e-4158-addd-e65de95f42c7" containerName="dnsmasq-dns" containerID="cri-o://0ef2eca042512bb2380c6be3c24f903987db6b2e419152d60cc9d08c860fc1a5" gracePeriod=10 Nov 23 16:27:23 crc kubenswrapper[5050]: I1123 16:27:23.652277 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5466f6b6b5-b8dfn" Nov 23 16:27:23 crc kubenswrapper[5050]: I1123 16:27:23.708655 5050 generic.go:334] "Generic (PLEG): container finished" podID="bda20f95-955e-4158-addd-e65de95f42c7" containerID="0ef2eca042512bb2380c6be3c24f903987db6b2e419152d60cc9d08c860fc1a5" exitCode=0 Nov 23 16:27:23 crc kubenswrapper[5050]: I1123 16:27:23.708722 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5466f6b6b5-b8dfn" event={"ID":"bda20f95-955e-4158-addd-e65de95f42c7","Type":"ContainerDied","Data":"0ef2eca042512bb2380c6be3c24f903987db6b2e419152d60cc9d08c860fc1a5"} Nov 23 16:27:23 crc kubenswrapper[5050]: I1123 16:27:23.708763 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5466f6b6b5-b8dfn" event={"ID":"bda20f95-955e-4158-addd-e65de95f42c7","Type":"ContainerDied","Data":"a6281af4474a537aabe5d8271d590e9128c579191b6137e03adb1821fc33f852"} Nov 23 16:27:23 crc kubenswrapper[5050]: I1123 16:27:23.708785 5050 scope.go:117] "RemoveContainer" containerID="0ef2eca042512bb2380c6be3c24f903987db6b2e419152d60cc9d08c860fc1a5" Nov 23 16:27:23 crc kubenswrapper[5050]: I1123 16:27:23.709039 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5466f6b6b5-b8dfn" Nov 23 16:27:23 crc kubenswrapper[5050]: I1123 16:27:23.738025 5050 scope.go:117] "RemoveContainer" containerID="7f84b9e7a7844dbf3be3de8040062250f3bd1f5cdb696bd4cdd5bcd82d5a26da" Nov 23 16:27:23 crc kubenswrapper[5050]: I1123 16:27:23.763825 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-cell1\" (UniqueName: \"kubernetes.io/configmap/bda20f95-955e-4158-addd-e65de95f42c7-openstack-cell1\") pod \"bda20f95-955e-4158-addd-e65de95f42c7\" (UID: \"bda20f95-955e-4158-addd-e65de95f42c7\") " Nov 23 16:27:23 crc kubenswrapper[5050]: I1123 16:27:23.763894 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bda20f95-955e-4158-addd-e65de95f42c7-ovsdbserver-sb\") pod \"bda20f95-955e-4158-addd-e65de95f42c7\" (UID: \"bda20f95-955e-4158-addd-e65de95f42c7\") " Nov 23 16:27:23 crc kubenswrapper[5050]: I1123 16:27:23.763982 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sb6x2\" (UniqueName: \"kubernetes.io/projected/bda20f95-955e-4158-addd-e65de95f42c7-kube-api-access-sb6x2\") pod \"bda20f95-955e-4158-addd-e65de95f42c7\" (UID: \"bda20f95-955e-4158-addd-e65de95f42c7\") " Nov 23 16:27:23 crc kubenswrapper[5050]: I1123 16:27:23.764140 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bda20f95-955e-4158-addd-e65de95f42c7-dns-svc\") pod \"bda20f95-955e-4158-addd-e65de95f42c7\" (UID: \"bda20f95-955e-4158-addd-e65de95f42c7\") " Nov 23 16:27:23 crc kubenswrapper[5050]: I1123 16:27:23.764190 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bda20f95-955e-4158-addd-e65de95f42c7-ovsdbserver-nb\") pod \"bda20f95-955e-4158-addd-e65de95f42c7\" (UID: \"bda20f95-955e-4158-addd-e65de95f42c7\") " Nov 23 16:27:23 crc kubenswrapper[5050]: I1123 16:27:23.764576 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bda20f95-955e-4158-addd-e65de95f42c7-config\") pod \"bda20f95-955e-4158-addd-e65de95f42c7\" (UID: \"bda20f95-955e-4158-addd-e65de95f42c7\") " Nov 23 16:27:23 crc kubenswrapper[5050]: I1123 16:27:23.771954 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bda20f95-955e-4158-addd-e65de95f42c7-kube-api-access-sb6x2" (OuterVolumeSpecName: "kube-api-access-sb6x2") pod "bda20f95-955e-4158-addd-e65de95f42c7" (UID: "bda20f95-955e-4158-addd-e65de95f42c7"). InnerVolumeSpecName "kube-api-access-sb6x2". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 16:27:23 crc kubenswrapper[5050]: I1123 16:27:23.787906 5050 scope.go:117] "RemoveContainer" containerID="0ef2eca042512bb2380c6be3c24f903987db6b2e419152d60cc9d08c860fc1a5" Nov 23 16:27:23 crc kubenswrapper[5050]: E1123 16:27:23.797125 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0ef2eca042512bb2380c6be3c24f903987db6b2e419152d60cc9d08c860fc1a5\": container with ID starting with 0ef2eca042512bb2380c6be3c24f903987db6b2e419152d60cc9d08c860fc1a5 not found: ID does not exist" containerID="0ef2eca042512bb2380c6be3c24f903987db6b2e419152d60cc9d08c860fc1a5" Nov 23 16:27:23 crc kubenswrapper[5050]: I1123 16:27:23.797201 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0ef2eca042512bb2380c6be3c24f903987db6b2e419152d60cc9d08c860fc1a5"} err="failed to get container status \"0ef2eca042512bb2380c6be3c24f903987db6b2e419152d60cc9d08c860fc1a5\": rpc error: code = NotFound desc = could not find container \"0ef2eca042512bb2380c6be3c24f903987db6b2e419152d60cc9d08c860fc1a5\": container with ID starting with 0ef2eca042512bb2380c6be3c24f903987db6b2e419152d60cc9d08c860fc1a5 not found: ID does not exist" Nov 23 16:27:23 crc kubenswrapper[5050]: I1123 16:27:23.797243 5050 scope.go:117] "RemoveContainer" containerID="7f84b9e7a7844dbf3be3de8040062250f3bd1f5cdb696bd4cdd5bcd82d5a26da" Nov 23 16:27:23 crc kubenswrapper[5050]: E1123 16:27:23.809652 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7f84b9e7a7844dbf3be3de8040062250f3bd1f5cdb696bd4cdd5bcd82d5a26da\": container with ID starting with 7f84b9e7a7844dbf3be3de8040062250f3bd1f5cdb696bd4cdd5bcd82d5a26da not found: ID does not exist" containerID="7f84b9e7a7844dbf3be3de8040062250f3bd1f5cdb696bd4cdd5bcd82d5a26da" Nov 23 16:27:23 crc kubenswrapper[5050]: I1123 16:27:23.809716 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7f84b9e7a7844dbf3be3de8040062250f3bd1f5cdb696bd4cdd5bcd82d5a26da"} err="failed to get container status \"7f84b9e7a7844dbf3be3de8040062250f3bd1f5cdb696bd4cdd5bcd82d5a26da\": rpc error: code = NotFound desc = could not find container \"7f84b9e7a7844dbf3be3de8040062250f3bd1f5cdb696bd4cdd5bcd82d5a26da\": container with ID starting with 7f84b9e7a7844dbf3be3de8040062250f3bd1f5cdb696bd4cdd5bcd82d5a26da not found: ID does not exist" Nov 23 16:27:23 crc kubenswrapper[5050]: I1123 16:27:23.836160 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bda20f95-955e-4158-addd-e65de95f42c7-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "bda20f95-955e-4158-addd-e65de95f42c7" (UID: "bda20f95-955e-4158-addd-e65de95f42c7"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 16:27:23 crc kubenswrapper[5050]: I1123 16:27:23.836581 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bda20f95-955e-4158-addd-e65de95f42c7-openstack-cell1" (OuterVolumeSpecName: "openstack-cell1") pod "bda20f95-955e-4158-addd-e65de95f42c7" (UID: "bda20f95-955e-4158-addd-e65de95f42c7"). InnerVolumeSpecName "openstack-cell1". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 16:27:23 crc kubenswrapper[5050]: I1123 16:27:23.845353 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bda20f95-955e-4158-addd-e65de95f42c7-config" (OuterVolumeSpecName: "config") pod "bda20f95-955e-4158-addd-e65de95f42c7" (UID: "bda20f95-955e-4158-addd-e65de95f42c7"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 16:27:23 crc kubenswrapper[5050]: I1123 16:27:23.848988 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bda20f95-955e-4158-addd-e65de95f42c7-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "bda20f95-955e-4158-addd-e65de95f42c7" (UID: "bda20f95-955e-4158-addd-e65de95f42c7"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 16:27:23 crc kubenswrapper[5050]: I1123 16:27:23.851471 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bda20f95-955e-4158-addd-e65de95f42c7-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "bda20f95-955e-4158-addd-e65de95f42c7" (UID: "bda20f95-955e-4158-addd-e65de95f42c7"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 16:27:23 crc kubenswrapper[5050]: I1123 16:27:23.868758 5050 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bda20f95-955e-4158-addd-e65de95f42c7-config\") on node \"crc\" DevicePath \"\"" Nov 23 16:27:23 crc kubenswrapper[5050]: I1123 16:27:23.868799 5050 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bda20f95-955e-4158-addd-e65de95f42c7-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 23 16:27:23 crc kubenswrapper[5050]: I1123 16:27:23.868816 5050 reconciler_common.go:293] "Volume detached for volume \"openstack-cell1\" (UniqueName: \"kubernetes.io/configmap/bda20f95-955e-4158-addd-e65de95f42c7-openstack-cell1\") on node \"crc\" DevicePath \"\"" Nov 23 16:27:23 crc kubenswrapper[5050]: I1123 16:27:23.868833 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sb6x2\" (UniqueName: \"kubernetes.io/projected/bda20f95-955e-4158-addd-e65de95f42c7-kube-api-access-sb6x2\") on node \"crc\" DevicePath \"\"" Nov 23 16:27:23 crc kubenswrapper[5050]: I1123 16:27:23.868844 5050 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bda20f95-955e-4158-addd-e65de95f42c7-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 23 16:27:23 crc kubenswrapper[5050]: I1123 16:27:23.868856 5050 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bda20f95-955e-4158-addd-e65de95f42c7-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 23 16:27:24 crc kubenswrapper[5050]: I1123 16:27:24.051588 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5466f6b6b5-b8dfn"] Nov 23 16:27:24 crc kubenswrapper[5050]: I1123 16:27:24.061706 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5466f6b6b5-b8dfn"] Nov 23 16:27:25 crc kubenswrapper[5050]: I1123 16:27:25.567635 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bda20f95-955e-4158-addd-e65de95f42c7" path="/var/lib/kubelet/pods/bda20f95-955e-4158-addd-e65de95f42c7/volumes" Nov 23 16:27:33 crc kubenswrapper[5050]: 
Nov 23 16:27:33 crc kubenswrapper[5050]: I1123 16:27:33.549966 5050 scope.go:117] "RemoveContainer" containerID="c66ef3881ddcc9912b6c270cf4a535647442815ea6b0dd1bc5eb1cfcbc78f486"
Nov 23 16:27:33 crc kubenswrapper[5050]: E1123 16:27:33.551779 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f"
Nov 23 16:27:34 crc kubenswrapper[5050]: I1123 16:27:34.135311 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cf767s"]
Nov 23 16:27:34 crc kubenswrapper[5050]: E1123 16:27:34.136284 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e4be80a9-30d1-47ad-9dff-6aa826295fdc" containerName="init"
Nov 23 16:27:34 crc kubenswrapper[5050]: I1123 16:27:34.136305 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="e4be80a9-30d1-47ad-9dff-6aa826295fdc" containerName="init"
Nov 23 16:27:34 crc kubenswrapper[5050]: E1123 16:27:34.136365 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e4be80a9-30d1-47ad-9dff-6aa826295fdc" containerName="dnsmasq-dns"
Nov 23 16:27:34 crc kubenswrapper[5050]: I1123 16:27:34.136373 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="e4be80a9-30d1-47ad-9dff-6aa826295fdc" containerName="dnsmasq-dns"
Nov 23 16:27:34 crc kubenswrapper[5050]: E1123 16:27:34.136387 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bda20f95-955e-4158-addd-e65de95f42c7" containerName="init"
Nov 23 16:27:34 crc kubenswrapper[5050]: I1123 16:27:34.136397 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="bda20f95-955e-4158-addd-e65de95f42c7" containerName="init"
Nov 23 16:27:34 crc kubenswrapper[5050]: E1123 16:27:34.136417 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bda20f95-955e-4158-addd-e65de95f42c7" containerName="dnsmasq-dns"
Nov 23 16:27:34 crc kubenswrapper[5050]: I1123 16:27:34.136425 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="bda20f95-955e-4158-addd-e65de95f42c7" containerName="dnsmasq-dns"
Nov 23 16:27:34 crc kubenswrapper[5050]: I1123 16:27:34.136683 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="e4be80a9-30d1-47ad-9dff-6aa826295fdc" containerName="dnsmasq-dns"
Nov 23 16:27:34 crc kubenswrapper[5050]: I1123 16:27:34.136705 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="bda20f95-955e-4158-addd-e65de95f42c7" containerName="dnsmasq-dns"
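The cpu_manager/memory_manager RemoveStaleState lines above are admission-time housekeeping: while admitting the pre-adoption-validation pod, the kubelet finds CPUSet and memory-state assignments still recorded for containers of the two dnsmasq-dns pods deleted earlier (e4be80a9-30d1-47ad-9dff-6aa826295fdc and bda20f95-955e-4158-addd-e65de95f42c7) and clears them; each E-level "removing container" line is immediately followed by the I-level state deletion that completes it.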
Need to start a new one" pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cf767s" Nov 23 16:27:34 crc kubenswrapper[5050]: I1123 16:27:34.142738 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 23 16:27:34 crc kubenswrapper[5050]: I1123 16:27:34.142863 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 23 16:27:34 crc kubenswrapper[5050]: I1123 16:27:34.143008 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Nov 23 16:27:34 crc kubenswrapper[5050]: I1123 16:27:34.143374 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-ckrpf" Nov 23 16:27:34 crc kubenswrapper[5050]: I1123 16:27:34.173498 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cf767s"] Nov 23 16:27:34 crc kubenswrapper[5050]: I1123 16:27:34.249723 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pre-adoption-validation-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a4749617-bd73-4ca8-9008-124343133ee9-pre-adoption-validation-combined-ca-bundle\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-cf767s\" (UID: \"a4749617-bd73-4ca8-9008-124343133ee9\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cf767s" Nov 23 16:27:34 crc kubenswrapper[5050]: I1123 16:27:34.249776 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a4749617-bd73-4ca8-9008-124343133ee9-inventory\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-cf767s\" (UID: \"a4749617-bd73-4ca8-9008-124343133ee9\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cf767s" Nov 23 16:27:34 crc kubenswrapper[5050]: I1123 16:27:34.249867 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/a4749617-bd73-4ca8-9008-124343133ee9-ceph\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-cf767s\" (UID: \"a4749617-bd73-4ca8-9008-124343133ee9\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cf767s" Nov 23 16:27:34 crc kubenswrapper[5050]: I1123 16:27:34.249910 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a4749617-bd73-4ca8-9008-124343133ee9-ssh-key\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-cf767s\" (UID: \"a4749617-bd73-4ca8-9008-124343133ee9\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cf767s" Nov 23 16:27:34 crc kubenswrapper[5050]: I1123 16:27:34.249944 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tpts6\" (UniqueName: \"kubernetes.io/projected/a4749617-bd73-4ca8-9008-124343133ee9-kube-api-access-tpts6\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-cf767s\" (UID: \"a4749617-bd73-4ca8-9008-124343133ee9\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cf767s" Nov 23 16:27:34 crc kubenswrapper[5050]: I1123 16:27:34.352093 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"pre-adoption-validation-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a4749617-bd73-4ca8-9008-124343133ee9-pre-adoption-validation-combined-ca-bundle\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-cf767s\" (UID: \"a4749617-bd73-4ca8-9008-124343133ee9\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cf767s" Nov 23 16:27:34 crc kubenswrapper[5050]: I1123 16:27:34.352725 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a4749617-bd73-4ca8-9008-124343133ee9-inventory\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-cf767s\" (UID: \"a4749617-bd73-4ca8-9008-124343133ee9\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cf767s" Nov 23 16:27:34 crc kubenswrapper[5050]: I1123 16:27:34.352906 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/a4749617-bd73-4ca8-9008-124343133ee9-ceph\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-cf767s\" (UID: \"a4749617-bd73-4ca8-9008-124343133ee9\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cf767s" Nov 23 16:27:34 crc kubenswrapper[5050]: I1123 16:27:34.353035 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a4749617-bd73-4ca8-9008-124343133ee9-ssh-key\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-cf767s\" (UID: \"a4749617-bd73-4ca8-9008-124343133ee9\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cf767s" Nov 23 16:27:34 crc kubenswrapper[5050]: I1123 16:27:34.353110 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tpts6\" (UniqueName: \"kubernetes.io/projected/a4749617-bd73-4ca8-9008-124343133ee9-kube-api-access-tpts6\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-cf767s\" (UID: \"a4749617-bd73-4ca8-9008-124343133ee9\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cf767s" Nov 23 16:27:34 crc kubenswrapper[5050]: I1123 16:27:34.360993 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a4749617-bd73-4ca8-9008-124343133ee9-ssh-key\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-cf767s\" (UID: \"a4749617-bd73-4ca8-9008-124343133ee9\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cf767s" Nov 23 16:27:34 crc kubenswrapper[5050]: I1123 16:27:34.361007 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a4749617-bd73-4ca8-9008-124343133ee9-inventory\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-cf767s\" (UID: \"a4749617-bd73-4ca8-9008-124343133ee9\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cf767s" Nov 23 16:27:34 crc kubenswrapper[5050]: I1123 16:27:34.373302 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/a4749617-bd73-4ca8-9008-124343133ee9-ceph\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-cf767s\" (UID: \"a4749617-bd73-4ca8-9008-124343133ee9\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cf767s" Nov 23 16:27:34 crc kubenswrapper[5050]: I1123 16:27:34.376280 5050 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pre-adoption-validation-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a4749617-bd73-4ca8-9008-124343133ee9-pre-adoption-validation-combined-ca-bundle\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-cf767s\" (UID: \"a4749617-bd73-4ca8-9008-124343133ee9\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cf767s" Nov 23 16:27:34 crc kubenswrapper[5050]: I1123 16:27:34.380340 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tpts6\" (UniqueName: \"kubernetes.io/projected/a4749617-bd73-4ca8-9008-124343133ee9-kube-api-access-tpts6\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-cf767s\" (UID: \"a4749617-bd73-4ca8-9008-124343133ee9\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cf767s" Nov 23 16:27:34 crc kubenswrapper[5050]: I1123 16:27:34.471055 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cf767s" Nov 23 16:27:35 crc kubenswrapper[5050]: I1123 16:27:35.144335 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cf767s"] Nov 23 16:27:35 crc kubenswrapper[5050]: W1123 16:27:35.162697 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda4749617_bd73_4ca8_9008_124343133ee9.slice/crio-4b3fc6a7283fafb9cebe16692aa4ae0b837364fc98dde992493491074a76788c WatchSource:0}: Error finding container 4b3fc6a7283fafb9cebe16692aa4ae0b837364fc98dde992493491074a76788c: Status 404 returned error can't find the container with id 4b3fc6a7283fafb9cebe16692aa4ae0b837364fc98dde992493491074a76788c Nov 23 16:27:35 crc kubenswrapper[5050]: I1123 16:27:35.874551 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cf767s" event={"ID":"a4749617-bd73-4ca8-9008-124343133ee9","Type":"ContainerStarted","Data":"4b3fc6a7283fafb9cebe16692aa4ae0b837364fc98dde992493491074a76788c"} Nov 23 16:27:44 crc kubenswrapper[5050]: I1123 16:27:44.550286 5050 scope.go:117] "RemoveContainer" containerID="c66ef3881ddcc9912b6c270cf4a535647442815ea6b0dd1bc5eb1cfcbc78f486" Nov 23 16:27:44 crc kubenswrapper[5050]: E1123 16:27:44.551220 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 16:27:45 crc kubenswrapper[5050]: I1123 16:27:45.975299 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 23 16:27:47 crc kubenswrapper[5050]: I1123 16:27:47.020666 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cf767s" event={"ID":"a4749617-bd73-4ca8-9008-124343133ee9","Type":"ContainerStarted","Data":"35f438b68f61f7878c03db3e8693b2cab16bc0f955a564eeebb9607e9f18e6c8"} Nov 23 16:27:47 crc kubenswrapper[5050]: I1123 16:27:47.051910 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cf767s" podStartSLOduration=2.242253218 podStartE2EDuration="13.051846351s" podCreationTimestamp="2025-11-23 16:27:34 +0000 UTC" firstStartedPulling="2025-11-23 16:27:35.162956746 +0000 UTC m=+6350.329953271" lastFinishedPulling="2025-11-23 16:27:45.972549919 +0000 UTC m=+6361.139546404" observedRunningTime="2025-11-23 16:27:47.043820765 +0000 UTC m=+6362.210817280" watchObservedRunningTime="2025-11-23 16:27:47.051846351 +0000 UTC m=+6362.218842876" Nov 23 16:27:56 crc kubenswrapper[5050]: I1123 16:27:56.548980 5050 scope.go:117] "RemoveContainer" containerID="c66ef3881ddcc9912b6c270cf4a535647442815ea6b0dd1bc5eb1cfcbc78f486" Nov 23 16:27:56 crc kubenswrapper[5050]: E1123 16:27:56.550310 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 16:28:00 crc kubenswrapper[5050]: I1123 16:28:00.201524 5050 generic.go:334] "Generic (PLEG): container finished" podID="a4749617-bd73-4ca8-9008-124343133ee9" containerID="35f438b68f61f7878c03db3e8693b2cab16bc0f955a564eeebb9607e9f18e6c8" exitCode=0 Nov 23 16:28:00 crc kubenswrapper[5050]: I1123 16:28:00.201611 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cf767s" event={"ID":"a4749617-bd73-4ca8-9008-124343133ee9","Type":"ContainerDied","Data":"35f438b68f61f7878c03db3e8693b2cab16bc0f955a564eeebb9607e9f18e6c8"} Nov 23 16:28:02 crc kubenswrapper[5050]: I1123 16:28:02.102799 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cf767s" Nov 23 16:28:02 crc kubenswrapper[5050]: I1123 16:28:02.142791 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pre-adoption-validation-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a4749617-bd73-4ca8-9008-124343133ee9-pre-adoption-validation-combined-ca-bundle\") pod \"a4749617-bd73-4ca8-9008-124343133ee9\" (UID: \"a4749617-bd73-4ca8-9008-124343133ee9\") " Nov 23 16:28:02 crc kubenswrapper[5050]: I1123 16:28:02.143215 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tpts6\" (UniqueName: \"kubernetes.io/projected/a4749617-bd73-4ca8-9008-124343133ee9-kube-api-access-tpts6\") pod \"a4749617-bd73-4ca8-9008-124343133ee9\" (UID: \"a4749617-bd73-4ca8-9008-124343133ee9\") " Nov 23 16:28:02 crc kubenswrapper[5050]: I1123 16:28:02.143293 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a4749617-bd73-4ca8-9008-124343133ee9-ssh-key\") pod \"a4749617-bd73-4ca8-9008-124343133ee9\" (UID: \"a4749617-bd73-4ca8-9008-124343133ee9\") " Nov 23 16:28:02 crc kubenswrapper[5050]: I1123 16:28:02.143334 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/a4749617-bd73-4ca8-9008-124343133ee9-ceph\") pod \"a4749617-bd73-4ca8-9008-124343133ee9\" (UID: \"a4749617-bd73-4ca8-9008-124343133ee9\") " Nov 23 16:28:02 crc kubenswrapper[5050]: I1123 16:28:02.143512 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a4749617-bd73-4ca8-9008-124343133ee9-inventory\") pod \"a4749617-bd73-4ca8-9008-124343133ee9\" (UID: \"a4749617-bd73-4ca8-9008-124343133ee9\") " Nov 23 16:28:02 crc kubenswrapper[5050]: I1123 16:28:02.151585 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a4749617-bd73-4ca8-9008-124343133ee9-pre-adoption-validation-combined-ca-bundle" (OuterVolumeSpecName: "pre-adoption-validation-combined-ca-bundle") pod "a4749617-bd73-4ca8-9008-124343133ee9" (UID: "a4749617-bd73-4ca8-9008-124343133ee9"). InnerVolumeSpecName "pre-adoption-validation-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:28:02 crc kubenswrapper[5050]: I1123 16:28:02.151716 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a4749617-bd73-4ca8-9008-124343133ee9-ceph" (OuterVolumeSpecName: "ceph") pod "a4749617-bd73-4ca8-9008-124343133ee9" (UID: "a4749617-bd73-4ca8-9008-124343133ee9"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:28:02 crc kubenswrapper[5050]: I1123 16:28:02.154883 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a4749617-bd73-4ca8-9008-124343133ee9-kube-api-access-tpts6" (OuterVolumeSpecName: "kube-api-access-tpts6") pod "a4749617-bd73-4ca8-9008-124343133ee9" (UID: "a4749617-bd73-4ca8-9008-124343133ee9"). InnerVolumeSpecName "kube-api-access-tpts6". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 16:28:02 crc kubenswrapper[5050]: I1123 16:28:02.193705 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a4749617-bd73-4ca8-9008-124343133ee9-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "a4749617-bd73-4ca8-9008-124343133ee9" (UID: "a4749617-bd73-4ca8-9008-124343133ee9"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:28:02 crc kubenswrapper[5050]: I1123 16:28:02.195420 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a4749617-bd73-4ca8-9008-124343133ee9-inventory" (OuterVolumeSpecName: "inventory") pod "a4749617-bd73-4ca8-9008-124343133ee9" (UID: "a4749617-bd73-4ca8-9008-124343133ee9"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:28:02 crc kubenswrapper[5050]: I1123 16:28:02.230007 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cf767s" event={"ID":"a4749617-bd73-4ca8-9008-124343133ee9","Type":"ContainerDied","Data":"4b3fc6a7283fafb9cebe16692aa4ae0b837364fc98dde992493491074a76788c"} Nov 23 16:28:02 crc kubenswrapper[5050]: I1123 16:28:02.230130 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4b3fc6a7283fafb9cebe16692aa4ae0b837364fc98dde992493491074a76788c" Nov 23 16:28:02 crc kubenswrapper[5050]: I1123 16:28:02.230364 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cf767s" Nov 23 16:28:02 crc kubenswrapper[5050]: I1123 16:28:02.249953 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tpts6\" (UniqueName: \"kubernetes.io/projected/a4749617-bd73-4ca8-9008-124343133ee9-kube-api-access-tpts6\") on node \"crc\" DevicePath \"\"" Nov 23 16:28:02 crc kubenswrapper[5050]: I1123 16:28:02.249998 5050 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a4749617-bd73-4ca8-9008-124343133ee9-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 23 16:28:02 crc kubenswrapper[5050]: I1123 16:28:02.250012 5050 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/a4749617-bd73-4ca8-9008-124343133ee9-ceph\") on node \"crc\" DevicePath \"\"" Nov 23 16:28:02 crc kubenswrapper[5050]: I1123 16:28:02.250058 5050 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a4749617-bd73-4ca8-9008-124343133ee9-inventory\") on node \"crc\" DevicePath \"\"" Nov 23 16:28:02 crc kubenswrapper[5050]: I1123 16:28:02.250125 5050 reconciler_common.go:293] "Volume detached for volume \"pre-adoption-validation-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a4749617-bd73-4ca8-9008-124343133ee9-pre-adoption-validation-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 16:28:07 crc kubenswrapper[5050]: I1123 16:28:07.497759 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-c9xw7"] Nov 23 16:28:07 crc kubenswrapper[5050]: E1123 16:28:07.499100 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a4749617-bd73-4ca8-9008-124343133ee9" containerName="pre-adoption-validation-openstack-pre-adoption-openstack-cell1" Nov 23 16:28:07 crc kubenswrapper[5050]: I1123 16:28:07.499117 5050 
state_mem.go:107] "Deleted CPUSet assignment" podUID="a4749617-bd73-4ca8-9008-124343133ee9" containerName="pre-adoption-validation-openstack-pre-adoption-openstack-cell1" Nov 23 16:28:07 crc kubenswrapper[5050]: I1123 16:28:07.499495 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="a4749617-bd73-4ca8-9008-124343133ee9" containerName="pre-adoption-validation-openstack-pre-adoption-openstack-cell1" Nov 23 16:28:07 crc kubenswrapper[5050]: I1123 16:28:07.500745 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-c9xw7" Nov 23 16:28:07 crc kubenswrapper[5050]: I1123 16:28:07.503244 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Nov 23 16:28:07 crc kubenswrapper[5050]: I1123 16:28:07.503428 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-ckrpf" Nov 23 16:28:07 crc kubenswrapper[5050]: I1123 16:28:07.503685 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 23 16:28:07 crc kubenswrapper[5050]: I1123 16:28:07.509948 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 23 16:28:07 crc kubenswrapper[5050]: I1123 16:28:07.525016 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-c9xw7"] Nov 23 16:28:07 crc kubenswrapper[5050]: I1123 16:28:07.689979 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jz4k2\" (UniqueName: \"kubernetes.io/projected/2c88cdbf-1f63-4305-a672-8cc67f06fa66-kube-api-access-jz4k2\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-c9xw7\" (UID: \"2c88cdbf-1f63-4305-a672-8cc67f06fa66\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-c9xw7" Nov 23 16:28:07 crc kubenswrapper[5050]: I1123 16:28:07.690065 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2c88cdbf-1f63-4305-a672-8cc67f06fa66-ssh-key\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-c9xw7\" (UID: \"2c88cdbf-1f63-4305-a672-8cc67f06fa66\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-c9xw7" Nov 23 16:28:07 crc kubenswrapper[5050]: I1123 16:28:07.690106 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2c88cdbf-1f63-4305-a672-8cc67f06fa66-inventory\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-c9xw7\" (UID: \"2c88cdbf-1f63-4305-a672-8cc67f06fa66\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-c9xw7" Nov 23 16:28:07 crc kubenswrapper[5050]: I1123 16:28:07.690160 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/2c88cdbf-1f63-4305-a672-8cc67f06fa66-ceph\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-c9xw7\" (UID: \"2c88cdbf-1f63-4305-a672-8cc67f06fa66\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-c9xw7" Nov 23 16:28:07 crc kubenswrapper[5050]: I1123 16:28:07.690181 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tripleo-cleanup-combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/2c88cdbf-1f63-4305-a672-8cc67f06fa66-tripleo-cleanup-combined-ca-bundle\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-c9xw7\" (UID: \"2c88cdbf-1f63-4305-a672-8cc67f06fa66\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-c9xw7" Nov 23 16:28:07 crc kubenswrapper[5050]: I1123 16:28:07.793517 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/2c88cdbf-1f63-4305-a672-8cc67f06fa66-ceph\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-c9xw7\" (UID: \"2c88cdbf-1f63-4305-a672-8cc67f06fa66\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-c9xw7" Nov 23 16:28:07 crc kubenswrapper[5050]: I1123 16:28:07.793610 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tripleo-cleanup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c88cdbf-1f63-4305-a672-8cc67f06fa66-tripleo-cleanup-combined-ca-bundle\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-c9xw7\" (UID: \"2c88cdbf-1f63-4305-a672-8cc67f06fa66\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-c9xw7" Nov 23 16:28:07 crc kubenswrapper[5050]: I1123 16:28:07.793958 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jz4k2\" (UniqueName: \"kubernetes.io/projected/2c88cdbf-1f63-4305-a672-8cc67f06fa66-kube-api-access-jz4k2\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-c9xw7\" (UID: \"2c88cdbf-1f63-4305-a672-8cc67f06fa66\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-c9xw7" Nov 23 16:28:07 crc kubenswrapper[5050]: I1123 16:28:07.794070 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2c88cdbf-1f63-4305-a672-8cc67f06fa66-ssh-key\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-c9xw7\" (UID: \"2c88cdbf-1f63-4305-a672-8cc67f06fa66\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-c9xw7" Nov 23 16:28:07 crc kubenswrapper[5050]: I1123 16:28:07.794156 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2c88cdbf-1f63-4305-a672-8cc67f06fa66-inventory\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-c9xw7\" (UID: \"2c88cdbf-1f63-4305-a672-8cc67f06fa66\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-c9xw7" Nov 23 16:28:07 crc kubenswrapper[5050]: I1123 16:28:07.801540 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tripleo-cleanup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c88cdbf-1f63-4305-a672-8cc67f06fa66-tripleo-cleanup-combined-ca-bundle\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-c9xw7\" (UID: \"2c88cdbf-1f63-4305-a672-8cc67f06fa66\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-c9xw7" Nov 23 16:28:07 crc kubenswrapper[5050]: I1123 16:28:07.801817 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2c88cdbf-1f63-4305-a672-8cc67f06fa66-ssh-key\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-c9xw7\" (UID: \"2c88cdbf-1f63-4305-a672-8cc67f06fa66\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-c9xw7" Nov 23 16:28:07 crc kubenswrapper[5050]: I1123 16:28:07.809008 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: 
\"kubernetes.io/secret/2c88cdbf-1f63-4305-a672-8cc67f06fa66-ceph\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-c9xw7\" (UID: \"2c88cdbf-1f63-4305-a672-8cc67f06fa66\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-c9xw7" Nov 23 16:28:07 crc kubenswrapper[5050]: I1123 16:28:07.810173 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2c88cdbf-1f63-4305-a672-8cc67f06fa66-inventory\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-c9xw7\" (UID: \"2c88cdbf-1f63-4305-a672-8cc67f06fa66\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-c9xw7" Nov 23 16:28:07 crc kubenswrapper[5050]: I1123 16:28:07.815486 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jz4k2\" (UniqueName: \"kubernetes.io/projected/2c88cdbf-1f63-4305-a672-8cc67f06fa66-kube-api-access-jz4k2\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-c9xw7\" (UID: \"2c88cdbf-1f63-4305-a672-8cc67f06fa66\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-c9xw7" Nov 23 16:28:07 crc kubenswrapper[5050]: I1123 16:28:07.828877 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-c9xw7" Nov 23 16:28:08 crc kubenswrapper[5050]: I1123 16:28:08.423807 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-c9xw7"] Nov 23 16:28:08 crc kubenswrapper[5050]: W1123 16:28:08.429775 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2c88cdbf_1f63_4305_a672_8cc67f06fa66.slice/crio-fad9e9c0b3f7e129210225ef36e51dc007b3e8f79c201cf0c37ac6276b8d1e2f WatchSource:0}: Error finding container fad9e9c0b3f7e129210225ef36e51dc007b3e8f79c201cf0c37ac6276b8d1e2f: Status 404 returned error can't find the container with id fad9e9c0b3f7e129210225ef36e51dc007b3e8f79c201cf0c37ac6276b8d1e2f Nov 23 16:28:08 crc kubenswrapper[5050]: I1123 16:28:08.549158 5050 scope.go:117] "RemoveContainer" containerID="c66ef3881ddcc9912b6c270cf4a535647442815ea6b0dd1bc5eb1cfcbc78f486" Nov 23 16:28:08 crc kubenswrapper[5050]: E1123 16:28:08.549895 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 16:28:09 crc kubenswrapper[5050]: I1123 16:28:09.355996 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-c9xw7" event={"ID":"2c88cdbf-1f63-4305-a672-8cc67f06fa66","Type":"ContainerStarted","Data":"fad9e9c0b3f7e129210225ef36e51dc007b3e8f79c201cf0c37ac6276b8d1e2f"} Nov 23 16:28:10 crc kubenswrapper[5050]: I1123 16:28:10.370978 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-c9xw7" event={"ID":"2c88cdbf-1f63-4305-a672-8cc67f06fa66","Type":"ContainerStarted","Data":"4c23b504aba0f854396e4676468ef586022b84506f66fd7461dab41a30b83190"} Nov 23 16:28:10 crc kubenswrapper[5050]: I1123 16:28:10.412016 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-c9xw7" podStartSLOduration=2.791979452 podStartE2EDuration="3.41196693s" podCreationTimestamp="2025-11-23 16:28:07 +0000 UTC" firstStartedPulling="2025-11-23 16:28:08.433921618 +0000 UTC m=+6383.600918103" lastFinishedPulling="2025-11-23 16:28:09.053909096 +0000 UTC m=+6384.220905581" observedRunningTime="2025-11-23 16:28:10.393142059 +0000 UTC m=+6385.560138584" watchObservedRunningTime="2025-11-23 16:28:10.41196693 +0000 UTC m=+6385.578963425" Nov 23 16:28:19 crc kubenswrapper[5050]: I1123 16:28:19.063977 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/octavia-db-create-bw9cx"] Nov 23 16:28:19 crc kubenswrapper[5050]: I1123 16:28:19.080697 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/octavia-db-create-bw9cx"] Nov 23 16:28:19 crc kubenswrapper[5050]: I1123 16:28:19.568081 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5ba0989c-deb9-4516-9d8b-8e59e0565cd1" path="/var/lib/kubelet/pods/5ba0989c-deb9-4516-9d8b-8e59e0565cd1/volumes" Nov 23 16:28:20 crc kubenswrapper[5050]: I1123 16:28:20.045495 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/octavia-add8-account-create-hgsgf"] Nov 23 16:28:20 crc kubenswrapper[5050]: I1123 16:28:20.071746 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/octavia-add8-account-create-hgsgf"] Nov 23 16:28:21 crc kubenswrapper[5050]: I1123 16:28:21.549625 5050 scope.go:117] "RemoveContainer" containerID="c66ef3881ddcc9912b6c270cf4a535647442815ea6b0dd1bc5eb1cfcbc78f486" Nov 23 16:28:21 crc kubenswrapper[5050]: E1123 16:28:21.550478 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 16:28:21 crc kubenswrapper[5050]: I1123 16:28:21.569424 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0e91873a-a795-426c-b90c-bd25dec9386e" path="/var/lib/kubelet/pods/0e91873a-a795-426c-b90c-bd25dec9386e/volumes" Nov 23 16:28:26 crc kubenswrapper[5050]: I1123 16:28:26.036102 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/octavia-4b68-account-create-4466z"] Nov 23 16:28:26 crc kubenswrapper[5050]: I1123 16:28:26.048168 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/octavia-persistence-db-create-n99mq"] Nov 23 16:28:26 crc kubenswrapper[5050]: I1123 16:28:26.072600 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/octavia-4b68-account-create-4466z"] Nov 23 16:28:26 crc kubenswrapper[5050]: I1123 16:28:26.088403 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/octavia-persistence-db-create-n99mq"] Nov 23 16:28:27 crc kubenswrapper[5050]: I1123 16:28:27.566270 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b33829ce-bce6-479e-915b-aee1ffe96e5f" path="/var/lib/kubelet/pods/b33829ce-bce6-479e-915b-aee1ffe96e5f/volumes" Nov 23 16:28:27 crc kubenswrapper[5050]: I1123 16:28:27.570131 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e6abf46b-e82a-404d-ae3b-5adddf6f4960" path="/var/lib/kubelet/pods/e6abf46b-e82a-404d-ae3b-5adddf6f4960/volumes" Nov 23 16:28:32 crc 
kubenswrapper[5050]: I1123 16:28:32.549112 5050 scope.go:117] "RemoveContainer" containerID="c66ef3881ddcc9912b6c270cf4a535647442815ea6b0dd1bc5eb1cfcbc78f486" Nov 23 16:28:32 crc kubenswrapper[5050]: E1123 16:28:32.550217 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 16:28:45 crc kubenswrapper[5050]: I1123 16:28:45.555034 5050 scope.go:117] "RemoveContainer" containerID="c66ef3881ddcc9912b6c270cf4a535647442815ea6b0dd1bc5eb1cfcbc78f486" Nov 23 16:28:45 crc kubenswrapper[5050]: E1123 16:28:45.556285 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 16:28:56 crc kubenswrapper[5050]: I1123 16:28:56.548626 5050 scope.go:117] "RemoveContainer" containerID="c66ef3881ddcc9912b6c270cf4a535647442815ea6b0dd1bc5eb1cfcbc78f486" Nov 23 16:28:56 crc kubenswrapper[5050]: E1123 16:28:56.549842 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 16:28:59 crc kubenswrapper[5050]: I1123 16:28:59.065345 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/octavia-db-sync-phxjz"] Nov 23 16:28:59 crc kubenswrapper[5050]: I1123 16:28:59.083632 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/octavia-db-sync-phxjz"] Nov 23 16:28:59 crc kubenswrapper[5050]: I1123 16:28:59.583171 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1a6211f3-cd96-4278-b870-6e8f17da26dd" path="/var/lib/kubelet/pods/1a6211f3-cd96-4278-b870-6e8f17da26dd/volumes" Nov 23 16:29:09 crc kubenswrapper[5050]: I1123 16:29:09.549352 5050 scope.go:117] "RemoveContainer" containerID="c66ef3881ddcc9912b6c270cf4a535647442815ea6b0dd1bc5eb1cfcbc78f486" Nov 23 16:29:09 crc kubenswrapper[5050]: E1123 16:29:09.550385 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 16:29:18 crc kubenswrapper[5050]: I1123 16:29:18.481918 5050 scope.go:117] "RemoveContainer" containerID="4229c28564dcdb076c51d9999be27df633bb55ec6c736898cc5b93c7ae28a14a" Nov 23 16:29:18 crc kubenswrapper[5050]: I1123 16:29:18.534918 5050 scope.go:117] "RemoveContainer" 
containerID="969c7d5af6d321a0b0a60235c193dd3d84a266286ec3cdb1723362c111437d6e" Nov 23 16:29:18 crc kubenswrapper[5050]: I1123 16:29:18.587262 5050 scope.go:117] "RemoveContainer" containerID="205da7a70d393d0b009fb1cbdf28e73a3558cdd8a0505926985f1b8b7a4a8623" Nov 23 16:29:18 crc kubenswrapper[5050]: I1123 16:29:18.654119 5050 scope.go:117] "RemoveContainer" containerID="cb88b2e579978ea61e7fee63962165a38d9a7985b737800808b9cc9ed8718904" Nov 23 16:29:18 crc kubenswrapper[5050]: I1123 16:29:18.717692 5050 scope.go:117] "RemoveContainer" containerID="b8d4cd94669a986c203c808a2bb61a252807144e2a985a08aeeb6d41bc4a63b1" Nov 23 16:29:18 crc kubenswrapper[5050]: I1123 16:29:18.772162 5050 scope.go:117] "RemoveContainer" containerID="8657cdc0fa967668e658b8ff3329e6362417f03d33c237685c535d594fdd620d" Nov 23 16:29:20 crc kubenswrapper[5050]: I1123 16:29:20.549115 5050 scope.go:117] "RemoveContainer" containerID="c66ef3881ddcc9912b6c270cf4a535647442815ea6b0dd1bc5eb1cfcbc78f486" Nov 23 16:29:20 crc kubenswrapper[5050]: E1123 16:29:20.549924 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 16:29:31 crc kubenswrapper[5050]: I1123 16:29:31.549221 5050 scope.go:117] "RemoveContainer" containerID="c66ef3881ddcc9912b6c270cf4a535647442815ea6b0dd1bc5eb1cfcbc78f486" Nov 23 16:29:31 crc kubenswrapper[5050]: E1123 16:29:31.551922 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 16:29:44 crc kubenswrapper[5050]: I1123 16:29:44.549392 5050 scope.go:117] "RemoveContainer" containerID="c66ef3881ddcc9912b6c270cf4a535647442815ea6b0dd1bc5eb1cfcbc78f486" Nov 23 16:29:44 crc kubenswrapper[5050]: E1123 16:29:44.550924 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 16:29:59 crc kubenswrapper[5050]: I1123 16:29:59.550637 5050 scope.go:117] "RemoveContainer" containerID="c66ef3881ddcc9912b6c270cf4a535647442815ea6b0dd1bc5eb1cfcbc78f486" Nov 23 16:29:59 crc kubenswrapper[5050]: E1123 16:29:59.554928 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 16:30:00 crc kubenswrapper[5050]: I1123 16:30:00.209374 5050 
kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29398590-s2wsf"] Nov 23 16:30:00 crc kubenswrapper[5050]: I1123 16:30:00.216283 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29398590-s2wsf" Nov 23 16:30:00 crc kubenswrapper[5050]: I1123 16:30:00.219570 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 23 16:30:00 crc kubenswrapper[5050]: I1123 16:30:00.219878 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 23 16:30:00 crc kubenswrapper[5050]: I1123 16:30:00.227909 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29398590-s2wsf"] Nov 23 16:30:00 crc kubenswrapper[5050]: I1123 16:30:00.252543 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/23c62781-f73e-4fcd-b3e1-c94afe1cb76a-config-volume\") pod \"collect-profiles-29398590-s2wsf\" (UID: \"23c62781-f73e-4fcd-b3e1-c94afe1cb76a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29398590-s2wsf" Nov 23 16:30:00 crc kubenswrapper[5050]: I1123 16:30:00.252663 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8zn74\" (UniqueName: \"kubernetes.io/projected/23c62781-f73e-4fcd-b3e1-c94afe1cb76a-kube-api-access-8zn74\") pod \"collect-profiles-29398590-s2wsf\" (UID: \"23c62781-f73e-4fcd-b3e1-c94afe1cb76a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29398590-s2wsf" Nov 23 16:30:00 crc kubenswrapper[5050]: I1123 16:30:00.252721 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/23c62781-f73e-4fcd-b3e1-c94afe1cb76a-secret-volume\") pod \"collect-profiles-29398590-s2wsf\" (UID: \"23c62781-f73e-4fcd-b3e1-c94afe1cb76a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29398590-s2wsf" Nov 23 16:30:00 crc kubenswrapper[5050]: I1123 16:30:00.355604 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/23c62781-f73e-4fcd-b3e1-c94afe1cb76a-config-volume\") pod \"collect-profiles-29398590-s2wsf\" (UID: \"23c62781-f73e-4fcd-b3e1-c94afe1cb76a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29398590-s2wsf" Nov 23 16:30:00 crc kubenswrapper[5050]: I1123 16:30:00.355796 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8zn74\" (UniqueName: \"kubernetes.io/projected/23c62781-f73e-4fcd-b3e1-c94afe1cb76a-kube-api-access-8zn74\") pod \"collect-profiles-29398590-s2wsf\" (UID: \"23c62781-f73e-4fcd-b3e1-c94afe1cb76a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29398590-s2wsf" Nov 23 16:30:00 crc kubenswrapper[5050]: I1123 16:30:00.355856 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/23c62781-f73e-4fcd-b3e1-c94afe1cb76a-secret-volume\") pod \"collect-profiles-29398590-s2wsf\" (UID: \"23c62781-f73e-4fcd-b3e1-c94afe1cb76a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29398590-s2wsf" Nov 23 16:30:00 
crc kubenswrapper[5050]: I1123 16:30:00.357625 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/23c62781-f73e-4fcd-b3e1-c94afe1cb76a-config-volume\") pod \"collect-profiles-29398590-s2wsf\" (UID: \"23c62781-f73e-4fcd-b3e1-c94afe1cb76a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29398590-s2wsf" Nov 23 16:30:00 crc kubenswrapper[5050]: I1123 16:30:00.364770 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/23c62781-f73e-4fcd-b3e1-c94afe1cb76a-secret-volume\") pod \"collect-profiles-29398590-s2wsf\" (UID: \"23c62781-f73e-4fcd-b3e1-c94afe1cb76a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29398590-s2wsf" Nov 23 16:30:00 crc kubenswrapper[5050]: I1123 16:30:00.377522 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8zn74\" (UniqueName: \"kubernetes.io/projected/23c62781-f73e-4fcd-b3e1-c94afe1cb76a-kube-api-access-8zn74\") pod \"collect-profiles-29398590-s2wsf\" (UID: \"23c62781-f73e-4fcd-b3e1-c94afe1cb76a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29398590-s2wsf" Nov 23 16:30:00 crc kubenswrapper[5050]: I1123 16:30:00.548940 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29398590-s2wsf" Nov 23 16:30:01 crc kubenswrapper[5050]: I1123 16:30:01.098758 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29398590-s2wsf"] Nov 23 16:30:01 crc kubenswrapper[5050]: I1123 16:30:01.976338 5050 generic.go:334] "Generic (PLEG): container finished" podID="23c62781-f73e-4fcd-b3e1-c94afe1cb76a" containerID="52fc70b7d66863eac9bf8bbb06a91701d213447513d351984bdb03131ff7101b" exitCode=0 Nov 23 16:30:01 crc kubenswrapper[5050]: I1123 16:30:01.976598 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29398590-s2wsf" event={"ID":"23c62781-f73e-4fcd-b3e1-c94afe1cb76a","Type":"ContainerDied","Data":"52fc70b7d66863eac9bf8bbb06a91701d213447513d351984bdb03131ff7101b"} Nov 23 16:30:01 crc kubenswrapper[5050]: I1123 16:30:01.977101 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29398590-s2wsf" event={"ID":"23c62781-f73e-4fcd-b3e1-c94afe1cb76a","Type":"ContainerStarted","Data":"5aca20b965783eba6a91ce3411865992b8202924559d30b22f670bfc37a1c70b"} Nov 23 16:30:03 crc kubenswrapper[5050]: I1123 16:30:03.490588 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29398590-s2wsf" Nov 23 16:30:03 crc kubenswrapper[5050]: I1123 16:30:03.563648 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8zn74\" (UniqueName: \"kubernetes.io/projected/23c62781-f73e-4fcd-b3e1-c94afe1cb76a-kube-api-access-8zn74\") pod \"23c62781-f73e-4fcd-b3e1-c94afe1cb76a\" (UID: \"23c62781-f73e-4fcd-b3e1-c94afe1cb76a\") " Nov 23 16:30:03 crc kubenswrapper[5050]: I1123 16:30:03.563799 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/23c62781-f73e-4fcd-b3e1-c94afe1cb76a-config-volume\") pod \"23c62781-f73e-4fcd-b3e1-c94afe1cb76a\" (UID: \"23c62781-f73e-4fcd-b3e1-c94afe1cb76a\") " Nov 23 16:30:03 crc kubenswrapper[5050]: I1123 16:30:03.563855 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/23c62781-f73e-4fcd-b3e1-c94afe1cb76a-secret-volume\") pod \"23c62781-f73e-4fcd-b3e1-c94afe1cb76a\" (UID: \"23c62781-f73e-4fcd-b3e1-c94afe1cb76a\") " Nov 23 16:30:03 crc kubenswrapper[5050]: I1123 16:30:03.564652 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/23c62781-f73e-4fcd-b3e1-c94afe1cb76a-config-volume" (OuterVolumeSpecName: "config-volume") pod "23c62781-f73e-4fcd-b3e1-c94afe1cb76a" (UID: "23c62781-f73e-4fcd-b3e1-c94afe1cb76a"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 16:30:03 crc kubenswrapper[5050]: I1123 16:30:03.565300 5050 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/23c62781-f73e-4fcd-b3e1-c94afe1cb76a-config-volume\") on node \"crc\" DevicePath \"\"" Nov 23 16:30:03 crc kubenswrapper[5050]: I1123 16:30:03.570608 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/23c62781-f73e-4fcd-b3e1-c94afe1cb76a-kube-api-access-8zn74" (OuterVolumeSpecName: "kube-api-access-8zn74") pod "23c62781-f73e-4fcd-b3e1-c94afe1cb76a" (UID: "23c62781-f73e-4fcd-b3e1-c94afe1cb76a"). InnerVolumeSpecName "kube-api-access-8zn74". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 16:30:03 crc kubenswrapper[5050]: I1123 16:30:03.571247 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/23c62781-f73e-4fcd-b3e1-c94afe1cb76a-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "23c62781-f73e-4fcd-b3e1-c94afe1cb76a" (UID: "23c62781-f73e-4fcd-b3e1-c94afe1cb76a"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:30:03 crc kubenswrapper[5050]: I1123 16:30:03.671275 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8zn74\" (UniqueName: \"kubernetes.io/projected/23c62781-f73e-4fcd-b3e1-c94afe1cb76a-kube-api-access-8zn74\") on node \"crc\" DevicePath \"\"" Nov 23 16:30:03 crc kubenswrapper[5050]: I1123 16:30:03.671333 5050 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/23c62781-f73e-4fcd-b3e1-c94afe1cb76a-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 23 16:30:04 crc kubenswrapper[5050]: I1123 16:30:04.010066 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29398590-s2wsf" event={"ID":"23c62781-f73e-4fcd-b3e1-c94afe1cb76a","Type":"ContainerDied","Data":"5aca20b965783eba6a91ce3411865992b8202924559d30b22f670bfc37a1c70b"} Nov 23 16:30:04 crc kubenswrapper[5050]: I1123 16:30:04.010136 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5aca20b965783eba6a91ce3411865992b8202924559d30b22f670bfc37a1c70b" Nov 23 16:30:04 crc kubenswrapper[5050]: I1123 16:30:04.010191 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29398590-s2wsf" Nov 23 16:30:04 crc kubenswrapper[5050]: I1123 16:30:04.624509 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29398545-p8x8h"] Nov 23 16:30:04 crc kubenswrapper[5050]: I1123 16:30:04.641149 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29398545-p8x8h"] Nov 23 16:30:05 crc kubenswrapper[5050]: I1123 16:30:05.631992 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0d9c1450-0c73-427d-be1c-18f7ff013f00" path="/var/lib/kubelet/pods/0d9c1450-0c73-427d-be1c-18f7ff013f00/volumes" Nov 23 16:30:10 crc kubenswrapper[5050]: I1123 16:30:10.490382 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-58d5w"] Nov 23 16:30:10 crc kubenswrapper[5050]: E1123 16:30:10.491752 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="23c62781-f73e-4fcd-b3e1-c94afe1cb76a" containerName="collect-profiles" Nov 23 16:30:10 crc kubenswrapper[5050]: I1123 16:30:10.491777 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="23c62781-f73e-4fcd-b3e1-c94afe1cb76a" containerName="collect-profiles" Nov 23 16:30:10 crc kubenswrapper[5050]: I1123 16:30:10.492159 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="23c62781-f73e-4fcd-b3e1-c94afe1cb76a" containerName="collect-profiles" Nov 23 16:30:10 crc kubenswrapper[5050]: I1123 16:30:10.494365 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-58d5w" Nov 23 16:30:10 crc kubenswrapper[5050]: I1123 16:30:10.519735 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-58d5w"] Nov 23 16:30:10 crc kubenswrapper[5050]: I1123 16:30:10.553587 5050 scope.go:117] "RemoveContainer" containerID="c66ef3881ddcc9912b6c270cf4a535647442815ea6b0dd1bc5eb1cfcbc78f486" Nov 23 16:30:10 crc kubenswrapper[5050]: E1123 16:30:10.553962 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 16:30:10 crc kubenswrapper[5050]: I1123 16:30:10.680692 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pjgb9\" (UniqueName: \"kubernetes.io/projected/1290f550-e0a1-4d77-9766-3bfb2b0569de-kube-api-access-pjgb9\") pod \"community-operators-58d5w\" (UID: \"1290f550-e0a1-4d77-9766-3bfb2b0569de\") " pod="openshift-marketplace/community-operators-58d5w" Nov 23 16:30:10 crc kubenswrapper[5050]: I1123 16:30:10.680787 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1290f550-e0a1-4d77-9766-3bfb2b0569de-utilities\") pod \"community-operators-58d5w\" (UID: \"1290f550-e0a1-4d77-9766-3bfb2b0569de\") " pod="openshift-marketplace/community-operators-58d5w" Nov 23 16:30:10 crc kubenswrapper[5050]: I1123 16:30:10.681753 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1290f550-e0a1-4d77-9766-3bfb2b0569de-catalog-content\") pod \"community-operators-58d5w\" (UID: \"1290f550-e0a1-4d77-9766-3bfb2b0569de\") " pod="openshift-marketplace/community-operators-58d5w" Nov 23 16:30:10 crc kubenswrapper[5050]: I1123 16:30:10.784949 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pjgb9\" (UniqueName: \"kubernetes.io/projected/1290f550-e0a1-4d77-9766-3bfb2b0569de-kube-api-access-pjgb9\") pod \"community-operators-58d5w\" (UID: \"1290f550-e0a1-4d77-9766-3bfb2b0569de\") " pod="openshift-marketplace/community-operators-58d5w" Nov 23 16:30:10 crc kubenswrapper[5050]: I1123 16:30:10.785537 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1290f550-e0a1-4d77-9766-3bfb2b0569de-utilities\") pod \"community-operators-58d5w\" (UID: \"1290f550-e0a1-4d77-9766-3bfb2b0569de\") " pod="openshift-marketplace/community-operators-58d5w" Nov 23 16:30:10 crc kubenswrapper[5050]: I1123 16:30:10.785742 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1290f550-e0a1-4d77-9766-3bfb2b0569de-catalog-content\") pod \"community-operators-58d5w\" (UID: \"1290f550-e0a1-4d77-9766-3bfb2b0569de\") " pod="openshift-marketplace/community-operators-58d5w" Nov 23 16:30:10 crc kubenswrapper[5050]: I1123 16:30:10.786671 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/1290f550-e0a1-4d77-9766-3bfb2b0569de-catalog-content\") pod \"community-operators-58d5w\" (UID: \"1290f550-e0a1-4d77-9766-3bfb2b0569de\") " pod="openshift-marketplace/community-operators-58d5w" Nov 23 16:30:10 crc kubenswrapper[5050]: I1123 16:30:10.786792 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1290f550-e0a1-4d77-9766-3bfb2b0569de-utilities\") pod \"community-operators-58d5w\" (UID: \"1290f550-e0a1-4d77-9766-3bfb2b0569de\") " pod="openshift-marketplace/community-operators-58d5w" Nov 23 16:30:10 crc kubenswrapper[5050]: I1123 16:30:10.815147 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pjgb9\" (UniqueName: \"kubernetes.io/projected/1290f550-e0a1-4d77-9766-3bfb2b0569de-kube-api-access-pjgb9\") pod \"community-operators-58d5w\" (UID: \"1290f550-e0a1-4d77-9766-3bfb2b0569de\") " pod="openshift-marketplace/community-operators-58d5w" Nov 23 16:30:10 crc kubenswrapper[5050]: I1123 16:30:10.868505 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-58d5w" Nov 23 16:30:11 crc kubenswrapper[5050]: I1123 16:30:11.468567 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-58d5w"] Nov 23 16:30:12 crc kubenswrapper[5050]: I1123 16:30:12.149520 5050 generic.go:334] "Generic (PLEG): container finished" podID="1290f550-e0a1-4d77-9766-3bfb2b0569de" containerID="3240542c539677fcf4ae7e1c7f8ea775d3b56fde51ac931cb9f332ff35702757" exitCode=0 Nov 23 16:30:12 crc kubenswrapper[5050]: I1123 16:30:12.149923 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-58d5w" event={"ID":"1290f550-e0a1-4d77-9766-3bfb2b0569de","Type":"ContainerDied","Data":"3240542c539677fcf4ae7e1c7f8ea775d3b56fde51ac931cb9f332ff35702757"} Nov 23 16:30:12 crc kubenswrapper[5050]: I1123 16:30:12.149998 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-58d5w" event={"ID":"1290f550-e0a1-4d77-9766-3bfb2b0569de","Type":"ContainerStarted","Data":"791a30047d06e5339e9ebc59476c6351ee5dc6e3a23aafe9d07e6f81c289a567"} Nov 23 16:30:13 crc kubenswrapper[5050]: I1123 16:30:13.168238 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-58d5w" event={"ID":"1290f550-e0a1-4d77-9766-3bfb2b0569de","Type":"ContainerStarted","Data":"d07605c91a180a9d178bb0a2f097dab552f38cf6eb6c299cc58f1da41427f14c"} Nov 23 16:30:15 crc kubenswrapper[5050]: I1123 16:30:15.209954 5050 generic.go:334] "Generic (PLEG): container finished" podID="1290f550-e0a1-4d77-9766-3bfb2b0569de" containerID="d07605c91a180a9d178bb0a2f097dab552f38cf6eb6c299cc58f1da41427f14c" exitCode=0 Nov 23 16:30:15 crc kubenswrapper[5050]: I1123 16:30:15.210031 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-58d5w" event={"ID":"1290f550-e0a1-4d77-9766-3bfb2b0569de","Type":"ContainerDied","Data":"d07605c91a180a9d178bb0a2f097dab552f38cf6eb6c299cc58f1da41427f14c"} Nov 23 16:30:17 crc kubenswrapper[5050]: I1123 16:30:17.237873 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-58d5w" event={"ID":"1290f550-e0a1-4d77-9766-3bfb2b0569de","Type":"ContainerStarted","Data":"1687fb8e361d2b27d81c9da5a7a19d7045023512fc90b12b119709ee588cdba8"} Nov 23 16:30:17 crc kubenswrapper[5050]: I1123 16:30:17.281560 
Nov 23 16:30:18 crc kubenswrapper[5050]: I1123 16:30:18.948331 5050 scope.go:117] "RemoveContainer" containerID="9fe892806dd3eb11fe3379fd174be6aa9514b32ac2d48fea80d79a8b04cf4575"
Nov 23 16:30:20 crc kubenswrapper[5050]: I1123 16:30:20.868890 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-58d5w"
Nov 23 16:30:20 crc kubenswrapper[5050]: I1123 16:30:20.869342 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-58d5w"
Nov 23 16:30:20 crc kubenswrapper[5050]: I1123 16:30:20.952055 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-58d5w"
Nov 23 16:30:23 crc kubenswrapper[5050]: I1123 16:30:23.549220 5050 scope.go:117] "RemoveContainer" containerID="c66ef3881ddcc9912b6c270cf4a535647442815ea6b0dd1bc5eb1cfcbc78f486"
Nov 23 16:30:23 crc kubenswrapper[5050]: E1123 16:30:23.550503 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f"
Nov 23 16:30:30 crc kubenswrapper[5050]: I1123 16:30:30.963947 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-58d5w"
Nov 23 16:30:31 crc kubenswrapper[5050]: I1123 16:30:31.053790 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-58d5w"]
Nov 23 16:30:31 crc kubenswrapper[5050]: I1123 16:30:31.437271 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-58d5w" podUID="1290f550-e0a1-4d77-9766-3bfb2b0569de" containerName="registry-server" containerID="cri-o://1687fb8e361d2b27d81c9da5a7a19d7045023512fc90b12b119709ee588cdba8" gracePeriod=2
Nov 23 16:30:32 crc kubenswrapper[5050]: I1123 16:30:32.074016 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-58d5w"
Need to start a new one" pod="openshift-marketplace/community-operators-58d5w" Nov 23 16:30:32 crc kubenswrapper[5050]: I1123 16:30:32.132877 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1290f550-e0a1-4d77-9766-3bfb2b0569de-catalog-content\") pod \"1290f550-e0a1-4d77-9766-3bfb2b0569de\" (UID: \"1290f550-e0a1-4d77-9766-3bfb2b0569de\") " Nov 23 16:30:32 crc kubenswrapper[5050]: I1123 16:30:32.133088 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pjgb9\" (UniqueName: \"kubernetes.io/projected/1290f550-e0a1-4d77-9766-3bfb2b0569de-kube-api-access-pjgb9\") pod \"1290f550-e0a1-4d77-9766-3bfb2b0569de\" (UID: \"1290f550-e0a1-4d77-9766-3bfb2b0569de\") " Nov 23 16:30:32 crc kubenswrapper[5050]: I1123 16:30:32.133617 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1290f550-e0a1-4d77-9766-3bfb2b0569de-utilities\") pod \"1290f550-e0a1-4d77-9766-3bfb2b0569de\" (UID: \"1290f550-e0a1-4d77-9766-3bfb2b0569de\") " Nov 23 16:30:32 crc kubenswrapper[5050]: I1123 16:30:32.134962 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1290f550-e0a1-4d77-9766-3bfb2b0569de-utilities" (OuterVolumeSpecName: "utilities") pod "1290f550-e0a1-4d77-9766-3bfb2b0569de" (UID: "1290f550-e0a1-4d77-9766-3bfb2b0569de"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 16:30:32 crc kubenswrapper[5050]: I1123 16:30:32.139853 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1290f550-e0a1-4d77-9766-3bfb2b0569de-kube-api-access-pjgb9" (OuterVolumeSpecName: "kube-api-access-pjgb9") pod "1290f550-e0a1-4d77-9766-3bfb2b0569de" (UID: "1290f550-e0a1-4d77-9766-3bfb2b0569de"). InnerVolumeSpecName "kube-api-access-pjgb9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 16:30:32 crc kubenswrapper[5050]: I1123 16:30:32.197642 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1290f550-e0a1-4d77-9766-3bfb2b0569de-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1290f550-e0a1-4d77-9766-3bfb2b0569de" (UID: "1290f550-e0a1-4d77-9766-3bfb2b0569de"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 16:30:32 crc kubenswrapper[5050]: I1123 16:30:32.236718 5050 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1290f550-e0a1-4d77-9766-3bfb2b0569de-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 23 16:30:32 crc kubenswrapper[5050]: I1123 16:30:32.236771 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pjgb9\" (UniqueName: \"kubernetes.io/projected/1290f550-e0a1-4d77-9766-3bfb2b0569de-kube-api-access-pjgb9\") on node \"crc\" DevicePath \"\"" Nov 23 16:30:32 crc kubenswrapper[5050]: I1123 16:30:32.236786 5050 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1290f550-e0a1-4d77-9766-3bfb2b0569de-utilities\") on node \"crc\" DevicePath \"\"" Nov 23 16:30:32 crc kubenswrapper[5050]: I1123 16:30:32.450950 5050 generic.go:334] "Generic (PLEG): container finished" podID="1290f550-e0a1-4d77-9766-3bfb2b0569de" containerID="1687fb8e361d2b27d81c9da5a7a19d7045023512fc90b12b119709ee588cdba8" exitCode=0 Nov 23 16:30:32 crc kubenswrapper[5050]: I1123 16:30:32.450998 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-58d5w" Nov 23 16:30:32 crc kubenswrapper[5050]: I1123 16:30:32.451007 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-58d5w" event={"ID":"1290f550-e0a1-4d77-9766-3bfb2b0569de","Type":"ContainerDied","Data":"1687fb8e361d2b27d81c9da5a7a19d7045023512fc90b12b119709ee588cdba8"} Nov 23 16:30:32 crc kubenswrapper[5050]: I1123 16:30:32.451090 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-58d5w" event={"ID":"1290f550-e0a1-4d77-9766-3bfb2b0569de","Type":"ContainerDied","Data":"791a30047d06e5339e9ebc59476c6351ee5dc6e3a23aafe9d07e6f81c289a567"} Nov 23 16:30:32 crc kubenswrapper[5050]: I1123 16:30:32.451113 5050 scope.go:117] "RemoveContainer" containerID="1687fb8e361d2b27d81c9da5a7a19d7045023512fc90b12b119709ee588cdba8" Nov 23 16:30:32 crc kubenswrapper[5050]: I1123 16:30:32.504582 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-58d5w"] Nov 23 16:30:32 crc kubenswrapper[5050]: I1123 16:30:32.507903 5050 scope.go:117] "RemoveContainer" containerID="d07605c91a180a9d178bb0a2f097dab552f38cf6eb6c299cc58f1da41427f14c" Nov 23 16:30:32 crc kubenswrapper[5050]: I1123 16:30:32.516639 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-58d5w"] Nov 23 16:30:32 crc kubenswrapper[5050]: I1123 16:30:32.554043 5050 scope.go:117] "RemoveContainer" containerID="3240542c539677fcf4ae7e1c7f8ea775d3b56fde51ac931cb9f332ff35702757" Nov 23 16:30:32 crc kubenswrapper[5050]: I1123 16:30:32.610074 5050 scope.go:117] "RemoveContainer" containerID="1687fb8e361d2b27d81c9da5a7a19d7045023512fc90b12b119709ee588cdba8" Nov 23 16:30:32 crc kubenswrapper[5050]: E1123 16:30:32.613651 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1687fb8e361d2b27d81c9da5a7a19d7045023512fc90b12b119709ee588cdba8\": container with ID starting with 1687fb8e361d2b27d81c9da5a7a19d7045023512fc90b12b119709ee588cdba8 not found: ID does not exist" containerID="1687fb8e361d2b27d81c9da5a7a19d7045023512fc90b12b119709ee588cdba8" Nov 23 16:30:32 crc kubenswrapper[5050]: I1123 16:30:32.613706 
Nov 23 16:30:32 crc kubenswrapper[5050]: I1123 16:30:32.613741 5050 scope.go:117] "RemoveContainer" containerID="d07605c91a180a9d178bb0a2f097dab552f38cf6eb6c299cc58f1da41427f14c"
Nov 23 16:30:32 crc kubenswrapper[5050]: E1123 16:30:32.615558 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d07605c91a180a9d178bb0a2f097dab552f38cf6eb6c299cc58f1da41427f14c\": container with ID starting with d07605c91a180a9d178bb0a2f097dab552f38cf6eb6c299cc58f1da41427f14c not found: ID does not exist" containerID="d07605c91a180a9d178bb0a2f097dab552f38cf6eb6c299cc58f1da41427f14c"
Nov 23 16:30:32 crc kubenswrapper[5050]: I1123 16:30:32.615626 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d07605c91a180a9d178bb0a2f097dab552f38cf6eb6c299cc58f1da41427f14c"} err="failed to get container status \"d07605c91a180a9d178bb0a2f097dab552f38cf6eb6c299cc58f1da41427f14c\": rpc error: code = NotFound desc = could not find container \"d07605c91a180a9d178bb0a2f097dab552f38cf6eb6c299cc58f1da41427f14c\": container with ID starting with d07605c91a180a9d178bb0a2f097dab552f38cf6eb6c299cc58f1da41427f14c not found: ID does not exist"
Nov 23 16:30:32 crc kubenswrapper[5050]: I1123 16:30:32.615672 5050 scope.go:117] "RemoveContainer" containerID="3240542c539677fcf4ae7e1c7f8ea775d3b56fde51ac931cb9f332ff35702757"
Nov 23 16:30:32 crc kubenswrapper[5050]: E1123 16:30:32.616050 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3240542c539677fcf4ae7e1c7f8ea775d3b56fde51ac931cb9f332ff35702757\": container with ID starting with 3240542c539677fcf4ae7e1c7f8ea775d3b56fde51ac931cb9f332ff35702757 not found: ID does not exist" containerID="3240542c539677fcf4ae7e1c7f8ea775d3b56fde51ac931cb9f332ff35702757"
Nov 23 16:30:32 crc kubenswrapper[5050]: I1123 16:30:32.616097 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3240542c539677fcf4ae7e1c7f8ea775d3b56fde51ac931cb9f332ff35702757"} err="failed to get container status \"3240542c539677fcf4ae7e1c7f8ea775d3b56fde51ac931cb9f332ff35702757\": rpc error: code = NotFound desc = could not find container \"3240542c539677fcf4ae7e1c7f8ea775d3b56fde51ac931cb9f332ff35702757\": container with ID starting with 3240542c539677fcf4ae7e1c7f8ea775d3b56fde51ac931cb9f332ff35702757 not found: ID does not exist"
Nov 23 16:30:33 crc kubenswrapper[5050]: I1123 16:30:33.578314 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1290f550-e0a1-4d77-9766-3bfb2b0569de" path="/var/lib/kubelet/pods/1290f550-e0a1-4d77-9766-3bfb2b0569de/volumes"
Nov 23 16:30:34 crc kubenswrapper[5050]: I1123 16:30:34.551497 5050 scope.go:117] "RemoveContainer" containerID="c66ef3881ddcc9912b6c270cf4a535647442815ea6b0dd1bc5eb1cfcbc78f486"
Nov 23 16:30:34 crc kubenswrapper[5050]: E1123 16:30:34.555283 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f"
err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 16:30:48 crc kubenswrapper[5050]: I1123 16:30:48.549693 5050 scope.go:117] "RemoveContainer" containerID="c66ef3881ddcc9912b6c270cf4a535647442815ea6b0dd1bc5eb1cfcbc78f486" Nov 23 16:30:48 crc kubenswrapper[5050]: E1123 16:30:48.551105 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 16:31:03 crc kubenswrapper[5050]: I1123 16:31:03.550356 5050 scope.go:117] "RemoveContainer" containerID="c66ef3881ddcc9912b6c270cf4a535647442815ea6b0dd1bc5eb1cfcbc78f486" Nov 23 16:31:03 crc kubenswrapper[5050]: E1123 16:31:03.551984 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 16:31:14 crc kubenswrapper[5050]: I1123 16:31:14.550021 5050 scope.go:117] "RemoveContainer" containerID="c66ef3881ddcc9912b6c270cf4a535647442815ea6b0dd1bc5eb1cfcbc78f486" Nov 23 16:31:14 crc kubenswrapper[5050]: E1123 16:31:14.551176 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 16:31:22 crc kubenswrapper[5050]: I1123 16:31:22.431718 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-wwjkc"] Nov 23 16:31:22 crc kubenswrapper[5050]: E1123 16:31:22.433438 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1290f550-e0a1-4d77-9766-3bfb2b0569de" containerName="registry-server" Nov 23 16:31:22 crc kubenswrapper[5050]: I1123 16:31:22.433549 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="1290f550-e0a1-4d77-9766-3bfb2b0569de" containerName="registry-server" Nov 23 16:31:22 crc kubenswrapper[5050]: E1123 16:31:22.433596 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1290f550-e0a1-4d77-9766-3bfb2b0569de" containerName="extract-utilities" Nov 23 16:31:22 crc kubenswrapper[5050]: I1123 16:31:22.433605 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="1290f550-e0a1-4d77-9766-3bfb2b0569de" containerName="extract-utilities" Nov 23 16:31:22 crc kubenswrapper[5050]: E1123 16:31:22.433651 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1290f550-e0a1-4d77-9766-3bfb2b0569de" 
containerName="extract-content" Nov 23 16:31:22 crc kubenswrapper[5050]: I1123 16:31:22.433660 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="1290f550-e0a1-4d77-9766-3bfb2b0569de" containerName="extract-content" Nov 23 16:31:22 crc kubenswrapper[5050]: I1123 16:31:22.434360 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="1290f550-e0a1-4d77-9766-3bfb2b0569de" containerName="registry-server" Nov 23 16:31:22 crc kubenswrapper[5050]: I1123 16:31:22.437387 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-wwjkc" Nov 23 16:31:22 crc kubenswrapper[5050]: I1123 16:31:22.449899 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-wwjkc"] Nov 23 16:31:22 crc kubenswrapper[5050]: I1123 16:31:22.515405 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-grq2k\" (UniqueName: \"kubernetes.io/projected/753b817d-1af0-40c3-a799-77f55013eea6-kube-api-access-grq2k\") pod \"certified-operators-wwjkc\" (UID: \"753b817d-1af0-40c3-a799-77f55013eea6\") " pod="openshift-marketplace/certified-operators-wwjkc" Nov 23 16:31:22 crc kubenswrapper[5050]: I1123 16:31:22.516286 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/753b817d-1af0-40c3-a799-77f55013eea6-utilities\") pod \"certified-operators-wwjkc\" (UID: \"753b817d-1af0-40c3-a799-77f55013eea6\") " pod="openshift-marketplace/certified-operators-wwjkc" Nov 23 16:31:22 crc kubenswrapper[5050]: I1123 16:31:22.516445 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/753b817d-1af0-40c3-a799-77f55013eea6-catalog-content\") pod \"certified-operators-wwjkc\" (UID: \"753b817d-1af0-40c3-a799-77f55013eea6\") " pod="openshift-marketplace/certified-operators-wwjkc" Nov 23 16:31:22 crc kubenswrapper[5050]: I1123 16:31:22.619259 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/753b817d-1af0-40c3-a799-77f55013eea6-utilities\") pod \"certified-operators-wwjkc\" (UID: \"753b817d-1af0-40c3-a799-77f55013eea6\") " pod="openshift-marketplace/certified-operators-wwjkc" Nov 23 16:31:22 crc kubenswrapper[5050]: I1123 16:31:22.619320 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/753b817d-1af0-40c3-a799-77f55013eea6-catalog-content\") pod \"certified-operators-wwjkc\" (UID: \"753b817d-1af0-40c3-a799-77f55013eea6\") " pod="openshift-marketplace/certified-operators-wwjkc" Nov 23 16:31:22 crc kubenswrapper[5050]: I1123 16:31:22.619457 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-grq2k\" (UniqueName: \"kubernetes.io/projected/753b817d-1af0-40c3-a799-77f55013eea6-kube-api-access-grq2k\") pod \"certified-operators-wwjkc\" (UID: \"753b817d-1af0-40c3-a799-77f55013eea6\") " pod="openshift-marketplace/certified-operators-wwjkc" Nov 23 16:31:22 crc kubenswrapper[5050]: I1123 16:31:22.620451 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/753b817d-1af0-40c3-a799-77f55013eea6-catalog-content\") pod \"certified-operators-wwjkc\" (UID: 
\"753b817d-1af0-40c3-a799-77f55013eea6\") " pod="openshift-marketplace/certified-operators-wwjkc" Nov 23 16:31:22 crc kubenswrapper[5050]: I1123 16:31:22.620451 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/753b817d-1af0-40c3-a799-77f55013eea6-utilities\") pod \"certified-operators-wwjkc\" (UID: \"753b817d-1af0-40c3-a799-77f55013eea6\") " pod="openshift-marketplace/certified-operators-wwjkc" Nov 23 16:31:22 crc kubenswrapper[5050]: I1123 16:31:22.640653 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-grq2k\" (UniqueName: \"kubernetes.io/projected/753b817d-1af0-40c3-a799-77f55013eea6-kube-api-access-grq2k\") pod \"certified-operators-wwjkc\" (UID: \"753b817d-1af0-40c3-a799-77f55013eea6\") " pod="openshift-marketplace/certified-operators-wwjkc" Nov 23 16:31:22 crc kubenswrapper[5050]: I1123 16:31:22.774054 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-wwjkc" Nov 23 16:31:23 crc kubenswrapper[5050]: I1123 16:31:23.284606 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-wwjkc"] Nov 23 16:31:24 crc kubenswrapper[5050]: I1123 16:31:24.172361 5050 generic.go:334] "Generic (PLEG): container finished" podID="753b817d-1af0-40c3-a799-77f55013eea6" containerID="b5df007f2d4d35f4d051965f2a9b1aec9b69055c03f5e007a386069101d3faa5" exitCode=0 Nov 23 16:31:24 crc kubenswrapper[5050]: I1123 16:31:24.172529 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wwjkc" event={"ID":"753b817d-1af0-40c3-a799-77f55013eea6","Type":"ContainerDied","Data":"b5df007f2d4d35f4d051965f2a9b1aec9b69055c03f5e007a386069101d3faa5"} Nov 23 16:31:24 crc kubenswrapper[5050]: I1123 16:31:24.172908 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wwjkc" event={"ID":"753b817d-1af0-40c3-a799-77f55013eea6","Type":"ContainerStarted","Data":"99b3fd60b1072b78be4508b0a7e41e993cf4548a8e47daaf061a9f3a39471dd7"} Nov 23 16:31:24 crc kubenswrapper[5050]: I1123 16:31:24.177987 5050 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 23 16:31:26 crc kubenswrapper[5050]: I1123 16:31:26.202415 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wwjkc" event={"ID":"753b817d-1af0-40c3-a799-77f55013eea6","Type":"ContainerStarted","Data":"770b3603b38280edb2ce02fb77be74397449a78f7d1030cc1084cdc0180b00cb"} Nov 23 16:31:27 crc kubenswrapper[5050]: I1123 16:31:27.219665 5050 generic.go:334] "Generic (PLEG): container finished" podID="753b817d-1af0-40c3-a799-77f55013eea6" containerID="770b3603b38280edb2ce02fb77be74397449a78f7d1030cc1084cdc0180b00cb" exitCode=0 Nov 23 16:31:27 crc kubenswrapper[5050]: I1123 16:31:27.219768 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wwjkc" event={"ID":"753b817d-1af0-40c3-a799-77f55013eea6","Type":"ContainerDied","Data":"770b3603b38280edb2ce02fb77be74397449a78f7d1030cc1084cdc0180b00cb"} Nov 23 16:31:28 crc kubenswrapper[5050]: I1123 16:31:28.234985 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wwjkc" event={"ID":"753b817d-1af0-40c3-a799-77f55013eea6","Type":"ContainerStarted","Data":"6eb229b325d5499e7e5af3360b8c6c9b061a4bc67af831fe649f2382882db77d"} Nov 23 16:31:28 crc 
Nov 23 16:31:29 crc kubenswrapper[5050]: I1123 16:31:29.552136 5050 scope.go:117] "RemoveContainer" containerID="c66ef3881ddcc9912b6c270cf4a535647442815ea6b0dd1bc5eb1cfcbc78f486"
Nov 23 16:31:30 crc kubenswrapper[5050]: I1123 16:31:30.263951 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" event={"ID":"1d998909-9470-47ef-87e8-b34f0473682f","Type":"ContainerStarted","Data":"9b4e71185917f1b2a88497f2d03a44bf9689c07a3b5f51a006c85b796109d94d"}
Nov 23 16:31:32 crc kubenswrapper[5050]: I1123 16:31:32.775180 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-wwjkc"
Nov 23 16:31:32 crc kubenswrapper[5050]: I1123 16:31:32.776080 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-wwjkc"
Nov 23 16:31:32 crc kubenswrapper[5050]: I1123 16:31:32.862685 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-wwjkc"
Nov 23 16:31:33 crc kubenswrapper[5050]: I1123 16:31:33.403789 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-wwjkc"
Nov 23 16:31:33 crc kubenswrapper[5050]: I1123 16:31:33.492014 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-wwjkc"]
Nov 23 16:31:35 crc kubenswrapper[5050]: I1123 16:31:35.341601 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-wwjkc" podUID="753b817d-1af0-40c3-a799-77f55013eea6" containerName="registry-server" containerID="cri-o://6eb229b325d5499e7e5af3360b8c6c9b061a4bc67af831fe649f2382882db77d" gracePeriod=2
Nov 23 16:31:35 crc kubenswrapper[5050]: I1123 16:31:35.929358 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-wwjkc"
Need to start a new one" pod="openshift-marketplace/certified-operators-wwjkc" Nov 23 16:31:36 crc kubenswrapper[5050]: I1123 16:31:36.025914 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/753b817d-1af0-40c3-a799-77f55013eea6-catalog-content\") pod \"753b817d-1af0-40c3-a799-77f55013eea6\" (UID: \"753b817d-1af0-40c3-a799-77f55013eea6\") " Nov 23 16:31:36 crc kubenswrapper[5050]: I1123 16:31:36.026004 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/753b817d-1af0-40c3-a799-77f55013eea6-utilities\") pod \"753b817d-1af0-40c3-a799-77f55013eea6\" (UID: \"753b817d-1af0-40c3-a799-77f55013eea6\") " Nov 23 16:31:36 crc kubenswrapper[5050]: I1123 16:31:36.026167 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-grq2k\" (UniqueName: \"kubernetes.io/projected/753b817d-1af0-40c3-a799-77f55013eea6-kube-api-access-grq2k\") pod \"753b817d-1af0-40c3-a799-77f55013eea6\" (UID: \"753b817d-1af0-40c3-a799-77f55013eea6\") " Nov 23 16:31:36 crc kubenswrapper[5050]: I1123 16:31:36.028169 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/753b817d-1af0-40c3-a799-77f55013eea6-utilities" (OuterVolumeSpecName: "utilities") pod "753b817d-1af0-40c3-a799-77f55013eea6" (UID: "753b817d-1af0-40c3-a799-77f55013eea6"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 16:31:36 crc kubenswrapper[5050]: I1123 16:31:36.039843 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/753b817d-1af0-40c3-a799-77f55013eea6-kube-api-access-grq2k" (OuterVolumeSpecName: "kube-api-access-grq2k") pod "753b817d-1af0-40c3-a799-77f55013eea6" (UID: "753b817d-1af0-40c3-a799-77f55013eea6"). InnerVolumeSpecName "kube-api-access-grq2k". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 16:31:36 crc kubenswrapper[5050]: I1123 16:31:36.081031 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/753b817d-1af0-40c3-a799-77f55013eea6-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "753b817d-1af0-40c3-a799-77f55013eea6" (UID: "753b817d-1af0-40c3-a799-77f55013eea6"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 16:31:36 crc kubenswrapper[5050]: I1123 16:31:36.129909 5050 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/753b817d-1af0-40c3-a799-77f55013eea6-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 23 16:31:36 crc kubenswrapper[5050]: I1123 16:31:36.130001 5050 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/753b817d-1af0-40c3-a799-77f55013eea6-utilities\") on node \"crc\" DevicePath \"\"" Nov 23 16:31:36 crc kubenswrapper[5050]: I1123 16:31:36.130018 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-grq2k\" (UniqueName: \"kubernetes.io/projected/753b817d-1af0-40c3-a799-77f55013eea6-kube-api-access-grq2k\") on node \"crc\" DevicePath \"\"" Nov 23 16:31:36 crc kubenswrapper[5050]: I1123 16:31:36.359344 5050 generic.go:334] "Generic (PLEG): container finished" podID="753b817d-1af0-40c3-a799-77f55013eea6" containerID="6eb229b325d5499e7e5af3360b8c6c9b061a4bc67af831fe649f2382882db77d" exitCode=0 Nov 23 16:31:36 crc kubenswrapper[5050]: I1123 16:31:36.359411 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wwjkc" event={"ID":"753b817d-1af0-40c3-a799-77f55013eea6","Type":"ContainerDied","Data":"6eb229b325d5499e7e5af3360b8c6c9b061a4bc67af831fe649f2382882db77d"} Nov 23 16:31:36 crc kubenswrapper[5050]: I1123 16:31:36.359482 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wwjkc" event={"ID":"753b817d-1af0-40c3-a799-77f55013eea6","Type":"ContainerDied","Data":"99b3fd60b1072b78be4508b0a7e41e993cf4548a8e47daaf061a9f3a39471dd7"} Nov 23 16:31:36 crc kubenswrapper[5050]: I1123 16:31:36.359493 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-wwjkc" Nov 23 16:31:36 crc kubenswrapper[5050]: I1123 16:31:36.359509 5050 scope.go:117] "RemoveContainer" containerID="6eb229b325d5499e7e5af3360b8c6c9b061a4bc67af831fe649f2382882db77d" Nov 23 16:31:36 crc kubenswrapper[5050]: I1123 16:31:36.415010 5050 scope.go:117] "RemoveContainer" containerID="770b3603b38280edb2ce02fb77be74397449a78f7d1030cc1084cdc0180b00cb" Nov 23 16:31:36 crc kubenswrapper[5050]: I1123 16:31:36.415407 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-wwjkc"] Nov 23 16:31:36 crc kubenswrapper[5050]: I1123 16:31:36.427161 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-wwjkc"] Nov 23 16:31:36 crc kubenswrapper[5050]: I1123 16:31:36.460963 5050 scope.go:117] "RemoveContainer" containerID="b5df007f2d4d35f4d051965f2a9b1aec9b69055c03f5e007a386069101d3faa5" Nov 23 16:31:36 crc kubenswrapper[5050]: I1123 16:31:36.515157 5050 scope.go:117] "RemoveContainer" containerID="6eb229b325d5499e7e5af3360b8c6c9b061a4bc67af831fe649f2382882db77d" Nov 23 16:31:36 crc kubenswrapper[5050]: E1123 16:31:36.516000 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6eb229b325d5499e7e5af3360b8c6c9b061a4bc67af831fe649f2382882db77d\": container with ID starting with 6eb229b325d5499e7e5af3360b8c6c9b061a4bc67af831fe649f2382882db77d not found: ID does not exist" containerID="6eb229b325d5499e7e5af3360b8c6c9b061a4bc67af831fe649f2382882db77d" Nov 23 16:31:36 crc kubenswrapper[5050]: I1123 16:31:36.516203 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6eb229b325d5499e7e5af3360b8c6c9b061a4bc67af831fe649f2382882db77d"} err="failed to get container status \"6eb229b325d5499e7e5af3360b8c6c9b061a4bc67af831fe649f2382882db77d\": rpc error: code = NotFound desc = could not find container \"6eb229b325d5499e7e5af3360b8c6c9b061a4bc67af831fe649f2382882db77d\": container with ID starting with 6eb229b325d5499e7e5af3360b8c6c9b061a4bc67af831fe649f2382882db77d not found: ID does not exist" Nov 23 16:31:36 crc kubenswrapper[5050]: I1123 16:31:36.516406 5050 scope.go:117] "RemoveContainer" containerID="770b3603b38280edb2ce02fb77be74397449a78f7d1030cc1084cdc0180b00cb" Nov 23 16:31:36 crc kubenswrapper[5050]: E1123 16:31:36.517030 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"770b3603b38280edb2ce02fb77be74397449a78f7d1030cc1084cdc0180b00cb\": container with ID starting with 770b3603b38280edb2ce02fb77be74397449a78f7d1030cc1084cdc0180b00cb not found: ID does not exist" containerID="770b3603b38280edb2ce02fb77be74397449a78f7d1030cc1084cdc0180b00cb" Nov 23 16:31:36 crc kubenswrapper[5050]: I1123 16:31:36.517180 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"770b3603b38280edb2ce02fb77be74397449a78f7d1030cc1084cdc0180b00cb"} err="failed to get container status \"770b3603b38280edb2ce02fb77be74397449a78f7d1030cc1084cdc0180b00cb\": rpc error: code = NotFound desc = could not find container \"770b3603b38280edb2ce02fb77be74397449a78f7d1030cc1084cdc0180b00cb\": container with ID starting with 770b3603b38280edb2ce02fb77be74397449a78f7d1030cc1084cdc0180b00cb not found: ID does not exist" Nov 23 16:31:36 crc kubenswrapper[5050]: I1123 16:31:36.517302 5050 scope.go:117] "RemoveContainer" 
containerID="b5df007f2d4d35f4d051965f2a9b1aec9b69055c03f5e007a386069101d3faa5" Nov 23 16:31:36 crc kubenswrapper[5050]: E1123 16:31:36.517848 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b5df007f2d4d35f4d051965f2a9b1aec9b69055c03f5e007a386069101d3faa5\": container with ID starting with b5df007f2d4d35f4d051965f2a9b1aec9b69055c03f5e007a386069101d3faa5 not found: ID does not exist" containerID="b5df007f2d4d35f4d051965f2a9b1aec9b69055c03f5e007a386069101d3faa5" Nov 23 16:31:36 crc kubenswrapper[5050]: I1123 16:31:36.518238 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b5df007f2d4d35f4d051965f2a9b1aec9b69055c03f5e007a386069101d3faa5"} err="failed to get container status \"b5df007f2d4d35f4d051965f2a9b1aec9b69055c03f5e007a386069101d3faa5\": rpc error: code = NotFound desc = could not find container \"b5df007f2d4d35f4d051965f2a9b1aec9b69055c03f5e007a386069101d3faa5\": container with ID starting with b5df007f2d4d35f4d051965f2a9b1aec9b69055c03f5e007a386069101d3faa5 not found: ID does not exist" Nov 23 16:31:37 crc kubenswrapper[5050]: I1123 16:31:37.567211 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="753b817d-1af0-40c3-a799-77f55013eea6" path="/var/lib/kubelet/pods/753b817d-1af0-40c3-a799-77f55013eea6/volumes" Nov 23 16:32:34 crc kubenswrapper[5050]: I1123 16:32:34.080997 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-db-create-l664c"] Nov 23 16:32:34 crc kubenswrapper[5050]: I1123 16:32:34.093364 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-0d12-account-create-h9wr8"] Nov 23 16:32:34 crc kubenswrapper[5050]: I1123 16:32:34.134503 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-0d12-account-create-h9wr8"] Nov 23 16:32:34 crc kubenswrapper[5050]: I1123 16:32:34.151586 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-db-create-l664c"] Nov 23 16:32:35 crc kubenswrapper[5050]: I1123 16:32:35.568916 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="34b6d8e9-a394-439a-95bd-ed5c9b073581" path="/var/lib/kubelet/pods/34b6d8e9-a394-439a-95bd-ed5c9b073581/volumes" Nov 23 16:32:35 crc kubenswrapper[5050]: I1123 16:32:35.570292 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf8c01a5-ac54-4ce4-adfd-4f267aae7ef0" path="/var/lib/kubelet/pods/bf8c01a5-ac54-4ce4-adfd-4f267aae7ef0/volumes" Nov 23 16:32:51 crc kubenswrapper[5050]: I1123 16:32:51.042383 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-db-sync-qpgwc"] Nov 23 16:32:51 crc kubenswrapper[5050]: I1123 16:32:51.057503 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-db-sync-qpgwc"] Nov 23 16:32:51 crc kubenswrapper[5050]: I1123 16:32:51.564216 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="caa63d8d-4035-4c91-8a74-64b692dd7838" path="/var/lib/kubelet/pods/caa63d8d-4035-4c91-8a74-64b692dd7838/volumes" Nov 23 16:33:19 crc kubenswrapper[5050]: I1123 16:33:19.183918 5050 scope.go:117] "RemoveContainer" containerID="6c80117a9e017ec386d83f6ea689ee8b15abcadcc3f45495abd75afe1abc396d" Nov 23 16:33:19 crc kubenswrapper[5050]: I1123 16:33:19.224631 5050 scope.go:117] "RemoveContainer" containerID="e7d757ef1206a8f367e725b8dbf9fea21ac64a4d36e84a55f3454a5c3edd0fa3" Nov 23 16:33:19 crc kubenswrapper[5050]: I1123 16:33:19.305825 5050 scope.go:117] "RemoveContainer" 
containerID="5ee80f98bb9be001a5ed99e866bff219ebe3c0d4127de00bea8006af0b706e9a" Nov 23 16:33:29 crc kubenswrapper[5050]: I1123 16:33:29.224964 5050 patch_prober.go:28] interesting pod/machine-config-daemon-hlrlq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 23 16:33:29 crc kubenswrapper[5050]: I1123 16:33:29.225884 5050 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 23 16:33:59 crc kubenswrapper[5050]: I1123 16:33:59.224895 5050 patch_prober.go:28] interesting pod/machine-config-daemon-hlrlq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 23 16:33:59 crc kubenswrapper[5050]: I1123 16:33:59.226015 5050 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 23 16:34:29 crc kubenswrapper[5050]: I1123 16:34:29.224419 5050 patch_prober.go:28] interesting pod/machine-config-daemon-hlrlq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 23 16:34:29 crc kubenswrapper[5050]: I1123 16:34:29.228151 5050 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 23 16:34:29 crc kubenswrapper[5050]: I1123 16:34:29.228369 5050 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" Nov 23 16:34:29 crc kubenswrapper[5050]: I1123 16:34:29.230385 5050 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"9b4e71185917f1b2a88497f2d03a44bf9689c07a3b5f51a006c85b796109d94d"} pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 23 16:34:29 crc kubenswrapper[5050]: I1123 16:34:29.230602 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" containerID="cri-o://9b4e71185917f1b2a88497f2d03a44bf9689c07a3b5f51a006c85b796109d94d" gracePeriod=600 Nov 23 16:34:29 crc kubenswrapper[5050]: I1123 16:34:29.818203 5050 generic.go:334] "Generic (PLEG): container finished" podID="1d998909-9470-47ef-87e8-b34f0473682f" 
containerID="9b4e71185917f1b2a88497f2d03a44bf9689c07a3b5f51a006c85b796109d94d" exitCode=0 Nov 23 16:34:29 crc kubenswrapper[5050]: I1123 16:34:29.818294 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" event={"ID":"1d998909-9470-47ef-87e8-b34f0473682f","Type":"ContainerDied","Data":"9b4e71185917f1b2a88497f2d03a44bf9689c07a3b5f51a006c85b796109d94d"} Nov 23 16:34:29 crc kubenswrapper[5050]: I1123 16:34:29.818539 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" event={"ID":"1d998909-9470-47ef-87e8-b34f0473682f","Type":"ContainerStarted","Data":"88b96523e5d1f1e894f0c03539e34cde758cdfd39f9607c3373d355cc9a96f2f"} Nov 23 16:34:29 crc kubenswrapper[5050]: I1123 16:34:29.818562 5050 scope.go:117] "RemoveContainer" containerID="c66ef3881ddcc9912b6c270cf4a535647442815ea6b0dd1bc5eb1cfcbc78f486" Nov 23 16:35:01 crc kubenswrapper[5050]: I1123 16:35:01.081920 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-ff36-account-create-frdwk"] Nov 23 16:35:01 crc kubenswrapper[5050]: I1123 16:35:01.094362 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-db-create-87h94"] Nov 23 16:35:01 crc kubenswrapper[5050]: I1123 16:35:01.109183 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/aodh-ff36-account-create-frdwk"] Nov 23 16:35:01 crc kubenswrapper[5050]: I1123 16:35:01.120045 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/aodh-db-create-87h94"] Nov 23 16:35:01 crc kubenswrapper[5050]: I1123 16:35:01.579674 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="469f5a48-e8fc-4c0c-b148-3a1275c9e2ae" path="/var/lib/kubelet/pods/469f5a48-e8fc-4c0c-b148-3a1275c9e2ae/volumes" Nov 23 16:35:01 crc kubenswrapper[5050]: I1123 16:35:01.586108 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b9acf01-e7ac-40f4-af07-ff0ba015c3cd" path="/var/lib/kubelet/pods/5b9acf01-e7ac-40f4-af07-ff0ba015c3cd/volumes" Nov 23 16:35:17 crc kubenswrapper[5050]: I1123 16:35:17.052357 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-db-sync-92vrc"] Nov 23 16:35:17 crc kubenswrapper[5050]: I1123 16:35:17.061185 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/aodh-db-sync-92vrc"] Nov 23 16:35:17 crc kubenswrapper[5050]: I1123 16:35:17.567732 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49c0f19a-0135-4be9-9d78-8d8813d9d876" path="/var/lib/kubelet/pods/49c0f19a-0135-4be9-9d78-8d8813d9d876/volumes" Nov 23 16:35:19 crc kubenswrapper[5050]: I1123 16:35:19.496482 5050 scope.go:117] "RemoveContainer" containerID="c96a965778da0f778aaa9aeb6801957eb0f9ed943553ca635bd26a7f1bd2af33" Nov 23 16:35:19 crc kubenswrapper[5050]: I1123 16:35:19.575561 5050 scope.go:117] "RemoveContainer" containerID="d3872c1032bde051acc94e17cfcc1ec1dc137ddcaf91264b001a389466a7a1a9" Nov 23 16:35:19 crc kubenswrapper[5050]: I1123 16:35:19.632549 5050 scope.go:117] "RemoveContainer" containerID="c29b42916bc42cb02696fb4b8de68bc1f915c8fad18fc84402fd7c55cd8b30f8" Nov 23 16:35:38 crc kubenswrapper[5050]: I1123 16:35:38.074230 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/manila-f8df-account-create-7w2hg"] Nov 23 16:35:38 crc kubenswrapper[5050]: I1123 16:35:38.091487 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/manila-db-create-67qdh"] Nov 23 16:35:38 crc kubenswrapper[5050]: I1123 
Nov 23 16:35:38 crc kubenswrapper[5050]: I1123 16:35:38.119340 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/manila-db-create-67qdh"]
Nov 23 16:35:39 crc kubenswrapper[5050]: I1123 16:35:39.569250 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dbfbd4d0-c340-4fdc-ac31-152b141957eb" path="/var/lib/kubelet/pods/dbfbd4d0-c340-4fdc-ac31-152b141957eb/volumes"
Nov 23 16:35:39 crc kubenswrapper[5050]: I1123 16:35:39.570379 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fae4322c-6472-4a04-8bd6-b5703fd6dbd5" path="/var/lib/kubelet/pods/fae4322c-6472-4a04-8bd6-b5703fd6dbd5/volumes"
Nov 23 16:35:51 crc kubenswrapper[5050]: I1123 16:35:51.065247 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/manila-db-sync-jjgfp"]
Nov 23 16:35:51 crc kubenswrapper[5050]: I1123 16:35:51.083916 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/manila-db-sync-jjgfp"]
Nov 23 16:35:51 crc kubenswrapper[5050]: I1123 16:35:51.568885 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="55f77b8e-9262-4c72-a63b-7a71959fa156" path="/var/lib/kubelet/pods/55f77b8e-9262-4c72-a63b-7a71959fa156/volumes"
Nov 23 16:36:19 crc kubenswrapper[5050]: I1123 16:36:19.824151 5050 scope.go:117] "RemoveContainer" containerID="4f3b8970c5734a3ffb958357facf1c25e5b926a2c2faaa5589697f06c1fa3966"
Nov 23 16:36:19 crc kubenswrapper[5050]: I1123 16:36:19.882048 5050 scope.go:117] "RemoveContainer" containerID="a9b1bb09aa85270bce6ca938ebecc057d46e5ff528a30d43750354c836390916"
Nov 23 16:36:19 crc kubenswrapper[5050]: I1123 16:36:19.932664 5050 scope.go:117] "RemoveContainer" containerID="101e900c816f095f6a234fbc3d346d71c6c7b6d60a1efb9919ddba37f960bac4"
Nov 23 16:36:29 crc kubenswrapper[5050]: I1123 16:36:29.224618 5050 patch_prober.go:28] interesting pod/machine-config-daemon-hlrlq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 23 16:36:29 crc kubenswrapper[5050]: I1123 16:36:29.225705 5050 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 23 16:36:59 crc kubenswrapper[5050]: I1123 16:36:59.224798 5050 patch_prober.go:28] interesting pod/machine-config-daemon-hlrlq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 23 16:36:59 crc kubenswrapper[5050]: I1123 16:36:59.225771 5050 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 23 16:37:00 crc kubenswrapper[5050]: I1123 16:37:00.338597 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-xbwtv"]
Nov 23 16:37:00 crc kubenswrapper[5050]: E1123 16:37:00.340356 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="753b817d-1af0-40c3-a799-77f55013eea6" containerName="extract-utilities"
Nov 23 16:37:00 crc kubenswrapper[5050]: I1123 16:37:00.340386 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="753b817d-1af0-40c3-a799-77f55013eea6" containerName="extract-utilities"
Nov 23 16:37:00 crc kubenswrapper[5050]: E1123 16:37:00.340425 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="753b817d-1af0-40c3-a799-77f55013eea6" containerName="registry-server"
Nov 23 16:37:00 crc kubenswrapper[5050]: I1123 16:37:00.340441 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="753b817d-1af0-40c3-a799-77f55013eea6" containerName="registry-server"
Nov 23 16:37:00 crc kubenswrapper[5050]: E1123 16:37:00.340555 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="753b817d-1af0-40c3-a799-77f55013eea6" containerName="extract-content"
Nov 23 16:37:00 crc kubenswrapper[5050]: I1123 16:37:00.340571 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="753b817d-1af0-40c3-a799-77f55013eea6" containerName="extract-content"
Nov 23 16:37:00 crc kubenswrapper[5050]: I1123 16:37:00.341007 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="753b817d-1af0-40c3-a799-77f55013eea6" containerName="registry-server"
Nov 23 16:37:00 crc kubenswrapper[5050]: I1123 16:37:00.347622 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-xbwtv"
Nov 23 16:37:00 crc kubenswrapper[5050]: I1123 16:37:00.365950 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-xbwtv"]
Nov 23 16:37:00 crc kubenswrapper[5050]: I1123 16:37:00.372915 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gmtwm\" (UniqueName: \"kubernetes.io/projected/066c8d57-d25c-453e-ad80-df85fe064b5f-kube-api-access-gmtwm\") pod \"redhat-marketplace-xbwtv\" (UID: \"066c8d57-d25c-453e-ad80-df85fe064b5f\") " pod="openshift-marketplace/redhat-marketplace-xbwtv"
Nov 23 16:37:00 crc kubenswrapper[5050]: I1123 16:37:00.373034 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/066c8d57-d25c-453e-ad80-df85fe064b5f-utilities\") pod \"redhat-marketplace-xbwtv\" (UID: \"066c8d57-d25c-453e-ad80-df85fe064b5f\") " pod="openshift-marketplace/redhat-marketplace-xbwtv"
Nov 23 16:37:00 crc kubenswrapper[5050]: I1123 16:37:00.373297 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/066c8d57-d25c-453e-ad80-df85fe064b5f-catalog-content\") pod \"redhat-marketplace-xbwtv\" (UID: \"066c8d57-d25c-453e-ad80-df85fe064b5f\") " pod="openshift-marketplace/redhat-marketplace-xbwtv"
Nov 23 16:37:00 crc kubenswrapper[5050]: I1123 16:37:00.477423 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/066c8d57-d25c-453e-ad80-df85fe064b5f-catalog-content\") pod \"redhat-marketplace-xbwtv\" (UID: \"066c8d57-d25c-453e-ad80-df85fe064b5f\") " pod="openshift-marketplace/redhat-marketplace-xbwtv"
Nov 23 16:37:00 crc kubenswrapper[5050]: I1123 16:37:00.476594 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/066c8d57-d25c-453e-ad80-df85fe064b5f-catalog-content\") pod \"redhat-marketplace-xbwtv\" (UID: \"066c8d57-d25c-453e-ad80-df85fe064b5f\") " pod="openshift-marketplace/redhat-marketplace-xbwtv"
(UniqueName: \"kubernetes.io/empty-dir/066c8d57-d25c-453e-ad80-df85fe064b5f-catalog-content\") pod \"redhat-marketplace-xbwtv\" (UID: \"066c8d57-d25c-453e-ad80-df85fe064b5f\") " pod="openshift-marketplace/redhat-marketplace-xbwtv" Nov 23 16:37:00 crc kubenswrapper[5050]: I1123 16:37:00.477739 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gmtwm\" (UniqueName: \"kubernetes.io/projected/066c8d57-d25c-453e-ad80-df85fe064b5f-kube-api-access-gmtwm\") pod \"redhat-marketplace-xbwtv\" (UID: \"066c8d57-d25c-453e-ad80-df85fe064b5f\") " pod="openshift-marketplace/redhat-marketplace-xbwtv" Nov 23 16:37:00 crc kubenswrapper[5050]: I1123 16:37:00.478362 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/066c8d57-d25c-453e-ad80-df85fe064b5f-utilities\") pod \"redhat-marketplace-xbwtv\" (UID: \"066c8d57-d25c-453e-ad80-df85fe064b5f\") " pod="openshift-marketplace/redhat-marketplace-xbwtv" Nov 23 16:37:00 crc kubenswrapper[5050]: I1123 16:37:00.478918 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/066c8d57-d25c-453e-ad80-df85fe064b5f-utilities\") pod \"redhat-marketplace-xbwtv\" (UID: \"066c8d57-d25c-453e-ad80-df85fe064b5f\") " pod="openshift-marketplace/redhat-marketplace-xbwtv" Nov 23 16:37:00 crc kubenswrapper[5050]: I1123 16:37:00.507830 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gmtwm\" (UniqueName: \"kubernetes.io/projected/066c8d57-d25c-453e-ad80-df85fe064b5f-kube-api-access-gmtwm\") pod \"redhat-marketplace-xbwtv\" (UID: \"066c8d57-d25c-453e-ad80-df85fe064b5f\") " pod="openshift-marketplace/redhat-marketplace-xbwtv" Nov 23 16:37:00 crc kubenswrapper[5050]: I1123 16:37:00.670421 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-xbwtv" Nov 23 16:37:00 crc kubenswrapper[5050]: I1123 16:37:00.962684 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-h9ttt"] Nov 23 16:37:00 crc kubenswrapper[5050]: I1123 16:37:00.966302 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-h9ttt" Nov 23 16:37:00 crc kubenswrapper[5050]: I1123 16:37:00.976742 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-h9ttt"] Nov 23 16:37:01 crc kubenswrapper[5050]: I1123 16:37:01.022774 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r4rg4\" (UniqueName: \"kubernetes.io/projected/ffa465e1-5174-449f-94d8-e7a15bca8eae-kube-api-access-r4rg4\") pod \"redhat-operators-h9ttt\" (UID: \"ffa465e1-5174-449f-94d8-e7a15bca8eae\") " pod="openshift-marketplace/redhat-operators-h9ttt" Nov 23 16:37:01 crc kubenswrapper[5050]: I1123 16:37:01.022961 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ffa465e1-5174-449f-94d8-e7a15bca8eae-utilities\") pod \"redhat-operators-h9ttt\" (UID: \"ffa465e1-5174-449f-94d8-e7a15bca8eae\") " pod="openshift-marketplace/redhat-operators-h9ttt" Nov 23 16:37:01 crc kubenswrapper[5050]: I1123 16:37:01.023012 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ffa465e1-5174-449f-94d8-e7a15bca8eae-catalog-content\") pod \"redhat-operators-h9ttt\" (UID: \"ffa465e1-5174-449f-94d8-e7a15bca8eae\") " pod="openshift-marketplace/redhat-operators-h9ttt" Nov 23 16:37:01 crc kubenswrapper[5050]: I1123 16:37:01.126533 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ffa465e1-5174-449f-94d8-e7a15bca8eae-catalog-content\") pod \"redhat-operators-h9ttt\" (UID: \"ffa465e1-5174-449f-94d8-e7a15bca8eae\") " pod="openshift-marketplace/redhat-operators-h9ttt" Nov 23 16:37:01 crc kubenswrapper[5050]: I1123 16:37:01.126670 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r4rg4\" (UniqueName: \"kubernetes.io/projected/ffa465e1-5174-449f-94d8-e7a15bca8eae-kube-api-access-r4rg4\") pod \"redhat-operators-h9ttt\" (UID: \"ffa465e1-5174-449f-94d8-e7a15bca8eae\") " pod="openshift-marketplace/redhat-operators-h9ttt" Nov 23 16:37:01 crc kubenswrapper[5050]: I1123 16:37:01.126816 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ffa465e1-5174-449f-94d8-e7a15bca8eae-utilities\") pod \"redhat-operators-h9ttt\" (UID: \"ffa465e1-5174-449f-94d8-e7a15bca8eae\") " pod="openshift-marketplace/redhat-operators-h9ttt" Nov 23 16:37:01 crc kubenswrapper[5050]: I1123 16:37:01.127764 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ffa465e1-5174-449f-94d8-e7a15bca8eae-catalog-content\") pod \"redhat-operators-h9ttt\" (UID: \"ffa465e1-5174-449f-94d8-e7a15bca8eae\") " pod="openshift-marketplace/redhat-operators-h9ttt" Nov 23 16:37:01 crc kubenswrapper[5050]: I1123 16:37:01.127828 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ffa465e1-5174-449f-94d8-e7a15bca8eae-utilities\") pod \"redhat-operators-h9ttt\" (UID: \"ffa465e1-5174-449f-94d8-e7a15bca8eae\") " pod="openshift-marketplace/redhat-operators-h9ttt" Nov 23 16:37:01 crc kubenswrapper[5050]: I1123 16:37:01.167834 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-r4rg4\" (UniqueName: \"kubernetes.io/projected/ffa465e1-5174-449f-94d8-e7a15bca8eae-kube-api-access-r4rg4\") pod \"redhat-operators-h9ttt\" (UID: \"ffa465e1-5174-449f-94d8-e7a15bca8eae\") " pod="openshift-marketplace/redhat-operators-h9ttt" Nov 23 16:37:01 crc kubenswrapper[5050]: I1123 16:37:01.330540 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-h9ttt" Nov 23 16:37:01 crc kubenswrapper[5050]: I1123 16:37:01.569372 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-xbwtv"] Nov 23 16:37:01 crc kubenswrapper[5050]: I1123 16:37:01.884123 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-h9ttt"] Nov 23 16:37:01 crc kubenswrapper[5050]: W1123 16:37:01.947093 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podffa465e1_5174_449f_94d8_e7a15bca8eae.slice/crio-79e87371fa799830ade760910fbbe12a2a6a27d72f1221f599f5258e82c3cce6 WatchSource:0}: Error finding container 79e87371fa799830ade760910fbbe12a2a6a27d72f1221f599f5258e82c3cce6: Status 404 returned error can't find the container with id 79e87371fa799830ade760910fbbe12a2a6a27d72f1221f599f5258e82c3cce6 Nov 23 16:37:02 crc kubenswrapper[5050]: I1123 16:37:02.039914 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-h9ttt" event={"ID":"ffa465e1-5174-449f-94d8-e7a15bca8eae","Type":"ContainerStarted","Data":"79e87371fa799830ade760910fbbe12a2a6a27d72f1221f599f5258e82c3cce6"} Nov 23 16:37:02 crc kubenswrapper[5050]: I1123 16:37:02.042690 5050 generic.go:334] "Generic (PLEG): container finished" podID="066c8d57-d25c-453e-ad80-df85fe064b5f" containerID="88c5ed24118d5fdfb85e11fec11f40d35add93320f5e0cefda22bc40b47bb2c5" exitCode=0 Nov 23 16:37:02 crc kubenswrapper[5050]: I1123 16:37:02.042753 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xbwtv" event={"ID":"066c8d57-d25c-453e-ad80-df85fe064b5f","Type":"ContainerDied","Data":"88c5ed24118d5fdfb85e11fec11f40d35add93320f5e0cefda22bc40b47bb2c5"} Nov 23 16:37:02 crc kubenswrapper[5050]: I1123 16:37:02.042787 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xbwtv" event={"ID":"066c8d57-d25c-453e-ad80-df85fe064b5f","Type":"ContainerStarted","Data":"7ce388ce87ce64dddca249a4d493fa5b194ed3a1f44c8b1cbc3be3b8d2d58719"} Nov 23 16:37:02 crc kubenswrapper[5050]: I1123 16:37:02.046016 5050 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 23 16:37:02 crc kubenswrapper[5050]: E1123 16:37:02.433029 5050 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podffa465e1_5174_449f_94d8_e7a15bca8eae.slice/crio-conmon-a19cbb4e900a710d8e1bc5b0a717cb8e385c0453c50a0b7e23da4964e121f3fb.scope\": RecentStats: unable to find data in memory cache]" Nov 23 16:37:03 crc kubenswrapper[5050]: I1123 16:37:03.054563 5050 generic.go:334] "Generic (PLEG): container finished" podID="ffa465e1-5174-449f-94d8-e7a15bca8eae" containerID="a19cbb4e900a710d8e1bc5b0a717cb8e385c0453c50a0b7e23da4964e121f3fb" exitCode=0 Nov 23 16:37:03 crc kubenswrapper[5050]: I1123 16:37:03.054660 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/redhat-operators-h9ttt" event={"ID":"ffa465e1-5174-449f-94d8-e7a15bca8eae","Type":"ContainerDied","Data":"a19cbb4e900a710d8e1bc5b0a717cb8e385c0453c50a0b7e23da4964e121f3fb"} Nov 23 16:37:03 crc kubenswrapper[5050]: I1123 16:37:03.057325 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xbwtv" event={"ID":"066c8d57-d25c-453e-ad80-df85fe064b5f","Type":"ContainerStarted","Data":"111a39647f4c00b63dfc7d3675e54b93e9d346de1182257939af58b41527dceb"} Nov 23 16:37:04 crc kubenswrapper[5050]: I1123 16:37:04.079803 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-h9ttt" event={"ID":"ffa465e1-5174-449f-94d8-e7a15bca8eae","Type":"ContainerStarted","Data":"606e1cdd29f7084cd4a0ba0a408bf342ccb131cf215a9011702b3cb5ea37ace8"} Nov 23 16:37:04 crc kubenswrapper[5050]: I1123 16:37:04.083261 5050 generic.go:334] "Generic (PLEG): container finished" podID="066c8d57-d25c-453e-ad80-df85fe064b5f" containerID="111a39647f4c00b63dfc7d3675e54b93e9d346de1182257939af58b41527dceb" exitCode=0 Nov 23 16:37:04 crc kubenswrapper[5050]: I1123 16:37:04.083335 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xbwtv" event={"ID":"066c8d57-d25c-453e-ad80-df85fe064b5f","Type":"ContainerDied","Data":"111a39647f4c00b63dfc7d3675e54b93e9d346de1182257939af58b41527dceb"} Nov 23 16:37:05 crc kubenswrapper[5050]: I1123 16:37:05.112065 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xbwtv" event={"ID":"066c8d57-d25c-453e-ad80-df85fe064b5f","Type":"ContainerStarted","Data":"70e91b1bdda8418ff707b8f7b12580a07146969394f4aa6048120156bc1806d0"} Nov 23 16:37:05 crc kubenswrapper[5050]: I1123 16:37:05.149066 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-xbwtv" podStartSLOduration=2.661486836 podStartE2EDuration="5.149041731s" podCreationTimestamp="2025-11-23 16:37:00 +0000 UTC" firstStartedPulling="2025-11-23 16:37:02.045687127 +0000 UTC m=+6917.212683612" lastFinishedPulling="2025-11-23 16:37:04.533241982 +0000 UTC m=+6919.700238507" observedRunningTime="2025-11-23 16:37:05.136210529 +0000 UTC m=+6920.303207014" watchObservedRunningTime="2025-11-23 16:37:05.149041731 +0000 UTC m=+6920.316038216" Nov 23 16:37:09 crc kubenswrapper[5050]: I1123 16:37:09.164243 5050 generic.go:334] "Generic (PLEG): container finished" podID="ffa465e1-5174-449f-94d8-e7a15bca8eae" containerID="606e1cdd29f7084cd4a0ba0a408bf342ccb131cf215a9011702b3cb5ea37ace8" exitCode=0 Nov 23 16:37:09 crc kubenswrapper[5050]: I1123 16:37:09.164358 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-h9ttt" event={"ID":"ffa465e1-5174-449f-94d8-e7a15bca8eae","Type":"ContainerDied","Data":"606e1cdd29f7084cd4a0ba0a408bf342ccb131cf215a9011702b3cb5ea37ace8"} Nov 23 16:37:10 crc kubenswrapper[5050]: I1123 16:37:10.177413 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-h9ttt" event={"ID":"ffa465e1-5174-449f-94d8-e7a15bca8eae","Type":"ContainerStarted","Data":"ce98f00af1561ebd12599646c20773c9cebc5d25c6f7fe7897121ee1c82fc2a4"} Nov 23 16:37:10 crc kubenswrapper[5050]: I1123 16:37:10.208920 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-h9ttt" podStartSLOduration=3.531278659 podStartE2EDuration="10.208897759s" 
podCreationTimestamp="2025-11-23 16:37:00 +0000 UTC" firstStartedPulling="2025-11-23 16:37:03.057561919 +0000 UTC m=+6918.224558404" lastFinishedPulling="2025-11-23 16:37:09.735180989 +0000 UTC m=+6924.902177504" observedRunningTime="2025-11-23 16:37:10.200372428 +0000 UTC m=+6925.367368913" watchObservedRunningTime="2025-11-23 16:37:10.208897759 +0000 UTC m=+6925.375894254" Nov 23 16:37:10 crc kubenswrapper[5050]: I1123 16:37:10.671745 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-xbwtv" Nov 23 16:37:10 crc kubenswrapper[5050]: I1123 16:37:10.671845 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-xbwtv" Nov 23 16:37:10 crc kubenswrapper[5050]: I1123 16:37:10.749481 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-xbwtv" Nov 23 16:37:11 crc kubenswrapper[5050]: I1123 16:37:11.270224 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-xbwtv" Nov 23 16:37:11 crc kubenswrapper[5050]: I1123 16:37:11.331414 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-h9ttt" Nov 23 16:37:11 crc kubenswrapper[5050]: I1123 16:37:11.331518 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-h9ttt" Nov 23 16:37:12 crc kubenswrapper[5050]: I1123 16:37:12.383763 5050 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-h9ttt" podUID="ffa465e1-5174-449f-94d8-e7a15bca8eae" containerName="registry-server" probeResult="failure" output=< Nov 23 16:37:12 crc kubenswrapper[5050]: timeout: failed to connect service ":50051" within 1s Nov 23 16:37:12 crc kubenswrapper[5050]: > Nov 23 16:37:13 crc kubenswrapper[5050]: I1123 16:37:13.310748 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-xbwtv"] Nov 23 16:37:13 crc kubenswrapper[5050]: I1123 16:37:13.311685 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-xbwtv" podUID="066c8d57-d25c-453e-ad80-df85fe064b5f" containerName="registry-server" containerID="cri-o://70e91b1bdda8418ff707b8f7b12580a07146969394f4aa6048120156bc1806d0" gracePeriod=2 Nov 23 16:37:13 crc kubenswrapper[5050]: I1123 16:37:13.848620 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-xbwtv" Nov 23 16:37:13 crc kubenswrapper[5050]: I1123 16:37:13.912245 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/066c8d57-d25c-453e-ad80-df85fe064b5f-utilities\") pod \"066c8d57-d25c-453e-ad80-df85fe064b5f\" (UID: \"066c8d57-d25c-453e-ad80-df85fe064b5f\") " Nov 23 16:37:13 crc kubenswrapper[5050]: I1123 16:37:13.912578 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/066c8d57-d25c-453e-ad80-df85fe064b5f-catalog-content\") pod \"066c8d57-d25c-453e-ad80-df85fe064b5f\" (UID: \"066c8d57-d25c-453e-ad80-df85fe064b5f\") " Nov 23 16:37:13 crc kubenswrapper[5050]: I1123 16:37:13.913022 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gmtwm\" (UniqueName: \"kubernetes.io/projected/066c8d57-d25c-453e-ad80-df85fe064b5f-kube-api-access-gmtwm\") pod \"066c8d57-d25c-453e-ad80-df85fe064b5f\" (UID: \"066c8d57-d25c-453e-ad80-df85fe064b5f\") " Nov 23 16:37:13 crc kubenswrapper[5050]: I1123 16:37:13.913626 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/066c8d57-d25c-453e-ad80-df85fe064b5f-utilities" (OuterVolumeSpecName: "utilities") pod "066c8d57-d25c-453e-ad80-df85fe064b5f" (UID: "066c8d57-d25c-453e-ad80-df85fe064b5f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 16:37:13 crc kubenswrapper[5050]: I1123 16:37:13.915040 5050 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/066c8d57-d25c-453e-ad80-df85fe064b5f-utilities\") on node \"crc\" DevicePath \"\"" Nov 23 16:37:13 crc kubenswrapper[5050]: I1123 16:37:13.921125 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/066c8d57-d25c-453e-ad80-df85fe064b5f-kube-api-access-gmtwm" (OuterVolumeSpecName: "kube-api-access-gmtwm") pod "066c8d57-d25c-453e-ad80-df85fe064b5f" (UID: "066c8d57-d25c-453e-ad80-df85fe064b5f"). InnerVolumeSpecName "kube-api-access-gmtwm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 16:37:13 crc kubenswrapper[5050]: I1123 16:37:13.929321 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/066c8d57-d25c-453e-ad80-df85fe064b5f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "066c8d57-d25c-453e-ad80-df85fe064b5f" (UID: "066c8d57-d25c-453e-ad80-df85fe064b5f"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 16:37:14 crc kubenswrapper[5050]: I1123 16:37:14.018710 5050 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/066c8d57-d25c-453e-ad80-df85fe064b5f-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 23 16:37:14 crc kubenswrapper[5050]: I1123 16:37:14.018764 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gmtwm\" (UniqueName: \"kubernetes.io/projected/066c8d57-d25c-453e-ad80-df85fe064b5f-kube-api-access-gmtwm\") on node \"crc\" DevicePath \"\"" Nov 23 16:37:14 crc kubenswrapper[5050]: I1123 16:37:14.227056 5050 generic.go:334] "Generic (PLEG): container finished" podID="066c8d57-d25c-453e-ad80-df85fe064b5f" containerID="70e91b1bdda8418ff707b8f7b12580a07146969394f4aa6048120156bc1806d0" exitCode=0 Nov 23 16:37:14 crc kubenswrapper[5050]: I1123 16:37:14.227118 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xbwtv" event={"ID":"066c8d57-d25c-453e-ad80-df85fe064b5f","Type":"ContainerDied","Data":"70e91b1bdda8418ff707b8f7b12580a07146969394f4aa6048120156bc1806d0"} Nov 23 16:37:14 crc kubenswrapper[5050]: I1123 16:37:14.227171 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xbwtv" event={"ID":"066c8d57-d25c-453e-ad80-df85fe064b5f","Type":"ContainerDied","Data":"7ce388ce87ce64dddca249a4d493fa5b194ed3a1f44c8b1cbc3be3b8d2d58719"} Nov 23 16:37:14 crc kubenswrapper[5050]: I1123 16:37:14.227200 5050 scope.go:117] "RemoveContainer" containerID="70e91b1bdda8418ff707b8f7b12580a07146969394f4aa6048120156bc1806d0" Nov 23 16:37:14 crc kubenswrapper[5050]: I1123 16:37:14.227232 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-xbwtv" Nov 23 16:37:14 crc kubenswrapper[5050]: I1123 16:37:14.266845 5050 scope.go:117] "RemoveContainer" containerID="111a39647f4c00b63dfc7d3675e54b93e9d346de1182257939af58b41527dceb" Nov 23 16:37:14 crc kubenswrapper[5050]: I1123 16:37:14.293125 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-xbwtv"] Nov 23 16:37:14 crc kubenswrapper[5050]: I1123 16:37:14.307123 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-xbwtv"] Nov 23 16:37:14 crc kubenswrapper[5050]: I1123 16:37:14.321782 5050 scope.go:117] "RemoveContainer" containerID="88c5ed24118d5fdfb85e11fec11f40d35add93320f5e0cefda22bc40b47bb2c5" Nov 23 16:37:14 crc kubenswrapper[5050]: I1123 16:37:14.377902 5050 scope.go:117] "RemoveContainer" containerID="70e91b1bdda8418ff707b8f7b12580a07146969394f4aa6048120156bc1806d0" Nov 23 16:37:14 crc kubenswrapper[5050]: E1123 16:37:14.378758 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"70e91b1bdda8418ff707b8f7b12580a07146969394f4aa6048120156bc1806d0\": container with ID starting with 70e91b1bdda8418ff707b8f7b12580a07146969394f4aa6048120156bc1806d0 not found: ID does not exist" containerID="70e91b1bdda8418ff707b8f7b12580a07146969394f4aa6048120156bc1806d0" Nov 23 16:37:14 crc kubenswrapper[5050]: I1123 16:37:14.378891 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"70e91b1bdda8418ff707b8f7b12580a07146969394f4aa6048120156bc1806d0"} err="failed to get container status \"70e91b1bdda8418ff707b8f7b12580a07146969394f4aa6048120156bc1806d0\": rpc error: code = NotFound desc = could not find container \"70e91b1bdda8418ff707b8f7b12580a07146969394f4aa6048120156bc1806d0\": container with ID starting with 70e91b1bdda8418ff707b8f7b12580a07146969394f4aa6048120156bc1806d0 not found: ID does not exist" Nov 23 16:37:14 crc kubenswrapper[5050]: I1123 16:37:14.378948 5050 scope.go:117] "RemoveContainer" containerID="111a39647f4c00b63dfc7d3675e54b93e9d346de1182257939af58b41527dceb" Nov 23 16:37:14 crc kubenswrapper[5050]: E1123 16:37:14.379505 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"111a39647f4c00b63dfc7d3675e54b93e9d346de1182257939af58b41527dceb\": container with ID starting with 111a39647f4c00b63dfc7d3675e54b93e9d346de1182257939af58b41527dceb not found: ID does not exist" containerID="111a39647f4c00b63dfc7d3675e54b93e9d346de1182257939af58b41527dceb" Nov 23 16:37:14 crc kubenswrapper[5050]: I1123 16:37:14.379572 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"111a39647f4c00b63dfc7d3675e54b93e9d346de1182257939af58b41527dceb"} err="failed to get container status \"111a39647f4c00b63dfc7d3675e54b93e9d346de1182257939af58b41527dceb\": rpc error: code = NotFound desc = could not find container \"111a39647f4c00b63dfc7d3675e54b93e9d346de1182257939af58b41527dceb\": container with ID starting with 111a39647f4c00b63dfc7d3675e54b93e9d346de1182257939af58b41527dceb not found: ID does not exist" Nov 23 16:37:14 crc kubenswrapper[5050]: I1123 16:37:14.379610 5050 scope.go:117] "RemoveContainer" containerID="88c5ed24118d5fdfb85e11fec11f40d35add93320f5e0cefda22bc40b47bb2c5" Nov 23 16:37:14 crc kubenswrapper[5050]: E1123 16:37:14.380108 5050 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"88c5ed24118d5fdfb85e11fec11f40d35add93320f5e0cefda22bc40b47bb2c5\": container with ID starting with 88c5ed24118d5fdfb85e11fec11f40d35add93320f5e0cefda22bc40b47bb2c5 not found: ID does not exist" containerID="88c5ed24118d5fdfb85e11fec11f40d35add93320f5e0cefda22bc40b47bb2c5" Nov 23 16:37:14 crc kubenswrapper[5050]: I1123 16:37:14.380164 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"88c5ed24118d5fdfb85e11fec11f40d35add93320f5e0cefda22bc40b47bb2c5"} err="failed to get container status \"88c5ed24118d5fdfb85e11fec11f40d35add93320f5e0cefda22bc40b47bb2c5\": rpc error: code = NotFound desc = could not find container \"88c5ed24118d5fdfb85e11fec11f40d35add93320f5e0cefda22bc40b47bb2c5\": container with ID starting with 88c5ed24118d5fdfb85e11fec11f40d35add93320f5e0cefda22bc40b47bb2c5 not found: ID does not exist" Nov 23 16:37:15 crc kubenswrapper[5050]: I1123 16:37:15.874809 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="066c8d57-d25c-453e-ad80-df85fe064b5f" path="/var/lib/kubelet/pods/066c8d57-d25c-453e-ad80-df85fe064b5f/volumes" Nov 23 16:37:21 crc kubenswrapper[5050]: I1123 16:37:21.420974 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-h9ttt" Nov 23 16:37:21 crc kubenswrapper[5050]: I1123 16:37:21.483190 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-h9ttt" Nov 23 16:37:21 crc kubenswrapper[5050]: I1123 16:37:21.679139 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-h9ttt"] Nov 23 16:37:23 crc kubenswrapper[5050]: I1123 16:37:23.364435 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-h9ttt" podUID="ffa465e1-5174-449f-94d8-e7a15bca8eae" containerName="registry-server" containerID="cri-o://ce98f00af1561ebd12599646c20773c9cebc5d25c6f7fe7897121ee1c82fc2a4" gracePeriod=2 Nov 23 16:37:24 crc kubenswrapper[5050]: I1123 16:37:24.024691 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-h9ttt" Nov 23 16:37:24 crc kubenswrapper[5050]: I1123 16:37:24.144021 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ffa465e1-5174-449f-94d8-e7a15bca8eae-catalog-content\") pod \"ffa465e1-5174-449f-94d8-e7a15bca8eae\" (UID: \"ffa465e1-5174-449f-94d8-e7a15bca8eae\") " Nov 23 16:37:24 crc kubenswrapper[5050]: I1123 16:37:24.144125 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r4rg4\" (UniqueName: \"kubernetes.io/projected/ffa465e1-5174-449f-94d8-e7a15bca8eae-kube-api-access-r4rg4\") pod \"ffa465e1-5174-449f-94d8-e7a15bca8eae\" (UID: \"ffa465e1-5174-449f-94d8-e7a15bca8eae\") " Nov 23 16:37:24 crc kubenswrapper[5050]: I1123 16:37:24.144175 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ffa465e1-5174-449f-94d8-e7a15bca8eae-utilities\") pod \"ffa465e1-5174-449f-94d8-e7a15bca8eae\" (UID: \"ffa465e1-5174-449f-94d8-e7a15bca8eae\") " Nov 23 16:37:24 crc kubenswrapper[5050]: I1123 16:37:24.145511 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ffa465e1-5174-449f-94d8-e7a15bca8eae-utilities" (OuterVolumeSpecName: "utilities") pod "ffa465e1-5174-449f-94d8-e7a15bca8eae" (UID: "ffa465e1-5174-449f-94d8-e7a15bca8eae"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 16:37:24 crc kubenswrapper[5050]: I1123 16:37:24.151795 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ffa465e1-5174-449f-94d8-e7a15bca8eae-kube-api-access-r4rg4" (OuterVolumeSpecName: "kube-api-access-r4rg4") pod "ffa465e1-5174-449f-94d8-e7a15bca8eae" (UID: "ffa465e1-5174-449f-94d8-e7a15bca8eae"). InnerVolumeSpecName "kube-api-access-r4rg4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 16:37:24 crc kubenswrapper[5050]: I1123 16:37:24.246714 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r4rg4\" (UniqueName: \"kubernetes.io/projected/ffa465e1-5174-449f-94d8-e7a15bca8eae-kube-api-access-r4rg4\") on node \"crc\" DevicePath \"\"" Nov 23 16:37:24 crc kubenswrapper[5050]: I1123 16:37:24.247035 5050 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ffa465e1-5174-449f-94d8-e7a15bca8eae-utilities\") on node \"crc\" DevicePath \"\"" Nov 23 16:37:24 crc kubenswrapper[5050]: I1123 16:37:24.249792 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ffa465e1-5174-449f-94d8-e7a15bca8eae-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ffa465e1-5174-449f-94d8-e7a15bca8eae" (UID: "ffa465e1-5174-449f-94d8-e7a15bca8eae"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 16:37:24 crc kubenswrapper[5050]: I1123 16:37:24.349882 5050 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ffa465e1-5174-449f-94d8-e7a15bca8eae-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 23 16:37:24 crc kubenswrapper[5050]: I1123 16:37:24.377878 5050 generic.go:334] "Generic (PLEG): container finished" podID="ffa465e1-5174-449f-94d8-e7a15bca8eae" containerID="ce98f00af1561ebd12599646c20773c9cebc5d25c6f7fe7897121ee1c82fc2a4" exitCode=0 Nov 23 16:37:24 crc kubenswrapper[5050]: I1123 16:37:24.377938 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-h9ttt" event={"ID":"ffa465e1-5174-449f-94d8-e7a15bca8eae","Type":"ContainerDied","Data":"ce98f00af1561ebd12599646c20773c9cebc5d25c6f7fe7897121ee1c82fc2a4"} Nov 23 16:37:24 crc kubenswrapper[5050]: I1123 16:37:24.377974 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-h9ttt" event={"ID":"ffa465e1-5174-449f-94d8-e7a15bca8eae","Type":"ContainerDied","Data":"79e87371fa799830ade760910fbbe12a2a6a27d72f1221f599f5258e82c3cce6"} Nov 23 16:37:24 crc kubenswrapper[5050]: I1123 16:37:24.377997 5050 scope.go:117] "RemoveContainer" containerID="ce98f00af1561ebd12599646c20773c9cebc5d25c6f7fe7897121ee1c82fc2a4" Nov 23 16:37:24 crc kubenswrapper[5050]: I1123 16:37:24.378488 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-h9ttt" Nov 23 16:37:24 crc kubenswrapper[5050]: I1123 16:37:24.412609 5050 scope.go:117] "RemoveContainer" containerID="606e1cdd29f7084cd4a0ba0a408bf342ccb131cf215a9011702b3cb5ea37ace8" Nov 23 16:37:24 crc kubenswrapper[5050]: I1123 16:37:24.434529 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-h9ttt"] Nov 23 16:37:24 crc kubenswrapper[5050]: I1123 16:37:24.450749 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-h9ttt"] Nov 23 16:37:24 crc kubenswrapper[5050]: I1123 16:37:24.459079 5050 scope.go:117] "RemoveContainer" containerID="a19cbb4e900a710d8e1bc5b0a717cb8e385c0453c50a0b7e23da4964e121f3fb" Nov 23 16:37:24 crc kubenswrapper[5050]: I1123 16:37:24.532102 5050 scope.go:117] "RemoveContainer" containerID="ce98f00af1561ebd12599646c20773c9cebc5d25c6f7fe7897121ee1c82fc2a4" Nov 23 16:37:24 crc kubenswrapper[5050]: E1123 16:37:24.539619 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ce98f00af1561ebd12599646c20773c9cebc5d25c6f7fe7897121ee1c82fc2a4\": container with ID starting with ce98f00af1561ebd12599646c20773c9cebc5d25c6f7fe7897121ee1c82fc2a4 not found: ID does not exist" containerID="ce98f00af1561ebd12599646c20773c9cebc5d25c6f7fe7897121ee1c82fc2a4" Nov 23 16:37:24 crc kubenswrapper[5050]: I1123 16:37:24.539672 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ce98f00af1561ebd12599646c20773c9cebc5d25c6f7fe7897121ee1c82fc2a4"} err="failed to get container status \"ce98f00af1561ebd12599646c20773c9cebc5d25c6f7fe7897121ee1c82fc2a4\": rpc error: code = NotFound desc = could not find container \"ce98f00af1561ebd12599646c20773c9cebc5d25c6f7fe7897121ee1c82fc2a4\": container with ID starting with ce98f00af1561ebd12599646c20773c9cebc5d25c6f7fe7897121ee1c82fc2a4 not found: ID does not exist" Nov 23 16:37:24 crc 
Nov 23 16:37:24 crc kubenswrapper[5050]: I1123 16:37:24.539702 5050 scope.go:117] "RemoveContainer" containerID="606e1cdd29f7084cd4a0ba0a408bf342ccb131cf215a9011702b3cb5ea37ace8"
Nov 23 16:37:24 crc kubenswrapper[5050]: E1123 16:37:24.541034 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"606e1cdd29f7084cd4a0ba0a408bf342ccb131cf215a9011702b3cb5ea37ace8\": container with ID starting with 606e1cdd29f7084cd4a0ba0a408bf342ccb131cf215a9011702b3cb5ea37ace8 not found: ID does not exist" containerID="606e1cdd29f7084cd4a0ba0a408bf342ccb131cf215a9011702b3cb5ea37ace8"
Nov 23 16:37:24 crc kubenswrapper[5050]: I1123 16:37:24.541064 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"606e1cdd29f7084cd4a0ba0a408bf342ccb131cf215a9011702b3cb5ea37ace8"} err="failed to get container status \"606e1cdd29f7084cd4a0ba0a408bf342ccb131cf215a9011702b3cb5ea37ace8\": rpc error: code = NotFound desc = could not find container \"606e1cdd29f7084cd4a0ba0a408bf342ccb131cf215a9011702b3cb5ea37ace8\": container with ID starting with 606e1cdd29f7084cd4a0ba0a408bf342ccb131cf215a9011702b3cb5ea37ace8 not found: ID does not exist"
Nov 23 16:37:24 crc kubenswrapper[5050]: I1123 16:37:24.541081 5050 scope.go:117] "RemoveContainer" containerID="a19cbb4e900a710d8e1bc5b0a717cb8e385c0453c50a0b7e23da4964e121f3fb"
Nov 23 16:37:24 crc kubenswrapper[5050]: E1123 16:37:24.541542 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a19cbb4e900a710d8e1bc5b0a717cb8e385c0453c50a0b7e23da4964e121f3fb\": container with ID starting with a19cbb4e900a710d8e1bc5b0a717cb8e385c0453c50a0b7e23da4964e121f3fb not found: ID does not exist" containerID="a19cbb4e900a710d8e1bc5b0a717cb8e385c0453c50a0b7e23da4964e121f3fb"
Nov 23 16:37:24 crc kubenswrapper[5050]: I1123 16:37:24.541567 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a19cbb4e900a710d8e1bc5b0a717cb8e385c0453c50a0b7e23da4964e121f3fb"} err="failed to get container status \"a19cbb4e900a710d8e1bc5b0a717cb8e385c0453c50a0b7e23da4964e121f3fb\": rpc error: code = NotFound desc = could not find container \"a19cbb4e900a710d8e1bc5b0a717cb8e385c0453c50a0b7e23da4964e121f3fb\": container with ID starting with a19cbb4e900a710d8e1bc5b0a717cb8e385c0453c50a0b7e23da4964e121f3fb not found: ID does not exist"
Nov 23 16:37:25 crc kubenswrapper[5050]: I1123 16:37:25.563813 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ffa465e1-5174-449f-94d8-e7a15bca8eae" path="/var/lib/kubelet/pods/ffa465e1-5174-449f-94d8-e7a15bca8eae/volumes"
Nov 23 16:37:29 crc kubenswrapper[5050]: I1123 16:37:29.224534 5050 patch_prober.go:28] interesting pod/machine-config-daemon-hlrlq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 23 16:37:29 crc kubenswrapper[5050]: I1123 16:37:29.225211 5050 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 23 16:37:29 crc kubenswrapper[5050]: I1123 16:37:29.225274 5050 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq"
probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" Nov 23 16:37:29 crc kubenswrapper[5050]: I1123 16:37:29.226616 5050 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"88b96523e5d1f1e894f0c03539e34cde758cdfd39f9607c3373d355cc9a96f2f"} pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 23 16:37:29 crc kubenswrapper[5050]: I1123 16:37:29.226707 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" containerID="cri-o://88b96523e5d1f1e894f0c03539e34cde758cdfd39f9607c3373d355cc9a96f2f" gracePeriod=600 Nov 23 16:37:29 crc kubenswrapper[5050]: E1123 16:37:29.376638 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 16:37:29 crc kubenswrapper[5050]: I1123 16:37:29.450759 5050 generic.go:334] "Generic (PLEG): container finished" podID="1d998909-9470-47ef-87e8-b34f0473682f" containerID="88b96523e5d1f1e894f0c03539e34cde758cdfd39f9607c3373d355cc9a96f2f" exitCode=0 Nov 23 16:37:29 crc kubenswrapper[5050]: I1123 16:37:29.450830 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" event={"ID":"1d998909-9470-47ef-87e8-b34f0473682f","Type":"ContainerDied","Data":"88b96523e5d1f1e894f0c03539e34cde758cdfd39f9607c3373d355cc9a96f2f"} Nov 23 16:37:29 crc kubenswrapper[5050]: I1123 16:37:29.450885 5050 scope.go:117] "RemoveContainer" containerID="9b4e71185917f1b2a88497f2d03a44bf9689c07a3b5f51a006c85b796109d94d" Nov 23 16:37:29 crc kubenswrapper[5050]: I1123 16:37:29.452003 5050 scope.go:117] "RemoveContainer" containerID="88b96523e5d1f1e894f0c03539e34cde758cdfd39f9607c3373d355cc9a96f2f" Nov 23 16:37:29 crc kubenswrapper[5050]: E1123 16:37:29.452378 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 16:37:44 crc kubenswrapper[5050]: I1123 16:37:44.549838 5050 scope.go:117] "RemoveContainer" containerID="88b96523e5d1f1e894f0c03539e34cde758cdfd39f9607c3373d355cc9a96f2f" Nov 23 16:37:44 crc kubenswrapper[5050]: E1123 16:37:44.551297 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 16:37:56 
Nov 23 16:37:56 crc kubenswrapper[5050]: I1123 16:37:56.550772 5050 scope.go:117] "RemoveContainer" containerID="88b96523e5d1f1e894f0c03539e34cde758cdfd39f9607c3373d355cc9a96f2f"
Nov 23 16:37:56 crc kubenswrapper[5050]: E1123 16:37:56.551834 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f"
Nov 23 16:38:11 crc kubenswrapper[5050]: I1123 16:38:11.548876 5050 scope.go:117] "RemoveContainer" containerID="88b96523e5d1f1e894f0c03539e34cde758cdfd39f9607c3373d355cc9a96f2f"
Nov 23 16:38:11 crc kubenswrapper[5050]: E1123 16:38:11.550039 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f"
Nov 23 16:38:22 crc kubenswrapper[5050]: I1123 16:38:22.549823 5050 scope.go:117] "RemoveContainer" containerID="88b96523e5d1f1e894f0c03539e34cde758cdfd39f9607c3373d355cc9a96f2f"
Nov 23 16:38:22 crc kubenswrapper[5050]: E1123 16:38:22.551678 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f"
Nov 23 16:38:33 crc kubenswrapper[5050]: I1123 16:38:33.550181 5050 scope.go:117] "RemoveContainer" containerID="88b96523e5d1f1e894f0c03539e34cde758cdfd39f9607c3373d355cc9a96f2f"
Nov 23 16:38:33 crc kubenswrapper[5050]: E1123 16:38:33.551509 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f"
Nov 23 16:38:45 crc kubenswrapper[5050]: I1123 16:38:45.558015 5050 scope.go:117] "RemoveContainer" containerID="88b96523e5d1f1e894f0c03539e34cde758cdfd39f9607c3373d355cc9a96f2f"
Nov 23 16:38:45 crc kubenswrapper[5050]: E1123 16:38:45.559326 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f"
Nov 23 16:39:00 crc kubenswrapper[5050]: I1123 16:39:00.563226 5050 scope.go:117] "RemoveContainer" containerID="88b96523e5d1f1e894f0c03539e34cde758cdfd39f9607c3373d355cc9a96f2f"
Nov 23 16:39:00 crc kubenswrapper[5050]: E1123 16:39:00.565290 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f"
Nov 23 16:39:15 crc kubenswrapper[5050]: I1123 16:39:15.556866 5050 scope.go:117] "RemoveContainer" containerID="88b96523e5d1f1e894f0c03539e34cde758cdfd39f9607c3373d355cc9a96f2f"
Nov 23 16:39:15 crc kubenswrapper[5050]: E1123 16:39:15.558204 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f"
Nov 23 16:39:29 crc kubenswrapper[5050]: I1123 16:39:29.550523 5050 scope.go:117] "RemoveContainer" containerID="88b96523e5d1f1e894f0c03539e34cde758cdfd39f9607c3373d355cc9a96f2f"
Nov 23 16:39:29 crc kubenswrapper[5050]: E1123 16:39:29.552003 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f"
Nov 23 16:39:41 crc kubenswrapper[5050]: I1123 16:39:41.568123 5050 scope.go:117] "RemoveContainer" containerID="88b96523e5d1f1e894f0c03539e34cde758cdfd39f9607c3373d355cc9a96f2f"
Nov 23 16:39:41 crc kubenswrapper[5050]: E1123 16:39:41.569876 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f"
Nov 23 16:39:48 crc kubenswrapper[5050]: I1123 16:39:48.323066 5050 generic.go:334] "Generic (PLEG): container finished" podID="2c88cdbf-1f63-4305-a672-8cc67f06fa66" containerID="4c23b504aba0f854396e4676468ef586022b84506f66fd7461dab41a30b83190" exitCode=0
Nov 23 16:39:48 crc kubenswrapper[5050]: I1123 16:39:48.323336 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-c9xw7" event={"ID":"2c88cdbf-1f63-4305-a672-8cc67f06fa66","Type":"ContainerDied","Data":"4c23b504aba0f854396e4676468ef586022b84506f66fd7461dab41a30b83190"}
Nov 23 16:39:49 crc kubenswrapper[5050]: I1123 16:39:49.974057 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-c9xw7"
Need to start a new one" pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-c9xw7" Nov 23 16:39:50 crc kubenswrapper[5050]: I1123 16:39:50.160743 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/2c88cdbf-1f63-4305-a672-8cc67f06fa66-ceph\") pod \"2c88cdbf-1f63-4305-a672-8cc67f06fa66\" (UID: \"2c88cdbf-1f63-4305-a672-8cc67f06fa66\") " Nov 23 16:39:50 crc kubenswrapper[5050]: I1123 16:39:50.160931 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2c88cdbf-1f63-4305-a672-8cc67f06fa66-ssh-key\") pod \"2c88cdbf-1f63-4305-a672-8cc67f06fa66\" (UID: \"2c88cdbf-1f63-4305-a672-8cc67f06fa66\") " Nov 23 16:39:50 crc kubenswrapper[5050]: I1123 16:39:50.160960 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2c88cdbf-1f63-4305-a672-8cc67f06fa66-inventory\") pod \"2c88cdbf-1f63-4305-a672-8cc67f06fa66\" (UID: \"2c88cdbf-1f63-4305-a672-8cc67f06fa66\") " Nov 23 16:39:50 crc kubenswrapper[5050]: I1123 16:39:50.161056 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jz4k2\" (UniqueName: \"kubernetes.io/projected/2c88cdbf-1f63-4305-a672-8cc67f06fa66-kube-api-access-jz4k2\") pod \"2c88cdbf-1f63-4305-a672-8cc67f06fa66\" (UID: \"2c88cdbf-1f63-4305-a672-8cc67f06fa66\") " Nov 23 16:39:50 crc kubenswrapper[5050]: I1123 16:39:50.161302 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tripleo-cleanup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c88cdbf-1f63-4305-a672-8cc67f06fa66-tripleo-cleanup-combined-ca-bundle\") pod \"2c88cdbf-1f63-4305-a672-8cc67f06fa66\" (UID: \"2c88cdbf-1f63-4305-a672-8cc67f06fa66\") " Nov 23 16:39:50 crc kubenswrapper[5050]: I1123 16:39:50.188996 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2c88cdbf-1f63-4305-a672-8cc67f06fa66-kube-api-access-jz4k2" (OuterVolumeSpecName: "kube-api-access-jz4k2") pod "2c88cdbf-1f63-4305-a672-8cc67f06fa66" (UID: "2c88cdbf-1f63-4305-a672-8cc67f06fa66"). InnerVolumeSpecName "kube-api-access-jz4k2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 16:39:50 crc kubenswrapper[5050]: I1123 16:39:50.190653 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2c88cdbf-1f63-4305-a672-8cc67f06fa66-tripleo-cleanup-combined-ca-bundle" (OuterVolumeSpecName: "tripleo-cleanup-combined-ca-bundle") pod "2c88cdbf-1f63-4305-a672-8cc67f06fa66" (UID: "2c88cdbf-1f63-4305-a672-8cc67f06fa66"). InnerVolumeSpecName "tripleo-cleanup-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:39:50 crc kubenswrapper[5050]: I1123 16:39:50.211688 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2c88cdbf-1f63-4305-a672-8cc67f06fa66-ceph" (OuterVolumeSpecName: "ceph") pod "2c88cdbf-1f63-4305-a672-8cc67f06fa66" (UID: "2c88cdbf-1f63-4305-a672-8cc67f06fa66"). InnerVolumeSpecName "ceph". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:39:50 crc kubenswrapper[5050]: I1123 16:39:50.244888 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2c88cdbf-1f63-4305-a672-8cc67f06fa66-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "2c88cdbf-1f63-4305-a672-8cc67f06fa66" (UID: "2c88cdbf-1f63-4305-a672-8cc67f06fa66"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:39:50 crc kubenswrapper[5050]: I1123 16:39:50.264930 5050 reconciler_common.go:293] "Volume detached for volume \"tripleo-cleanup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c88cdbf-1f63-4305-a672-8cc67f06fa66-tripleo-cleanup-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 16:39:50 crc kubenswrapper[5050]: I1123 16:39:50.266651 5050 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/2c88cdbf-1f63-4305-a672-8cc67f06fa66-ceph\") on node \"crc\" DevicePath \"\"" Nov 23 16:39:50 crc kubenswrapper[5050]: I1123 16:39:50.266793 5050 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2c88cdbf-1f63-4305-a672-8cc67f06fa66-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 23 16:39:50 crc kubenswrapper[5050]: I1123 16:39:50.266903 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jz4k2\" (UniqueName: \"kubernetes.io/projected/2c88cdbf-1f63-4305-a672-8cc67f06fa66-kube-api-access-jz4k2\") on node \"crc\" DevicePath \"\"" Nov 23 16:39:50 crc kubenswrapper[5050]: I1123 16:39:50.271677 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2c88cdbf-1f63-4305-a672-8cc67f06fa66-inventory" (OuterVolumeSpecName: "inventory") pod "2c88cdbf-1f63-4305-a672-8cc67f06fa66" (UID: "2c88cdbf-1f63-4305-a672-8cc67f06fa66"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:39:50 crc kubenswrapper[5050]: I1123 16:39:50.355018 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-c9xw7" event={"ID":"2c88cdbf-1f63-4305-a672-8cc67f06fa66","Type":"ContainerDied","Data":"fad9e9c0b3f7e129210225ef36e51dc007b3e8f79c201cf0c37ac6276b8d1e2f"} Nov 23 16:39:50 crc kubenswrapper[5050]: I1123 16:39:50.355061 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fad9e9c0b3f7e129210225ef36e51dc007b3e8f79c201cf0c37ac6276b8d1e2f" Nov 23 16:39:50 crc kubenswrapper[5050]: I1123 16:39:50.355374 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-c9xw7" Nov 23 16:39:50 crc kubenswrapper[5050]: I1123 16:39:50.375271 5050 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2c88cdbf-1f63-4305-a672-8cc67f06fa66-inventory\") on node \"crc\" DevicePath \"\"" Nov 23 16:39:51 crc kubenswrapper[5050]: I1123 16:39:51.699911 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/bootstrap-openstack-openstack-cell1-pr9ll"] Nov 23 16:39:51 crc kubenswrapper[5050]: E1123 16:39:51.701667 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="066c8d57-d25c-453e-ad80-df85fe064b5f" containerName="registry-server" Nov 23 16:39:51 crc kubenswrapper[5050]: I1123 16:39:51.701779 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="066c8d57-d25c-453e-ad80-df85fe064b5f" containerName="registry-server" Nov 23 16:39:51 crc kubenswrapper[5050]: E1123 16:39:51.701877 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ffa465e1-5174-449f-94d8-e7a15bca8eae" containerName="registry-server" Nov 23 16:39:51 crc kubenswrapper[5050]: I1123 16:39:51.701956 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="ffa465e1-5174-449f-94d8-e7a15bca8eae" containerName="registry-server" Nov 23 16:39:51 crc kubenswrapper[5050]: E1123 16:39:51.702047 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="066c8d57-d25c-453e-ad80-df85fe064b5f" containerName="extract-utilities" Nov 23 16:39:51 crc kubenswrapper[5050]: I1123 16:39:51.702111 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="066c8d57-d25c-453e-ad80-df85fe064b5f" containerName="extract-utilities" Nov 23 16:39:51 crc kubenswrapper[5050]: E1123 16:39:51.702182 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2c88cdbf-1f63-4305-a672-8cc67f06fa66" containerName="tripleo-cleanup-tripleo-cleanup-openstack-cell1" Nov 23 16:39:51 crc kubenswrapper[5050]: I1123 16:39:51.702236 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="2c88cdbf-1f63-4305-a672-8cc67f06fa66" containerName="tripleo-cleanup-tripleo-cleanup-openstack-cell1" Nov 23 16:39:51 crc kubenswrapper[5050]: E1123 16:39:51.702309 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ffa465e1-5174-449f-94d8-e7a15bca8eae" containerName="extract-content" Nov 23 16:39:51 crc kubenswrapper[5050]: I1123 16:39:51.702370 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="ffa465e1-5174-449f-94d8-e7a15bca8eae" containerName="extract-content" Nov 23 16:39:51 crc kubenswrapper[5050]: E1123 16:39:51.702463 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ffa465e1-5174-449f-94d8-e7a15bca8eae" containerName="extract-utilities" Nov 23 16:39:51 crc kubenswrapper[5050]: I1123 16:39:51.702538 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="ffa465e1-5174-449f-94d8-e7a15bca8eae" containerName="extract-utilities" Nov 23 16:39:51 crc kubenswrapper[5050]: E1123 16:39:51.702615 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="066c8d57-d25c-453e-ad80-df85fe064b5f" containerName="extract-content" Nov 23 16:39:51 crc kubenswrapper[5050]: I1123 16:39:51.702682 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="066c8d57-d25c-453e-ad80-df85fe064b5f" containerName="extract-content" Nov 23 16:39:51 crc kubenswrapper[5050]: I1123 16:39:51.708664 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="066c8d57-d25c-453e-ad80-df85fe064b5f" 
containerName="registry-server" Nov 23 16:39:51 crc kubenswrapper[5050]: I1123 16:39:51.708986 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="2c88cdbf-1f63-4305-a672-8cc67f06fa66" containerName="tripleo-cleanup-tripleo-cleanup-openstack-cell1" Nov 23 16:39:51 crc kubenswrapper[5050]: I1123 16:39:51.709138 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="ffa465e1-5174-449f-94d8-e7a15bca8eae" containerName="registry-server" Nov 23 16:39:51 crc kubenswrapper[5050]: I1123 16:39:51.714485 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-openstack-openstack-cell1-pr9ll" Nov 23 16:39:51 crc kubenswrapper[5050]: I1123 16:39:51.719617 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-ckrpf" Nov 23 16:39:51 crc kubenswrapper[5050]: I1123 16:39:51.719682 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Nov 23 16:39:51 crc kubenswrapper[5050]: I1123 16:39:51.719871 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 23 16:39:51 crc kubenswrapper[5050]: I1123 16:39:51.720839 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 23 16:39:51 crc kubenswrapper[5050]: I1123 16:39:51.726614 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-openstack-openstack-cell1-pr9ll"] Nov 23 16:39:51 crc kubenswrapper[5050]: I1123 16:39:51.818298 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/39e4cb1a-f19a-4fd6-aa7d-aae2843166f2-bootstrap-combined-ca-bundle\") pod \"bootstrap-openstack-openstack-cell1-pr9ll\" (UID: \"39e4cb1a-f19a-4fd6-aa7d-aae2843166f2\") " pod="openstack/bootstrap-openstack-openstack-cell1-pr9ll" Nov 23 16:39:51 crc kubenswrapper[5050]: I1123 16:39:51.818378 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/39e4cb1a-f19a-4fd6-aa7d-aae2843166f2-ceph\") pod \"bootstrap-openstack-openstack-cell1-pr9ll\" (UID: \"39e4cb1a-f19a-4fd6-aa7d-aae2843166f2\") " pod="openstack/bootstrap-openstack-openstack-cell1-pr9ll" Nov 23 16:39:51 crc kubenswrapper[5050]: I1123 16:39:51.818413 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bdcll\" (UniqueName: \"kubernetes.io/projected/39e4cb1a-f19a-4fd6-aa7d-aae2843166f2-kube-api-access-bdcll\") pod \"bootstrap-openstack-openstack-cell1-pr9ll\" (UID: \"39e4cb1a-f19a-4fd6-aa7d-aae2843166f2\") " pod="openstack/bootstrap-openstack-openstack-cell1-pr9ll" Nov 23 16:39:51 crc kubenswrapper[5050]: I1123 16:39:51.818587 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/39e4cb1a-f19a-4fd6-aa7d-aae2843166f2-ssh-key\") pod \"bootstrap-openstack-openstack-cell1-pr9ll\" (UID: \"39e4cb1a-f19a-4fd6-aa7d-aae2843166f2\") " pod="openstack/bootstrap-openstack-openstack-cell1-pr9ll" Nov 23 16:39:51 crc kubenswrapper[5050]: I1123 16:39:51.818776 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/39e4cb1a-f19a-4fd6-aa7d-aae2843166f2-inventory\") pod 
\"bootstrap-openstack-openstack-cell1-pr9ll\" (UID: \"39e4cb1a-f19a-4fd6-aa7d-aae2843166f2\") " pod="openstack/bootstrap-openstack-openstack-cell1-pr9ll" Nov 23 16:39:51 crc kubenswrapper[5050]: I1123 16:39:51.922028 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bdcll\" (UniqueName: \"kubernetes.io/projected/39e4cb1a-f19a-4fd6-aa7d-aae2843166f2-kube-api-access-bdcll\") pod \"bootstrap-openstack-openstack-cell1-pr9ll\" (UID: \"39e4cb1a-f19a-4fd6-aa7d-aae2843166f2\") " pod="openstack/bootstrap-openstack-openstack-cell1-pr9ll" Nov 23 16:39:51 crc kubenswrapper[5050]: I1123 16:39:51.922172 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/39e4cb1a-f19a-4fd6-aa7d-aae2843166f2-ssh-key\") pod \"bootstrap-openstack-openstack-cell1-pr9ll\" (UID: \"39e4cb1a-f19a-4fd6-aa7d-aae2843166f2\") " pod="openstack/bootstrap-openstack-openstack-cell1-pr9ll" Nov 23 16:39:51 crc kubenswrapper[5050]: I1123 16:39:51.922277 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/39e4cb1a-f19a-4fd6-aa7d-aae2843166f2-inventory\") pod \"bootstrap-openstack-openstack-cell1-pr9ll\" (UID: \"39e4cb1a-f19a-4fd6-aa7d-aae2843166f2\") " pod="openstack/bootstrap-openstack-openstack-cell1-pr9ll" Nov 23 16:39:51 crc kubenswrapper[5050]: I1123 16:39:51.922859 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/39e4cb1a-f19a-4fd6-aa7d-aae2843166f2-bootstrap-combined-ca-bundle\") pod \"bootstrap-openstack-openstack-cell1-pr9ll\" (UID: \"39e4cb1a-f19a-4fd6-aa7d-aae2843166f2\") " pod="openstack/bootstrap-openstack-openstack-cell1-pr9ll" Nov 23 16:39:51 crc kubenswrapper[5050]: I1123 16:39:51.922922 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/39e4cb1a-f19a-4fd6-aa7d-aae2843166f2-ceph\") pod \"bootstrap-openstack-openstack-cell1-pr9ll\" (UID: \"39e4cb1a-f19a-4fd6-aa7d-aae2843166f2\") " pod="openstack/bootstrap-openstack-openstack-cell1-pr9ll" Nov 23 16:39:51 crc kubenswrapper[5050]: I1123 16:39:51.931150 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/39e4cb1a-f19a-4fd6-aa7d-aae2843166f2-ssh-key\") pod \"bootstrap-openstack-openstack-cell1-pr9ll\" (UID: \"39e4cb1a-f19a-4fd6-aa7d-aae2843166f2\") " pod="openstack/bootstrap-openstack-openstack-cell1-pr9ll" Nov 23 16:39:51 crc kubenswrapper[5050]: I1123 16:39:51.933079 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/39e4cb1a-f19a-4fd6-aa7d-aae2843166f2-inventory\") pod \"bootstrap-openstack-openstack-cell1-pr9ll\" (UID: \"39e4cb1a-f19a-4fd6-aa7d-aae2843166f2\") " pod="openstack/bootstrap-openstack-openstack-cell1-pr9ll" Nov 23 16:39:51 crc kubenswrapper[5050]: I1123 16:39:51.933238 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/39e4cb1a-f19a-4fd6-aa7d-aae2843166f2-ceph\") pod \"bootstrap-openstack-openstack-cell1-pr9ll\" (UID: \"39e4cb1a-f19a-4fd6-aa7d-aae2843166f2\") " pod="openstack/bootstrap-openstack-openstack-cell1-pr9ll" Nov 23 16:39:51 crc kubenswrapper[5050]: I1123 16:39:51.935690 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" 
(UniqueName: \"kubernetes.io/secret/39e4cb1a-f19a-4fd6-aa7d-aae2843166f2-bootstrap-combined-ca-bundle\") pod \"bootstrap-openstack-openstack-cell1-pr9ll\" (UID: \"39e4cb1a-f19a-4fd6-aa7d-aae2843166f2\") " pod="openstack/bootstrap-openstack-openstack-cell1-pr9ll" Nov 23 16:39:51 crc kubenswrapper[5050]: I1123 16:39:51.959423 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bdcll\" (UniqueName: \"kubernetes.io/projected/39e4cb1a-f19a-4fd6-aa7d-aae2843166f2-kube-api-access-bdcll\") pod \"bootstrap-openstack-openstack-cell1-pr9ll\" (UID: \"39e4cb1a-f19a-4fd6-aa7d-aae2843166f2\") " pod="openstack/bootstrap-openstack-openstack-cell1-pr9ll" Nov 23 16:39:52 crc kubenswrapper[5050]: I1123 16:39:52.049330 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-openstack-openstack-cell1-pr9ll" Nov 23 16:39:52 crc kubenswrapper[5050]: I1123 16:39:52.741797 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-openstack-openstack-cell1-pr9ll"] Nov 23 16:39:53 crc kubenswrapper[5050]: I1123 16:39:53.398024 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-openstack-openstack-cell1-pr9ll" event={"ID":"39e4cb1a-f19a-4fd6-aa7d-aae2843166f2","Type":"ContainerStarted","Data":"a7bd2ff9a463140e40a06325937a3617a27a4d3d801b474cb8126b90ba32bf2d"} Nov 23 16:39:54 crc kubenswrapper[5050]: I1123 16:39:54.416157 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-openstack-openstack-cell1-pr9ll" event={"ID":"39e4cb1a-f19a-4fd6-aa7d-aae2843166f2","Type":"ContainerStarted","Data":"f17c6546bd72a9f5a5a426e173675b280ce23846f288798e64c9eb5c5a7b19a0"} Nov 23 16:39:54 crc kubenswrapper[5050]: I1123 16:39:54.468405 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/bootstrap-openstack-openstack-cell1-pr9ll" podStartSLOduration=2.933481723 podStartE2EDuration="3.468372603s" podCreationTimestamp="2025-11-23 16:39:51 +0000 UTC" firstStartedPulling="2025-11-23 16:39:52.753341961 +0000 UTC m=+7087.920338476" lastFinishedPulling="2025-11-23 16:39:53.288232861 +0000 UTC m=+7088.455229356" observedRunningTime="2025-11-23 16:39:54.453352278 +0000 UTC m=+7089.620348793" watchObservedRunningTime="2025-11-23 16:39:54.468372603 +0000 UTC m=+7089.635369118" Nov 23 16:39:55 crc kubenswrapper[5050]: I1123 16:39:55.548857 5050 scope.go:117] "RemoveContainer" containerID="88b96523e5d1f1e894f0c03539e34cde758cdfd39f9607c3373d355cc9a96f2f" Nov 23 16:39:55 crc kubenswrapper[5050]: E1123 16:39:55.549212 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 16:40:07 crc kubenswrapper[5050]: I1123 16:40:07.548950 5050 scope.go:117] "RemoveContainer" containerID="88b96523e5d1f1e894f0c03539e34cde758cdfd39f9607c3373d355cc9a96f2f" Nov 23 16:40:07 crc kubenswrapper[5050]: E1123 16:40:07.550184 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 16:40:21 crc kubenswrapper[5050]: I1123 16:40:21.549520 5050 scope.go:117] "RemoveContainer" containerID="88b96523e5d1f1e894f0c03539e34cde758cdfd39f9607c3373d355cc9a96f2f" Nov 23 16:40:21 crc kubenswrapper[5050]: E1123 16:40:21.550834 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 16:40:24 crc kubenswrapper[5050]: I1123 16:40:24.037171 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-dwn7f"] Nov 23 16:40:24 crc kubenswrapper[5050]: I1123 16:40:24.041949 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-dwn7f" Nov 23 16:40:24 crc kubenswrapper[5050]: I1123 16:40:24.049282 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-dwn7f"] Nov 23 16:40:24 crc kubenswrapper[5050]: I1123 16:40:24.162314 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d1f40fb6-3a86-42e3-bae1-e9e931ab4aa6-utilities\") pod \"community-operators-dwn7f\" (UID: \"d1f40fb6-3a86-42e3-bae1-e9e931ab4aa6\") " pod="openshift-marketplace/community-operators-dwn7f" Nov 23 16:40:24 crc kubenswrapper[5050]: I1123 16:40:24.162870 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d1f40fb6-3a86-42e3-bae1-e9e931ab4aa6-catalog-content\") pod \"community-operators-dwn7f\" (UID: \"d1f40fb6-3a86-42e3-bae1-e9e931ab4aa6\") " pod="openshift-marketplace/community-operators-dwn7f" Nov 23 16:40:24 crc kubenswrapper[5050]: I1123 16:40:24.162970 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vxlbp\" (UniqueName: \"kubernetes.io/projected/d1f40fb6-3a86-42e3-bae1-e9e931ab4aa6-kube-api-access-vxlbp\") pod \"community-operators-dwn7f\" (UID: \"d1f40fb6-3a86-42e3-bae1-e9e931ab4aa6\") " pod="openshift-marketplace/community-operators-dwn7f" Nov 23 16:40:24 crc kubenswrapper[5050]: I1123 16:40:24.265339 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d1f40fb6-3a86-42e3-bae1-e9e931ab4aa6-catalog-content\") pod \"community-operators-dwn7f\" (UID: \"d1f40fb6-3a86-42e3-bae1-e9e931ab4aa6\") " pod="openshift-marketplace/community-operators-dwn7f" Nov 23 16:40:24 crc kubenswrapper[5050]: I1123 16:40:24.265933 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d1f40fb6-3a86-42e3-bae1-e9e931ab4aa6-catalog-content\") pod \"community-operators-dwn7f\" (UID: \"d1f40fb6-3a86-42e3-bae1-e9e931ab4aa6\") " pod="openshift-marketplace/community-operators-dwn7f" Nov 23 16:40:24 crc kubenswrapper[5050]: I1123 16:40:24.266153 5050 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vxlbp\" (UniqueName: \"kubernetes.io/projected/d1f40fb6-3a86-42e3-bae1-e9e931ab4aa6-kube-api-access-vxlbp\") pod \"community-operators-dwn7f\" (UID: \"d1f40fb6-3a86-42e3-bae1-e9e931ab4aa6\") " pod="openshift-marketplace/community-operators-dwn7f" Nov 23 16:40:24 crc kubenswrapper[5050]: I1123 16:40:24.266210 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d1f40fb6-3a86-42e3-bae1-e9e931ab4aa6-utilities\") pod \"community-operators-dwn7f\" (UID: \"d1f40fb6-3a86-42e3-bae1-e9e931ab4aa6\") " pod="openshift-marketplace/community-operators-dwn7f" Nov 23 16:40:24 crc kubenswrapper[5050]: I1123 16:40:24.266621 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d1f40fb6-3a86-42e3-bae1-e9e931ab4aa6-utilities\") pod \"community-operators-dwn7f\" (UID: \"d1f40fb6-3a86-42e3-bae1-e9e931ab4aa6\") " pod="openshift-marketplace/community-operators-dwn7f" Nov 23 16:40:24 crc kubenswrapper[5050]: I1123 16:40:24.302884 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vxlbp\" (UniqueName: \"kubernetes.io/projected/d1f40fb6-3a86-42e3-bae1-e9e931ab4aa6-kube-api-access-vxlbp\") pod \"community-operators-dwn7f\" (UID: \"d1f40fb6-3a86-42e3-bae1-e9e931ab4aa6\") " pod="openshift-marketplace/community-operators-dwn7f" Nov 23 16:40:24 crc kubenswrapper[5050]: I1123 16:40:24.404547 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-dwn7f" Nov 23 16:40:24 crc kubenswrapper[5050]: I1123 16:40:24.967749 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-dwn7f"] Nov 23 16:40:25 crc kubenswrapper[5050]: I1123 16:40:25.884359 5050 generic.go:334] "Generic (PLEG): container finished" podID="d1f40fb6-3a86-42e3-bae1-e9e931ab4aa6" containerID="4c01dc7e573938fd9541b290122bbf4f062b369cedf13ab20a856950b85dd087" exitCode=0 Nov 23 16:40:25 crc kubenswrapper[5050]: I1123 16:40:25.884432 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dwn7f" event={"ID":"d1f40fb6-3a86-42e3-bae1-e9e931ab4aa6","Type":"ContainerDied","Data":"4c01dc7e573938fd9541b290122bbf4f062b369cedf13ab20a856950b85dd087"} Nov 23 16:40:25 crc kubenswrapper[5050]: I1123 16:40:25.884952 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dwn7f" event={"ID":"d1f40fb6-3a86-42e3-bae1-e9e931ab4aa6","Type":"ContainerStarted","Data":"593e5f6dd43397dbd68128cc9837f23c49f8814020f6211b58655609d415efc3"} Nov 23 16:40:27 crc kubenswrapper[5050]: I1123 16:40:27.912134 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dwn7f" event={"ID":"d1f40fb6-3a86-42e3-bae1-e9e931ab4aa6","Type":"ContainerStarted","Data":"140249597acdb4cfad437438d846ac44a4b8c82eb96c0cd96507f160f1523670"} Nov 23 16:40:28 crc kubenswrapper[5050]: I1123 16:40:28.932215 5050 generic.go:334] "Generic (PLEG): container finished" podID="d1f40fb6-3a86-42e3-bae1-e9e931ab4aa6" containerID="140249597acdb4cfad437438d846ac44a4b8c82eb96c0cd96507f160f1523670" exitCode=0 Nov 23 16:40:28 crc kubenswrapper[5050]: I1123 16:40:28.932655 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dwn7f" 
event={"ID":"d1f40fb6-3a86-42e3-bae1-e9e931ab4aa6","Type":"ContainerDied","Data":"140249597acdb4cfad437438d846ac44a4b8c82eb96c0cd96507f160f1523670"} Nov 23 16:40:29 crc kubenswrapper[5050]: I1123 16:40:29.955057 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dwn7f" event={"ID":"d1f40fb6-3a86-42e3-bae1-e9e931ab4aa6","Type":"ContainerStarted","Data":"26b31d8244785b2932d8a415ab166d03850915d560830ca8cbc92fe4c76589ca"} Nov 23 16:40:32 crc kubenswrapper[5050]: I1123 16:40:32.551381 5050 scope.go:117] "RemoveContainer" containerID="88b96523e5d1f1e894f0c03539e34cde758cdfd39f9607c3373d355cc9a96f2f" Nov 23 16:40:32 crc kubenswrapper[5050]: E1123 16:40:32.552711 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 16:40:34 crc kubenswrapper[5050]: I1123 16:40:34.405709 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-dwn7f" Nov 23 16:40:34 crc kubenswrapper[5050]: I1123 16:40:34.406675 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-dwn7f" Nov 23 16:40:34 crc kubenswrapper[5050]: I1123 16:40:34.502002 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-dwn7f" Nov 23 16:40:34 crc kubenswrapper[5050]: I1123 16:40:34.534751 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-dwn7f" podStartSLOduration=8.016269275 podStartE2EDuration="11.534726231s" podCreationTimestamp="2025-11-23 16:40:23 +0000 UTC" firstStartedPulling="2025-11-23 16:40:25.888317551 +0000 UTC m=+7121.055314076" lastFinishedPulling="2025-11-23 16:40:29.406774547 +0000 UTC m=+7124.573771032" observedRunningTime="2025-11-23 16:40:29.988120562 +0000 UTC m=+7125.155117057" watchObservedRunningTime="2025-11-23 16:40:34.534726231 +0000 UTC m=+7129.701722726" Nov 23 16:40:35 crc kubenswrapper[5050]: I1123 16:40:35.093888 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-dwn7f" Nov 23 16:40:35 crc kubenswrapper[5050]: I1123 16:40:35.182240 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-dwn7f"] Nov 23 16:40:37 crc kubenswrapper[5050]: I1123 16:40:37.043911 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-dwn7f" podUID="d1f40fb6-3a86-42e3-bae1-e9e931ab4aa6" containerName="registry-server" containerID="cri-o://26b31d8244785b2932d8a415ab166d03850915d560830ca8cbc92fe4c76589ca" gracePeriod=2 Nov 23 16:40:37 crc kubenswrapper[5050]: I1123 16:40:37.622631 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-dwn7f" Nov 23 16:40:37 crc kubenswrapper[5050]: I1123 16:40:37.783472 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d1f40fb6-3a86-42e3-bae1-e9e931ab4aa6-utilities\") pod \"d1f40fb6-3a86-42e3-bae1-e9e931ab4aa6\" (UID: \"d1f40fb6-3a86-42e3-bae1-e9e931ab4aa6\") " Nov 23 16:40:37 crc kubenswrapper[5050]: I1123 16:40:37.783586 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vxlbp\" (UniqueName: \"kubernetes.io/projected/d1f40fb6-3a86-42e3-bae1-e9e931ab4aa6-kube-api-access-vxlbp\") pod \"d1f40fb6-3a86-42e3-bae1-e9e931ab4aa6\" (UID: \"d1f40fb6-3a86-42e3-bae1-e9e931ab4aa6\") " Nov 23 16:40:37 crc kubenswrapper[5050]: I1123 16:40:37.783895 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d1f40fb6-3a86-42e3-bae1-e9e931ab4aa6-catalog-content\") pod \"d1f40fb6-3a86-42e3-bae1-e9e931ab4aa6\" (UID: \"d1f40fb6-3a86-42e3-bae1-e9e931ab4aa6\") " Nov 23 16:40:37 crc kubenswrapper[5050]: I1123 16:40:37.784940 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d1f40fb6-3a86-42e3-bae1-e9e931ab4aa6-utilities" (OuterVolumeSpecName: "utilities") pod "d1f40fb6-3a86-42e3-bae1-e9e931ab4aa6" (UID: "d1f40fb6-3a86-42e3-bae1-e9e931ab4aa6"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 16:40:37 crc kubenswrapper[5050]: I1123 16:40:37.794988 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d1f40fb6-3a86-42e3-bae1-e9e931ab4aa6-kube-api-access-vxlbp" (OuterVolumeSpecName: "kube-api-access-vxlbp") pod "d1f40fb6-3a86-42e3-bae1-e9e931ab4aa6" (UID: "d1f40fb6-3a86-42e3-bae1-e9e931ab4aa6"). InnerVolumeSpecName "kube-api-access-vxlbp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 16:40:37 crc kubenswrapper[5050]: I1123 16:40:37.842171 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d1f40fb6-3a86-42e3-bae1-e9e931ab4aa6-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d1f40fb6-3a86-42e3-bae1-e9e931ab4aa6" (UID: "d1f40fb6-3a86-42e3-bae1-e9e931ab4aa6"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 16:40:37 crc kubenswrapper[5050]: I1123 16:40:37.888108 5050 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d1f40fb6-3a86-42e3-bae1-e9e931ab4aa6-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 23 16:40:37 crc kubenswrapper[5050]: I1123 16:40:37.888717 5050 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d1f40fb6-3a86-42e3-bae1-e9e931ab4aa6-utilities\") on node \"crc\" DevicePath \"\"" Nov 23 16:40:37 crc kubenswrapper[5050]: I1123 16:40:37.888753 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vxlbp\" (UniqueName: \"kubernetes.io/projected/d1f40fb6-3a86-42e3-bae1-e9e931ab4aa6-kube-api-access-vxlbp\") on node \"crc\" DevicePath \"\"" Nov 23 16:40:38 crc kubenswrapper[5050]: I1123 16:40:38.058193 5050 generic.go:334] "Generic (PLEG): container finished" podID="d1f40fb6-3a86-42e3-bae1-e9e931ab4aa6" containerID="26b31d8244785b2932d8a415ab166d03850915d560830ca8cbc92fe4c76589ca" exitCode=0 Nov 23 16:40:38 crc kubenswrapper[5050]: I1123 16:40:38.058275 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-dwn7f" Nov 23 16:40:38 crc kubenswrapper[5050]: I1123 16:40:38.058278 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dwn7f" event={"ID":"d1f40fb6-3a86-42e3-bae1-e9e931ab4aa6","Type":"ContainerDied","Data":"26b31d8244785b2932d8a415ab166d03850915d560830ca8cbc92fe4c76589ca"} Nov 23 16:40:38 crc kubenswrapper[5050]: I1123 16:40:38.058346 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dwn7f" event={"ID":"d1f40fb6-3a86-42e3-bae1-e9e931ab4aa6","Type":"ContainerDied","Data":"593e5f6dd43397dbd68128cc9837f23c49f8814020f6211b58655609d415efc3"} Nov 23 16:40:38 crc kubenswrapper[5050]: I1123 16:40:38.058400 5050 scope.go:117] "RemoveContainer" containerID="26b31d8244785b2932d8a415ab166d03850915d560830ca8cbc92fe4c76589ca" Nov 23 16:40:38 crc kubenswrapper[5050]: I1123 16:40:38.086746 5050 scope.go:117] "RemoveContainer" containerID="140249597acdb4cfad437438d846ac44a4b8c82eb96c0cd96507f160f1523670" Nov 23 16:40:38 crc kubenswrapper[5050]: I1123 16:40:38.117937 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-dwn7f"] Nov 23 16:40:38 crc kubenswrapper[5050]: I1123 16:40:38.122775 5050 scope.go:117] "RemoveContainer" containerID="4c01dc7e573938fd9541b290122bbf4f062b369cedf13ab20a856950b85dd087" Nov 23 16:40:38 crc kubenswrapper[5050]: I1123 16:40:38.134454 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-dwn7f"] Nov 23 16:40:38 crc kubenswrapper[5050]: I1123 16:40:38.182599 5050 scope.go:117] "RemoveContainer" containerID="26b31d8244785b2932d8a415ab166d03850915d560830ca8cbc92fe4c76589ca" Nov 23 16:40:38 crc kubenswrapper[5050]: E1123 16:40:38.183507 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"26b31d8244785b2932d8a415ab166d03850915d560830ca8cbc92fe4c76589ca\": container with ID starting with 26b31d8244785b2932d8a415ab166d03850915d560830ca8cbc92fe4c76589ca not found: ID does not exist" containerID="26b31d8244785b2932d8a415ab166d03850915d560830ca8cbc92fe4c76589ca" Nov 23 16:40:38 crc kubenswrapper[5050]: I1123 16:40:38.183562 
5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"26b31d8244785b2932d8a415ab166d03850915d560830ca8cbc92fe4c76589ca"} err="failed to get container status \"26b31d8244785b2932d8a415ab166d03850915d560830ca8cbc92fe4c76589ca\": rpc error: code = NotFound desc = could not find container \"26b31d8244785b2932d8a415ab166d03850915d560830ca8cbc92fe4c76589ca\": container with ID starting with 26b31d8244785b2932d8a415ab166d03850915d560830ca8cbc92fe4c76589ca not found: ID does not exist" Nov 23 16:40:38 crc kubenswrapper[5050]: I1123 16:40:38.183600 5050 scope.go:117] "RemoveContainer" containerID="140249597acdb4cfad437438d846ac44a4b8c82eb96c0cd96507f160f1523670" Nov 23 16:40:38 crc kubenswrapper[5050]: E1123 16:40:38.184021 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"140249597acdb4cfad437438d846ac44a4b8c82eb96c0cd96507f160f1523670\": container with ID starting with 140249597acdb4cfad437438d846ac44a4b8c82eb96c0cd96507f160f1523670 not found: ID does not exist" containerID="140249597acdb4cfad437438d846ac44a4b8c82eb96c0cd96507f160f1523670" Nov 23 16:40:38 crc kubenswrapper[5050]: I1123 16:40:38.184051 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"140249597acdb4cfad437438d846ac44a4b8c82eb96c0cd96507f160f1523670"} err="failed to get container status \"140249597acdb4cfad437438d846ac44a4b8c82eb96c0cd96507f160f1523670\": rpc error: code = NotFound desc = could not find container \"140249597acdb4cfad437438d846ac44a4b8c82eb96c0cd96507f160f1523670\": container with ID starting with 140249597acdb4cfad437438d846ac44a4b8c82eb96c0cd96507f160f1523670 not found: ID does not exist" Nov 23 16:40:38 crc kubenswrapper[5050]: I1123 16:40:38.184071 5050 scope.go:117] "RemoveContainer" containerID="4c01dc7e573938fd9541b290122bbf4f062b369cedf13ab20a856950b85dd087" Nov 23 16:40:38 crc kubenswrapper[5050]: E1123 16:40:38.185484 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4c01dc7e573938fd9541b290122bbf4f062b369cedf13ab20a856950b85dd087\": container with ID starting with 4c01dc7e573938fd9541b290122bbf4f062b369cedf13ab20a856950b85dd087 not found: ID does not exist" containerID="4c01dc7e573938fd9541b290122bbf4f062b369cedf13ab20a856950b85dd087" Nov 23 16:40:38 crc kubenswrapper[5050]: I1123 16:40:38.185520 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4c01dc7e573938fd9541b290122bbf4f062b369cedf13ab20a856950b85dd087"} err="failed to get container status \"4c01dc7e573938fd9541b290122bbf4f062b369cedf13ab20a856950b85dd087\": rpc error: code = NotFound desc = could not find container \"4c01dc7e573938fd9541b290122bbf4f062b369cedf13ab20a856950b85dd087\": container with ID starting with 4c01dc7e573938fd9541b290122bbf4f062b369cedf13ab20a856950b85dd087 not found: ID does not exist" Nov 23 16:40:39 crc kubenswrapper[5050]: I1123 16:40:39.572303 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d1f40fb6-3a86-42e3-bae1-e9e931ab4aa6" path="/var/lib/kubelet/pods/d1f40fb6-3a86-42e3-bae1-e9e931ab4aa6/volumes" Nov 23 16:40:44 crc kubenswrapper[5050]: I1123 16:40:44.549703 5050 scope.go:117] "RemoveContainer" containerID="88b96523e5d1f1e894f0c03539e34cde758cdfd39f9607c3373d355cc9a96f2f" Nov 23 16:40:44 crc kubenswrapper[5050]: E1123 16:40:44.551096 5050 pod_workers.go:1301] "Error syncing pod, skipping" 
err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 16:40:57 crc kubenswrapper[5050]: I1123 16:40:57.549968 5050 scope.go:117] "RemoveContainer" containerID="88b96523e5d1f1e894f0c03539e34cde758cdfd39f9607c3373d355cc9a96f2f" Nov 23 16:40:57 crc kubenswrapper[5050]: E1123 16:40:57.551876 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 16:41:08 crc kubenswrapper[5050]: I1123 16:41:08.549399 5050 scope.go:117] "RemoveContainer" containerID="88b96523e5d1f1e894f0c03539e34cde758cdfd39f9607c3373d355cc9a96f2f" Nov 23 16:41:08 crc kubenswrapper[5050]: E1123 16:41:08.551150 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 16:41:23 crc kubenswrapper[5050]: I1123 16:41:23.549885 5050 scope.go:117] "RemoveContainer" containerID="88b96523e5d1f1e894f0c03539e34cde758cdfd39f9607c3373d355cc9a96f2f" Nov 23 16:41:23 crc kubenswrapper[5050]: E1123 16:41:23.551486 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 16:41:35 crc kubenswrapper[5050]: I1123 16:41:35.564256 5050 scope.go:117] "RemoveContainer" containerID="88b96523e5d1f1e894f0c03539e34cde758cdfd39f9607c3373d355cc9a96f2f" Nov 23 16:41:35 crc kubenswrapper[5050]: E1123 16:41:35.566685 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 16:41:38 crc kubenswrapper[5050]: I1123 16:41:38.865709 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-4wltp"] Nov 23 16:41:38 crc kubenswrapper[5050]: E1123 16:41:38.867089 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d1f40fb6-3a86-42e3-bae1-e9e931ab4aa6" containerName="extract-content" Nov 23 16:41:38 crc kubenswrapper[5050]: I1123 16:41:38.867106 5050 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="d1f40fb6-3a86-42e3-bae1-e9e931ab4aa6" containerName="extract-content" Nov 23 16:41:38 crc kubenswrapper[5050]: E1123 16:41:38.867138 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d1f40fb6-3a86-42e3-bae1-e9e931ab4aa6" containerName="extract-utilities" Nov 23 16:41:38 crc kubenswrapper[5050]: I1123 16:41:38.867148 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="d1f40fb6-3a86-42e3-bae1-e9e931ab4aa6" containerName="extract-utilities" Nov 23 16:41:38 crc kubenswrapper[5050]: E1123 16:41:38.867169 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d1f40fb6-3a86-42e3-bae1-e9e931ab4aa6" containerName="registry-server" Nov 23 16:41:38 crc kubenswrapper[5050]: I1123 16:41:38.867176 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="d1f40fb6-3a86-42e3-bae1-e9e931ab4aa6" containerName="registry-server" Nov 23 16:41:38 crc kubenswrapper[5050]: I1123 16:41:38.867424 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="d1f40fb6-3a86-42e3-bae1-e9e931ab4aa6" containerName="registry-server" Nov 23 16:41:38 crc kubenswrapper[5050]: I1123 16:41:38.869343 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-4wltp" Nov 23 16:41:38 crc kubenswrapper[5050]: I1123 16:41:38.894351 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-4wltp"] Nov 23 16:41:39 crc kubenswrapper[5050]: I1123 16:41:39.011946 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/424d753b-1b2b-400a-a5b6-d34663174545-catalog-content\") pod \"certified-operators-4wltp\" (UID: \"424d753b-1b2b-400a-a5b6-d34663174545\") " pod="openshift-marketplace/certified-operators-4wltp" Nov 23 16:41:39 crc kubenswrapper[5050]: I1123 16:41:39.012170 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6n8vz\" (UniqueName: \"kubernetes.io/projected/424d753b-1b2b-400a-a5b6-d34663174545-kube-api-access-6n8vz\") pod \"certified-operators-4wltp\" (UID: \"424d753b-1b2b-400a-a5b6-d34663174545\") " pod="openshift-marketplace/certified-operators-4wltp" Nov 23 16:41:39 crc kubenswrapper[5050]: I1123 16:41:39.012417 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/424d753b-1b2b-400a-a5b6-d34663174545-utilities\") pod \"certified-operators-4wltp\" (UID: \"424d753b-1b2b-400a-a5b6-d34663174545\") " pod="openshift-marketplace/certified-operators-4wltp" Nov 23 16:41:39 crc kubenswrapper[5050]: I1123 16:41:39.115802 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6n8vz\" (UniqueName: \"kubernetes.io/projected/424d753b-1b2b-400a-a5b6-d34663174545-kube-api-access-6n8vz\") pod \"certified-operators-4wltp\" (UID: \"424d753b-1b2b-400a-a5b6-d34663174545\") " pod="openshift-marketplace/certified-operators-4wltp" Nov 23 16:41:39 crc kubenswrapper[5050]: I1123 16:41:39.115922 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/424d753b-1b2b-400a-a5b6-d34663174545-utilities\") pod \"certified-operators-4wltp\" (UID: \"424d753b-1b2b-400a-a5b6-d34663174545\") " pod="openshift-marketplace/certified-operators-4wltp" Nov 23 16:41:39 crc kubenswrapper[5050]: I1123 16:41:39.116105 5050 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/424d753b-1b2b-400a-a5b6-d34663174545-catalog-content\") pod \"certified-operators-4wltp\" (UID: \"424d753b-1b2b-400a-a5b6-d34663174545\") " pod="openshift-marketplace/certified-operators-4wltp" Nov 23 16:41:39 crc kubenswrapper[5050]: I1123 16:41:39.116692 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/424d753b-1b2b-400a-a5b6-d34663174545-catalog-content\") pod \"certified-operators-4wltp\" (UID: \"424d753b-1b2b-400a-a5b6-d34663174545\") " pod="openshift-marketplace/certified-operators-4wltp" Nov 23 16:41:39 crc kubenswrapper[5050]: I1123 16:41:39.117079 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/424d753b-1b2b-400a-a5b6-d34663174545-utilities\") pod \"certified-operators-4wltp\" (UID: \"424d753b-1b2b-400a-a5b6-d34663174545\") " pod="openshift-marketplace/certified-operators-4wltp" Nov 23 16:41:39 crc kubenswrapper[5050]: I1123 16:41:39.148876 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6n8vz\" (UniqueName: \"kubernetes.io/projected/424d753b-1b2b-400a-a5b6-d34663174545-kube-api-access-6n8vz\") pod \"certified-operators-4wltp\" (UID: \"424d753b-1b2b-400a-a5b6-d34663174545\") " pod="openshift-marketplace/certified-operators-4wltp" Nov 23 16:41:39 crc kubenswrapper[5050]: I1123 16:41:39.238725 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-4wltp" Nov 23 16:41:39 crc kubenswrapper[5050]: W1123 16:41:39.810683 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod424d753b_1b2b_400a_a5b6_d34663174545.slice/crio-270c0b6d54f35bc680e9758126c906bc57789a33db145daea2d40f32c9d7b557 WatchSource:0}: Error finding container 270c0b6d54f35bc680e9758126c906bc57789a33db145daea2d40f32c9d7b557: Status 404 returned error can't find the container with id 270c0b6d54f35bc680e9758126c906bc57789a33db145daea2d40f32c9d7b557 Nov 23 16:41:39 crc kubenswrapper[5050]: I1123 16:41:39.812761 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-4wltp"] Nov 23 16:41:39 crc kubenswrapper[5050]: I1123 16:41:39.917354 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4wltp" event={"ID":"424d753b-1b2b-400a-a5b6-d34663174545","Type":"ContainerStarted","Data":"270c0b6d54f35bc680e9758126c906bc57789a33db145daea2d40f32c9d7b557"} Nov 23 16:41:40 crc kubenswrapper[5050]: I1123 16:41:40.934684 5050 generic.go:334] "Generic (PLEG): container finished" podID="424d753b-1b2b-400a-a5b6-d34663174545" containerID="a6053eaced40ac3402fa0bb7d3dfec3b7b47a85e94eae242b058d1a69957fbba" exitCode=0 Nov 23 16:41:40 crc kubenswrapper[5050]: I1123 16:41:40.934863 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4wltp" event={"ID":"424d753b-1b2b-400a-a5b6-d34663174545","Type":"ContainerDied","Data":"a6053eaced40ac3402fa0bb7d3dfec3b7b47a85e94eae242b058d1a69957fbba"} Nov 23 16:41:42 crc kubenswrapper[5050]: I1123 16:41:42.966156 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4wltp" 
event={"ID":"424d753b-1b2b-400a-a5b6-d34663174545","Type":"ContainerStarted","Data":"6388fd2def379c333e1d3d641b9bf7f66f3e7303a51d2efb28b6ff205de75990"} Nov 23 16:41:43 crc kubenswrapper[5050]: I1123 16:41:43.998652 5050 generic.go:334] "Generic (PLEG): container finished" podID="424d753b-1b2b-400a-a5b6-d34663174545" containerID="6388fd2def379c333e1d3d641b9bf7f66f3e7303a51d2efb28b6ff205de75990" exitCode=0 Nov 23 16:41:43 crc kubenswrapper[5050]: I1123 16:41:43.998738 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4wltp" event={"ID":"424d753b-1b2b-400a-a5b6-d34663174545","Type":"ContainerDied","Data":"6388fd2def379c333e1d3d641b9bf7f66f3e7303a51d2efb28b6ff205de75990"} Nov 23 16:41:45 crc kubenswrapper[5050]: I1123 16:41:45.014409 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4wltp" event={"ID":"424d753b-1b2b-400a-a5b6-d34663174545","Type":"ContainerStarted","Data":"014f57b3a7d74722a4d7df4c947b96631a110b147f25714f994b97cc5423c537"} Nov 23 16:41:45 crc kubenswrapper[5050]: I1123 16:41:45.047799 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-4wltp" podStartSLOduration=3.561476027 podStartE2EDuration="7.047768694s" podCreationTimestamp="2025-11-23 16:41:38 +0000 UTC" firstStartedPulling="2025-11-23 16:41:40.938042032 +0000 UTC m=+7196.105038557" lastFinishedPulling="2025-11-23 16:41:44.424334729 +0000 UTC m=+7199.591331224" observedRunningTime="2025-11-23 16:41:45.034332174 +0000 UTC m=+7200.201328659" watchObservedRunningTime="2025-11-23 16:41:45.047768694 +0000 UTC m=+7200.214765199" Nov 23 16:41:49 crc kubenswrapper[5050]: I1123 16:41:49.239040 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-4wltp" Nov 23 16:41:49 crc kubenswrapper[5050]: I1123 16:41:49.239973 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-4wltp" Nov 23 16:41:49 crc kubenswrapper[5050]: I1123 16:41:49.335111 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-4wltp" Nov 23 16:41:50 crc kubenswrapper[5050]: I1123 16:41:50.143295 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-4wltp" Nov 23 16:41:50 crc kubenswrapper[5050]: I1123 16:41:50.206274 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-4wltp"] Nov 23 16:41:50 crc kubenswrapper[5050]: I1123 16:41:50.549545 5050 scope.go:117] "RemoveContainer" containerID="88b96523e5d1f1e894f0c03539e34cde758cdfd39f9607c3373d355cc9a96f2f" Nov 23 16:41:50 crc kubenswrapper[5050]: E1123 16:41:50.549874 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 16:41:52 crc kubenswrapper[5050]: I1123 16:41:52.102036 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-4wltp" podUID="424d753b-1b2b-400a-a5b6-d34663174545" 
containerName="registry-server" containerID="cri-o://014f57b3a7d74722a4d7df4c947b96631a110b147f25714f994b97cc5423c537" gracePeriod=2 Nov 23 16:41:52 crc kubenswrapper[5050]: I1123 16:41:52.643565 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-4wltp" Nov 23 16:41:52 crc kubenswrapper[5050]: I1123 16:41:52.738707 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/424d753b-1b2b-400a-a5b6-d34663174545-catalog-content\") pod \"424d753b-1b2b-400a-a5b6-d34663174545\" (UID: \"424d753b-1b2b-400a-a5b6-d34663174545\") " Nov 23 16:41:52 crc kubenswrapper[5050]: I1123 16:41:52.738786 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6n8vz\" (UniqueName: \"kubernetes.io/projected/424d753b-1b2b-400a-a5b6-d34663174545-kube-api-access-6n8vz\") pod \"424d753b-1b2b-400a-a5b6-d34663174545\" (UID: \"424d753b-1b2b-400a-a5b6-d34663174545\") " Nov 23 16:41:52 crc kubenswrapper[5050]: I1123 16:41:52.738903 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/424d753b-1b2b-400a-a5b6-d34663174545-utilities\") pod \"424d753b-1b2b-400a-a5b6-d34663174545\" (UID: \"424d753b-1b2b-400a-a5b6-d34663174545\") " Nov 23 16:41:52 crc kubenswrapper[5050]: I1123 16:41:52.740091 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/424d753b-1b2b-400a-a5b6-d34663174545-utilities" (OuterVolumeSpecName: "utilities") pod "424d753b-1b2b-400a-a5b6-d34663174545" (UID: "424d753b-1b2b-400a-a5b6-d34663174545"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 16:41:52 crc kubenswrapper[5050]: I1123 16:41:52.750098 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/424d753b-1b2b-400a-a5b6-d34663174545-kube-api-access-6n8vz" (OuterVolumeSpecName: "kube-api-access-6n8vz") pod "424d753b-1b2b-400a-a5b6-d34663174545" (UID: "424d753b-1b2b-400a-a5b6-d34663174545"). InnerVolumeSpecName "kube-api-access-6n8vz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 16:41:52 crc kubenswrapper[5050]: I1123 16:41:52.785568 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/424d753b-1b2b-400a-a5b6-d34663174545-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "424d753b-1b2b-400a-a5b6-d34663174545" (UID: "424d753b-1b2b-400a-a5b6-d34663174545"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 16:41:52 crc kubenswrapper[5050]: I1123 16:41:52.842521 5050 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/424d753b-1b2b-400a-a5b6-d34663174545-utilities\") on node \"crc\" DevicePath \"\"" Nov 23 16:41:52 crc kubenswrapper[5050]: I1123 16:41:52.842564 5050 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/424d753b-1b2b-400a-a5b6-d34663174545-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 23 16:41:52 crc kubenswrapper[5050]: I1123 16:41:52.842595 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6n8vz\" (UniqueName: \"kubernetes.io/projected/424d753b-1b2b-400a-a5b6-d34663174545-kube-api-access-6n8vz\") on node \"crc\" DevicePath \"\"" Nov 23 16:41:53 crc kubenswrapper[5050]: I1123 16:41:53.115321 5050 generic.go:334] "Generic (PLEG): container finished" podID="424d753b-1b2b-400a-a5b6-d34663174545" containerID="014f57b3a7d74722a4d7df4c947b96631a110b147f25714f994b97cc5423c537" exitCode=0 Nov 23 16:41:53 crc kubenswrapper[5050]: I1123 16:41:53.115375 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4wltp" event={"ID":"424d753b-1b2b-400a-a5b6-d34663174545","Type":"ContainerDied","Data":"014f57b3a7d74722a4d7df4c947b96631a110b147f25714f994b97cc5423c537"} Nov 23 16:41:53 crc kubenswrapper[5050]: I1123 16:41:53.115412 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4wltp" event={"ID":"424d753b-1b2b-400a-a5b6-d34663174545","Type":"ContainerDied","Data":"270c0b6d54f35bc680e9758126c906bc57789a33db145daea2d40f32c9d7b557"} Nov 23 16:41:53 crc kubenswrapper[5050]: I1123 16:41:53.115413 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-4wltp" Nov 23 16:41:53 crc kubenswrapper[5050]: I1123 16:41:53.115430 5050 scope.go:117] "RemoveContainer" containerID="014f57b3a7d74722a4d7df4c947b96631a110b147f25714f994b97cc5423c537" Nov 23 16:41:53 crc kubenswrapper[5050]: I1123 16:41:53.140674 5050 scope.go:117] "RemoveContainer" containerID="6388fd2def379c333e1d3d641b9bf7f66f3e7303a51d2efb28b6ff205de75990" Nov 23 16:41:53 crc kubenswrapper[5050]: I1123 16:41:53.171051 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-4wltp"] Nov 23 16:41:53 crc kubenswrapper[5050]: I1123 16:41:53.193400 5050 scope.go:117] "RemoveContainer" containerID="a6053eaced40ac3402fa0bb7d3dfec3b7b47a85e94eae242b058d1a69957fbba" Nov 23 16:41:53 crc kubenswrapper[5050]: I1123 16:41:53.196815 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-4wltp"] Nov 23 16:41:53 crc kubenswrapper[5050]: I1123 16:41:53.249306 5050 scope.go:117] "RemoveContainer" containerID="014f57b3a7d74722a4d7df4c947b96631a110b147f25714f994b97cc5423c537" Nov 23 16:41:53 crc kubenswrapper[5050]: E1123 16:41:53.250996 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"014f57b3a7d74722a4d7df4c947b96631a110b147f25714f994b97cc5423c537\": container with ID starting with 014f57b3a7d74722a4d7df4c947b96631a110b147f25714f994b97cc5423c537 not found: ID does not exist" containerID="014f57b3a7d74722a4d7df4c947b96631a110b147f25714f994b97cc5423c537" Nov 23 16:41:53 crc kubenswrapper[5050]: I1123 16:41:53.251059 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"014f57b3a7d74722a4d7df4c947b96631a110b147f25714f994b97cc5423c537"} err="failed to get container status \"014f57b3a7d74722a4d7df4c947b96631a110b147f25714f994b97cc5423c537\": rpc error: code = NotFound desc = could not find container \"014f57b3a7d74722a4d7df4c947b96631a110b147f25714f994b97cc5423c537\": container with ID starting with 014f57b3a7d74722a4d7df4c947b96631a110b147f25714f994b97cc5423c537 not found: ID does not exist" Nov 23 16:41:53 crc kubenswrapper[5050]: I1123 16:41:53.251091 5050 scope.go:117] "RemoveContainer" containerID="6388fd2def379c333e1d3d641b9bf7f66f3e7303a51d2efb28b6ff205de75990" Nov 23 16:41:53 crc kubenswrapper[5050]: E1123 16:41:53.254705 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6388fd2def379c333e1d3d641b9bf7f66f3e7303a51d2efb28b6ff205de75990\": container with ID starting with 6388fd2def379c333e1d3d641b9bf7f66f3e7303a51d2efb28b6ff205de75990 not found: ID does not exist" containerID="6388fd2def379c333e1d3d641b9bf7f66f3e7303a51d2efb28b6ff205de75990" Nov 23 16:41:53 crc kubenswrapper[5050]: I1123 16:41:53.254748 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6388fd2def379c333e1d3d641b9bf7f66f3e7303a51d2efb28b6ff205de75990"} err="failed to get container status \"6388fd2def379c333e1d3d641b9bf7f66f3e7303a51d2efb28b6ff205de75990\": rpc error: code = NotFound desc = could not find container \"6388fd2def379c333e1d3d641b9bf7f66f3e7303a51d2efb28b6ff205de75990\": container with ID starting with 6388fd2def379c333e1d3d641b9bf7f66f3e7303a51d2efb28b6ff205de75990 not found: ID does not exist" Nov 23 16:41:53 crc kubenswrapper[5050]: I1123 16:41:53.254771 5050 scope.go:117] "RemoveContainer" 
containerID="a6053eaced40ac3402fa0bb7d3dfec3b7b47a85e94eae242b058d1a69957fbba" Nov 23 16:41:53 crc kubenswrapper[5050]: E1123 16:41:53.255255 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a6053eaced40ac3402fa0bb7d3dfec3b7b47a85e94eae242b058d1a69957fbba\": container with ID starting with a6053eaced40ac3402fa0bb7d3dfec3b7b47a85e94eae242b058d1a69957fbba not found: ID does not exist" containerID="a6053eaced40ac3402fa0bb7d3dfec3b7b47a85e94eae242b058d1a69957fbba" Nov 23 16:41:53 crc kubenswrapper[5050]: I1123 16:41:53.255304 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a6053eaced40ac3402fa0bb7d3dfec3b7b47a85e94eae242b058d1a69957fbba"} err="failed to get container status \"a6053eaced40ac3402fa0bb7d3dfec3b7b47a85e94eae242b058d1a69957fbba\": rpc error: code = NotFound desc = could not find container \"a6053eaced40ac3402fa0bb7d3dfec3b7b47a85e94eae242b058d1a69957fbba\": container with ID starting with a6053eaced40ac3402fa0bb7d3dfec3b7b47a85e94eae242b058d1a69957fbba not found: ID does not exist" Nov 23 16:41:53 crc kubenswrapper[5050]: I1123 16:41:53.562268 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="424d753b-1b2b-400a-a5b6-d34663174545" path="/var/lib/kubelet/pods/424d753b-1b2b-400a-a5b6-d34663174545/volumes" Nov 23 16:42:02 crc kubenswrapper[5050]: I1123 16:42:02.548960 5050 scope.go:117] "RemoveContainer" containerID="88b96523e5d1f1e894f0c03539e34cde758cdfd39f9607c3373d355cc9a96f2f" Nov 23 16:42:02 crc kubenswrapper[5050]: E1123 16:42:02.551656 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 16:42:15 crc kubenswrapper[5050]: I1123 16:42:15.556129 5050 scope.go:117] "RemoveContainer" containerID="88b96523e5d1f1e894f0c03539e34cde758cdfd39f9607c3373d355cc9a96f2f" Nov 23 16:42:15 crc kubenswrapper[5050]: E1123 16:42:15.556876 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 16:42:30 crc kubenswrapper[5050]: I1123 16:42:30.549162 5050 scope.go:117] "RemoveContainer" containerID="88b96523e5d1f1e894f0c03539e34cde758cdfd39f9607c3373d355cc9a96f2f" Nov 23 16:42:31 crc kubenswrapper[5050]: I1123 16:42:31.624866 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" event={"ID":"1d998909-9470-47ef-87e8-b34f0473682f","Type":"ContainerStarted","Data":"1bfefb3171b8cf9d17cc642f8ef9433deda5bd5290186a212a4ff2a6117146a9"} Nov 23 16:43:02 crc kubenswrapper[5050]: I1123 16:43:02.068815 5050 generic.go:334] "Generic (PLEG): container finished" podID="39e4cb1a-f19a-4fd6-aa7d-aae2843166f2" containerID="f17c6546bd72a9f5a5a426e173675b280ce23846f288798e64c9eb5c5a7b19a0" exitCode=0 Nov 23 16:43:02 crc kubenswrapper[5050]: I1123 
16:43:02.068898 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-openstack-openstack-cell1-pr9ll" event={"ID":"39e4cb1a-f19a-4fd6-aa7d-aae2843166f2","Type":"ContainerDied","Data":"f17c6546bd72a9f5a5a426e173675b280ce23846f288798e64c9eb5c5a7b19a0"} Nov 23 16:43:03 crc kubenswrapper[5050]: I1123 16:43:03.727289 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-openstack-openstack-cell1-pr9ll" Nov 23 16:43:03 crc kubenswrapper[5050]: I1123 16:43:03.888636 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/39e4cb1a-f19a-4fd6-aa7d-aae2843166f2-ceph\") pod \"39e4cb1a-f19a-4fd6-aa7d-aae2843166f2\" (UID: \"39e4cb1a-f19a-4fd6-aa7d-aae2843166f2\") " Nov 23 16:43:03 crc kubenswrapper[5050]: I1123 16:43:03.888710 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bdcll\" (UniqueName: \"kubernetes.io/projected/39e4cb1a-f19a-4fd6-aa7d-aae2843166f2-kube-api-access-bdcll\") pod \"39e4cb1a-f19a-4fd6-aa7d-aae2843166f2\" (UID: \"39e4cb1a-f19a-4fd6-aa7d-aae2843166f2\") " Nov 23 16:43:03 crc kubenswrapper[5050]: I1123 16:43:03.888938 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/39e4cb1a-f19a-4fd6-aa7d-aae2843166f2-bootstrap-combined-ca-bundle\") pod \"39e4cb1a-f19a-4fd6-aa7d-aae2843166f2\" (UID: \"39e4cb1a-f19a-4fd6-aa7d-aae2843166f2\") " Nov 23 16:43:03 crc kubenswrapper[5050]: I1123 16:43:03.889005 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/39e4cb1a-f19a-4fd6-aa7d-aae2843166f2-ssh-key\") pod \"39e4cb1a-f19a-4fd6-aa7d-aae2843166f2\" (UID: \"39e4cb1a-f19a-4fd6-aa7d-aae2843166f2\") " Nov 23 16:43:03 crc kubenswrapper[5050]: I1123 16:43:03.889129 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/39e4cb1a-f19a-4fd6-aa7d-aae2843166f2-inventory\") pod \"39e4cb1a-f19a-4fd6-aa7d-aae2843166f2\" (UID: \"39e4cb1a-f19a-4fd6-aa7d-aae2843166f2\") " Nov 23 16:43:03 crc kubenswrapper[5050]: I1123 16:43:03.901855 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/39e4cb1a-f19a-4fd6-aa7d-aae2843166f2-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "39e4cb1a-f19a-4fd6-aa7d-aae2843166f2" (UID: "39e4cb1a-f19a-4fd6-aa7d-aae2843166f2"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:43:03 crc kubenswrapper[5050]: I1123 16:43:03.902290 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/39e4cb1a-f19a-4fd6-aa7d-aae2843166f2-ceph" (OuterVolumeSpecName: "ceph") pod "39e4cb1a-f19a-4fd6-aa7d-aae2843166f2" (UID: "39e4cb1a-f19a-4fd6-aa7d-aae2843166f2"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:43:03 crc kubenswrapper[5050]: I1123 16:43:03.903344 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/39e4cb1a-f19a-4fd6-aa7d-aae2843166f2-kube-api-access-bdcll" (OuterVolumeSpecName: "kube-api-access-bdcll") pod "39e4cb1a-f19a-4fd6-aa7d-aae2843166f2" (UID: "39e4cb1a-f19a-4fd6-aa7d-aae2843166f2"). InnerVolumeSpecName "kube-api-access-bdcll". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 16:43:03 crc kubenswrapper[5050]: I1123 16:43:03.943082 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/39e4cb1a-f19a-4fd6-aa7d-aae2843166f2-inventory" (OuterVolumeSpecName: "inventory") pod "39e4cb1a-f19a-4fd6-aa7d-aae2843166f2" (UID: "39e4cb1a-f19a-4fd6-aa7d-aae2843166f2"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:43:03 crc kubenswrapper[5050]: I1123 16:43:03.955756 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/39e4cb1a-f19a-4fd6-aa7d-aae2843166f2-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "39e4cb1a-f19a-4fd6-aa7d-aae2843166f2" (UID: "39e4cb1a-f19a-4fd6-aa7d-aae2843166f2"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:43:03 crc kubenswrapper[5050]: I1123 16:43:03.992691 5050 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/39e4cb1a-f19a-4fd6-aa7d-aae2843166f2-ceph\") on node \"crc\" DevicePath \"\"" Nov 23 16:43:03 crc kubenswrapper[5050]: I1123 16:43:03.993158 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bdcll\" (UniqueName: \"kubernetes.io/projected/39e4cb1a-f19a-4fd6-aa7d-aae2843166f2-kube-api-access-bdcll\") on node \"crc\" DevicePath \"\"" Nov 23 16:43:03 crc kubenswrapper[5050]: I1123 16:43:03.993176 5050 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/39e4cb1a-f19a-4fd6-aa7d-aae2843166f2-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 16:43:03 crc kubenswrapper[5050]: I1123 16:43:03.993188 5050 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/39e4cb1a-f19a-4fd6-aa7d-aae2843166f2-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 23 16:43:03 crc kubenswrapper[5050]: I1123 16:43:03.993201 5050 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/39e4cb1a-f19a-4fd6-aa7d-aae2843166f2-inventory\") on node \"crc\" DevicePath \"\"" Nov 23 16:43:04 crc kubenswrapper[5050]: I1123 16:43:04.098691 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-openstack-openstack-cell1-pr9ll" event={"ID":"39e4cb1a-f19a-4fd6-aa7d-aae2843166f2","Type":"ContainerDied","Data":"a7bd2ff9a463140e40a06325937a3617a27a4d3d801b474cb8126b90ba32bf2d"} Nov 23 16:43:04 crc kubenswrapper[5050]: I1123 16:43:04.098782 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a7bd2ff9a463140e40a06325937a3617a27a4d3d801b474cb8126b90ba32bf2d" Nov 23 16:43:04 crc kubenswrapper[5050]: I1123 16:43:04.098813 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-openstack-openstack-cell1-pr9ll" Nov 23 16:43:04 crc kubenswrapper[5050]: I1123 16:43:04.202198 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/download-cache-openstack-openstack-cell1-9fl52"] Nov 23 16:43:04 crc kubenswrapper[5050]: E1123 16:43:04.203101 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="424d753b-1b2b-400a-a5b6-d34663174545" containerName="extract-content" Nov 23 16:43:04 crc kubenswrapper[5050]: I1123 16:43:04.203201 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="424d753b-1b2b-400a-a5b6-d34663174545" containerName="extract-content" Nov 23 16:43:04 crc kubenswrapper[5050]: E1123 16:43:04.203337 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="39e4cb1a-f19a-4fd6-aa7d-aae2843166f2" containerName="bootstrap-openstack-openstack-cell1" Nov 23 16:43:04 crc kubenswrapper[5050]: I1123 16:43:04.203432 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="39e4cb1a-f19a-4fd6-aa7d-aae2843166f2" containerName="bootstrap-openstack-openstack-cell1" Nov 23 16:43:04 crc kubenswrapper[5050]: E1123 16:43:04.203587 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="424d753b-1b2b-400a-a5b6-d34663174545" containerName="extract-utilities" Nov 23 16:43:04 crc kubenswrapper[5050]: I1123 16:43:04.203661 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="424d753b-1b2b-400a-a5b6-d34663174545" containerName="extract-utilities" Nov 23 16:43:04 crc kubenswrapper[5050]: E1123 16:43:04.203768 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="424d753b-1b2b-400a-a5b6-d34663174545" containerName="registry-server" Nov 23 16:43:04 crc kubenswrapper[5050]: I1123 16:43:04.203840 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="424d753b-1b2b-400a-a5b6-d34663174545" containerName="registry-server" Nov 23 16:43:04 crc kubenswrapper[5050]: I1123 16:43:04.204179 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="39e4cb1a-f19a-4fd6-aa7d-aae2843166f2" containerName="bootstrap-openstack-openstack-cell1" Nov 23 16:43:04 crc kubenswrapper[5050]: I1123 16:43:04.204273 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="424d753b-1b2b-400a-a5b6-d34663174545" containerName="registry-server" Nov 23 16:43:04 crc kubenswrapper[5050]: I1123 16:43:04.207171 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/download-cache-openstack-openstack-cell1-9fl52" Nov 23 16:43:04 crc kubenswrapper[5050]: I1123 16:43:04.209519 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 23 16:43:04 crc kubenswrapper[5050]: I1123 16:43:04.210538 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-ckrpf" Nov 23 16:43:04 crc kubenswrapper[5050]: I1123 16:43:04.210661 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Nov 23 16:43:04 crc kubenswrapper[5050]: I1123 16:43:04.211238 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 23 16:43:04 crc kubenswrapper[5050]: I1123 16:43:04.216168 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-openstack-openstack-cell1-9fl52"] Nov 23 16:43:04 crc kubenswrapper[5050]: I1123 16:43:04.301720 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a47c80f1-37fb-413d-9c56-0c445f609688-inventory\") pod \"download-cache-openstack-openstack-cell1-9fl52\" (UID: \"a47c80f1-37fb-413d-9c56-0c445f609688\") " pod="openstack/download-cache-openstack-openstack-cell1-9fl52" Nov 23 16:43:04 crc kubenswrapper[5050]: I1123 16:43:04.302006 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/a47c80f1-37fb-413d-9c56-0c445f609688-ceph\") pod \"download-cache-openstack-openstack-cell1-9fl52\" (UID: \"a47c80f1-37fb-413d-9c56-0c445f609688\") " pod="openstack/download-cache-openstack-openstack-cell1-9fl52" Nov 23 16:43:04 crc kubenswrapper[5050]: I1123 16:43:04.302053 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bhlqc\" (UniqueName: \"kubernetes.io/projected/a47c80f1-37fb-413d-9c56-0c445f609688-kube-api-access-bhlqc\") pod \"download-cache-openstack-openstack-cell1-9fl52\" (UID: \"a47c80f1-37fb-413d-9c56-0c445f609688\") " pod="openstack/download-cache-openstack-openstack-cell1-9fl52" Nov 23 16:43:04 crc kubenswrapper[5050]: I1123 16:43:04.302220 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a47c80f1-37fb-413d-9c56-0c445f609688-ssh-key\") pod \"download-cache-openstack-openstack-cell1-9fl52\" (UID: \"a47c80f1-37fb-413d-9c56-0c445f609688\") " pod="openstack/download-cache-openstack-openstack-cell1-9fl52" Nov 23 16:43:04 crc kubenswrapper[5050]: I1123 16:43:04.404539 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a47c80f1-37fb-413d-9c56-0c445f609688-inventory\") pod \"download-cache-openstack-openstack-cell1-9fl52\" (UID: \"a47c80f1-37fb-413d-9c56-0c445f609688\") " pod="openstack/download-cache-openstack-openstack-cell1-9fl52" Nov 23 16:43:04 crc kubenswrapper[5050]: I1123 16:43:04.404755 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/a47c80f1-37fb-413d-9c56-0c445f609688-ceph\") pod \"download-cache-openstack-openstack-cell1-9fl52\" (UID: \"a47c80f1-37fb-413d-9c56-0c445f609688\") " pod="openstack/download-cache-openstack-openstack-cell1-9fl52" Nov 23 16:43:04 crc 
kubenswrapper[5050]: I1123 16:43:04.404791 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bhlqc\" (UniqueName: \"kubernetes.io/projected/a47c80f1-37fb-413d-9c56-0c445f609688-kube-api-access-bhlqc\") pod \"download-cache-openstack-openstack-cell1-9fl52\" (UID: \"a47c80f1-37fb-413d-9c56-0c445f609688\") " pod="openstack/download-cache-openstack-openstack-cell1-9fl52" Nov 23 16:43:04 crc kubenswrapper[5050]: I1123 16:43:04.404923 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a47c80f1-37fb-413d-9c56-0c445f609688-ssh-key\") pod \"download-cache-openstack-openstack-cell1-9fl52\" (UID: \"a47c80f1-37fb-413d-9c56-0c445f609688\") " pod="openstack/download-cache-openstack-openstack-cell1-9fl52" Nov 23 16:43:04 crc kubenswrapper[5050]: I1123 16:43:04.410068 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a47c80f1-37fb-413d-9c56-0c445f609688-inventory\") pod \"download-cache-openstack-openstack-cell1-9fl52\" (UID: \"a47c80f1-37fb-413d-9c56-0c445f609688\") " pod="openstack/download-cache-openstack-openstack-cell1-9fl52" Nov 23 16:43:04 crc kubenswrapper[5050]: I1123 16:43:04.410539 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/a47c80f1-37fb-413d-9c56-0c445f609688-ceph\") pod \"download-cache-openstack-openstack-cell1-9fl52\" (UID: \"a47c80f1-37fb-413d-9c56-0c445f609688\") " pod="openstack/download-cache-openstack-openstack-cell1-9fl52" Nov 23 16:43:04 crc kubenswrapper[5050]: I1123 16:43:04.412123 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a47c80f1-37fb-413d-9c56-0c445f609688-ssh-key\") pod \"download-cache-openstack-openstack-cell1-9fl52\" (UID: \"a47c80f1-37fb-413d-9c56-0c445f609688\") " pod="openstack/download-cache-openstack-openstack-cell1-9fl52" Nov 23 16:43:04 crc kubenswrapper[5050]: I1123 16:43:04.432692 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bhlqc\" (UniqueName: \"kubernetes.io/projected/a47c80f1-37fb-413d-9c56-0c445f609688-kube-api-access-bhlqc\") pod \"download-cache-openstack-openstack-cell1-9fl52\" (UID: \"a47c80f1-37fb-413d-9c56-0c445f609688\") " pod="openstack/download-cache-openstack-openstack-cell1-9fl52" Nov 23 16:43:04 crc kubenswrapper[5050]: I1123 16:43:04.534506 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/download-cache-openstack-openstack-cell1-9fl52" Nov 23 16:43:05 crc kubenswrapper[5050]: I1123 16:43:05.281254 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-openstack-openstack-cell1-9fl52"] Nov 23 16:43:05 crc kubenswrapper[5050]: W1123 16:43:05.289977 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda47c80f1_37fb_413d_9c56_0c445f609688.slice/crio-e2a611b0a786a5c08a222957ca96f9602bcc0804365331d3cc2e90dd3405e124 WatchSource:0}: Error finding container e2a611b0a786a5c08a222957ca96f9602bcc0804365331d3cc2e90dd3405e124: Status 404 returned error can't find the container with id e2a611b0a786a5c08a222957ca96f9602bcc0804365331d3cc2e90dd3405e124 Nov 23 16:43:05 crc kubenswrapper[5050]: I1123 16:43:05.295587 5050 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 23 16:43:06 crc kubenswrapper[5050]: I1123 16:43:06.121892 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-openstack-openstack-cell1-9fl52" event={"ID":"a47c80f1-37fb-413d-9c56-0c445f609688","Type":"ContainerStarted","Data":"73ffd97f03b9b851b27364cf7f9289f9d9161e05fe94052207d52290a32ef0fa"} Nov 23 16:43:06 crc kubenswrapper[5050]: I1123 16:43:06.122640 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-openstack-openstack-cell1-9fl52" event={"ID":"a47c80f1-37fb-413d-9c56-0c445f609688","Type":"ContainerStarted","Data":"e2a611b0a786a5c08a222957ca96f9602bcc0804365331d3cc2e90dd3405e124"} Nov 23 16:43:06 crc kubenswrapper[5050]: I1123 16:43:06.150165 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/download-cache-openstack-openstack-cell1-9fl52" podStartSLOduration=1.646565089 podStartE2EDuration="2.150137094s" podCreationTimestamp="2025-11-23 16:43:04 +0000 UTC" firstStartedPulling="2025-11-23 16:43:05.295130418 +0000 UTC m=+7280.462126943" lastFinishedPulling="2025-11-23 16:43:05.798702453 +0000 UTC m=+7280.965698948" observedRunningTime="2025-11-23 16:43:06.139361069 +0000 UTC m=+7281.306357554" watchObservedRunningTime="2025-11-23 16:43:06.150137094 +0000 UTC m=+7281.317133579" Nov 23 16:44:41 crc kubenswrapper[5050]: I1123 16:44:41.385166 5050 generic.go:334] "Generic (PLEG): container finished" podID="a47c80f1-37fb-413d-9c56-0c445f609688" containerID="73ffd97f03b9b851b27364cf7f9289f9d9161e05fe94052207d52290a32ef0fa" exitCode=0 Nov 23 16:44:41 crc kubenswrapper[5050]: I1123 16:44:41.385266 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-openstack-openstack-cell1-9fl52" event={"ID":"a47c80f1-37fb-413d-9c56-0c445f609688","Type":"ContainerDied","Data":"73ffd97f03b9b851b27364cf7f9289f9d9161e05fe94052207d52290a32ef0fa"} Nov 23 16:44:43 crc kubenswrapper[5050]: I1123 16:44:43.020686 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/download-cache-openstack-openstack-cell1-9fl52" Nov 23 16:44:43 crc kubenswrapper[5050]: I1123 16:44:43.096406 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/a47c80f1-37fb-413d-9c56-0c445f609688-ceph\") pod \"a47c80f1-37fb-413d-9c56-0c445f609688\" (UID: \"a47c80f1-37fb-413d-9c56-0c445f609688\") " Nov 23 16:44:43 crc kubenswrapper[5050]: I1123 16:44:43.097158 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a47c80f1-37fb-413d-9c56-0c445f609688-inventory\") pod \"a47c80f1-37fb-413d-9c56-0c445f609688\" (UID: \"a47c80f1-37fb-413d-9c56-0c445f609688\") " Nov 23 16:44:43 crc kubenswrapper[5050]: I1123 16:44:43.097404 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bhlqc\" (UniqueName: \"kubernetes.io/projected/a47c80f1-37fb-413d-9c56-0c445f609688-kube-api-access-bhlqc\") pod \"a47c80f1-37fb-413d-9c56-0c445f609688\" (UID: \"a47c80f1-37fb-413d-9c56-0c445f609688\") " Nov 23 16:44:43 crc kubenswrapper[5050]: I1123 16:44:43.097770 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a47c80f1-37fb-413d-9c56-0c445f609688-ssh-key\") pod \"a47c80f1-37fb-413d-9c56-0c445f609688\" (UID: \"a47c80f1-37fb-413d-9c56-0c445f609688\") " Nov 23 16:44:43 crc kubenswrapper[5050]: I1123 16:44:43.106159 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a47c80f1-37fb-413d-9c56-0c445f609688-ceph" (OuterVolumeSpecName: "ceph") pod "a47c80f1-37fb-413d-9c56-0c445f609688" (UID: "a47c80f1-37fb-413d-9c56-0c445f609688"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:44:43 crc kubenswrapper[5050]: I1123 16:44:43.109773 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a47c80f1-37fb-413d-9c56-0c445f609688-kube-api-access-bhlqc" (OuterVolumeSpecName: "kube-api-access-bhlqc") pod "a47c80f1-37fb-413d-9c56-0c445f609688" (UID: "a47c80f1-37fb-413d-9c56-0c445f609688"). InnerVolumeSpecName "kube-api-access-bhlqc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 16:44:43 crc kubenswrapper[5050]: I1123 16:44:43.133420 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a47c80f1-37fb-413d-9c56-0c445f609688-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "a47c80f1-37fb-413d-9c56-0c445f609688" (UID: "a47c80f1-37fb-413d-9c56-0c445f609688"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:44:43 crc kubenswrapper[5050]: I1123 16:44:43.135190 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a47c80f1-37fb-413d-9c56-0c445f609688-inventory" (OuterVolumeSpecName: "inventory") pod "a47c80f1-37fb-413d-9c56-0c445f609688" (UID: "a47c80f1-37fb-413d-9c56-0c445f609688"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:44:43 crc kubenswrapper[5050]: I1123 16:44:43.201375 5050 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a47c80f1-37fb-413d-9c56-0c445f609688-inventory\") on node \"crc\" DevicePath \"\"" Nov 23 16:44:43 crc kubenswrapper[5050]: I1123 16:44:43.201416 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bhlqc\" (UniqueName: \"kubernetes.io/projected/a47c80f1-37fb-413d-9c56-0c445f609688-kube-api-access-bhlqc\") on node \"crc\" DevicePath \"\"" Nov 23 16:44:43 crc kubenswrapper[5050]: I1123 16:44:43.201428 5050 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a47c80f1-37fb-413d-9c56-0c445f609688-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 23 16:44:43 crc kubenswrapper[5050]: I1123 16:44:43.201467 5050 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/a47c80f1-37fb-413d-9c56-0c445f609688-ceph\") on node \"crc\" DevicePath \"\"" Nov 23 16:44:43 crc kubenswrapper[5050]: I1123 16:44:43.416381 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-openstack-openstack-cell1-9fl52" event={"ID":"a47c80f1-37fb-413d-9c56-0c445f609688","Type":"ContainerDied","Data":"e2a611b0a786a5c08a222957ca96f9602bcc0804365331d3cc2e90dd3405e124"} Nov 23 16:44:43 crc kubenswrapper[5050]: I1123 16:44:43.416477 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e2a611b0a786a5c08a222957ca96f9602bcc0804365331d3cc2e90dd3405e124" Nov 23 16:44:43 crc kubenswrapper[5050]: I1123 16:44:43.416669 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-openstack-openstack-cell1-9fl52" Nov 23 16:44:43 crc kubenswrapper[5050]: I1123 16:44:43.570623 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-network-openstack-openstack-cell1-qfcbd"] Nov 23 16:44:43 crc kubenswrapper[5050]: E1123 16:44:43.571089 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a47c80f1-37fb-413d-9c56-0c445f609688" containerName="download-cache-openstack-openstack-cell1" Nov 23 16:44:43 crc kubenswrapper[5050]: I1123 16:44:43.571113 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="a47c80f1-37fb-413d-9c56-0c445f609688" containerName="download-cache-openstack-openstack-cell1" Nov 23 16:44:43 crc kubenswrapper[5050]: I1123 16:44:43.571445 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="a47c80f1-37fb-413d-9c56-0c445f609688" containerName="download-cache-openstack-openstack-cell1" Nov 23 16:44:43 crc kubenswrapper[5050]: I1123 16:44:43.585969 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-openstack-openstack-cell1-qfcbd"] Nov 23 16:44:43 crc kubenswrapper[5050]: I1123 16:44:43.586087 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-network-openstack-openstack-cell1-qfcbd" Nov 23 16:44:43 crc kubenswrapper[5050]: I1123 16:44:43.590166 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 23 16:44:43 crc kubenswrapper[5050]: I1123 16:44:43.590227 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-ckrpf" Nov 23 16:44:43 crc kubenswrapper[5050]: I1123 16:44:43.590250 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Nov 23 16:44:43 crc kubenswrapper[5050]: I1123 16:44:43.598291 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 23 16:44:43 crc kubenswrapper[5050]: I1123 16:44:43.615500 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lpp22\" (UniqueName: \"kubernetes.io/projected/10783393-ff0d-420a-9c89-913da2a4d3e0-kube-api-access-lpp22\") pod \"configure-network-openstack-openstack-cell1-qfcbd\" (UID: \"10783393-ff0d-420a-9c89-913da2a4d3e0\") " pod="openstack/configure-network-openstack-openstack-cell1-qfcbd" Nov 23 16:44:43 crc kubenswrapper[5050]: I1123 16:44:43.615648 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/10783393-ff0d-420a-9c89-913da2a4d3e0-inventory\") pod \"configure-network-openstack-openstack-cell1-qfcbd\" (UID: \"10783393-ff0d-420a-9c89-913da2a4d3e0\") " pod="openstack/configure-network-openstack-openstack-cell1-qfcbd" Nov 23 16:44:43 crc kubenswrapper[5050]: I1123 16:44:43.615683 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/10783393-ff0d-420a-9c89-913da2a4d3e0-ssh-key\") pod \"configure-network-openstack-openstack-cell1-qfcbd\" (UID: \"10783393-ff0d-420a-9c89-913da2a4d3e0\") " pod="openstack/configure-network-openstack-openstack-cell1-qfcbd" Nov 23 16:44:43 crc kubenswrapper[5050]: I1123 16:44:43.615757 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/10783393-ff0d-420a-9c89-913da2a4d3e0-ceph\") pod \"configure-network-openstack-openstack-cell1-qfcbd\" (UID: \"10783393-ff0d-420a-9c89-913da2a4d3e0\") " pod="openstack/configure-network-openstack-openstack-cell1-qfcbd" Nov 23 16:44:43 crc kubenswrapper[5050]: I1123 16:44:43.717799 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/10783393-ff0d-420a-9c89-913da2a4d3e0-inventory\") pod \"configure-network-openstack-openstack-cell1-qfcbd\" (UID: \"10783393-ff0d-420a-9c89-913da2a4d3e0\") " pod="openstack/configure-network-openstack-openstack-cell1-qfcbd" Nov 23 16:44:43 crc kubenswrapper[5050]: I1123 16:44:43.717851 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/10783393-ff0d-420a-9c89-913da2a4d3e0-ssh-key\") pod \"configure-network-openstack-openstack-cell1-qfcbd\" (UID: \"10783393-ff0d-420a-9c89-913da2a4d3e0\") " pod="openstack/configure-network-openstack-openstack-cell1-qfcbd" Nov 23 16:44:43 crc kubenswrapper[5050]: I1123 16:44:43.717924 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: 
\"kubernetes.io/secret/10783393-ff0d-420a-9c89-913da2a4d3e0-ceph\") pod \"configure-network-openstack-openstack-cell1-qfcbd\" (UID: \"10783393-ff0d-420a-9c89-913da2a4d3e0\") " pod="openstack/configure-network-openstack-openstack-cell1-qfcbd" Nov 23 16:44:43 crc kubenswrapper[5050]: I1123 16:44:43.718004 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lpp22\" (UniqueName: \"kubernetes.io/projected/10783393-ff0d-420a-9c89-913da2a4d3e0-kube-api-access-lpp22\") pod \"configure-network-openstack-openstack-cell1-qfcbd\" (UID: \"10783393-ff0d-420a-9c89-913da2a4d3e0\") " pod="openstack/configure-network-openstack-openstack-cell1-qfcbd" Nov 23 16:44:43 crc kubenswrapper[5050]: I1123 16:44:43.722491 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/10783393-ff0d-420a-9c89-913da2a4d3e0-ceph\") pod \"configure-network-openstack-openstack-cell1-qfcbd\" (UID: \"10783393-ff0d-420a-9c89-913da2a4d3e0\") " pod="openstack/configure-network-openstack-openstack-cell1-qfcbd" Nov 23 16:44:43 crc kubenswrapper[5050]: I1123 16:44:43.722642 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/10783393-ff0d-420a-9c89-913da2a4d3e0-ssh-key\") pod \"configure-network-openstack-openstack-cell1-qfcbd\" (UID: \"10783393-ff0d-420a-9c89-913da2a4d3e0\") " pod="openstack/configure-network-openstack-openstack-cell1-qfcbd" Nov 23 16:44:43 crc kubenswrapper[5050]: I1123 16:44:43.736315 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/10783393-ff0d-420a-9c89-913da2a4d3e0-inventory\") pod \"configure-network-openstack-openstack-cell1-qfcbd\" (UID: \"10783393-ff0d-420a-9c89-913da2a4d3e0\") " pod="openstack/configure-network-openstack-openstack-cell1-qfcbd" Nov 23 16:44:43 crc kubenswrapper[5050]: I1123 16:44:43.738515 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lpp22\" (UniqueName: \"kubernetes.io/projected/10783393-ff0d-420a-9c89-913da2a4d3e0-kube-api-access-lpp22\") pod \"configure-network-openstack-openstack-cell1-qfcbd\" (UID: \"10783393-ff0d-420a-9c89-913da2a4d3e0\") " pod="openstack/configure-network-openstack-openstack-cell1-qfcbd" Nov 23 16:44:43 crc kubenswrapper[5050]: I1123 16:44:43.912331 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-network-openstack-openstack-cell1-qfcbd" Nov 23 16:44:44 crc kubenswrapper[5050]: I1123 16:44:44.552269 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-openstack-openstack-cell1-qfcbd"] Nov 23 16:44:45 crc kubenswrapper[5050]: I1123 16:44:45.445206 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-openstack-openstack-cell1-qfcbd" event={"ID":"10783393-ff0d-420a-9c89-913da2a4d3e0","Type":"ContainerStarted","Data":"f0130e0e4ee37061d28b9dd268845899571e9fd1310e82595200300593dffeb6"} Nov 23 16:44:46 crc kubenswrapper[5050]: I1123 16:44:46.458225 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-openstack-openstack-cell1-qfcbd" event={"ID":"10783393-ff0d-420a-9c89-913da2a4d3e0","Type":"ContainerStarted","Data":"8a73df0935f26e045934da21bb71953dc0973f60300484589d21c24dc98012f1"} Nov 23 16:44:46 crc kubenswrapper[5050]: I1123 16:44:46.492698 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-network-openstack-openstack-cell1-qfcbd" podStartSLOduration=2.596604493 podStartE2EDuration="3.49267408s" podCreationTimestamp="2025-11-23 16:44:43 +0000 UTC" firstStartedPulling="2025-11-23 16:44:44.558605101 +0000 UTC m=+7379.725601596" lastFinishedPulling="2025-11-23 16:44:45.454674698 +0000 UTC m=+7380.621671183" observedRunningTime="2025-11-23 16:44:46.478704595 +0000 UTC m=+7381.645701080" watchObservedRunningTime="2025-11-23 16:44:46.49267408 +0000 UTC m=+7381.659670555" Nov 23 16:44:59 crc kubenswrapper[5050]: I1123 16:44:59.224280 5050 patch_prober.go:28] interesting pod/machine-config-daemon-hlrlq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 23 16:44:59 crc kubenswrapper[5050]: I1123 16:44:59.225126 5050 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 23 16:45:00 crc kubenswrapper[5050]: I1123 16:45:00.171210 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29398605-zv8dr"] Nov 23 16:45:00 crc kubenswrapper[5050]: I1123 16:45:00.173694 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29398605-zv8dr" Nov 23 16:45:00 crc kubenswrapper[5050]: I1123 16:45:00.177586 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 23 16:45:00 crc kubenswrapper[5050]: I1123 16:45:00.177840 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 23 16:45:00 crc kubenswrapper[5050]: I1123 16:45:00.187909 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29398605-zv8dr"] Nov 23 16:45:00 crc kubenswrapper[5050]: I1123 16:45:00.319239 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-292r5\" (UniqueName: \"kubernetes.io/projected/af3c3024-0b36-4ac3-bbcc-82ecd80d7dfc-kube-api-access-292r5\") pod \"collect-profiles-29398605-zv8dr\" (UID: \"af3c3024-0b36-4ac3-bbcc-82ecd80d7dfc\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29398605-zv8dr" Nov 23 16:45:00 crc kubenswrapper[5050]: I1123 16:45:00.319572 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/af3c3024-0b36-4ac3-bbcc-82ecd80d7dfc-secret-volume\") pod \"collect-profiles-29398605-zv8dr\" (UID: \"af3c3024-0b36-4ac3-bbcc-82ecd80d7dfc\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29398605-zv8dr" Nov 23 16:45:00 crc kubenswrapper[5050]: I1123 16:45:00.319925 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/af3c3024-0b36-4ac3-bbcc-82ecd80d7dfc-config-volume\") pod \"collect-profiles-29398605-zv8dr\" (UID: \"af3c3024-0b36-4ac3-bbcc-82ecd80d7dfc\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29398605-zv8dr" Nov 23 16:45:00 crc kubenswrapper[5050]: I1123 16:45:00.422676 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/af3c3024-0b36-4ac3-bbcc-82ecd80d7dfc-secret-volume\") pod \"collect-profiles-29398605-zv8dr\" (UID: \"af3c3024-0b36-4ac3-bbcc-82ecd80d7dfc\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29398605-zv8dr" Nov 23 16:45:00 crc kubenswrapper[5050]: I1123 16:45:00.422765 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/af3c3024-0b36-4ac3-bbcc-82ecd80d7dfc-config-volume\") pod \"collect-profiles-29398605-zv8dr\" (UID: \"af3c3024-0b36-4ac3-bbcc-82ecd80d7dfc\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29398605-zv8dr" Nov 23 16:45:00 crc kubenswrapper[5050]: I1123 16:45:00.422821 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-292r5\" (UniqueName: \"kubernetes.io/projected/af3c3024-0b36-4ac3-bbcc-82ecd80d7dfc-kube-api-access-292r5\") pod \"collect-profiles-29398605-zv8dr\" (UID: \"af3c3024-0b36-4ac3-bbcc-82ecd80d7dfc\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29398605-zv8dr" Nov 23 16:45:00 crc kubenswrapper[5050]: I1123 16:45:00.423935 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/af3c3024-0b36-4ac3-bbcc-82ecd80d7dfc-config-volume\") pod 
\"collect-profiles-29398605-zv8dr\" (UID: \"af3c3024-0b36-4ac3-bbcc-82ecd80d7dfc\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29398605-zv8dr" Nov 23 16:45:00 crc kubenswrapper[5050]: I1123 16:45:00.430367 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/af3c3024-0b36-4ac3-bbcc-82ecd80d7dfc-secret-volume\") pod \"collect-profiles-29398605-zv8dr\" (UID: \"af3c3024-0b36-4ac3-bbcc-82ecd80d7dfc\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29398605-zv8dr" Nov 23 16:45:00 crc kubenswrapper[5050]: I1123 16:45:00.443022 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-292r5\" (UniqueName: \"kubernetes.io/projected/af3c3024-0b36-4ac3-bbcc-82ecd80d7dfc-kube-api-access-292r5\") pod \"collect-profiles-29398605-zv8dr\" (UID: \"af3c3024-0b36-4ac3-bbcc-82ecd80d7dfc\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29398605-zv8dr" Nov 23 16:45:00 crc kubenswrapper[5050]: I1123 16:45:00.503333 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29398605-zv8dr" Nov 23 16:45:01 crc kubenswrapper[5050]: I1123 16:45:01.048223 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29398605-zv8dr"] Nov 23 16:45:01 crc kubenswrapper[5050]: W1123 16:45:01.055869 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podaf3c3024_0b36_4ac3_bbcc_82ecd80d7dfc.slice/crio-00cd8eeac0641a6d7deadc56efba9b162b6adbc339847320a800eec5cdbcc0e8 WatchSource:0}: Error finding container 00cd8eeac0641a6d7deadc56efba9b162b6adbc339847320a800eec5cdbcc0e8: Status 404 returned error can't find the container with id 00cd8eeac0641a6d7deadc56efba9b162b6adbc339847320a800eec5cdbcc0e8 Nov 23 16:45:01 crc kubenswrapper[5050]: I1123 16:45:01.644100 5050 generic.go:334] "Generic (PLEG): container finished" podID="af3c3024-0b36-4ac3-bbcc-82ecd80d7dfc" containerID="80aaff31eae62146d857f1d4a917bbdd70484c35d40e8c821c0e0cc090a79a8b" exitCode=0 Nov 23 16:45:01 crc kubenswrapper[5050]: I1123 16:45:01.644244 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29398605-zv8dr" event={"ID":"af3c3024-0b36-4ac3-bbcc-82ecd80d7dfc","Type":"ContainerDied","Data":"80aaff31eae62146d857f1d4a917bbdd70484c35d40e8c821c0e0cc090a79a8b"} Nov 23 16:45:01 crc kubenswrapper[5050]: I1123 16:45:01.644667 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29398605-zv8dr" event={"ID":"af3c3024-0b36-4ac3-bbcc-82ecd80d7dfc","Type":"ContainerStarted","Data":"00cd8eeac0641a6d7deadc56efba9b162b6adbc339847320a800eec5cdbcc0e8"} Nov 23 16:45:03 crc kubenswrapper[5050]: I1123 16:45:03.176696 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29398605-zv8dr" Nov 23 16:45:03 crc kubenswrapper[5050]: I1123 16:45:03.305102 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/af3c3024-0b36-4ac3-bbcc-82ecd80d7dfc-secret-volume\") pod \"af3c3024-0b36-4ac3-bbcc-82ecd80d7dfc\" (UID: \"af3c3024-0b36-4ac3-bbcc-82ecd80d7dfc\") " Nov 23 16:45:03 crc kubenswrapper[5050]: I1123 16:45:03.305250 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-292r5\" (UniqueName: \"kubernetes.io/projected/af3c3024-0b36-4ac3-bbcc-82ecd80d7dfc-kube-api-access-292r5\") pod \"af3c3024-0b36-4ac3-bbcc-82ecd80d7dfc\" (UID: \"af3c3024-0b36-4ac3-bbcc-82ecd80d7dfc\") " Nov 23 16:45:03 crc kubenswrapper[5050]: I1123 16:45:03.305506 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/af3c3024-0b36-4ac3-bbcc-82ecd80d7dfc-config-volume\") pod \"af3c3024-0b36-4ac3-bbcc-82ecd80d7dfc\" (UID: \"af3c3024-0b36-4ac3-bbcc-82ecd80d7dfc\") " Nov 23 16:45:03 crc kubenswrapper[5050]: I1123 16:45:03.306798 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/af3c3024-0b36-4ac3-bbcc-82ecd80d7dfc-config-volume" (OuterVolumeSpecName: "config-volume") pod "af3c3024-0b36-4ac3-bbcc-82ecd80d7dfc" (UID: "af3c3024-0b36-4ac3-bbcc-82ecd80d7dfc"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 16:45:03 crc kubenswrapper[5050]: I1123 16:45:03.314510 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/af3c3024-0b36-4ac3-bbcc-82ecd80d7dfc-kube-api-access-292r5" (OuterVolumeSpecName: "kube-api-access-292r5") pod "af3c3024-0b36-4ac3-bbcc-82ecd80d7dfc" (UID: "af3c3024-0b36-4ac3-bbcc-82ecd80d7dfc"). InnerVolumeSpecName "kube-api-access-292r5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 16:45:03 crc kubenswrapper[5050]: I1123 16:45:03.315132 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/af3c3024-0b36-4ac3-bbcc-82ecd80d7dfc-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "af3c3024-0b36-4ac3-bbcc-82ecd80d7dfc" (UID: "af3c3024-0b36-4ac3-bbcc-82ecd80d7dfc"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:45:03 crc kubenswrapper[5050]: I1123 16:45:03.408762 5050 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/af3c3024-0b36-4ac3-bbcc-82ecd80d7dfc-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 23 16:45:03 crc kubenswrapper[5050]: I1123 16:45:03.408814 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-292r5\" (UniqueName: \"kubernetes.io/projected/af3c3024-0b36-4ac3-bbcc-82ecd80d7dfc-kube-api-access-292r5\") on node \"crc\" DevicePath \"\"" Nov 23 16:45:03 crc kubenswrapper[5050]: I1123 16:45:03.408828 5050 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/af3c3024-0b36-4ac3-bbcc-82ecd80d7dfc-config-volume\") on node \"crc\" DevicePath \"\"" Nov 23 16:45:03 crc kubenswrapper[5050]: I1123 16:45:03.670348 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29398605-zv8dr" event={"ID":"af3c3024-0b36-4ac3-bbcc-82ecd80d7dfc","Type":"ContainerDied","Data":"00cd8eeac0641a6d7deadc56efba9b162b6adbc339847320a800eec5cdbcc0e8"} Nov 23 16:45:03 crc kubenswrapper[5050]: I1123 16:45:03.670427 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="00cd8eeac0641a6d7deadc56efba9b162b6adbc339847320a800eec5cdbcc0e8" Nov 23 16:45:03 crc kubenswrapper[5050]: I1123 16:45:03.670441 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29398605-zv8dr" Nov 23 16:45:04 crc kubenswrapper[5050]: I1123 16:45:04.306134 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29398560-6q4s6"] Nov 23 16:45:04 crc kubenswrapper[5050]: I1123 16:45:04.318611 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29398560-6q4s6"] Nov 23 16:45:05 crc kubenswrapper[5050]: I1123 16:45:05.563843 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6a742030-c300-4bc7-b673-8c6411f3c837" path="/var/lib/kubelet/pods/6a742030-c300-4bc7-b673-8c6411f3c837/volumes" Nov 23 16:45:20 crc kubenswrapper[5050]: I1123 16:45:20.361837 5050 scope.go:117] "RemoveContainer" containerID="73357890db28dd98f4f807fe52d57d93b84a87a0529d25b2261e7e7b6c764d36" Nov 23 16:45:29 crc kubenswrapper[5050]: I1123 16:45:29.224572 5050 patch_prober.go:28] interesting pod/machine-config-daemon-hlrlq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 23 16:45:29 crc kubenswrapper[5050]: I1123 16:45:29.225599 5050 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 23 16:45:59 crc kubenswrapper[5050]: I1123 16:45:59.225060 5050 patch_prober.go:28] interesting pod/machine-config-daemon-hlrlq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" 
start-of-body=
Nov 23 16:45:59 crc kubenswrapper[5050]: I1123 16:45:59.225894 5050 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 23 16:45:59 crc kubenswrapper[5050]: I1123 16:45:59.225968 5050 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq"
Nov 23 16:45:59 crc kubenswrapper[5050]: I1123 16:45:59.227593 5050 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"1bfefb3171b8cf9d17cc642f8ef9433deda5bd5290186a212a4ff2a6117146a9"} pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 23 16:45:59 crc kubenswrapper[5050]: I1123 16:45:59.227702 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" containerID="cri-o://1bfefb3171b8cf9d17cc642f8ef9433deda5bd5290186a212a4ff2a6117146a9" gracePeriod=600
Nov 23 16:45:59 crc kubenswrapper[5050]: I1123 16:45:59.646968 5050 generic.go:334] "Generic (PLEG): container finished" podID="1d998909-9470-47ef-87e8-b34f0473682f" containerID="1bfefb3171b8cf9d17cc642f8ef9433deda5bd5290186a212a4ff2a6117146a9" exitCode=0
Nov 23 16:45:59 crc kubenswrapper[5050]: I1123 16:45:59.647097 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" event={"ID":"1d998909-9470-47ef-87e8-b34f0473682f","Type":"ContainerDied","Data":"1bfefb3171b8cf9d17cc642f8ef9433deda5bd5290186a212a4ff2a6117146a9"}
Nov 23 16:45:59 crc kubenswrapper[5050]: I1123 16:45:59.647756 5050 scope.go:117] "RemoveContainer" containerID="88b96523e5d1f1e894f0c03539e34cde758cdfd39f9607c3373d355cc9a96f2f"
Nov 23 16:46:00 crc kubenswrapper[5050]: I1123 16:46:00.661314 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" event={"ID":"1d998909-9470-47ef-87e8-b34f0473682f","Type":"ContainerStarted","Data":"d40b7f46f9e2ee457869ccc402db524dd6b3b4db87a0fafb629086ba5d450e76"}
Nov 23 16:46:07 crc kubenswrapper[5050]: I1123 16:46:07.781874 5050 generic.go:334] "Generic (PLEG): container finished" podID="10783393-ff0d-420a-9c89-913da2a4d3e0" containerID="8a73df0935f26e045934da21bb71953dc0973f60300484589d21c24dc98012f1" exitCode=0
Nov 23 16:46:07 crc kubenswrapper[5050]: I1123 16:46:07.781944 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-openstack-openstack-cell1-qfcbd" event={"ID":"10783393-ff0d-420a-9c89-913da2a4d3e0","Type":"ContainerDied","Data":"8a73df0935f26e045934da21bb71953dc0973f60300484589d21c24dc98012f1"}
Nov 23 16:46:09 crc kubenswrapper[5050]: I1123 16:46:09.396077 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-openstack-openstack-cell1-qfcbd"
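
[annotation] This is the kubelet's liveness-restart path end to end: failed HTTP probes against 127.0.0.1:8798/health at 30-second spacing (16:44:59, 16:45:29, 16:45:59), a SyncLoop (probe) "unhealthy" event, a kill honoring the pod's 600-second grace period, and a restart, with the previous dead container (88b96523...) garbage-collected via RemoveContainer. A corev1.Probe sketch consistent with the observed behavior; the period and threshold are inferred from the failure spacing, not read from the pod spec:

    // Liveness probe consistent with the failures logged above: HTTP GET
    // http://127.0.0.1:8798/health every 30s, restart after 3 failures.
    package main

    import (
        corev1 "k8s.io/api/core/v1"
        "k8s.io/apimachinery/pkg/util/intstr"
    )

    func livenessProbe() *corev1.Probe {
        return &corev1.Probe{
            ProbeHandler: corev1.ProbeHandler{
                HTTPGet: &corev1.HTTPGetAction{
                    Host: "127.0.0.1", // the daemon runs with host networking
                    Path: "/health",
                    Port: intstr.FromInt(8798),
                },
            },
            PeriodSeconds:    30, // inferred from the 16:44:59 / 16:45:29 / 16:45:59 spacing
            FailureThreshold: 3,  // inferred: the kill follows the third logged failure
        }
    }

    func main() { _ = livenessProbe() }
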
Need to start a new one" pod="openstack/configure-network-openstack-openstack-cell1-qfcbd" Nov 23 16:46:09 crc kubenswrapper[5050]: I1123 16:46:09.544731 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lpp22\" (UniqueName: \"kubernetes.io/projected/10783393-ff0d-420a-9c89-913da2a4d3e0-kube-api-access-lpp22\") pod \"10783393-ff0d-420a-9c89-913da2a4d3e0\" (UID: \"10783393-ff0d-420a-9c89-913da2a4d3e0\") " Nov 23 16:46:09 crc kubenswrapper[5050]: I1123 16:46:09.544876 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/10783393-ff0d-420a-9c89-913da2a4d3e0-inventory\") pod \"10783393-ff0d-420a-9c89-913da2a4d3e0\" (UID: \"10783393-ff0d-420a-9c89-913da2a4d3e0\") " Nov 23 16:46:09 crc kubenswrapper[5050]: I1123 16:46:09.545123 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/10783393-ff0d-420a-9c89-913da2a4d3e0-ceph\") pod \"10783393-ff0d-420a-9c89-913da2a4d3e0\" (UID: \"10783393-ff0d-420a-9c89-913da2a4d3e0\") " Nov 23 16:46:09 crc kubenswrapper[5050]: I1123 16:46:09.545181 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/10783393-ff0d-420a-9c89-913da2a4d3e0-ssh-key\") pod \"10783393-ff0d-420a-9c89-913da2a4d3e0\" (UID: \"10783393-ff0d-420a-9c89-913da2a4d3e0\") " Nov 23 16:46:09 crc kubenswrapper[5050]: I1123 16:46:09.553810 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/10783393-ff0d-420a-9c89-913da2a4d3e0-kube-api-access-lpp22" (OuterVolumeSpecName: "kube-api-access-lpp22") pod "10783393-ff0d-420a-9c89-913da2a4d3e0" (UID: "10783393-ff0d-420a-9c89-913da2a4d3e0"). InnerVolumeSpecName "kube-api-access-lpp22". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 16:46:09 crc kubenswrapper[5050]: I1123 16:46:09.554597 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/10783393-ff0d-420a-9c89-913da2a4d3e0-ceph" (OuterVolumeSpecName: "ceph") pod "10783393-ff0d-420a-9c89-913da2a4d3e0" (UID: "10783393-ff0d-420a-9c89-913da2a4d3e0"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:46:09 crc kubenswrapper[5050]: I1123 16:46:09.600759 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/10783393-ff0d-420a-9c89-913da2a4d3e0-inventory" (OuterVolumeSpecName: "inventory") pod "10783393-ff0d-420a-9c89-913da2a4d3e0" (UID: "10783393-ff0d-420a-9c89-913da2a4d3e0"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:46:09 crc kubenswrapper[5050]: I1123 16:46:09.622263 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/10783393-ff0d-420a-9c89-913da2a4d3e0-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "10783393-ff0d-420a-9c89-913da2a4d3e0" (UID: "10783393-ff0d-420a-9c89-913da2a4d3e0"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:46:09 crc kubenswrapper[5050]: I1123 16:46:09.648609 5050 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/10783393-ff0d-420a-9c89-913da2a4d3e0-ceph\") on node \"crc\" DevicePath \"\"" Nov 23 16:46:09 crc kubenswrapper[5050]: I1123 16:46:09.648670 5050 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/10783393-ff0d-420a-9c89-913da2a4d3e0-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 23 16:46:09 crc kubenswrapper[5050]: I1123 16:46:09.648697 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lpp22\" (UniqueName: \"kubernetes.io/projected/10783393-ff0d-420a-9c89-913da2a4d3e0-kube-api-access-lpp22\") on node \"crc\" DevicePath \"\"" Nov 23 16:46:09 crc kubenswrapper[5050]: I1123 16:46:09.648717 5050 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/10783393-ff0d-420a-9c89-913da2a4d3e0-inventory\") on node \"crc\" DevicePath \"\"" Nov 23 16:46:09 crc kubenswrapper[5050]: I1123 16:46:09.815985 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-openstack-openstack-cell1-qfcbd" event={"ID":"10783393-ff0d-420a-9c89-913da2a4d3e0","Type":"ContainerDied","Data":"f0130e0e4ee37061d28b9dd268845899571e9fd1310e82595200300593dffeb6"} Nov 23 16:46:09 crc kubenswrapper[5050]: I1123 16:46:09.816070 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f0130e0e4ee37061d28b9dd268845899571e9fd1310e82595200300593dffeb6" Nov 23 16:46:09 crc kubenswrapper[5050]: I1123 16:46:09.816622 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-openstack-openstack-cell1-qfcbd" Nov 23 16:46:09 crc kubenswrapper[5050]: I1123 16:46:09.922257 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/validate-network-openstack-openstack-cell1-v8qw8"] Nov 23 16:46:09 crc kubenswrapper[5050]: E1123 16:46:09.923079 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="af3c3024-0b36-4ac3-bbcc-82ecd80d7dfc" containerName="collect-profiles" Nov 23 16:46:09 crc kubenswrapper[5050]: I1123 16:46:09.923105 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="af3c3024-0b36-4ac3-bbcc-82ecd80d7dfc" containerName="collect-profiles" Nov 23 16:46:09 crc kubenswrapper[5050]: E1123 16:46:09.923144 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="10783393-ff0d-420a-9c89-913da2a4d3e0" containerName="configure-network-openstack-openstack-cell1" Nov 23 16:46:09 crc kubenswrapper[5050]: I1123 16:46:09.923154 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="10783393-ff0d-420a-9c89-913da2a4d3e0" containerName="configure-network-openstack-openstack-cell1" Nov 23 16:46:09 crc kubenswrapper[5050]: I1123 16:46:09.923403 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="af3c3024-0b36-4ac3-bbcc-82ecd80d7dfc" containerName="collect-profiles" Nov 23 16:46:09 crc kubenswrapper[5050]: I1123 16:46:09.923417 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="10783393-ff0d-420a-9c89-913da2a4d3e0" containerName="configure-network-openstack-openstack-cell1" Nov 23 16:46:09 crc kubenswrapper[5050]: I1123 16:46:09.924683 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/validate-network-openstack-openstack-cell1-v8qw8" Nov 23 16:46:09 crc kubenswrapper[5050]: I1123 16:46:09.927179 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 23 16:46:09 crc kubenswrapper[5050]: I1123 16:46:09.927753 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 23 16:46:09 crc kubenswrapper[5050]: I1123 16:46:09.928617 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-ckrpf" Nov 23 16:46:09 crc kubenswrapper[5050]: I1123 16:46:09.930763 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Nov 23 16:46:09 crc kubenswrapper[5050]: I1123 16:46:09.937925 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-openstack-openstack-cell1-v8qw8"] Nov 23 16:46:09 crc kubenswrapper[5050]: I1123 16:46:09.960763 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/57262ba8-76c9-4627-a4f9-ba993e7e8ff8-inventory\") pod \"validate-network-openstack-openstack-cell1-v8qw8\" (UID: \"57262ba8-76c9-4627-a4f9-ba993e7e8ff8\") " pod="openstack/validate-network-openstack-openstack-cell1-v8qw8" Nov 23 16:46:09 crc kubenswrapper[5050]: I1123 16:46:09.960895 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/57262ba8-76c9-4627-a4f9-ba993e7e8ff8-ssh-key\") pod \"validate-network-openstack-openstack-cell1-v8qw8\" (UID: \"57262ba8-76c9-4627-a4f9-ba993e7e8ff8\") " pod="openstack/validate-network-openstack-openstack-cell1-v8qw8" Nov 23 16:46:09 crc kubenswrapper[5050]: I1123 16:46:09.961164 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/57262ba8-76c9-4627-a4f9-ba993e7e8ff8-ceph\") pod \"validate-network-openstack-openstack-cell1-v8qw8\" (UID: \"57262ba8-76c9-4627-a4f9-ba993e7e8ff8\") " pod="openstack/validate-network-openstack-openstack-cell1-v8qw8" Nov 23 16:46:09 crc kubenswrapper[5050]: I1123 16:46:09.961248 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6rbvj\" (UniqueName: \"kubernetes.io/projected/57262ba8-76c9-4627-a4f9-ba993e7e8ff8-kube-api-access-6rbvj\") pod \"validate-network-openstack-openstack-cell1-v8qw8\" (UID: \"57262ba8-76c9-4627-a4f9-ba993e7e8ff8\") " pod="openstack/validate-network-openstack-openstack-cell1-v8qw8" Nov 23 16:46:10 crc kubenswrapper[5050]: I1123 16:46:10.063946 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/57262ba8-76c9-4627-a4f9-ba993e7e8ff8-inventory\") pod \"validate-network-openstack-openstack-cell1-v8qw8\" (UID: \"57262ba8-76c9-4627-a4f9-ba993e7e8ff8\") " pod="openstack/validate-network-openstack-openstack-cell1-v8qw8" Nov 23 16:46:10 crc kubenswrapper[5050]: I1123 16:46:10.064010 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/57262ba8-76c9-4627-a4f9-ba993e7e8ff8-ssh-key\") pod \"validate-network-openstack-openstack-cell1-v8qw8\" (UID: \"57262ba8-76c9-4627-a4f9-ba993e7e8ff8\") " pod="openstack/validate-network-openstack-openstack-cell1-v8qw8" Nov 
23 16:46:10 crc kubenswrapper[5050]: I1123 16:46:10.064119 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/57262ba8-76c9-4627-a4f9-ba993e7e8ff8-ceph\") pod \"validate-network-openstack-openstack-cell1-v8qw8\" (UID: \"57262ba8-76c9-4627-a4f9-ba993e7e8ff8\") " pod="openstack/validate-network-openstack-openstack-cell1-v8qw8" Nov 23 16:46:10 crc kubenswrapper[5050]: I1123 16:46:10.064152 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6rbvj\" (UniqueName: \"kubernetes.io/projected/57262ba8-76c9-4627-a4f9-ba993e7e8ff8-kube-api-access-6rbvj\") pod \"validate-network-openstack-openstack-cell1-v8qw8\" (UID: \"57262ba8-76c9-4627-a4f9-ba993e7e8ff8\") " pod="openstack/validate-network-openstack-openstack-cell1-v8qw8" Nov 23 16:46:10 crc kubenswrapper[5050]: I1123 16:46:10.069695 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/57262ba8-76c9-4627-a4f9-ba993e7e8ff8-ssh-key\") pod \"validate-network-openstack-openstack-cell1-v8qw8\" (UID: \"57262ba8-76c9-4627-a4f9-ba993e7e8ff8\") " pod="openstack/validate-network-openstack-openstack-cell1-v8qw8" Nov 23 16:46:10 crc kubenswrapper[5050]: I1123 16:46:10.069778 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/57262ba8-76c9-4627-a4f9-ba993e7e8ff8-ceph\") pod \"validate-network-openstack-openstack-cell1-v8qw8\" (UID: \"57262ba8-76c9-4627-a4f9-ba993e7e8ff8\") " pod="openstack/validate-network-openstack-openstack-cell1-v8qw8" Nov 23 16:46:10 crc kubenswrapper[5050]: I1123 16:46:10.072194 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/57262ba8-76c9-4627-a4f9-ba993e7e8ff8-inventory\") pod \"validate-network-openstack-openstack-cell1-v8qw8\" (UID: \"57262ba8-76c9-4627-a4f9-ba993e7e8ff8\") " pod="openstack/validate-network-openstack-openstack-cell1-v8qw8" Nov 23 16:46:10 crc kubenswrapper[5050]: I1123 16:46:10.121836 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6rbvj\" (UniqueName: \"kubernetes.io/projected/57262ba8-76c9-4627-a4f9-ba993e7e8ff8-kube-api-access-6rbvj\") pod \"validate-network-openstack-openstack-cell1-v8qw8\" (UID: \"57262ba8-76c9-4627-a4f9-ba993e7e8ff8\") " pod="openstack/validate-network-openstack-openstack-cell1-v8qw8" Nov 23 16:46:10 crc kubenswrapper[5050]: I1123 16:46:10.262165 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/validate-network-openstack-openstack-cell1-v8qw8" Nov 23 16:46:10 crc kubenswrapper[5050]: I1123 16:46:10.846937 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-openstack-openstack-cell1-v8qw8"] Nov 23 16:46:11 crc kubenswrapper[5050]: I1123 16:46:11.840668 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-openstack-openstack-cell1-v8qw8" event={"ID":"57262ba8-76c9-4627-a4f9-ba993e7e8ff8","Type":"ContainerStarted","Data":"2c889d191d1c2e3c6307c656f4808e5400003e496444db905fc0d81e48e72797"} Nov 23 16:46:12 crc kubenswrapper[5050]: I1123 16:46:12.855362 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-openstack-openstack-cell1-v8qw8" event={"ID":"57262ba8-76c9-4627-a4f9-ba993e7e8ff8","Type":"ContainerStarted","Data":"d0cf6f386b4eff9b310584074e0b1c7afaa3388a22f8351033197c91ccfe59db"} Nov 23 16:46:12 crc kubenswrapper[5050]: I1123 16:46:12.883100 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/validate-network-openstack-openstack-cell1-v8qw8" podStartSLOduration=2.870851019 podStartE2EDuration="3.883079792s" podCreationTimestamp="2025-11-23 16:46:09 +0000 UTC" firstStartedPulling="2025-11-23 16:46:10.846788531 +0000 UTC m=+7466.013785006" lastFinishedPulling="2025-11-23 16:46:11.859017294 +0000 UTC m=+7467.026013779" observedRunningTime="2025-11-23 16:46:12.878045769 +0000 UTC m=+7468.045042254" watchObservedRunningTime="2025-11-23 16:46:12.883079792 +0000 UTC m=+7468.050076287" Nov 23 16:46:17 crc kubenswrapper[5050]: I1123 16:46:17.931977 5050 generic.go:334] "Generic (PLEG): container finished" podID="57262ba8-76c9-4627-a4f9-ba993e7e8ff8" containerID="d0cf6f386b4eff9b310584074e0b1c7afaa3388a22f8351033197c91ccfe59db" exitCode=0 Nov 23 16:46:17 crc kubenswrapper[5050]: I1123 16:46:17.932168 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-openstack-openstack-cell1-v8qw8" event={"ID":"57262ba8-76c9-4627-a4f9-ba993e7e8ff8","Type":"ContainerDied","Data":"d0cf6f386b4eff9b310584074e0b1c7afaa3388a22f8351033197c91ccfe59db"} Nov 23 16:46:19 crc kubenswrapper[5050]: I1123 16:46:19.435459 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/validate-network-openstack-openstack-cell1-v8qw8" Nov 23 16:46:19 crc kubenswrapper[5050]: I1123 16:46:19.556068 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/57262ba8-76c9-4627-a4f9-ba993e7e8ff8-inventory\") pod \"57262ba8-76c9-4627-a4f9-ba993e7e8ff8\" (UID: \"57262ba8-76c9-4627-a4f9-ba993e7e8ff8\") " Nov 23 16:46:19 crc kubenswrapper[5050]: I1123 16:46:19.556519 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6rbvj\" (UniqueName: \"kubernetes.io/projected/57262ba8-76c9-4627-a4f9-ba993e7e8ff8-kube-api-access-6rbvj\") pod \"57262ba8-76c9-4627-a4f9-ba993e7e8ff8\" (UID: \"57262ba8-76c9-4627-a4f9-ba993e7e8ff8\") " Nov 23 16:46:19 crc kubenswrapper[5050]: I1123 16:46:19.556581 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/57262ba8-76c9-4627-a4f9-ba993e7e8ff8-ceph\") pod \"57262ba8-76c9-4627-a4f9-ba993e7e8ff8\" (UID: \"57262ba8-76c9-4627-a4f9-ba993e7e8ff8\") " Nov 23 16:46:19 crc kubenswrapper[5050]: I1123 16:46:19.556635 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/57262ba8-76c9-4627-a4f9-ba993e7e8ff8-ssh-key\") pod \"57262ba8-76c9-4627-a4f9-ba993e7e8ff8\" (UID: \"57262ba8-76c9-4627-a4f9-ba993e7e8ff8\") " Nov 23 16:46:19 crc kubenswrapper[5050]: I1123 16:46:19.564971 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57262ba8-76c9-4627-a4f9-ba993e7e8ff8-kube-api-access-6rbvj" (OuterVolumeSpecName: "kube-api-access-6rbvj") pod "57262ba8-76c9-4627-a4f9-ba993e7e8ff8" (UID: "57262ba8-76c9-4627-a4f9-ba993e7e8ff8"). InnerVolumeSpecName "kube-api-access-6rbvj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 16:46:19 crc kubenswrapper[5050]: I1123 16:46:19.565021 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/57262ba8-76c9-4627-a4f9-ba993e7e8ff8-ceph" (OuterVolumeSpecName: "ceph") pod "57262ba8-76c9-4627-a4f9-ba993e7e8ff8" (UID: "57262ba8-76c9-4627-a4f9-ba993e7e8ff8"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:46:19 crc kubenswrapper[5050]: I1123 16:46:19.594244 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/57262ba8-76c9-4627-a4f9-ba993e7e8ff8-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "57262ba8-76c9-4627-a4f9-ba993e7e8ff8" (UID: "57262ba8-76c9-4627-a4f9-ba993e7e8ff8"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:46:19 crc kubenswrapper[5050]: I1123 16:46:19.625335 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/57262ba8-76c9-4627-a4f9-ba993e7e8ff8-inventory" (OuterVolumeSpecName: "inventory") pod "57262ba8-76c9-4627-a4f9-ba993e7e8ff8" (UID: "57262ba8-76c9-4627-a4f9-ba993e7e8ff8"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:46:19 crc kubenswrapper[5050]: I1123 16:46:19.663089 5050 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/57262ba8-76c9-4627-a4f9-ba993e7e8ff8-inventory\") on node \"crc\" DevicePath \"\"" Nov 23 16:46:19 crc kubenswrapper[5050]: I1123 16:46:19.663139 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6rbvj\" (UniqueName: \"kubernetes.io/projected/57262ba8-76c9-4627-a4f9-ba993e7e8ff8-kube-api-access-6rbvj\") on node \"crc\" DevicePath \"\"" Nov 23 16:46:19 crc kubenswrapper[5050]: I1123 16:46:19.663158 5050 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/57262ba8-76c9-4627-a4f9-ba993e7e8ff8-ceph\") on node \"crc\" DevicePath \"\"" Nov 23 16:46:19 crc kubenswrapper[5050]: I1123 16:46:19.663172 5050 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/57262ba8-76c9-4627-a4f9-ba993e7e8ff8-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 23 16:46:19 crc kubenswrapper[5050]: I1123 16:46:19.964584 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-openstack-openstack-cell1-v8qw8" event={"ID":"57262ba8-76c9-4627-a4f9-ba993e7e8ff8","Type":"ContainerDied","Data":"2c889d191d1c2e3c6307c656f4808e5400003e496444db905fc0d81e48e72797"} Nov 23 16:46:19 crc kubenswrapper[5050]: I1123 16:46:19.964647 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2c889d191d1c2e3c6307c656f4808e5400003e496444db905fc0d81e48e72797" Nov 23 16:46:19 crc kubenswrapper[5050]: I1123 16:46:19.964753 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-openstack-openstack-cell1-v8qw8" Nov 23 16:46:20 crc kubenswrapper[5050]: I1123 16:46:20.065977 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-os-openstack-openstack-cell1-tcjmm"] Nov 23 16:46:20 crc kubenswrapper[5050]: E1123 16:46:20.066892 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="57262ba8-76c9-4627-a4f9-ba993e7e8ff8" containerName="validate-network-openstack-openstack-cell1" Nov 23 16:46:20 crc kubenswrapper[5050]: I1123 16:46:20.066914 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="57262ba8-76c9-4627-a4f9-ba993e7e8ff8" containerName="validate-network-openstack-openstack-cell1" Nov 23 16:46:20 crc kubenswrapper[5050]: I1123 16:46:20.067233 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="57262ba8-76c9-4627-a4f9-ba993e7e8ff8" containerName="validate-network-openstack-openstack-cell1" Nov 23 16:46:20 crc kubenswrapper[5050]: I1123 16:46:20.068387 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-os-openstack-openstack-cell1-tcjmm" Nov 23 16:46:20 crc kubenswrapper[5050]: I1123 16:46:20.072639 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-ckrpf" Nov 23 16:46:20 crc kubenswrapper[5050]: I1123 16:46:20.072999 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 23 16:46:20 crc kubenswrapper[5050]: I1123 16:46:20.073057 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 23 16:46:20 crc kubenswrapper[5050]: I1123 16:46:20.073310 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Nov 23 16:46:20 crc kubenswrapper[5050]: I1123 16:46:20.075765 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-openstack-openstack-cell1-tcjmm"] Nov 23 16:46:20 crc kubenswrapper[5050]: I1123 16:46:20.178858 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-62hhf\" (UniqueName: \"kubernetes.io/projected/e6f245e8-504a-41a3-9d7d-2abe26a645ba-kube-api-access-62hhf\") pod \"install-os-openstack-openstack-cell1-tcjmm\" (UID: \"e6f245e8-504a-41a3-9d7d-2abe26a645ba\") " pod="openstack/install-os-openstack-openstack-cell1-tcjmm" Nov 23 16:46:20 crc kubenswrapper[5050]: I1123 16:46:20.179299 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/e6f245e8-504a-41a3-9d7d-2abe26a645ba-ceph\") pod \"install-os-openstack-openstack-cell1-tcjmm\" (UID: \"e6f245e8-504a-41a3-9d7d-2abe26a645ba\") " pod="openstack/install-os-openstack-openstack-cell1-tcjmm" Nov 23 16:46:20 crc kubenswrapper[5050]: I1123 16:46:20.179746 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e6f245e8-504a-41a3-9d7d-2abe26a645ba-inventory\") pod \"install-os-openstack-openstack-cell1-tcjmm\" (UID: \"e6f245e8-504a-41a3-9d7d-2abe26a645ba\") " pod="openstack/install-os-openstack-openstack-cell1-tcjmm" Nov 23 16:46:20 crc kubenswrapper[5050]: I1123 16:46:20.179910 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e6f245e8-504a-41a3-9d7d-2abe26a645ba-ssh-key\") pod \"install-os-openstack-openstack-cell1-tcjmm\" (UID: \"e6f245e8-504a-41a3-9d7d-2abe26a645ba\") " pod="openstack/install-os-openstack-openstack-cell1-tcjmm" Nov 23 16:46:20 crc kubenswrapper[5050]: I1123 16:46:20.282753 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-62hhf\" (UniqueName: \"kubernetes.io/projected/e6f245e8-504a-41a3-9d7d-2abe26a645ba-kube-api-access-62hhf\") pod \"install-os-openstack-openstack-cell1-tcjmm\" (UID: \"e6f245e8-504a-41a3-9d7d-2abe26a645ba\") " pod="openstack/install-os-openstack-openstack-cell1-tcjmm" Nov 23 16:46:20 crc kubenswrapper[5050]: I1123 16:46:20.282873 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/e6f245e8-504a-41a3-9d7d-2abe26a645ba-ceph\") pod \"install-os-openstack-openstack-cell1-tcjmm\" (UID: \"e6f245e8-504a-41a3-9d7d-2abe26a645ba\") " pod="openstack/install-os-openstack-openstack-cell1-tcjmm" Nov 23 16:46:20 crc kubenswrapper[5050]: I1123 16:46:20.282958 
5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e6f245e8-504a-41a3-9d7d-2abe26a645ba-inventory\") pod \"install-os-openstack-openstack-cell1-tcjmm\" (UID: \"e6f245e8-504a-41a3-9d7d-2abe26a645ba\") " pod="openstack/install-os-openstack-openstack-cell1-tcjmm" Nov 23 16:46:20 crc kubenswrapper[5050]: I1123 16:46:20.282982 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e6f245e8-504a-41a3-9d7d-2abe26a645ba-ssh-key\") pod \"install-os-openstack-openstack-cell1-tcjmm\" (UID: \"e6f245e8-504a-41a3-9d7d-2abe26a645ba\") " pod="openstack/install-os-openstack-openstack-cell1-tcjmm" Nov 23 16:46:20 crc kubenswrapper[5050]: I1123 16:46:20.288025 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e6f245e8-504a-41a3-9d7d-2abe26a645ba-ssh-key\") pod \"install-os-openstack-openstack-cell1-tcjmm\" (UID: \"e6f245e8-504a-41a3-9d7d-2abe26a645ba\") " pod="openstack/install-os-openstack-openstack-cell1-tcjmm" Nov 23 16:46:20 crc kubenswrapper[5050]: I1123 16:46:20.288588 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e6f245e8-504a-41a3-9d7d-2abe26a645ba-inventory\") pod \"install-os-openstack-openstack-cell1-tcjmm\" (UID: \"e6f245e8-504a-41a3-9d7d-2abe26a645ba\") " pod="openstack/install-os-openstack-openstack-cell1-tcjmm" Nov 23 16:46:20 crc kubenswrapper[5050]: I1123 16:46:20.289488 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/e6f245e8-504a-41a3-9d7d-2abe26a645ba-ceph\") pod \"install-os-openstack-openstack-cell1-tcjmm\" (UID: \"e6f245e8-504a-41a3-9d7d-2abe26a645ba\") " pod="openstack/install-os-openstack-openstack-cell1-tcjmm" Nov 23 16:46:20 crc kubenswrapper[5050]: I1123 16:46:20.303410 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-62hhf\" (UniqueName: \"kubernetes.io/projected/e6f245e8-504a-41a3-9d7d-2abe26a645ba-kube-api-access-62hhf\") pod \"install-os-openstack-openstack-cell1-tcjmm\" (UID: \"e6f245e8-504a-41a3-9d7d-2abe26a645ba\") " pod="openstack/install-os-openstack-openstack-cell1-tcjmm" Nov 23 16:46:20 crc kubenswrapper[5050]: I1123 16:46:20.415108 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-os-openstack-openstack-cell1-tcjmm" Nov 23 16:46:21 crc kubenswrapper[5050]: I1123 16:46:21.098021 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-openstack-openstack-cell1-tcjmm"] Nov 23 16:46:21 crc kubenswrapper[5050]: W1123 16:46:21.111646 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode6f245e8_504a_41a3_9d7d_2abe26a645ba.slice/crio-997337ea7f0b3f8d70b701554dcaac4462a6bf8f83f04a01357b28a66657b78f WatchSource:0}: Error finding container 997337ea7f0b3f8d70b701554dcaac4462a6bf8f83f04a01357b28a66657b78f: Status 404 returned error can't find the container with id 997337ea7f0b3f8d70b701554dcaac4462a6bf8f83f04a01357b28a66657b78f Nov 23 16:46:21 crc kubenswrapper[5050]: I1123 16:46:21.993334 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-openstack-openstack-cell1-tcjmm" event={"ID":"e6f245e8-504a-41a3-9d7d-2abe26a645ba","Type":"ContainerStarted","Data":"b65f801fad04c1b5b0f949b98629bf22924d1c1bfeaf30aeae8f22668c776482"} Nov 23 16:46:21 crc kubenswrapper[5050]: I1123 16:46:21.993764 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-openstack-openstack-cell1-tcjmm" event={"ID":"e6f245e8-504a-41a3-9d7d-2abe26a645ba","Type":"ContainerStarted","Data":"997337ea7f0b3f8d70b701554dcaac4462a6bf8f83f04a01357b28a66657b78f"} Nov 23 16:46:22 crc kubenswrapper[5050]: I1123 16:46:22.037342 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-os-openstack-openstack-cell1-tcjmm" podStartSLOduration=1.561495039 podStartE2EDuration="2.037309288s" podCreationTimestamp="2025-11-23 16:46:20 +0000 UTC" firstStartedPulling="2025-11-23 16:46:21.114612218 +0000 UTC m=+7476.281608703" lastFinishedPulling="2025-11-23 16:46:21.590426467 +0000 UTC m=+7476.757422952" observedRunningTime="2025-11-23 16:46:22.016472659 +0000 UTC m=+7477.183469184" watchObservedRunningTime="2025-11-23 16:46:22.037309288 +0000 UTC m=+7477.204305813" Nov 23 16:47:08 crc kubenswrapper[5050]: I1123 16:47:08.659053 5050 generic.go:334] "Generic (PLEG): container finished" podID="e6f245e8-504a-41a3-9d7d-2abe26a645ba" containerID="b65f801fad04c1b5b0f949b98629bf22924d1c1bfeaf30aeae8f22668c776482" exitCode=0 Nov 23 16:47:08 crc kubenswrapper[5050]: I1123 16:47:08.659171 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-openstack-openstack-cell1-tcjmm" event={"ID":"e6f245e8-504a-41a3-9d7d-2abe26a645ba","Type":"ContainerDied","Data":"b65f801fad04c1b5b0f949b98629bf22924d1c1bfeaf30aeae8f22668c776482"} Nov 23 16:47:10 crc kubenswrapper[5050]: I1123 16:47:10.248483 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-os-openstack-openstack-cell1-tcjmm" Nov 23 16:47:10 crc kubenswrapper[5050]: I1123 16:47:10.341622 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e6f245e8-504a-41a3-9d7d-2abe26a645ba-inventory\") pod \"e6f245e8-504a-41a3-9d7d-2abe26a645ba\" (UID: \"e6f245e8-504a-41a3-9d7d-2abe26a645ba\") " Nov 23 16:47:10 crc kubenswrapper[5050]: I1123 16:47:10.341698 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e6f245e8-504a-41a3-9d7d-2abe26a645ba-ssh-key\") pod \"e6f245e8-504a-41a3-9d7d-2abe26a645ba\" (UID: \"e6f245e8-504a-41a3-9d7d-2abe26a645ba\") " Nov 23 16:47:10 crc kubenswrapper[5050]: I1123 16:47:10.341743 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/e6f245e8-504a-41a3-9d7d-2abe26a645ba-ceph\") pod \"e6f245e8-504a-41a3-9d7d-2abe26a645ba\" (UID: \"e6f245e8-504a-41a3-9d7d-2abe26a645ba\") " Nov 23 16:47:10 crc kubenswrapper[5050]: I1123 16:47:10.341818 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-62hhf\" (UniqueName: \"kubernetes.io/projected/e6f245e8-504a-41a3-9d7d-2abe26a645ba-kube-api-access-62hhf\") pod \"e6f245e8-504a-41a3-9d7d-2abe26a645ba\" (UID: \"e6f245e8-504a-41a3-9d7d-2abe26a645ba\") " Nov 23 16:47:10 crc kubenswrapper[5050]: I1123 16:47:10.354787 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e6f245e8-504a-41a3-9d7d-2abe26a645ba-kube-api-access-62hhf" (OuterVolumeSpecName: "kube-api-access-62hhf") pod "e6f245e8-504a-41a3-9d7d-2abe26a645ba" (UID: "e6f245e8-504a-41a3-9d7d-2abe26a645ba"). InnerVolumeSpecName "kube-api-access-62hhf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 16:47:10 crc kubenswrapper[5050]: I1123 16:47:10.363819 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e6f245e8-504a-41a3-9d7d-2abe26a645ba-ceph" (OuterVolumeSpecName: "ceph") pod "e6f245e8-504a-41a3-9d7d-2abe26a645ba" (UID: "e6f245e8-504a-41a3-9d7d-2abe26a645ba"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:47:10 crc kubenswrapper[5050]: I1123 16:47:10.389980 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e6f245e8-504a-41a3-9d7d-2abe26a645ba-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "e6f245e8-504a-41a3-9d7d-2abe26a645ba" (UID: "e6f245e8-504a-41a3-9d7d-2abe26a645ba"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:47:10 crc kubenswrapper[5050]: I1123 16:47:10.393164 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e6f245e8-504a-41a3-9d7d-2abe26a645ba-inventory" (OuterVolumeSpecName: "inventory") pod "e6f245e8-504a-41a3-9d7d-2abe26a645ba" (UID: "e6f245e8-504a-41a3-9d7d-2abe26a645ba"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:47:10 crc kubenswrapper[5050]: I1123 16:47:10.447749 5050 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e6f245e8-504a-41a3-9d7d-2abe26a645ba-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 23 16:47:10 crc kubenswrapper[5050]: I1123 16:47:10.447782 5050 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/e6f245e8-504a-41a3-9d7d-2abe26a645ba-ceph\") on node \"crc\" DevicePath \"\"" Nov 23 16:47:10 crc kubenswrapper[5050]: I1123 16:47:10.447793 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-62hhf\" (UniqueName: \"kubernetes.io/projected/e6f245e8-504a-41a3-9d7d-2abe26a645ba-kube-api-access-62hhf\") on node \"crc\" DevicePath \"\"" Nov 23 16:47:10 crc kubenswrapper[5050]: I1123 16:47:10.447804 5050 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e6f245e8-504a-41a3-9d7d-2abe26a645ba-inventory\") on node \"crc\" DevicePath \"\"" Nov 23 16:47:10 crc kubenswrapper[5050]: I1123 16:47:10.689361 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-openstack-openstack-cell1-tcjmm" event={"ID":"e6f245e8-504a-41a3-9d7d-2abe26a645ba","Type":"ContainerDied","Data":"997337ea7f0b3f8d70b701554dcaac4462a6bf8f83f04a01357b28a66657b78f"} Nov 23 16:47:10 crc kubenswrapper[5050]: I1123 16:47:10.689949 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="997337ea7f0b3f8d70b701554dcaac4462a6bf8f83f04a01357b28a66657b78f" Nov 23 16:47:10 crc kubenswrapper[5050]: I1123 16:47:10.689558 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-openstack-openstack-cell1-tcjmm" Nov 23 16:47:10 crc kubenswrapper[5050]: I1123 16:47:10.794286 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-os-openstack-openstack-cell1-rzgx2"] Nov 23 16:47:10 crc kubenswrapper[5050]: E1123 16:47:10.795424 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e6f245e8-504a-41a3-9d7d-2abe26a645ba" containerName="install-os-openstack-openstack-cell1" Nov 23 16:47:10 crc kubenswrapper[5050]: I1123 16:47:10.795553 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="e6f245e8-504a-41a3-9d7d-2abe26a645ba" containerName="install-os-openstack-openstack-cell1" Nov 23 16:47:10 crc kubenswrapper[5050]: I1123 16:47:10.796005 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="e6f245e8-504a-41a3-9d7d-2abe26a645ba" containerName="install-os-openstack-openstack-cell1" Nov 23 16:47:10 crc kubenswrapper[5050]: I1123 16:47:10.797354 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-os-openstack-openstack-cell1-rzgx2" Nov 23 16:47:10 crc kubenswrapper[5050]: I1123 16:47:10.801667 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 23 16:47:10 crc kubenswrapper[5050]: I1123 16:47:10.801804 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Nov 23 16:47:10 crc kubenswrapper[5050]: I1123 16:47:10.801877 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-ckrpf" Nov 23 16:47:10 crc kubenswrapper[5050]: I1123 16:47:10.801936 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 23 16:47:10 crc kubenswrapper[5050]: I1123 16:47:10.824091 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-openstack-openstack-cell1-rzgx2"] Nov 23 16:47:10 crc kubenswrapper[5050]: I1123 16:47:10.865097 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wlfmj\" (UniqueName: \"kubernetes.io/projected/f78c1a10-b5dc-40c8-8a62-f8e3955645cc-kube-api-access-wlfmj\") pod \"configure-os-openstack-openstack-cell1-rzgx2\" (UID: \"f78c1a10-b5dc-40c8-8a62-f8e3955645cc\") " pod="openstack/configure-os-openstack-openstack-cell1-rzgx2" Nov 23 16:47:10 crc kubenswrapper[5050]: I1123 16:47:10.865173 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/f78c1a10-b5dc-40c8-8a62-f8e3955645cc-inventory\") pod \"configure-os-openstack-openstack-cell1-rzgx2\" (UID: \"f78c1a10-b5dc-40c8-8a62-f8e3955645cc\") " pod="openstack/configure-os-openstack-openstack-cell1-rzgx2" Nov 23 16:47:10 crc kubenswrapper[5050]: I1123 16:47:10.865499 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/f78c1a10-b5dc-40c8-8a62-f8e3955645cc-ssh-key\") pod \"configure-os-openstack-openstack-cell1-rzgx2\" (UID: \"f78c1a10-b5dc-40c8-8a62-f8e3955645cc\") " pod="openstack/configure-os-openstack-openstack-cell1-rzgx2" Nov 23 16:47:10 crc kubenswrapper[5050]: I1123 16:47:10.865580 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/f78c1a10-b5dc-40c8-8a62-f8e3955645cc-ceph\") pod \"configure-os-openstack-openstack-cell1-rzgx2\" (UID: \"f78c1a10-b5dc-40c8-8a62-f8e3955645cc\") " pod="openstack/configure-os-openstack-openstack-cell1-rzgx2" Nov 23 16:47:10 crc kubenswrapper[5050]: I1123 16:47:10.967068 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wlfmj\" (UniqueName: \"kubernetes.io/projected/f78c1a10-b5dc-40c8-8a62-f8e3955645cc-kube-api-access-wlfmj\") pod \"configure-os-openstack-openstack-cell1-rzgx2\" (UID: \"f78c1a10-b5dc-40c8-8a62-f8e3955645cc\") " pod="openstack/configure-os-openstack-openstack-cell1-rzgx2" Nov 23 16:47:10 crc kubenswrapper[5050]: I1123 16:47:10.967119 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/f78c1a10-b5dc-40c8-8a62-f8e3955645cc-inventory\") pod \"configure-os-openstack-openstack-cell1-rzgx2\" (UID: \"f78c1a10-b5dc-40c8-8a62-f8e3955645cc\") " pod="openstack/configure-os-openstack-openstack-cell1-rzgx2" Nov 23 16:47:10 crc 
kubenswrapper[5050]: I1123 16:47:10.967252 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/f78c1a10-b5dc-40c8-8a62-f8e3955645cc-ssh-key\") pod \"configure-os-openstack-openstack-cell1-rzgx2\" (UID: \"f78c1a10-b5dc-40c8-8a62-f8e3955645cc\") " pod="openstack/configure-os-openstack-openstack-cell1-rzgx2" Nov 23 16:47:10 crc kubenswrapper[5050]: I1123 16:47:10.967301 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/f78c1a10-b5dc-40c8-8a62-f8e3955645cc-ceph\") pod \"configure-os-openstack-openstack-cell1-rzgx2\" (UID: \"f78c1a10-b5dc-40c8-8a62-f8e3955645cc\") " pod="openstack/configure-os-openstack-openstack-cell1-rzgx2" Nov 23 16:47:10 crc kubenswrapper[5050]: I1123 16:47:10.974209 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/f78c1a10-b5dc-40c8-8a62-f8e3955645cc-inventory\") pod \"configure-os-openstack-openstack-cell1-rzgx2\" (UID: \"f78c1a10-b5dc-40c8-8a62-f8e3955645cc\") " pod="openstack/configure-os-openstack-openstack-cell1-rzgx2" Nov 23 16:47:10 crc kubenswrapper[5050]: I1123 16:47:10.974583 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/f78c1a10-b5dc-40c8-8a62-f8e3955645cc-ceph\") pod \"configure-os-openstack-openstack-cell1-rzgx2\" (UID: \"f78c1a10-b5dc-40c8-8a62-f8e3955645cc\") " pod="openstack/configure-os-openstack-openstack-cell1-rzgx2" Nov 23 16:47:10 crc kubenswrapper[5050]: I1123 16:47:10.977073 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/f78c1a10-b5dc-40c8-8a62-f8e3955645cc-ssh-key\") pod \"configure-os-openstack-openstack-cell1-rzgx2\" (UID: \"f78c1a10-b5dc-40c8-8a62-f8e3955645cc\") " pod="openstack/configure-os-openstack-openstack-cell1-rzgx2" Nov 23 16:47:10 crc kubenswrapper[5050]: I1123 16:47:10.996361 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wlfmj\" (UniqueName: \"kubernetes.io/projected/f78c1a10-b5dc-40c8-8a62-f8e3955645cc-kube-api-access-wlfmj\") pod \"configure-os-openstack-openstack-cell1-rzgx2\" (UID: \"f78c1a10-b5dc-40c8-8a62-f8e3955645cc\") " pod="openstack/configure-os-openstack-openstack-cell1-rzgx2" Nov 23 16:47:11 crc kubenswrapper[5050]: I1123 16:47:11.129062 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-os-openstack-openstack-cell1-rzgx2" Nov 23 16:47:11 crc kubenswrapper[5050]: I1123 16:47:11.804455 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-openstack-openstack-cell1-rzgx2"] Nov 23 16:47:12 crc kubenswrapper[5050]: I1123 16:47:12.722950 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-openstack-openstack-cell1-rzgx2" event={"ID":"f78c1a10-b5dc-40c8-8a62-f8e3955645cc","Type":"ContainerStarted","Data":"1477bc262c2dee95e058ae578fe41140fc427fe808225d95b2f52384efa21c24"} Nov 23 16:47:12 crc kubenswrapper[5050]: I1123 16:47:12.723650 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-openstack-openstack-cell1-rzgx2" event={"ID":"f78c1a10-b5dc-40c8-8a62-f8e3955645cc","Type":"ContainerStarted","Data":"9707a5c8669e079c22c12d8d2cd6d0050ffc196abe295073f407d1cc49db7a20"} Nov 23 16:47:12 crc kubenswrapper[5050]: I1123 16:47:12.770789 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-os-openstack-openstack-cell1-rzgx2" podStartSLOduration=2.217195645 podStartE2EDuration="2.770758954s" podCreationTimestamp="2025-11-23 16:47:10 +0000 UTC" firstStartedPulling="2025-11-23 16:47:11.81930362 +0000 UTC m=+7526.986300105" lastFinishedPulling="2025-11-23 16:47:12.372866889 +0000 UTC m=+7527.539863414" observedRunningTime="2025-11-23 16:47:12.754895396 +0000 UTC m=+7527.921891901" watchObservedRunningTime="2025-11-23 16:47:12.770758954 +0000 UTC m=+7527.937755449" Nov 23 16:47:58 crc kubenswrapper[5050]: I1123 16:47:58.344137 5050 generic.go:334] "Generic (PLEG): container finished" podID="f78c1a10-b5dc-40c8-8a62-f8e3955645cc" containerID="1477bc262c2dee95e058ae578fe41140fc427fe808225d95b2f52384efa21c24" exitCode=0 Nov 23 16:47:58 crc kubenswrapper[5050]: I1123 16:47:58.344240 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-openstack-openstack-cell1-rzgx2" event={"ID":"f78c1a10-b5dc-40c8-8a62-f8e3955645cc","Type":"ContainerDied","Data":"1477bc262c2dee95e058ae578fe41140fc427fe808225d95b2f52384efa21c24"} Nov 23 16:47:59 crc kubenswrapper[5050]: I1123 16:47:59.224900 5050 patch_prober.go:28] interesting pod/machine-config-daemon-hlrlq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 23 16:47:59 crc kubenswrapper[5050]: I1123 16:47:59.225607 5050 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 23 16:47:59 crc kubenswrapper[5050]: I1123 16:47:59.982859 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-os-openstack-openstack-cell1-rzgx2" Nov 23 16:48:00 crc kubenswrapper[5050]: I1123 16:48:00.029958 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/f78c1a10-b5dc-40c8-8a62-f8e3955645cc-ceph\") pod \"f78c1a10-b5dc-40c8-8a62-f8e3955645cc\" (UID: \"f78c1a10-b5dc-40c8-8a62-f8e3955645cc\") " Nov 23 16:48:00 crc kubenswrapper[5050]: I1123 16:48:00.030136 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/f78c1a10-b5dc-40c8-8a62-f8e3955645cc-ssh-key\") pod \"f78c1a10-b5dc-40c8-8a62-f8e3955645cc\" (UID: \"f78c1a10-b5dc-40c8-8a62-f8e3955645cc\") " Nov 23 16:48:00 crc kubenswrapper[5050]: I1123 16:48:00.030198 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wlfmj\" (UniqueName: \"kubernetes.io/projected/f78c1a10-b5dc-40c8-8a62-f8e3955645cc-kube-api-access-wlfmj\") pod \"f78c1a10-b5dc-40c8-8a62-f8e3955645cc\" (UID: \"f78c1a10-b5dc-40c8-8a62-f8e3955645cc\") " Nov 23 16:48:00 crc kubenswrapper[5050]: I1123 16:48:00.030247 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/f78c1a10-b5dc-40c8-8a62-f8e3955645cc-inventory\") pod \"f78c1a10-b5dc-40c8-8a62-f8e3955645cc\" (UID: \"f78c1a10-b5dc-40c8-8a62-f8e3955645cc\") " Nov 23 16:48:00 crc kubenswrapper[5050]: I1123 16:48:00.040061 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f78c1a10-b5dc-40c8-8a62-f8e3955645cc-ceph" (OuterVolumeSpecName: "ceph") pod "f78c1a10-b5dc-40c8-8a62-f8e3955645cc" (UID: "f78c1a10-b5dc-40c8-8a62-f8e3955645cc"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:48:00 crc kubenswrapper[5050]: I1123 16:48:00.040834 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f78c1a10-b5dc-40c8-8a62-f8e3955645cc-kube-api-access-wlfmj" (OuterVolumeSpecName: "kube-api-access-wlfmj") pod "f78c1a10-b5dc-40c8-8a62-f8e3955645cc" (UID: "f78c1a10-b5dc-40c8-8a62-f8e3955645cc"). InnerVolumeSpecName "kube-api-access-wlfmj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 16:48:00 crc kubenswrapper[5050]: I1123 16:48:00.070754 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f78c1a10-b5dc-40c8-8a62-f8e3955645cc-inventory" (OuterVolumeSpecName: "inventory") pod "f78c1a10-b5dc-40c8-8a62-f8e3955645cc" (UID: "f78c1a10-b5dc-40c8-8a62-f8e3955645cc"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:48:00 crc kubenswrapper[5050]: I1123 16:48:00.076128 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f78c1a10-b5dc-40c8-8a62-f8e3955645cc-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "f78c1a10-b5dc-40c8-8a62-f8e3955645cc" (UID: "f78c1a10-b5dc-40c8-8a62-f8e3955645cc"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:48:00 crc kubenswrapper[5050]: I1123 16:48:00.133693 5050 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/f78c1a10-b5dc-40c8-8a62-f8e3955645cc-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 23 16:48:00 crc kubenswrapper[5050]: I1123 16:48:00.134032 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wlfmj\" (UniqueName: \"kubernetes.io/projected/f78c1a10-b5dc-40c8-8a62-f8e3955645cc-kube-api-access-wlfmj\") on node \"crc\" DevicePath \"\"" Nov 23 16:48:00 crc kubenswrapper[5050]: I1123 16:48:00.134161 5050 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/f78c1a10-b5dc-40c8-8a62-f8e3955645cc-inventory\") on node \"crc\" DevicePath \"\"" Nov 23 16:48:00 crc kubenswrapper[5050]: I1123 16:48:00.134260 5050 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/f78c1a10-b5dc-40c8-8a62-f8e3955645cc-ceph\") on node \"crc\" DevicePath \"\"" Nov 23 16:48:00 crc kubenswrapper[5050]: I1123 16:48:00.373967 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-openstack-openstack-cell1-rzgx2" event={"ID":"f78c1a10-b5dc-40c8-8a62-f8e3955645cc","Type":"ContainerDied","Data":"9707a5c8669e079c22c12d8d2cd6d0050ffc196abe295073f407d1cc49db7a20"} Nov 23 16:48:00 crc kubenswrapper[5050]: I1123 16:48:00.374023 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9707a5c8669e079c22c12d8d2cd6d0050ffc196abe295073f407d1cc49db7a20" Nov 23 16:48:00 crc kubenswrapper[5050]: I1123 16:48:00.374055 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-openstack-openstack-cell1-rzgx2" Nov 23 16:48:00 crc kubenswrapper[5050]: I1123 16:48:00.474416 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ssh-known-hosts-openstack-s6dvt"] Nov 23 16:48:00 crc kubenswrapper[5050]: E1123 16:48:00.474954 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f78c1a10-b5dc-40c8-8a62-f8e3955645cc" containerName="configure-os-openstack-openstack-cell1" Nov 23 16:48:00 crc kubenswrapper[5050]: I1123 16:48:00.474969 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="f78c1a10-b5dc-40c8-8a62-f8e3955645cc" containerName="configure-os-openstack-openstack-cell1" Nov 23 16:48:00 crc kubenswrapper[5050]: I1123 16:48:00.475194 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="f78c1a10-b5dc-40c8-8a62-f8e3955645cc" containerName="configure-os-openstack-openstack-cell1" Nov 23 16:48:00 crc kubenswrapper[5050]: I1123 16:48:00.476030 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ssh-known-hosts-openstack-s6dvt" Nov 23 16:48:00 crc kubenswrapper[5050]: I1123 16:48:00.478528 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 23 16:48:00 crc kubenswrapper[5050]: I1123 16:48:00.479023 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 23 16:48:00 crc kubenswrapper[5050]: I1123 16:48:00.479156 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-ckrpf" Nov 23 16:48:00 crc kubenswrapper[5050]: I1123 16:48:00.479322 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Nov 23 16:48:00 crc kubenswrapper[5050]: I1123 16:48:00.491727 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-openstack-s6dvt"] Nov 23 16:48:00 crc kubenswrapper[5050]: I1123 16:48:00.545533 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/8beee0e1-ccc1-48f2-83ae-a59422187fac-ssh-key-openstack-cell1\") pod \"ssh-known-hosts-openstack-s6dvt\" (UID: \"8beee0e1-ccc1-48f2-83ae-a59422187fac\") " pod="openstack/ssh-known-hosts-openstack-s6dvt" Nov 23 16:48:00 crc kubenswrapper[5050]: I1123 16:48:00.545631 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/8beee0e1-ccc1-48f2-83ae-a59422187fac-ceph\") pod \"ssh-known-hosts-openstack-s6dvt\" (UID: \"8beee0e1-ccc1-48f2-83ae-a59422187fac\") " pod="openstack/ssh-known-hosts-openstack-s6dvt" Nov 23 16:48:00 crc kubenswrapper[5050]: I1123 16:48:00.546704 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/8beee0e1-ccc1-48f2-83ae-a59422187fac-inventory-0\") pod \"ssh-known-hosts-openstack-s6dvt\" (UID: \"8beee0e1-ccc1-48f2-83ae-a59422187fac\") " pod="openstack/ssh-known-hosts-openstack-s6dvt" Nov 23 16:48:00 crc kubenswrapper[5050]: I1123 16:48:00.546835 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rb4ps\" (UniqueName: \"kubernetes.io/projected/8beee0e1-ccc1-48f2-83ae-a59422187fac-kube-api-access-rb4ps\") pod \"ssh-known-hosts-openstack-s6dvt\" (UID: \"8beee0e1-ccc1-48f2-83ae-a59422187fac\") " pod="openstack/ssh-known-hosts-openstack-s6dvt" Nov 23 16:48:00 crc kubenswrapper[5050]: I1123 16:48:00.650081 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/8beee0e1-ccc1-48f2-83ae-a59422187fac-inventory-0\") pod \"ssh-known-hosts-openstack-s6dvt\" (UID: \"8beee0e1-ccc1-48f2-83ae-a59422187fac\") " pod="openstack/ssh-known-hosts-openstack-s6dvt" Nov 23 16:48:00 crc kubenswrapper[5050]: I1123 16:48:00.651204 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rb4ps\" (UniqueName: \"kubernetes.io/projected/8beee0e1-ccc1-48f2-83ae-a59422187fac-kube-api-access-rb4ps\") pod \"ssh-known-hosts-openstack-s6dvt\" (UID: \"8beee0e1-ccc1-48f2-83ae-a59422187fac\") " pod="openstack/ssh-known-hosts-openstack-s6dvt" Nov 23 16:48:00 crc kubenswrapper[5050]: I1123 16:48:00.651272 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/8beee0e1-ccc1-48f2-83ae-a59422187fac-ssh-key-openstack-cell1\") pod \"ssh-known-hosts-openstack-s6dvt\" (UID: \"8beee0e1-ccc1-48f2-83ae-a59422187fac\") " pod="openstack/ssh-known-hosts-openstack-s6dvt" Nov 23 16:48:00 crc kubenswrapper[5050]: I1123 16:48:00.651338 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/8beee0e1-ccc1-48f2-83ae-a59422187fac-ceph\") pod \"ssh-known-hosts-openstack-s6dvt\" (UID: \"8beee0e1-ccc1-48f2-83ae-a59422187fac\") " pod="openstack/ssh-known-hosts-openstack-s6dvt" Nov 23 16:48:00 crc kubenswrapper[5050]: I1123 16:48:00.657585 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/8beee0e1-ccc1-48f2-83ae-a59422187fac-inventory-0\") pod \"ssh-known-hosts-openstack-s6dvt\" (UID: \"8beee0e1-ccc1-48f2-83ae-a59422187fac\") " pod="openstack/ssh-known-hosts-openstack-s6dvt" Nov 23 16:48:00 crc kubenswrapper[5050]: I1123 16:48:00.664464 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/8beee0e1-ccc1-48f2-83ae-a59422187fac-ssh-key-openstack-cell1\") pod \"ssh-known-hosts-openstack-s6dvt\" (UID: \"8beee0e1-ccc1-48f2-83ae-a59422187fac\") " pod="openstack/ssh-known-hosts-openstack-s6dvt" Nov 23 16:48:00 crc kubenswrapper[5050]: I1123 16:48:00.667475 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/8beee0e1-ccc1-48f2-83ae-a59422187fac-ceph\") pod \"ssh-known-hosts-openstack-s6dvt\" (UID: \"8beee0e1-ccc1-48f2-83ae-a59422187fac\") " pod="openstack/ssh-known-hosts-openstack-s6dvt" Nov 23 16:48:00 crc kubenswrapper[5050]: I1123 16:48:00.669630 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rb4ps\" (UniqueName: \"kubernetes.io/projected/8beee0e1-ccc1-48f2-83ae-a59422187fac-kube-api-access-rb4ps\") pod \"ssh-known-hosts-openstack-s6dvt\" (UID: \"8beee0e1-ccc1-48f2-83ae-a59422187fac\") " pod="openstack/ssh-known-hosts-openstack-s6dvt" Nov 23 16:48:00 crc kubenswrapper[5050]: I1123 16:48:00.850221 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ssh-known-hosts-openstack-s6dvt" Nov 23 16:48:01 crc kubenswrapper[5050]: I1123 16:48:01.474236 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-openstack-s6dvt"] Nov 23 16:48:02 crc kubenswrapper[5050]: I1123 16:48:02.439613 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-openstack-s6dvt" event={"ID":"8beee0e1-ccc1-48f2-83ae-a59422187fac","Type":"ContainerStarted","Data":"71338370febb88d4eb69377debf0f5a933db80bfe837bb82f45dfe4b479dea66"} Nov 23 16:48:02 crc kubenswrapper[5050]: I1123 16:48:02.440147 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-openstack-s6dvt" event={"ID":"8beee0e1-ccc1-48f2-83ae-a59422187fac","Type":"ContainerStarted","Data":"f10f47d6a8871aa6ea0199b741f27fa87e2972a55953cbc045f440928870774b"} Nov 23 16:48:02 crc kubenswrapper[5050]: I1123 16:48:02.467153 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ssh-known-hosts-openstack-s6dvt" podStartSLOduration=2.040280229 podStartE2EDuration="2.467125083s" podCreationTimestamp="2025-11-23 16:48:00 +0000 UTC" firstStartedPulling="2025-11-23 16:48:01.490935299 +0000 UTC m=+7576.657931794" lastFinishedPulling="2025-11-23 16:48:01.917780153 +0000 UTC m=+7577.084776648" observedRunningTime="2025-11-23 16:48:02.462817731 +0000 UTC m=+7577.629814226" watchObservedRunningTime="2025-11-23 16:48:02.467125083 +0000 UTC m=+7577.634121568" Nov 23 16:48:11 crc kubenswrapper[5050]: I1123 16:48:11.563517 5050 generic.go:334] "Generic (PLEG): container finished" podID="8beee0e1-ccc1-48f2-83ae-a59422187fac" containerID="71338370febb88d4eb69377debf0f5a933db80bfe837bb82f45dfe4b479dea66" exitCode=0 Nov 23 16:48:11 crc kubenswrapper[5050]: I1123 16:48:11.567587 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-openstack-s6dvt" event={"ID":"8beee0e1-ccc1-48f2-83ae-a59422187fac","Type":"ContainerDied","Data":"71338370febb88d4eb69377debf0f5a933db80bfe837bb82f45dfe4b479dea66"} Nov 23 16:48:13 crc kubenswrapper[5050]: I1123 16:48:13.169813 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ssh-known-hosts-openstack-s6dvt" Nov 23 16:48:13 crc kubenswrapper[5050]: I1123 16:48:13.304033 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/8beee0e1-ccc1-48f2-83ae-a59422187fac-inventory-0\") pod \"8beee0e1-ccc1-48f2-83ae-a59422187fac\" (UID: \"8beee0e1-ccc1-48f2-83ae-a59422187fac\") " Nov 23 16:48:13 crc kubenswrapper[5050]: I1123 16:48:13.304285 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rb4ps\" (UniqueName: \"kubernetes.io/projected/8beee0e1-ccc1-48f2-83ae-a59422187fac-kube-api-access-rb4ps\") pod \"8beee0e1-ccc1-48f2-83ae-a59422187fac\" (UID: \"8beee0e1-ccc1-48f2-83ae-a59422187fac\") " Nov 23 16:48:13 crc kubenswrapper[5050]: I1123 16:48:13.304349 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/8beee0e1-ccc1-48f2-83ae-a59422187fac-ceph\") pod \"8beee0e1-ccc1-48f2-83ae-a59422187fac\" (UID: \"8beee0e1-ccc1-48f2-83ae-a59422187fac\") " Nov 23 16:48:13 crc kubenswrapper[5050]: I1123 16:48:13.304422 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/8beee0e1-ccc1-48f2-83ae-a59422187fac-ssh-key-openstack-cell1\") pod \"8beee0e1-ccc1-48f2-83ae-a59422187fac\" (UID: \"8beee0e1-ccc1-48f2-83ae-a59422187fac\") " Nov 23 16:48:13 crc kubenswrapper[5050]: I1123 16:48:13.326065 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8beee0e1-ccc1-48f2-83ae-a59422187fac-kube-api-access-rb4ps" (OuterVolumeSpecName: "kube-api-access-rb4ps") pod "8beee0e1-ccc1-48f2-83ae-a59422187fac" (UID: "8beee0e1-ccc1-48f2-83ae-a59422187fac"). InnerVolumeSpecName "kube-api-access-rb4ps". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 16:48:13 crc kubenswrapper[5050]: I1123 16:48:13.330646 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8beee0e1-ccc1-48f2-83ae-a59422187fac-ceph" (OuterVolumeSpecName: "ceph") pod "8beee0e1-ccc1-48f2-83ae-a59422187fac" (UID: "8beee0e1-ccc1-48f2-83ae-a59422187fac"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:48:13 crc kubenswrapper[5050]: I1123 16:48:13.368767 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8beee0e1-ccc1-48f2-83ae-a59422187fac-ssh-key-openstack-cell1" (OuterVolumeSpecName: "ssh-key-openstack-cell1") pod "8beee0e1-ccc1-48f2-83ae-a59422187fac" (UID: "8beee0e1-ccc1-48f2-83ae-a59422187fac"). InnerVolumeSpecName "ssh-key-openstack-cell1". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:48:13 crc kubenswrapper[5050]: I1123 16:48:13.377543 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8beee0e1-ccc1-48f2-83ae-a59422187fac-inventory-0" (OuterVolumeSpecName: "inventory-0") pod "8beee0e1-ccc1-48f2-83ae-a59422187fac" (UID: "8beee0e1-ccc1-48f2-83ae-a59422187fac"). InnerVolumeSpecName "inventory-0". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:48:13 crc kubenswrapper[5050]: I1123 16:48:13.411099 5050 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/8beee0e1-ccc1-48f2-83ae-a59422187fac-ssh-key-openstack-cell1\") on node \"crc\" DevicePath \"\"" Nov 23 16:48:13 crc kubenswrapper[5050]: I1123 16:48:13.411135 5050 reconciler_common.go:293] "Volume detached for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/8beee0e1-ccc1-48f2-83ae-a59422187fac-inventory-0\") on node \"crc\" DevicePath \"\"" Nov 23 16:48:13 crc kubenswrapper[5050]: I1123 16:48:13.411153 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rb4ps\" (UniqueName: \"kubernetes.io/projected/8beee0e1-ccc1-48f2-83ae-a59422187fac-kube-api-access-rb4ps\") on node \"crc\" DevicePath \"\"" Nov 23 16:48:13 crc kubenswrapper[5050]: I1123 16:48:13.411167 5050 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/8beee0e1-ccc1-48f2-83ae-a59422187fac-ceph\") on node \"crc\" DevicePath \"\"" Nov 23 16:48:13 crc kubenswrapper[5050]: I1123 16:48:13.589391 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-openstack-s6dvt" event={"ID":"8beee0e1-ccc1-48f2-83ae-a59422187fac","Type":"ContainerDied","Data":"f10f47d6a8871aa6ea0199b741f27fa87e2972a55953cbc045f440928870774b"} Nov 23 16:48:13 crc kubenswrapper[5050]: I1123 16:48:13.589460 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f10f47d6a8871aa6ea0199b741f27fa87e2972a55953cbc045f440928870774b" Nov 23 16:48:13 crc kubenswrapper[5050]: I1123 16:48:13.589483 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-openstack-s6dvt" Nov 23 16:48:13 crc kubenswrapper[5050]: I1123 16:48:13.682765 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/run-os-openstack-openstack-cell1-mx2l6"] Nov 23 16:48:13 crc kubenswrapper[5050]: E1123 16:48:13.683269 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8beee0e1-ccc1-48f2-83ae-a59422187fac" containerName="ssh-known-hosts-openstack" Nov 23 16:48:13 crc kubenswrapper[5050]: I1123 16:48:13.683289 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="8beee0e1-ccc1-48f2-83ae-a59422187fac" containerName="ssh-known-hosts-openstack" Nov 23 16:48:13 crc kubenswrapper[5050]: I1123 16:48:13.683567 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="8beee0e1-ccc1-48f2-83ae-a59422187fac" containerName="ssh-known-hosts-openstack" Nov 23 16:48:13 crc kubenswrapper[5050]: I1123 16:48:13.684492 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/run-os-openstack-openstack-cell1-mx2l6" Nov 23 16:48:13 crc kubenswrapper[5050]: I1123 16:48:13.687558 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 23 16:48:13 crc kubenswrapper[5050]: I1123 16:48:13.687827 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Nov 23 16:48:13 crc kubenswrapper[5050]: I1123 16:48:13.689901 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-ckrpf" Nov 23 16:48:13 crc kubenswrapper[5050]: I1123 16:48:13.698789 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 23 16:48:13 crc kubenswrapper[5050]: I1123 16:48:13.700360 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-openstack-openstack-cell1-mx2l6"] Nov 23 16:48:13 crc kubenswrapper[5050]: I1123 16:48:13.821970 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-25vtx\" (UniqueName: \"kubernetes.io/projected/12cb88f4-57df-468f-b59a-5615d496ee52-kube-api-access-25vtx\") pod \"run-os-openstack-openstack-cell1-mx2l6\" (UID: \"12cb88f4-57df-468f-b59a-5615d496ee52\") " pod="openstack/run-os-openstack-openstack-cell1-mx2l6" Nov 23 16:48:13 crc kubenswrapper[5050]: I1123 16:48:13.822711 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/12cb88f4-57df-468f-b59a-5615d496ee52-ssh-key\") pod \"run-os-openstack-openstack-cell1-mx2l6\" (UID: \"12cb88f4-57df-468f-b59a-5615d496ee52\") " pod="openstack/run-os-openstack-openstack-cell1-mx2l6" Nov 23 16:48:13 crc kubenswrapper[5050]: I1123 16:48:13.822743 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/12cb88f4-57df-468f-b59a-5615d496ee52-inventory\") pod \"run-os-openstack-openstack-cell1-mx2l6\" (UID: \"12cb88f4-57df-468f-b59a-5615d496ee52\") " pod="openstack/run-os-openstack-openstack-cell1-mx2l6" Nov 23 16:48:13 crc kubenswrapper[5050]: I1123 16:48:13.822773 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/12cb88f4-57df-468f-b59a-5615d496ee52-ceph\") pod \"run-os-openstack-openstack-cell1-mx2l6\" (UID: \"12cb88f4-57df-468f-b59a-5615d496ee52\") " pod="openstack/run-os-openstack-openstack-cell1-mx2l6" Nov 23 16:48:13 crc kubenswrapper[5050]: I1123 16:48:13.857826 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-hmn6v"] Nov 23 16:48:13 crc kubenswrapper[5050]: I1123 16:48:13.860596 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-hmn6v" Nov 23 16:48:13 crc kubenswrapper[5050]: I1123 16:48:13.872871 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-hmn6v"] Nov 23 16:48:13 crc kubenswrapper[5050]: I1123 16:48:13.924504 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/12cb88f4-57df-468f-b59a-5615d496ee52-ssh-key\") pod \"run-os-openstack-openstack-cell1-mx2l6\" (UID: \"12cb88f4-57df-468f-b59a-5615d496ee52\") " pod="openstack/run-os-openstack-openstack-cell1-mx2l6" Nov 23 16:48:13 crc kubenswrapper[5050]: I1123 16:48:13.924546 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/12cb88f4-57df-468f-b59a-5615d496ee52-inventory\") pod \"run-os-openstack-openstack-cell1-mx2l6\" (UID: \"12cb88f4-57df-468f-b59a-5615d496ee52\") " pod="openstack/run-os-openstack-openstack-cell1-mx2l6" Nov 23 16:48:13 crc kubenswrapper[5050]: I1123 16:48:13.924581 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/12cb88f4-57df-468f-b59a-5615d496ee52-ceph\") pod \"run-os-openstack-openstack-cell1-mx2l6\" (UID: \"12cb88f4-57df-468f-b59a-5615d496ee52\") " pod="openstack/run-os-openstack-openstack-cell1-mx2l6" Nov 23 16:48:13 crc kubenswrapper[5050]: I1123 16:48:13.924645 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-25vtx\" (UniqueName: \"kubernetes.io/projected/12cb88f4-57df-468f-b59a-5615d496ee52-kube-api-access-25vtx\") pod \"run-os-openstack-openstack-cell1-mx2l6\" (UID: \"12cb88f4-57df-468f-b59a-5615d496ee52\") " pod="openstack/run-os-openstack-openstack-cell1-mx2l6" Nov 23 16:48:13 crc kubenswrapper[5050]: I1123 16:48:13.929327 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/12cb88f4-57df-468f-b59a-5615d496ee52-ceph\") pod \"run-os-openstack-openstack-cell1-mx2l6\" (UID: \"12cb88f4-57df-468f-b59a-5615d496ee52\") " pod="openstack/run-os-openstack-openstack-cell1-mx2l6" Nov 23 16:48:13 crc kubenswrapper[5050]: I1123 16:48:13.929774 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/12cb88f4-57df-468f-b59a-5615d496ee52-ssh-key\") pod \"run-os-openstack-openstack-cell1-mx2l6\" (UID: \"12cb88f4-57df-468f-b59a-5615d496ee52\") " pod="openstack/run-os-openstack-openstack-cell1-mx2l6" Nov 23 16:48:13 crc kubenswrapper[5050]: I1123 16:48:13.931948 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/12cb88f4-57df-468f-b59a-5615d496ee52-inventory\") pod \"run-os-openstack-openstack-cell1-mx2l6\" (UID: \"12cb88f4-57df-468f-b59a-5615d496ee52\") " pod="openstack/run-os-openstack-openstack-cell1-mx2l6" Nov 23 16:48:13 crc kubenswrapper[5050]: I1123 16:48:13.953596 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-25vtx\" (UniqueName: \"kubernetes.io/projected/12cb88f4-57df-468f-b59a-5615d496ee52-kube-api-access-25vtx\") pod \"run-os-openstack-openstack-cell1-mx2l6\" (UID: \"12cb88f4-57df-468f-b59a-5615d496ee52\") " pod="openstack/run-os-openstack-openstack-cell1-mx2l6" Nov 23 16:48:14 crc kubenswrapper[5050]: I1123 16:48:14.011069 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/run-os-openstack-openstack-cell1-mx2l6" Nov 23 16:48:14 crc kubenswrapper[5050]: I1123 16:48:14.027050 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/03963976-3954-4975-bbcc-0fa4cf995857-catalog-content\") pod \"redhat-marketplace-hmn6v\" (UID: \"03963976-3954-4975-bbcc-0fa4cf995857\") " pod="openshift-marketplace/redhat-marketplace-hmn6v" Nov 23 16:48:14 crc kubenswrapper[5050]: I1123 16:48:14.027320 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/03963976-3954-4975-bbcc-0fa4cf995857-utilities\") pod \"redhat-marketplace-hmn6v\" (UID: \"03963976-3954-4975-bbcc-0fa4cf995857\") " pod="openshift-marketplace/redhat-marketplace-hmn6v" Nov 23 16:48:14 crc kubenswrapper[5050]: I1123 16:48:14.027991 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tgwx7\" (UniqueName: \"kubernetes.io/projected/03963976-3954-4975-bbcc-0fa4cf995857-kube-api-access-tgwx7\") pod \"redhat-marketplace-hmn6v\" (UID: \"03963976-3954-4975-bbcc-0fa4cf995857\") " pod="openshift-marketplace/redhat-marketplace-hmn6v" Nov 23 16:48:14 crc kubenswrapper[5050]: I1123 16:48:14.130618 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tgwx7\" (UniqueName: \"kubernetes.io/projected/03963976-3954-4975-bbcc-0fa4cf995857-kube-api-access-tgwx7\") pod \"redhat-marketplace-hmn6v\" (UID: \"03963976-3954-4975-bbcc-0fa4cf995857\") " pod="openshift-marketplace/redhat-marketplace-hmn6v" Nov 23 16:48:14 crc kubenswrapper[5050]: I1123 16:48:14.130760 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/03963976-3954-4975-bbcc-0fa4cf995857-catalog-content\") pod \"redhat-marketplace-hmn6v\" (UID: \"03963976-3954-4975-bbcc-0fa4cf995857\") " pod="openshift-marketplace/redhat-marketplace-hmn6v" Nov 23 16:48:14 crc kubenswrapper[5050]: I1123 16:48:14.130822 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/03963976-3954-4975-bbcc-0fa4cf995857-utilities\") pod \"redhat-marketplace-hmn6v\" (UID: \"03963976-3954-4975-bbcc-0fa4cf995857\") " pod="openshift-marketplace/redhat-marketplace-hmn6v" Nov 23 16:48:14 crc kubenswrapper[5050]: I1123 16:48:14.131358 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/03963976-3954-4975-bbcc-0fa4cf995857-utilities\") pod \"redhat-marketplace-hmn6v\" (UID: \"03963976-3954-4975-bbcc-0fa4cf995857\") " pod="openshift-marketplace/redhat-marketplace-hmn6v" Nov 23 16:48:14 crc kubenswrapper[5050]: I1123 16:48:14.132036 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/03963976-3954-4975-bbcc-0fa4cf995857-catalog-content\") pod \"redhat-marketplace-hmn6v\" (UID: \"03963976-3954-4975-bbcc-0fa4cf995857\") " pod="openshift-marketplace/redhat-marketplace-hmn6v" Nov 23 16:48:14 crc kubenswrapper[5050]: I1123 16:48:14.173594 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tgwx7\" (UniqueName: \"kubernetes.io/projected/03963976-3954-4975-bbcc-0fa4cf995857-kube-api-access-tgwx7\") pod 
\"redhat-marketplace-hmn6v\" (UID: \"03963976-3954-4975-bbcc-0fa4cf995857\") " pod="openshift-marketplace/redhat-marketplace-hmn6v" Nov 23 16:48:14 crc kubenswrapper[5050]: I1123 16:48:14.225277 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-hmn6v" Nov 23 16:48:14 crc kubenswrapper[5050]: I1123 16:48:14.758969 5050 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 23 16:48:14 crc kubenswrapper[5050]: I1123 16:48:14.765195 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-openstack-openstack-cell1-mx2l6"] Nov 23 16:48:14 crc kubenswrapper[5050]: I1123 16:48:14.827543 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-hmn6v"] Nov 23 16:48:14 crc kubenswrapper[5050]: W1123 16:48:14.831243 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod03963976_3954_4975_bbcc_0fa4cf995857.slice/crio-4508cf0acdcafaff79c012e67a67c11a12d0fc95a4b7e7e173da14f485520b77 WatchSource:0}: Error finding container 4508cf0acdcafaff79c012e67a67c11a12d0fc95a4b7e7e173da14f485520b77: Status 404 returned error can't find the container with id 4508cf0acdcafaff79c012e67a67c11a12d0fc95a4b7e7e173da14f485520b77 Nov 23 16:48:15 crc kubenswrapper[5050]: I1123 16:48:15.642165 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-openstack-openstack-cell1-mx2l6" event={"ID":"12cb88f4-57df-468f-b59a-5615d496ee52","Type":"ContainerStarted","Data":"3c65dd9def45241cbcea4cdd36d1b3d54d43fa6aacf40937b6b4962e633efa64"} Nov 23 16:48:15 crc kubenswrapper[5050]: I1123 16:48:15.645295 5050 generic.go:334] "Generic (PLEG): container finished" podID="03963976-3954-4975-bbcc-0fa4cf995857" containerID="68209911fdb876fcb7b629f6e6438aee854bab12199efcebad56180fe5e4b9e7" exitCode=0 Nov 23 16:48:15 crc kubenswrapper[5050]: I1123 16:48:15.645412 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hmn6v" event={"ID":"03963976-3954-4975-bbcc-0fa4cf995857","Type":"ContainerDied","Data":"68209911fdb876fcb7b629f6e6438aee854bab12199efcebad56180fe5e4b9e7"} Nov 23 16:48:15 crc kubenswrapper[5050]: I1123 16:48:15.645508 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hmn6v" event={"ID":"03963976-3954-4975-bbcc-0fa4cf995857","Type":"ContainerStarted","Data":"4508cf0acdcafaff79c012e67a67c11a12d0fc95a4b7e7e173da14f485520b77"} Nov 23 16:48:16 crc kubenswrapper[5050]: I1123 16:48:16.660075 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-openstack-openstack-cell1-mx2l6" event={"ID":"12cb88f4-57df-468f-b59a-5615d496ee52","Type":"ContainerStarted","Data":"8028da6516984682bf69d4228b2add88b26b5f86f61de4213b86826c89619109"} Nov 23 16:48:17 crc kubenswrapper[5050]: I1123 16:48:17.674223 5050 generic.go:334] "Generic (PLEG): container finished" podID="03963976-3954-4975-bbcc-0fa4cf995857" containerID="2a67f8d004df6b7a67fa9e20a6c6fe6b02b58be2132f5c43d3220e5e9c7de594" exitCode=0 Nov 23 16:48:17 crc kubenswrapper[5050]: I1123 16:48:17.674272 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hmn6v" event={"ID":"03963976-3954-4975-bbcc-0fa4cf995857","Type":"ContainerDied","Data":"2a67f8d004df6b7a67fa9e20a6c6fe6b02b58be2132f5c43d3220e5e9c7de594"} Nov 23 16:48:17 crc kubenswrapper[5050]: I1123 
16:48:17.701598 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/run-os-openstack-openstack-cell1-mx2l6" podStartSLOduration=3.187947435 podStartE2EDuration="4.701549199s" podCreationTimestamp="2025-11-23 16:48:13 +0000 UTC" firstStartedPulling="2025-11-23 16:48:14.758655794 +0000 UTC m=+7589.925652279" lastFinishedPulling="2025-11-23 16:48:16.272257548 +0000 UTC m=+7591.439254043" observedRunningTime="2025-11-23 16:48:16.696228681 +0000 UTC m=+7591.863225176" watchObservedRunningTime="2025-11-23 16:48:17.701549199 +0000 UTC m=+7592.868545684" Nov 23 16:48:18 crc kubenswrapper[5050]: I1123 16:48:18.689677 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hmn6v" event={"ID":"03963976-3954-4975-bbcc-0fa4cf995857","Type":"ContainerStarted","Data":"8146e3ffad9e060cc7da4fab5de71488ad7ddd6e64b9ab41a41ba11019ab8798"} Nov 23 16:48:18 crc kubenswrapper[5050]: I1123 16:48:18.724423 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-hmn6v" podStartSLOduration=3.242124496 podStartE2EDuration="5.724396473s" podCreationTimestamp="2025-11-23 16:48:13 +0000 UTC" firstStartedPulling="2025-11-23 16:48:15.648227566 +0000 UTC m=+7590.815224091" lastFinishedPulling="2025-11-23 16:48:18.130499573 +0000 UTC m=+7593.297496068" observedRunningTime="2025-11-23 16:48:18.7229022 +0000 UTC m=+7593.889898685" watchObservedRunningTime="2025-11-23 16:48:18.724396473 +0000 UTC m=+7593.891392958" Nov 23 16:48:22 crc kubenswrapper[5050]: I1123 16:48:22.522536 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-nvc6n"] Nov 23 16:48:22 crc kubenswrapper[5050]: I1123 16:48:22.544051 5050 util.go:30] "No sandbox for pod can be found. 
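The two "Observed pod startup duration" entries above come from the kubelet's pod startup latency tracker: podStartSLOduration is the startup time with image-pull time excluded, while podStartE2EDuration runs from pod creation to the pod being observed running. A minimal Go sketch for pulling those fields out of a log like this one follows; the regular expressions and the output format are illustrative assumptions, not part of the kubelet:

package main

import (
	"bufio"
	"fmt"
	"os"
	"regexp"
	"strings"
)

// Field patterns are assumptions based on the "Observed pod startup duration"
// entries above: pod="ns/name", podStartSLOduration=<float>, podStartE2EDuration="<dur>".
var (
	podRe = regexp.MustCompile(`pod="([^"]+)"`)
	sloRe = regexp.MustCompile(`podStartSLOduration=([0-9.]+)`)
	e2eRe = regexp.MustCompile(`podStartE2EDuration="([^"]+)"`)
)

func main() {
	sc := bufio.NewScanner(os.Stdin)
	sc.Buffer(make([]byte, 0, 1024*1024), 1024*1024) // kubelet entries can be very long
	for sc.Scan() {
		line := sc.Text()
		if !strings.Contains(line, "Observed pod startup duration") {
			continue
		}
		pod := podRe.FindStringSubmatch(line)
		slo := sloRe.FindStringSubmatch(line)
		e2e := e2eRe.FindStringSubmatch(line)
		if pod == nil || slo == nil || e2e == nil {
			continue // malformed or truncated entry
		}
		fmt.Printf("%s: SLO=%ss e2e=%s\n", pod[1], slo[1], e2e[1])
	}
}

Fed this log on stdin, it would report run-os-openstack-openstack-cell1-mx2l6 at roughly 3.19s (SLO) versus 4.70s end to end, matching the entry above.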
Need to start a new one" pod="openshift-marketplace/redhat-operators-nvc6n" Nov 23 16:48:22 crc kubenswrapper[5050]: I1123 16:48:22.600289 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-nvc6n"] Nov 23 16:48:22 crc kubenswrapper[5050]: I1123 16:48:22.683188 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6gm7f\" (UniqueName: \"kubernetes.io/projected/4403282d-04b1-4e06-826e-2c847a6d40ac-kube-api-access-6gm7f\") pod \"redhat-operators-nvc6n\" (UID: \"4403282d-04b1-4e06-826e-2c847a6d40ac\") " pod="openshift-marketplace/redhat-operators-nvc6n" Nov 23 16:48:22 crc kubenswrapper[5050]: I1123 16:48:22.683248 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4403282d-04b1-4e06-826e-2c847a6d40ac-catalog-content\") pod \"redhat-operators-nvc6n\" (UID: \"4403282d-04b1-4e06-826e-2c847a6d40ac\") " pod="openshift-marketplace/redhat-operators-nvc6n" Nov 23 16:48:22 crc kubenswrapper[5050]: I1123 16:48:22.683353 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4403282d-04b1-4e06-826e-2c847a6d40ac-utilities\") pod \"redhat-operators-nvc6n\" (UID: \"4403282d-04b1-4e06-826e-2c847a6d40ac\") " pod="openshift-marketplace/redhat-operators-nvc6n" Nov 23 16:48:22 crc kubenswrapper[5050]: I1123 16:48:22.785936 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6gm7f\" (UniqueName: \"kubernetes.io/projected/4403282d-04b1-4e06-826e-2c847a6d40ac-kube-api-access-6gm7f\") pod \"redhat-operators-nvc6n\" (UID: \"4403282d-04b1-4e06-826e-2c847a6d40ac\") " pod="openshift-marketplace/redhat-operators-nvc6n" Nov 23 16:48:22 crc kubenswrapper[5050]: I1123 16:48:22.786016 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4403282d-04b1-4e06-826e-2c847a6d40ac-catalog-content\") pod \"redhat-operators-nvc6n\" (UID: \"4403282d-04b1-4e06-826e-2c847a6d40ac\") " pod="openshift-marketplace/redhat-operators-nvc6n" Nov 23 16:48:22 crc kubenswrapper[5050]: I1123 16:48:22.786117 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4403282d-04b1-4e06-826e-2c847a6d40ac-utilities\") pod \"redhat-operators-nvc6n\" (UID: \"4403282d-04b1-4e06-826e-2c847a6d40ac\") " pod="openshift-marketplace/redhat-operators-nvc6n" Nov 23 16:48:22 crc kubenswrapper[5050]: I1123 16:48:22.786833 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4403282d-04b1-4e06-826e-2c847a6d40ac-catalog-content\") pod \"redhat-operators-nvc6n\" (UID: \"4403282d-04b1-4e06-826e-2c847a6d40ac\") " pod="openshift-marketplace/redhat-operators-nvc6n" Nov 23 16:48:22 crc kubenswrapper[5050]: I1123 16:48:22.786930 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4403282d-04b1-4e06-826e-2c847a6d40ac-utilities\") pod \"redhat-operators-nvc6n\" (UID: \"4403282d-04b1-4e06-826e-2c847a6d40ac\") " pod="openshift-marketplace/redhat-operators-nvc6n" Nov 23 16:48:22 crc kubenswrapper[5050]: I1123 16:48:22.811271 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-6gm7f\" (UniqueName: \"kubernetes.io/projected/4403282d-04b1-4e06-826e-2c847a6d40ac-kube-api-access-6gm7f\") pod \"redhat-operators-nvc6n\" (UID: \"4403282d-04b1-4e06-826e-2c847a6d40ac\") " pod="openshift-marketplace/redhat-operators-nvc6n" Nov 23 16:48:22 crc kubenswrapper[5050]: I1123 16:48:22.904710 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-nvc6n" Nov 23 16:48:23 crc kubenswrapper[5050]: I1123 16:48:23.470900 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-nvc6n"] Nov 23 16:48:23 crc kubenswrapper[5050]: I1123 16:48:23.756272 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nvc6n" event={"ID":"4403282d-04b1-4e06-826e-2c847a6d40ac","Type":"ContainerStarted","Data":"dc3fc0a76931d80089b173d393dd672fa055ef07daaceee68fac0c36247174cb"} Nov 23 16:48:23 crc kubenswrapper[5050]: I1123 16:48:23.756674 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nvc6n" event={"ID":"4403282d-04b1-4e06-826e-2c847a6d40ac","Type":"ContainerStarted","Data":"b79f2127760f207c7071413a110a7a250c5527e34ae5966c944e42467150692d"} Nov 23 16:48:24 crc kubenswrapper[5050]: I1123 16:48:24.226097 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-hmn6v" Nov 23 16:48:24 crc kubenswrapper[5050]: I1123 16:48:24.226311 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-hmn6v" Nov 23 16:48:24 crc kubenswrapper[5050]: I1123 16:48:24.287556 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-hmn6v" Nov 23 16:48:24 crc kubenswrapper[5050]: I1123 16:48:24.769639 5050 generic.go:334] "Generic (PLEG): container finished" podID="4403282d-04b1-4e06-826e-2c847a6d40ac" containerID="dc3fc0a76931d80089b173d393dd672fa055ef07daaceee68fac0c36247174cb" exitCode=0 Nov 23 16:48:24 crc kubenswrapper[5050]: I1123 16:48:24.769700 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nvc6n" event={"ID":"4403282d-04b1-4e06-826e-2c847a6d40ac","Type":"ContainerDied","Data":"dc3fc0a76931d80089b173d393dd672fa055ef07daaceee68fac0c36247174cb"} Nov 23 16:48:24 crc kubenswrapper[5050]: I1123 16:48:24.872373 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-hmn6v" Nov 23 16:48:25 crc kubenswrapper[5050]: I1123 16:48:25.791779 5050 generic.go:334] "Generic (PLEG): container finished" podID="12cb88f4-57df-468f-b59a-5615d496ee52" containerID="8028da6516984682bf69d4228b2add88b26b5f86f61de4213b86826c89619109" exitCode=0 Nov 23 16:48:25 crc kubenswrapper[5050]: I1123 16:48:25.791909 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-openstack-openstack-cell1-mx2l6" event={"ID":"12cb88f4-57df-468f-b59a-5615d496ee52","Type":"ContainerDied","Data":"8028da6516984682bf69d4228b2add88b26b5f86f61de4213b86826c89619109"} Nov 23 16:48:26 crc kubenswrapper[5050]: I1123 16:48:26.695409 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-hmn6v"] Nov 23 16:48:26 crc kubenswrapper[5050]: I1123 16:48:26.805879 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nvc6n" 
event={"ID":"4403282d-04b1-4e06-826e-2c847a6d40ac","Type":"ContainerStarted","Data":"e0bee8e87b05da0d686a60a88ea56979cd48ad7cf5c017d05771a944563a28f8"} Nov 23 16:48:27 crc kubenswrapper[5050]: I1123 16:48:27.380738 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-openstack-openstack-cell1-mx2l6" Nov 23 16:48:27 crc kubenswrapper[5050]: I1123 16:48:27.515439 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/12cb88f4-57df-468f-b59a-5615d496ee52-ssh-key\") pod \"12cb88f4-57df-468f-b59a-5615d496ee52\" (UID: \"12cb88f4-57df-468f-b59a-5615d496ee52\") " Nov 23 16:48:27 crc kubenswrapper[5050]: I1123 16:48:27.515582 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/12cb88f4-57df-468f-b59a-5615d496ee52-inventory\") pod \"12cb88f4-57df-468f-b59a-5615d496ee52\" (UID: \"12cb88f4-57df-468f-b59a-5615d496ee52\") " Nov 23 16:48:27 crc kubenswrapper[5050]: I1123 16:48:27.515610 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/12cb88f4-57df-468f-b59a-5615d496ee52-ceph\") pod \"12cb88f4-57df-468f-b59a-5615d496ee52\" (UID: \"12cb88f4-57df-468f-b59a-5615d496ee52\") " Nov 23 16:48:27 crc kubenswrapper[5050]: I1123 16:48:27.515926 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-25vtx\" (UniqueName: \"kubernetes.io/projected/12cb88f4-57df-468f-b59a-5615d496ee52-kube-api-access-25vtx\") pod \"12cb88f4-57df-468f-b59a-5615d496ee52\" (UID: \"12cb88f4-57df-468f-b59a-5615d496ee52\") " Nov 23 16:48:27 crc kubenswrapper[5050]: I1123 16:48:27.523849 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/12cb88f4-57df-468f-b59a-5615d496ee52-kube-api-access-25vtx" (OuterVolumeSpecName: "kube-api-access-25vtx") pod "12cb88f4-57df-468f-b59a-5615d496ee52" (UID: "12cb88f4-57df-468f-b59a-5615d496ee52"). InnerVolumeSpecName "kube-api-access-25vtx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 16:48:27 crc kubenswrapper[5050]: I1123 16:48:27.525722 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/12cb88f4-57df-468f-b59a-5615d496ee52-ceph" (OuterVolumeSpecName: "ceph") pod "12cb88f4-57df-468f-b59a-5615d496ee52" (UID: "12cb88f4-57df-468f-b59a-5615d496ee52"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:48:27 crc kubenswrapper[5050]: I1123 16:48:27.551258 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/12cb88f4-57df-468f-b59a-5615d496ee52-inventory" (OuterVolumeSpecName: "inventory") pod "12cb88f4-57df-468f-b59a-5615d496ee52" (UID: "12cb88f4-57df-468f-b59a-5615d496ee52"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:48:27 crc kubenswrapper[5050]: I1123 16:48:27.554826 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/12cb88f4-57df-468f-b59a-5615d496ee52-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "12cb88f4-57df-468f-b59a-5615d496ee52" (UID: "12cb88f4-57df-468f-b59a-5615d496ee52"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:48:27 crc kubenswrapper[5050]: I1123 16:48:27.619496 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-25vtx\" (UniqueName: \"kubernetes.io/projected/12cb88f4-57df-468f-b59a-5615d496ee52-kube-api-access-25vtx\") on node \"crc\" DevicePath \"\"" Nov 23 16:48:27 crc kubenswrapper[5050]: I1123 16:48:27.619588 5050 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/12cb88f4-57df-468f-b59a-5615d496ee52-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 23 16:48:27 crc kubenswrapper[5050]: I1123 16:48:27.619609 5050 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/12cb88f4-57df-468f-b59a-5615d496ee52-inventory\") on node \"crc\" DevicePath \"\"" Nov 23 16:48:27 crc kubenswrapper[5050]: I1123 16:48:27.619633 5050 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/12cb88f4-57df-468f-b59a-5615d496ee52-ceph\") on node \"crc\" DevicePath \"\"" Nov 23 16:48:27 crc kubenswrapper[5050]: I1123 16:48:27.819149 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-openstack-openstack-cell1-mx2l6" event={"ID":"12cb88f4-57df-468f-b59a-5615d496ee52","Type":"ContainerDied","Data":"3c65dd9def45241cbcea4cdd36d1b3d54d43fa6aacf40937b6b4962e633efa64"} Nov 23 16:48:27 crc kubenswrapper[5050]: I1123 16:48:27.819704 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3c65dd9def45241cbcea4cdd36d1b3d54d43fa6aacf40937b6b4962e633efa64" Nov 23 16:48:27 crc kubenswrapper[5050]: I1123 16:48:27.819387 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-hmn6v" podUID="03963976-3954-4975-bbcc-0fa4cf995857" containerName="registry-server" containerID="cri-o://8146e3ffad9e060cc7da4fab5de71488ad7ddd6e64b9ab41a41ba11019ab8798" gracePeriod=2 Nov 23 16:48:27 crc kubenswrapper[5050]: I1123 16:48:27.819216 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-openstack-openstack-cell1-mx2l6" Nov 23 16:48:27 crc kubenswrapper[5050]: I1123 16:48:27.914762 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/reboot-os-openstack-openstack-cell1-q4pmx"] Nov 23 16:48:27 crc kubenswrapper[5050]: E1123 16:48:27.915417 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="12cb88f4-57df-468f-b59a-5615d496ee52" containerName="run-os-openstack-openstack-cell1" Nov 23 16:48:27 crc kubenswrapper[5050]: I1123 16:48:27.915457 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="12cb88f4-57df-468f-b59a-5615d496ee52" containerName="run-os-openstack-openstack-cell1" Nov 23 16:48:27 crc kubenswrapper[5050]: I1123 16:48:27.915714 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="12cb88f4-57df-468f-b59a-5615d496ee52" containerName="run-os-openstack-openstack-cell1" Nov 23 16:48:27 crc kubenswrapper[5050]: I1123 16:48:27.916620 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/reboot-os-openstack-openstack-cell1-q4pmx" Nov 23 16:48:27 crc kubenswrapper[5050]: I1123 16:48:27.918802 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Nov 23 16:48:27 crc kubenswrapper[5050]: I1123 16:48:27.919718 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 23 16:48:27 crc kubenswrapper[5050]: I1123 16:48:27.919766 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 23 16:48:27 crc kubenswrapper[5050]: I1123 16:48:27.930542 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-openstack-openstack-cell1-q4pmx"] Nov 23 16:48:27 crc kubenswrapper[5050]: I1123 16:48:27.941278 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-ckrpf" Nov 23 16:48:28 crc kubenswrapper[5050]: I1123 16:48:28.034118 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/98cb76c1-7d81-491f-bd07-037808d66926-inventory\") pod \"reboot-os-openstack-openstack-cell1-q4pmx\" (UID: \"98cb76c1-7d81-491f-bd07-037808d66926\") " pod="openstack/reboot-os-openstack-openstack-cell1-q4pmx" Nov 23 16:48:28 crc kubenswrapper[5050]: I1123 16:48:28.034177 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/98cb76c1-7d81-491f-bd07-037808d66926-ceph\") pod \"reboot-os-openstack-openstack-cell1-q4pmx\" (UID: \"98cb76c1-7d81-491f-bd07-037808d66926\") " pod="openstack/reboot-os-openstack-openstack-cell1-q4pmx" Nov 23 16:48:28 crc kubenswrapper[5050]: I1123 16:48:28.034527 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/98cb76c1-7d81-491f-bd07-037808d66926-ssh-key\") pod \"reboot-os-openstack-openstack-cell1-q4pmx\" (UID: \"98cb76c1-7d81-491f-bd07-037808d66926\") " pod="openstack/reboot-os-openstack-openstack-cell1-q4pmx" Nov 23 16:48:28 crc kubenswrapper[5050]: I1123 16:48:28.034626 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jfgxg\" (UniqueName: \"kubernetes.io/projected/98cb76c1-7d81-491f-bd07-037808d66926-kube-api-access-jfgxg\") pod \"reboot-os-openstack-openstack-cell1-q4pmx\" (UID: \"98cb76c1-7d81-491f-bd07-037808d66926\") " pod="openstack/reboot-os-openstack-openstack-cell1-q4pmx" Nov 23 16:48:28 crc kubenswrapper[5050]: I1123 16:48:28.137130 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/98cb76c1-7d81-491f-bd07-037808d66926-inventory\") pod \"reboot-os-openstack-openstack-cell1-q4pmx\" (UID: \"98cb76c1-7d81-491f-bd07-037808d66926\") " pod="openstack/reboot-os-openstack-openstack-cell1-q4pmx" Nov 23 16:48:28 crc kubenswrapper[5050]: I1123 16:48:28.137198 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/98cb76c1-7d81-491f-bd07-037808d66926-ceph\") pod \"reboot-os-openstack-openstack-cell1-q4pmx\" (UID: \"98cb76c1-7d81-491f-bd07-037808d66926\") " pod="openstack/reboot-os-openstack-openstack-cell1-q4pmx" Nov 23 16:48:28 crc kubenswrapper[5050]: I1123 16:48:28.137278 5050 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/98cb76c1-7d81-491f-bd07-037808d66926-ssh-key\") pod \"reboot-os-openstack-openstack-cell1-q4pmx\" (UID: \"98cb76c1-7d81-491f-bd07-037808d66926\") " pod="openstack/reboot-os-openstack-openstack-cell1-q4pmx" Nov 23 16:48:28 crc kubenswrapper[5050]: I1123 16:48:28.137315 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jfgxg\" (UniqueName: \"kubernetes.io/projected/98cb76c1-7d81-491f-bd07-037808d66926-kube-api-access-jfgxg\") pod \"reboot-os-openstack-openstack-cell1-q4pmx\" (UID: \"98cb76c1-7d81-491f-bd07-037808d66926\") " pod="openstack/reboot-os-openstack-openstack-cell1-q4pmx" Nov 23 16:48:28 crc kubenswrapper[5050]: I1123 16:48:28.142003 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/98cb76c1-7d81-491f-bd07-037808d66926-ssh-key\") pod \"reboot-os-openstack-openstack-cell1-q4pmx\" (UID: \"98cb76c1-7d81-491f-bd07-037808d66926\") " pod="openstack/reboot-os-openstack-openstack-cell1-q4pmx" Nov 23 16:48:28 crc kubenswrapper[5050]: I1123 16:48:28.142005 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/98cb76c1-7d81-491f-bd07-037808d66926-inventory\") pod \"reboot-os-openstack-openstack-cell1-q4pmx\" (UID: \"98cb76c1-7d81-491f-bd07-037808d66926\") " pod="openstack/reboot-os-openstack-openstack-cell1-q4pmx" Nov 23 16:48:28 crc kubenswrapper[5050]: I1123 16:48:28.153043 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/98cb76c1-7d81-491f-bd07-037808d66926-ceph\") pod \"reboot-os-openstack-openstack-cell1-q4pmx\" (UID: \"98cb76c1-7d81-491f-bd07-037808d66926\") " pod="openstack/reboot-os-openstack-openstack-cell1-q4pmx" Nov 23 16:48:28 crc kubenswrapper[5050]: I1123 16:48:28.157542 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jfgxg\" (UniqueName: \"kubernetes.io/projected/98cb76c1-7d81-491f-bd07-037808d66926-kube-api-access-jfgxg\") pod \"reboot-os-openstack-openstack-cell1-q4pmx\" (UID: \"98cb76c1-7d81-491f-bd07-037808d66926\") " pod="openstack/reboot-os-openstack-openstack-cell1-q4pmx" Nov 23 16:48:28 crc kubenswrapper[5050]: I1123 16:48:28.298045 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/reboot-os-openstack-openstack-cell1-q4pmx" Nov 23 16:48:28 crc kubenswrapper[5050]: I1123 16:48:28.845115 5050 generic.go:334] "Generic (PLEG): container finished" podID="03963976-3954-4975-bbcc-0fa4cf995857" containerID="8146e3ffad9e060cc7da4fab5de71488ad7ddd6e64b9ab41a41ba11019ab8798" exitCode=0 Nov 23 16:48:28 crc kubenswrapper[5050]: I1123 16:48:28.845204 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hmn6v" event={"ID":"03963976-3954-4975-bbcc-0fa4cf995857","Type":"ContainerDied","Data":"8146e3ffad9e060cc7da4fab5de71488ad7ddd6e64b9ab41a41ba11019ab8798"} Nov 23 16:48:28 crc kubenswrapper[5050]: I1123 16:48:28.851646 5050 generic.go:334] "Generic (PLEG): container finished" podID="4403282d-04b1-4e06-826e-2c847a6d40ac" containerID="e0bee8e87b05da0d686a60a88ea56979cd48ad7cf5c017d05771a944563a28f8" exitCode=0 Nov 23 16:48:28 crc kubenswrapper[5050]: I1123 16:48:28.851749 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nvc6n" event={"ID":"4403282d-04b1-4e06-826e-2c847a6d40ac","Type":"ContainerDied","Data":"e0bee8e87b05da0d686a60a88ea56979cd48ad7cf5c017d05771a944563a28f8"} Nov 23 16:48:28 crc kubenswrapper[5050]: I1123 16:48:28.941104 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-hmn6v" Nov 23 16:48:28 crc kubenswrapper[5050]: I1123 16:48:28.969353 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-openstack-openstack-cell1-q4pmx"] Nov 23 16:48:28 crc kubenswrapper[5050]: W1123 16:48:28.976051 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod98cb76c1_7d81_491f_bd07_037808d66926.slice/crio-69e89701482c056208edb1ed0d9a181ffbe85dfcfa185223c7a88c18bfd887b4 WatchSource:0}: Error finding container 69e89701482c056208edb1ed0d9a181ffbe85dfcfa185223c7a88c18bfd887b4: Status 404 returned error can't find the container with id 69e89701482c056208edb1ed0d9a181ffbe85dfcfa185223c7a88c18bfd887b4 Nov 23 16:48:29 crc kubenswrapper[5050]: I1123 16:48:29.064716 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tgwx7\" (UniqueName: \"kubernetes.io/projected/03963976-3954-4975-bbcc-0fa4cf995857-kube-api-access-tgwx7\") pod \"03963976-3954-4975-bbcc-0fa4cf995857\" (UID: \"03963976-3954-4975-bbcc-0fa4cf995857\") " Nov 23 16:48:29 crc kubenswrapper[5050]: I1123 16:48:29.065000 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/03963976-3954-4975-bbcc-0fa4cf995857-catalog-content\") pod \"03963976-3954-4975-bbcc-0fa4cf995857\" (UID: \"03963976-3954-4975-bbcc-0fa4cf995857\") " Nov 23 16:48:29 crc kubenswrapper[5050]: I1123 16:48:29.065041 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/03963976-3954-4975-bbcc-0fa4cf995857-utilities\") pod \"03963976-3954-4975-bbcc-0fa4cf995857\" (UID: \"03963976-3954-4975-bbcc-0fa4cf995857\") " Nov 23 16:48:29 crc kubenswrapper[5050]: I1123 16:48:29.066932 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/03963976-3954-4975-bbcc-0fa4cf995857-utilities" (OuterVolumeSpecName: "utilities") pod "03963976-3954-4975-bbcc-0fa4cf995857" (UID: "03963976-3954-4975-bbcc-0fa4cf995857"). 
InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 16:48:29 crc kubenswrapper[5050]: I1123 16:48:29.071095 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/03963976-3954-4975-bbcc-0fa4cf995857-kube-api-access-tgwx7" (OuterVolumeSpecName: "kube-api-access-tgwx7") pod "03963976-3954-4975-bbcc-0fa4cf995857" (UID: "03963976-3954-4975-bbcc-0fa4cf995857"). InnerVolumeSpecName "kube-api-access-tgwx7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 16:48:29 crc kubenswrapper[5050]: I1123 16:48:29.081846 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/03963976-3954-4975-bbcc-0fa4cf995857-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "03963976-3954-4975-bbcc-0fa4cf995857" (UID: "03963976-3954-4975-bbcc-0fa4cf995857"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 16:48:29 crc kubenswrapper[5050]: I1123 16:48:29.169705 5050 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/03963976-3954-4975-bbcc-0fa4cf995857-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 23 16:48:29 crc kubenswrapper[5050]: I1123 16:48:29.169775 5050 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/03963976-3954-4975-bbcc-0fa4cf995857-utilities\") on node \"crc\" DevicePath \"\"" Nov 23 16:48:29 crc kubenswrapper[5050]: I1123 16:48:29.169797 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tgwx7\" (UniqueName: \"kubernetes.io/projected/03963976-3954-4975-bbcc-0fa4cf995857-kube-api-access-tgwx7\") on node \"crc\" DevicePath \"\"" Nov 23 16:48:29 crc kubenswrapper[5050]: I1123 16:48:29.224153 5050 patch_prober.go:28] interesting pod/machine-config-daemon-hlrlq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 23 16:48:29 crc kubenswrapper[5050]: I1123 16:48:29.224229 5050 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 23 16:48:29 crc kubenswrapper[5050]: I1123 16:48:29.866682 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hmn6v" event={"ID":"03963976-3954-4975-bbcc-0fa4cf995857","Type":"ContainerDied","Data":"4508cf0acdcafaff79c012e67a67c11a12d0fc95a4b7e7e173da14f485520b77"} Nov 23 16:48:29 crc kubenswrapper[5050]: I1123 16:48:29.866751 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-hmn6v" Nov 23 16:48:29 crc kubenswrapper[5050]: I1123 16:48:29.867229 5050 scope.go:117] "RemoveContainer" containerID="8146e3ffad9e060cc7da4fab5de71488ad7ddd6e64b9ab41a41ba11019ab8798" Nov 23 16:48:29 crc kubenswrapper[5050]: I1123 16:48:29.871493 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-openstack-openstack-cell1-q4pmx" event={"ID":"98cb76c1-7d81-491f-bd07-037808d66926","Type":"ContainerStarted","Data":"94965effc5ae7930cc6b286e40d0225dc7beb458498450855f6219b7bf9f9e97"} Nov 23 16:48:29 crc kubenswrapper[5050]: I1123 16:48:29.871558 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-openstack-openstack-cell1-q4pmx" event={"ID":"98cb76c1-7d81-491f-bd07-037808d66926","Type":"ContainerStarted","Data":"69e89701482c056208edb1ed0d9a181ffbe85dfcfa185223c7a88c18bfd887b4"} Nov 23 16:48:29 crc kubenswrapper[5050]: I1123 16:48:29.880866 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nvc6n" event={"ID":"4403282d-04b1-4e06-826e-2c847a6d40ac","Type":"ContainerStarted","Data":"145b35180199578b3a54f4abd9208634361cf7945b375f75b3160c04822795fa"} Nov 23 16:48:29 crc kubenswrapper[5050]: I1123 16:48:29.896025 5050 scope.go:117] "RemoveContainer" containerID="2a67f8d004df6b7a67fa9e20a6c6fe6b02b58be2132f5c43d3220e5e9c7de594" Nov 23 16:48:29 crc kubenswrapper[5050]: I1123 16:48:29.920038 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/reboot-os-openstack-openstack-cell1-q4pmx" podStartSLOduration=2.532729477 podStartE2EDuration="2.919999711s" podCreationTimestamp="2025-11-23 16:48:27 +0000 UTC" firstStartedPulling="2025-11-23 16:48:28.983405899 +0000 UTC m=+7604.150402384" lastFinishedPulling="2025-11-23 16:48:29.370676133 +0000 UTC m=+7604.537672618" observedRunningTime="2025-11-23 16:48:29.906034386 +0000 UTC m=+7605.073030881" watchObservedRunningTime="2025-11-23 16:48:29.919999711 +0000 UTC m=+7605.086996236" Nov 23 16:48:29 crc kubenswrapper[5050]: I1123 16:48:29.945042 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-hmn6v"] Nov 23 16:48:29 crc kubenswrapper[5050]: I1123 16:48:29.959966 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-hmn6v"] Nov 23 16:48:29 crc kubenswrapper[5050]: I1123 16:48:29.961403 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-nvc6n" podStartSLOduration=3.459579719 podStartE2EDuration="7.961356851s" podCreationTimestamp="2025-11-23 16:48:22 +0000 UTC" firstStartedPulling="2025-11-23 16:48:24.775616403 +0000 UTC m=+7599.942612878" lastFinishedPulling="2025-11-23 16:48:29.277393505 +0000 UTC m=+7604.444390010" observedRunningTime="2025-11-23 16:48:29.959789427 +0000 UTC m=+7605.126785932" watchObservedRunningTime="2025-11-23 16:48:29.961356851 +0000 UTC m=+7605.128353346" Nov 23 16:48:29 crc kubenswrapper[5050]: I1123 16:48:29.975684 5050 scope.go:117] "RemoveContainer" containerID="68209911fdb876fcb7b629f6e6438aee854bab12199efcebad56180fe5e4b9e7" Nov 23 16:48:31 crc kubenswrapper[5050]: I1123 16:48:31.571889 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="03963976-3954-4975-bbcc-0fa4cf995857" path="/var/lib/kubelet/pods/03963976-3954-4975-bbcc-0fa4cf995857/volumes" Nov 23 16:48:32 crc kubenswrapper[5050]: I1123 16:48:32.906534 5050 kubelet.go:2542] "SyncLoop 
(probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-nvc6n" Nov 23 16:48:32 crc kubenswrapper[5050]: I1123 16:48:32.907106 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-nvc6n" Nov 23 16:48:33 crc kubenswrapper[5050]: I1123 16:48:33.968118 5050 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-nvc6n" podUID="4403282d-04b1-4e06-826e-2c847a6d40ac" containerName="registry-server" probeResult="failure" output=< Nov 23 16:48:33 crc kubenswrapper[5050]: timeout: failed to connect service ":50051" within 1s Nov 23 16:48:33 crc kubenswrapper[5050]: > Nov 23 16:48:42 crc kubenswrapper[5050]: I1123 16:48:42.982712 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-nvc6n" Nov 23 16:48:43 crc kubenswrapper[5050]: I1123 16:48:43.069546 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-nvc6n" Nov 23 16:48:43 crc kubenswrapper[5050]: I1123 16:48:43.224336 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-nvc6n"] Nov 23 16:48:44 crc kubenswrapper[5050]: I1123 16:48:44.066008 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-nvc6n" podUID="4403282d-04b1-4e06-826e-2c847a6d40ac" containerName="registry-server" containerID="cri-o://145b35180199578b3a54f4abd9208634361cf7945b375f75b3160c04822795fa" gracePeriod=2 Nov 23 16:48:44 crc kubenswrapper[5050]: I1123 16:48:44.633951 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-nvc6n" Nov 23 16:48:44 crc kubenswrapper[5050]: I1123 16:48:44.730643 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6gm7f\" (UniqueName: \"kubernetes.io/projected/4403282d-04b1-4e06-826e-2c847a6d40ac-kube-api-access-6gm7f\") pod \"4403282d-04b1-4e06-826e-2c847a6d40ac\" (UID: \"4403282d-04b1-4e06-826e-2c847a6d40ac\") " Nov 23 16:48:44 crc kubenswrapper[5050]: I1123 16:48:44.730808 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4403282d-04b1-4e06-826e-2c847a6d40ac-utilities\") pod \"4403282d-04b1-4e06-826e-2c847a6d40ac\" (UID: \"4403282d-04b1-4e06-826e-2c847a6d40ac\") " Nov 23 16:48:44 crc kubenswrapper[5050]: I1123 16:48:44.731041 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4403282d-04b1-4e06-826e-2c847a6d40ac-catalog-content\") pod \"4403282d-04b1-4e06-826e-2c847a6d40ac\" (UID: \"4403282d-04b1-4e06-826e-2c847a6d40ac\") " Nov 23 16:48:44 crc kubenswrapper[5050]: I1123 16:48:44.735702 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4403282d-04b1-4e06-826e-2c847a6d40ac-utilities" (OuterVolumeSpecName: "utilities") pod "4403282d-04b1-4e06-826e-2c847a6d40ac" (UID: "4403282d-04b1-4e06-826e-2c847a6d40ac"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 16:48:44 crc kubenswrapper[5050]: I1123 16:48:44.739842 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4403282d-04b1-4e06-826e-2c847a6d40ac-kube-api-access-6gm7f" (OuterVolumeSpecName: "kube-api-access-6gm7f") pod "4403282d-04b1-4e06-826e-2c847a6d40ac" (UID: "4403282d-04b1-4e06-826e-2c847a6d40ac"). InnerVolumeSpecName "kube-api-access-6gm7f". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 16:48:44 crc kubenswrapper[5050]: I1123 16:48:44.832560 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4403282d-04b1-4e06-826e-2c847a6d40ac-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4403282d-04b1-4e06-826e-2c847a6d40ac" (UID: "4403282d-04b1-4e06-826e-2c847a6d40ac"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 16:48:44 crc kubenswrapper[5050]: I1123 16:48:44.835126 5050 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4403282d-04b1-4e06-826e-2c847a6d40ac-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 23 16:48:44 crc kubenswrapper[5050]: I1123 16:48:44.835157 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6gm7f\" (UniqueName: \"kubernetes.io/projected/4403282d-04b1-4e06-826e-2c847a6d40ac-kube-api-access-6gm7f\") on node \"crc\" DevicePath \"\"" Nov 23 16:48:44 crc kubenswrapper[5050]: I1123 16:48:44.835173 5050 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4403282d-04b1-4e06-826e-2c847a6d40ac-utilities\") on node \"crc\" DevicePath \"\"" Nov 23 16:48:45 crc kubenswrapper[5050]: I1123 16:48:45.080372 5050 generic.go:334] "Generic (PLEG): container finished" podID="4403282d-04b1-4e06-826e-2c847a6d40ac" containerID="145b35180199578b3a54f4abd9208634361cf7945b375f75b3160c04822795fa" exitCode=0 Nov 23 16:48:45 crc kubenswrapper[5050]: I1123 16:48:45.080436 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-nvc6n" Nov 23 16:48:45 crc kubenswrapper[5050]: I1123 16:48:45.080476 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nvc6n" event={"ID":"4403282d-04b1-4e06-826e-2c847a6d40ac","Type":"ContainerDied","Data":"145b35180199578b3a54f4abd9208634361cf7945b375f75b3160c04822795fa"} Nov 23 16:48:45 crc kubenswrapper[5050]: I1123 16:48:45.080525 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nvc6n" event={"ID":"4403282d-04b1-4e06-826e-2c847a6d40ac","Type":"ContainerDied","Data":"b79f2127760f207c7071413a110a7a250c5527e34ae5966c944e42467150692d"} Nov 23 16:48:45 crc kubenswrapper[5050]: I1123 16:48:45.080570 5050 scope.go:117] "RemoveContainer" containerID="145b35180199578b3a54f4abd9208634361cf7945b375f75b3160c04822795fa" Nov 23 16:48:45 crc kubenswrapper[5050]: I1123 16:48:45.122801 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-nvc6n"] Nov 23 16:48:45 crc kubenswrapper[5050]: I1123 16:48:45.130784 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-nvc6n"] Nov 23 16:48:45 crc kubenswrapper[5050]: I1123 16:48:45.131917 5050 scope.go:117] "RemoveContainer" containerID="e0bee8e87b05da0d686a60a88ea56979cd48ad7cf5c017d05771a944563a28f8" Nov 23 16:48:45 crc kubenswrapper[5050]: I1123 16:48:45.168420 5050 scope.go:117] "RemoveContainer" containerID="dc3fc0a76931d80089b173d393dd672fa055ef07daaceee68fac0c36247174cb" Nov 23 16:48:45 crc kubenswrapper[5050]: I1123 16:48:45.225534 5050 scope.go:117] "RemoveContainer" containerID="145b35180199578b3a54f4abd9208634361cf7945b375f75b3160c04822795fa" Nov 23 16:48:45 crc kubenswrapper[5050]: E1123 16:48:45.226144 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"145b35180199578b3a54f4abd9208634361cf7945b375f75b3160c04822795fa\": container with ID starting with 145b35180199578b3a54f4abd9208634361cf7945b375f75b3160c04822795fa not found: ID does not exist" containerID="145b35180199578b3a54f4abd9208634361cf7945b375f75b3160c04822795fa" Nov 23 16:48:45 crc kubenswrapper[5050]: I1123 16:48:45.226193 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"145b35180199578b3a54f4abd9208634361cf7945b375f75b3160c04822795fa"} err="failed to get container status \"145b35180199578b3a54f4abd9208634361cf7945b375f75b3160c04822795fa\": rpc error: code = NotFound desc = could not find container \"145b35180199578b3a54f4abd9208634361cf7945b375f75b3160c04822795fa\": container with ID starting with 145b35180199578b3a54f4abd9208634361cf7945b375f75b3160c04822795fa not found: ID does not exist" Nov 23 16:48:45 crc kubenswrapper[5050]: I1123 16:48:45.226224 5050 scope.go:117] "RemoveContainer" containerID="e0bee8e87b05da0d686a60a88ea56979cd48ad7cf5c017d05771a944563a28f8" Nov 23 16:48:45 crc kubenswrapper[5050]: E1123 16:48:45.226655 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e0bee8e87b05da0d686a60a88ea56979cd48ad7cf5c017d05771a944563a28f8\": container with ID starting with e0bee8e87b05da0d686a60a88ea56979cd48ad7cf5c017d05771a944563a28f8 not found: ID does not exist" containerID="e0bee8e87b05da0d686a60a88ea56979cd48ad7cf5c017d05771a944563a28f8" Nov 23 16:48:45 crc kubenswrapper[5050]: I1123 16:48:45.226723 5050 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e0bee8e87b05da0d686a60a88ea56979cd48ad7cf5c017d05771a944563a28f8"} err="failed to get container status \"e0bee8e87b05da0d686a60a88ea56979cd48ad7cf5c017d05771a944563a28f8\": rpc error: code = NotFound desc = could not find container \"e0bee8e87b05da0d686a60a88ea56979cd48ad7cf5c017d05771a944563a28f8\": container with ID starting with e0bee8e87b05da0d686a60a88ea56979cd48ad7cf5c017d05771a944563a28f8 not found: ID does not exist" Nov 23 16:48:45 crc kubenswrapper[5050]: I1123 16:48:45.226749 5050 scope.go:117] "RemoveContainer" containerID="dc3fc0a76931d80089b173d393dd672fa055ef07daaceee68fac0c36247174cb" Nov 23 16:48:45 crc kubenswrapper[5050]: E1123 16:48:45.227206 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dc3fc0a76931d80089b173d393dd672fa055ef07daaceee68fac0c36247174cb\": container with ID starting with dc3fc0a76931d80089b173d393dd672fa055ef07daaceee68fac0c36247174cb not found: ID does not exist" containerID="dc3fc0a76931d80089b173d393dd672fa055ef07daaceee68fac0c36247174cb" Nov 23 16:48:45 crc kubenswrapper[5050]: I1123 16:48:45.227288 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dc3fc0a76931d80089b173d393dd672fa055ef07daaceee68fac0c36247174cb"} err="failed to get container status \"dc3fc0a76931d80089b173d393dd672fa055ef07daaceee68fac0c36247174cb\": rpc error: code = NotFound desc = could not find container \"dc3fc0a76931d80089b173d393dd672fa055ef07daaceee68fac0c36247174cb\": container with ID starting with dc3fc0a76931d80089b173d393dd672fa055ef07daaceee68fac0c36247174cb not found: ID does not exist" Nov 23 16:48:45 crc kubenswrapper[5050]: I1123 16:48:45.570530 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4403282d-04b1-4e06-826e-2c847a6d40ac" path="/var/lib/kubelet/pods/4403282d-04b1-4e06-826e-2c847a6d40ac/volumes" Nov 23 16:48:46 crc kubenswrapper[5050]: I1123 16:48:46.099320 5050 generic.go:334] "Generic (PLEG): container finished" podID="98cb76c1-7d81-491f-bd07-037808d66926" containerID="94965effc5ae7930cc6b286e40d0225dc7beb458498450855f6219b7bf9f9e97" exitCode=0 Nov 23 16:48:46 crc kubenswrapper[5050]: I1123 16:48:46.099402 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-openstack-openstack-cell1-q4pmx" event={"ID":"98cb76c1-7d81-491f-bd07-037808d66926","Type":"ContainerDied","Data":"94965effc5ae7930cc6b286e40d0225dc7beb458498450855f6219b7bf9f9e97"} Nov 23 16:48:47 crc kubenswrapper[5050]: I1123 16:48:47.662222 5050 util.go:48] "No ready sandbox for pod can be found. 
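The startup-probe output a few entries above ("timeout: failed to connect service \":50051\" within 1s") is the marketplace catalog pod's health check failing to reach the registry-server while catalog content is still being extracted. The reachability half of that check can be approximated with a plain TCP dial; the port and one-second timeout mirror the probe output, but this is only an illustrative sketch, not the actual probe binary the pod runs:

package main

import (
	"fmt"
	"net"
	"os"
	"time"
)

func main() {
	// Port taken from the probe output above; the host is an assumption.
	addr := "localhost:50051"
	conn, err := net.DialTimeout("tcp", addr, time.Second)
	if err != nil {
		fmt.Fprintf(os.Stderr, "failed to connect service %q within 1s: %v\n", addr, err)
		os.Exit(1)
	}
	conn.Close()
	fmt.Println("service reachable:", addr)
}

A TCP dial only proves the port is open; the real probe also issues a gRPC health-check RPC, which is consistent with the pod staying unready until the registry-server finishes loading (the "started"/"ready" transitions logged at 16:48:42 and 16:48:43 above).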
Need to start a new one" pod="openstack/reboot-os-openstack-openstack-cell1-q4pmx" Nov 23 16:48:47 crc kubenswrapper[5050]: I1123 16:48:47.822096 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/98cb76c1-7d81-491f-bd07-037808d66926-ssh-key\") pod \"98cb76c1-7d81-491f-bd07-037808d66926\" (UID: \"98cb76c1-7d81-491f-bd07-037808d66926\") " Nov 23 16:48:47 crc kubenswrapper[5050]: I1123 16:48:47.822277 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/98cb76c1-7d81-491f-bd07-037808d66926-ceph\") pod \"98cb76c1-7d81-491f-bd07-037808d66926\" (UID: \"98cb76c1-7d81-491f-bd07-037808d66926\") " Nov 23 16:48:47 crc kubenswrapper[5050]: I1123 16:48:47.822490 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/98cb76c1-7d81-491f-bd07-037808d66926-inventory\") pod \"98cb76c1-7d81-491f-bd07-037808d66926\" (UID: \"98cb76c1-7d81-491f-bd07-037808d66926\") " Nov 23 16:48:47 crc kubenswrapper[5050]: I1123 16:48:47.822566 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jfgxg\" (UniqueName: \"kubernetes.io/projected/98cb76c1-7d81-491f-bd07-037808d66926-kube-api-access-jfgxg\") pod \"98cb76c1-7d81-491f-bd07-037808d66926\" (UID: \"98cb76c1-7d81-491f-bd07-037808d66926\") " Nov 23 16:48:47 crc kubenswrapper[5050]: I1123 16:48:47.830665 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/98cb76c1-7d81-491f-bd07-037808d66926-kube-api-access-jfgxg" (OuterVolumeSpecName: "kube-api-access-jfgxg") pod "98cb76c1-7d81-491f-bd07-037808d66926" (UID: "98cb76c1-7d81-491f-bd07-037808d66926"). InnerVolumeSpecName "kube-api-access-jfgxg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 16:48:47 crc kubenswrapper[5050]: I1123 16:48:47.847360 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/98cb76c1-7d81-491f-bd07-037808d66926-ceph" (OuterVolumeSpecName: "ceph") pod "98cb76c1-7d81-491f-bd07-037808d66926" (UID: "98cb76c1-7d81-491f-bd07-037808d66926"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:48:47 crc kubenswrapper[5050]: I1123 16:48:47.864580 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/98cb76c1-7d81-491f-bd07-037808d66926-inventory" (OuterVolumeSpecName: "inventory") pod "98cb76c1-7d81-491f-bd07-037808d66926" (UID: "98cb76c1-7d81-491f-bd07-037808d66926"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:48:47 crc kubenswrapper[5050]: I1123 16:48:47.868216 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/98cb76c1-7d81-491f-bd07-037808d66926-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "98cb76c1-7d81-491f-bd07-037808d66926" (UID: "98cb76c1-7d81-491f-bd07-037808d66926"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:48:47 crc kubenswrapper[5050]: I1123 16:48:47.925517 5050 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/98cb76c1-7d81-491f-bd07-037808d66926-inventory\") on node \"crc\" DevicePath \"\"" Nov 23 16:48:47 crc kubenswrapper[5050]: I1123 16:48:47.925548 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jfgxg\" (UniqueName: \"kubernetes.io/projected/98cb76c1-7d81-491f-bd07-037808d66926-kube-api-access-jfgxg\") on node \"crc\" DevicePath \"\"" Nov 23 16:48:47 crc kubenswrapper[5050]: I1123 16:48:47.925560 5050 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/98cb76c1-7d81-491f-bd07-037808d66926-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 23 16:48:47 crc kubenswrapper[5050]: I1123 16:48:47.925571 5050 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/98cb76c1-7d81-491f-bd07-037808d66926-ceph\") on node \"crc\" DevicePath \"\"" Nov 23 16:48:48 crc kubenswrapper[5050]: I1123 16:48:48.123470 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-openstack-openstack-cell1-q4pmx" event={"ID":"98cb76c1-7d81-491f-bd07-037808d66926","Type":"ContainerDied","Data":"69e89701482c056208edb1ed0d9a181ffbe85dfcfa185223c7a88c18bfd887b4"} Nov 23 16:48:48 crc kubenswrapper[5050]: I1123 16:48:48.123805 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="69e89701482c056208edb1ed0d9a181ffbe85dfcfa185223c7a88c18bfd887b4" Nov 23 16:48:48 crc kubenswrapper[5050]: I1123 16:48:48.123517 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-openstack-openstack-cell1-q4pmx" Nov 23 16:48:48 crc kubenswrapper[5050]: I1123 16:48:48.251894 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-certs-openstack-openstack-cell1-nxlp7"] Nov 23 16:48:48 crc kubenswrapper[5050]: E1123 16:48:48.252472 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="98cb76c1-7d81-491f-bd07-037808d66926" containerName="reboot-os-openstack-openstack-cell1" Nov 23 16:48:48 crc kubenswrapper[5050]: I1123 16:48:48.252494 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="98cb76c1-7d81-491f-bd07-037808d66926" containerName="reboot-os-openstack-openstack-cell1" Nov 23 16:48:48 crc kubenswrapper[5050]: E1123 16:48:48.252512 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4403282d-04b1-4e06-826e-2c847a6d40ac" containerName="extract-content" Nov 23 16:48:48 crc kubenswrapper[5050]: I1123 16:48:48.252520 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="4403282d-04b1-4e06-826e-2c847a6d40ac" containerName="extract-content" Nov 23 16:48:48 crc kubenswrapper[5050]: E1123 16:48:48.252535 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="03963976-3954-4975-bbcc-0fa4cf995857" containerName="extract-content" Nov 23 16:48:48 crc kubenswrapper[5050]: I1123 16:48:48.252542 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="03963976-3954-4975-bbcc-0fa4cf995857" containerName="extract-content" Nov 23 16:48:48 crc kubenswrapper[5050]: E1123 16:48:48.252560 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4403282d-04b1-4e06-826e-2c847a6d40ac" containerName="extract-utilities" Nov 23 16:48:48 crc kubenswrapper[5050]: I1123 16:48:48.252568 5050 state_mem.go:107] "Deleted CPUSet 
assignment" podUID="4403282d-04b1-4e06-826e-2c847a6d40ac" containerName="extract-utilities" Nov 23 16:48:48 crc kubenswrapper[5050]: E1123 16:48:48.252597 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="03963976-3954-4975-bbcc-0fa4cf995857" containerName="extract-utilities" Nov 23 16:48:48 crc kubenswrapper[5050]: I1123 16:48:48.252606 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="03963976-3954-4975-bbcc-0fa4cf995857" containerName="extract-utilities" Nov 23 16:48:48 crc kubenswrapper[5050]: E1123 16:48:48.252633 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="03963976-3954-4975-bbcc-0fa4cf995857" containerName="registry-server" Nov 23 16:48:48 crc kubenswrapper[5050]: I1123 16:48:48.252641 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="03963976-3954-4975-bbcc-0fa4cf995857" containerName="registry-server" Nov 23 16:48:48 crc kubenswrapper[5050]: E1123 16:48:48.252654 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4403282d-04b1-4e06-826e-2c847a6d40ac" containerName="registry-server" Nov 23 16:48:48 crc kubenswrapper[5050]: I1123 16:48:48.252661 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="4403282d-04b1-4e06-826e-2c847a6d40ac" containerName="registry-server" Nov 23 16:48:48 crc kubenswrapper[5050]: I1123 16:48:48.252900 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="03963976-3954-4975-bbcc-0fa4cf995857" containerName="registry-server" Nov 23 16:48:48 crc kubenswrapper[5050]: I1123 16:48:48.252933 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="98cb76c1-7d81-491f-bd07-037808d66926" containerName="reboot-os-openstack-openstack-cell1" Nov 23 16:48:48 crc kubenswrapper[5050]: I1123 16:48:48.252952 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="4403282d-04b1-4e06-826e-2c847a6d40ac" containerName="registry-server" Nov 23 16:48:48 crc kubenswrapper[5050]: I1123 16:48:48.253896 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-certs-openstack-openstack-cell1-nxlp7" Nov 23 16:48:48 crc kubenswrapper[5050]: I1123 16:48:48.260807 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Nov 23 16:48:48 crc kubenswrapper[5050]: I1123 16:48:48.261278 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 23 16:48:48 crc kubenswrapper[5050]: I1123 16:48:48.261419 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-ckrpf" Nov 23 16:48:48 crc kubenswrapper[5050]: I1123 16:48:48.261717 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 23 16:48:48 crc kubenswrapper[5050]: I1123 16:48:48.265381 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-certs-openstack-openstack-cell1-nxlp7"] Nov 23 16:48:48 crc kubenswrapper[5050]: I1123 16:48:48.334583 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-sriov-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2baf454f-86dc-4e4b-9a53-db1eeec9a497-neutron-sriov-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-nxlp7\" (UID: \"2baf454f-86dc-4e4b-9a53-db1eeec9a497\") " pod="openstack/install-certs-openstack-openstack-cell1-nxlp7" Nov 23 16:48:48 crc kubenswrapper[5050]: I1123 16:48:48.334696 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2baf454f-86dc-4e4b-9a53-db1eeec9a497-ovn-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-nxlp7\" (UID: \"2baf454f-86dc-4e4b-9a53-db1eeec9a497\") " pod="openstack/install-certs-openstack-openstack-cell1-nxlp7" Nov 23 16:48:48 crc kubenswrapper[5050]: I1123 16:48:48.334731 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hg554\" (UniqueName: \"kubernetes.io/projected/2baf454f-86dc-4e4b-9a53-db1eeec9a497-kube-api-access-hg554\") pod \"install-certs-openstack-openstack-cell1-nxlp7\" (UID: \"2baf454f-86dc-4e4b-9a53-db1eeec9a497\") " pod="openstack/install-certs-openstack-openstack-cell1-nxlp7" Nov 23 16:48:48 crc kubenswrapper[5050]: I1123 16:48:48.334768 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/2baf454f-86dc-4e4b-9a53-db1eeec9a497-ceph\") pod \"install-certs-openstack-openstack-cell1-nxlp7\" (UID: \"2baf454f-86dc-4e4b-9a53-db1eeec9a497\") " pod="openstack/install-certs-openstack-openstack-cell1-nxlp7" Nov 23 16:48:48 crc kubenswrapper[5050]: I1123 16:48:48.334858 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2baf454f-86dc-4e4b-9a53-db1eeec9a497-telemetry-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-nxlp7\" (UID: \"2baf454f-86dc-4e4b-9a53-db1eeec9a497\") " pod="openstack/install-certs-openstack-openstack-cell1-nxlp7" Nov 23 16:48:48 crc kubenswrapper[5050]: I1123 16:48:48.334885 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2baf454f-86dc-4e4b-9a53-db1eeec9a497-inventory\") pod \"install-certs-openstack-openstack-cell1-nxlp7\" 
(UID: \"2baf454f-86dc-4e4b-9a53-db1eeec9a497\") " pod="openstack/install-certs-openstack-openstack-cell1-nxlp7" Nov 23 16:48:48 crc kubenswrapper[5050]: I1123 16:48:48.334936 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2baf454f-86dc-4e4b-9a53-db1eeec9a497-bootstrap-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-nxlp7\" (UID: \"2baf454f-86dc-4e4b-9a53-db1eeec9a497\") " pod="openstack/install-certs-openstack-openstack-cell1-nxlp7" Nov 23 16:48:48 crc kubenswrapper[5050]: I1123 16:48:48.334966 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2baf454f-86dc-4e4b-9a53-db1eeec9a497-libvirt-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-nxlp7\" (UID: \"2baf454f-86dc-4e4b-9a53-db1eeec9a497\") " pod="openstack/install-certs-openstack-openstack-cell1-nxlp7" Nov 23 16:48:48 crc kubenswrapper[5050]: I1123 16:48:48.335635 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-dhcp-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2baf454f-86dc-4e4b-9a53-db1eeec9a497-neutron-dhcp-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-nxlp7\" (UID: \"2baf454f-86dc-4e4b-9a53-db1eeec9a497\") " pod="openstack/install-certs-openstack-openstack-cell1-nxlp7" Nov 23 16:48:48 crc kubenswrapper[5050]: I1123 16:48:48.335694 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2baf454f-86dc-4e4b-9a53-db1eeec9a497-nova-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-nxlp7\" (UID: \"2baf454f-86dc-4e4b-9a53-db1eeec9a497\") " pod="openstack/install-certs-openstack-openstack-cell1-nxlp7" Nov 23 16:48:48 crc kubenswrapper[5050]: I1123 16:48:48.335854 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2baf454f-86dc-4e4b-9a53-db1eeec9a497-ssh-key\") pod \"install-certs-openstack-openstack-cell1-nxlp7\" (UID: \"2baf454f-86dc-4e4b-9a53-db1eeec9a497\") " pod="openstack/install-certs-openstack-openstack-cell1-nxlp7" Nov 23 16:48:48 crc kubenswrapper[5050]: I1123 16:48:48.335983 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2baf454f-86dc-4e4b-9a53-db1eeec9a497-neutron-metadata-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-nxlp7\" (UID: \"2baf454f-86dc-4e4b-9a53-db1eeec9a497\") " pod="openstack/install-certs-openstack-openstack-cell1-nxlp7" Nov 23 16:48:48 crc kubenswrapper[5050]: I1123 16:48:48.439144 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2baf454f-86dc-4e4b-9a53-db1eeec9a497-telemetry-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-nxlp7\" (UID: \"2baf454f-86dc-4e4b-9a53-db1eeec9a497\") " pod="openstack/install-certs-openstack-openstack-cell1-nxlp7" Nov 23 16:48:48 crc kubenswrapper[5050]: I1123 16:48:48.439199 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: 
\"kubernetes.io/secret/2baf454f-86dc-4e4b-9a53-db1eeec9a497-inventory\") pod \"install-certs-openstack-openstack-cell1-nxlp7\" (UID: \"2baf454f-86dc-4e4b-9a53-db1eeec9a497\") " pod="openstack/install-certs-openstack-openstack-cell1-nxlp7" Nov 23 16:48:48 crc kubenswrapper[5050]: I1123 16:48:48.439258 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2baf454f-86dc-4e4b-9a53-db1eeec9a497-bootstrap-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-nxlp7\" (UID: \"2baf454f-86dc-4e4b-9a53-db1eeec9a497\") " pod="openstack/install-certs-openstack-openstack-cell1-nxlp7" Nov 23 16:48:48 crc kubenswrapper[5050]: I1123 16:48:48.439286 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2baf454f-86dc-4e4b-9a53-db1eeec9a497-libvirt-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-nxlp7\" (UID: \"2baf454f-86dc-4e4b-9a53-db1eeec9a497\") " pod="openstack/install-certs-openstack-openstack-cell1-nxlp7" Nov 23 16:48:48 crc kubenswrapper[5050]: I1123 16:48:48.439412 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-dhcp-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2baf454f-86dc-4e4b-9a53-db1eeec9a497-neutron-dhcp-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-nxlp7\" (UID: \"2baf454f-86dc-4e4b-9a53-db1eeec9a497\") " pod="openstack/install-certs-openstack-openstack-cell1-nxlp7" Nov 23 16:48:48 crc kubenswrapper[5050]: I1123 16:48:48.439466 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2baf454f-86dc-4e4b-9a53-db1eeec9a497-nova-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-nxlp7\" (UID: \"2baf454f-86dc-4e4b-9a53-db1eeec9a497\") " pod="openstack/install-certs-openstack-openstack-cell1-nxlp7" Nov 23 16:48:48 crc kubenswrapper[5050]: I1123 16:48:48.439506 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2baf454f-86dc-4e4b-9a53-db1eeec9a497-ssh-key\") pod \"install-certs-openstack-openstack-cell1-nxlp7\" (UID: \"2baf454f-86dc-4e4b-9a53-db1eeec9a497\") " pod="openstack/install-certs-openstack-openstack-cell1-nxlp7" Nov 23 16:48:48 crc kubenswrapper[5050]: I1123 16:48:48.439545 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2baf454f-86dc-4e4b-9a53-db1eeec9a497-neutron-metadata-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-nxlp7\" (UID: \"2baf454f-86dc-4e4b-9a53-db1eeec9a497\") " pod="openstack/install-certs-openstack-openstack-cell1-nxlp7" Nov 23 16:48:48 crc kubenswrapper[5050]: I1123 16:48:48.439612 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-sriov-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2baf454f-86dc-4e4b-9a53-db1eeec9a497-neutron-sriov-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-nxlp7\" (UID: \"2baf454f-86dc-4e4b-9a53-db1eeec9a497\") " pod="openstack/install-certs-openstack-openstack-cell1-nxlp7" Nov 23 16:48:48 crc kubenswrapper[5050]: I1123 16:48:48.439712 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hg554\" (UniqueName: 
\"kubernetes.io/projected/2baf454f-86dc-4e4b-9a53-db1eeec9a497-kube-api-access-hg554\") pod \"install-certs-openstack-openstack-cell1-nxlp7\" (UID: \"2baf454f-86dc-4e4b-9a53-db1eeec9a497\") " pod="openstack/install-certs-openstack-openstack-cell1-nxlp7" Nov 23 16:48:48 crc kubenswrapper[5050]: I1123 16:48:48.439741 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2baf454f-86dc-4e4b-9a53-db1eeec9a497-ovn-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-nxlp7\" (UID: \"2baf454f-86dc-4e4b-9a53-db1eeec9a497\") " pod="openstack/install-certs-openstack-openstack-cell1-nxlp7" Nov 23 16:48:48 crc kubenswrapper[5050]: I1123 16:48:48.439768 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/2baf454f-86dc-4e4b-9a53-db1eeec9a497-ceph\") pod \"install-certs-openstack-openstack-cell1-nxlp7\" (UID: \"2baf454f-86dc-4e4b-9a53-db1eeec9a497\") " pod="openstack/install-certs-openstack-openstack-cell1-nxlp7" Nov 23 16:48:48 crc kubenswrapper[5050]: I1123 16:48:48.445008 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2baf454f-86dc-4e4b-9a53-db1eeec9a497-telemetry-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-nxlp7\" (UID: \"2baf454f-86dc-4e4b-9a53-db1eeec9a497\") " pod="openstack/install-certs-openstack-openstack-cell1-nxlp7" Nov 23 16:48:48 crc kubenswrapper[5050]: I1123 16:48:48.445421 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-dhcp-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2baf454f-86dc-4e4b-9a53-db1eeec9a497-neutron-dhcp-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-nxlp7\" (UID: \"2baf454f-86dc-4e4b-9a53-db1eeec9a497\") " pod="openstack/install-certs-openstack-openstack-cell1-nxlp7" Nov 23 16:48:48 crc kubenswrapper[5050]: I1123 16:48:48.446859 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-sriov-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2baf454f-86dc-4e4b-9a53-db1eeec9a497-neutron-sriov-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-nxlp7\" (UID: \"2baf454f-86dc-4e4b-9a53-db1eeec9a497\") " pod="openstack/install-certs-openstack-openstack-cell1-nxlp7" Nov 23 16:48:48 crc kubenswrapper[5050]: I1123 16:48:48.446917 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2baf454f-86dc-4e4b-9a53-db1eeec9a497-bootstrap-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-nxlp7\" (UID: \"2baf454f-86dc-4e4b-9a53-db1eeec9a497\") " pod="openstack/install-certs-openstack-openstack-cell1-nxlp7" Nov 23 16:48:48 crc kubenswrapper[5050]: I1123 16:48:48.447595 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2baf454f-86dc-4e4b-9a53-db1eeec9a497-ovn-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-nxlp7\" (UID: \"2baf454f-86dc-4e4b-9a53-db1eeec9a497\") " pod="openstack/install-certs-openstack-openstack-cell1-nxlp7" Nov 23 16:48:48 crc kubenswrapper[5050]: I1123 16:48:48.448924 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2baf454f-86dc-4e4b-9a53-db1eeec9a497-inventory\") pod 
\"install-certs-openstack-openstack-cell1-nxlp7\" (UID: \"2baf454f-86dc-4e4b-9a53-db1eeec9a497\") " pod="openstack/install-certs-openstack-openstack-cell1-nxlp7" Nov 23 16:48:48 crc kubenswrapper[5050]: I1123 16:48:48.449802 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2baf454f-86dc-4e4b-9a53-db1eeec9a497-libvirt-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-nxlp7\" (UID: \"2baf454f-86dc-4e4b-9a53-db1eeec9a497\") " pod="openstack/install-certs-openstack-openstack-cell1-nxlp7" Nov 23 16:48:48 crc kubenswrapper[5050]: I1123 16:48:48.454102 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2baf454f-86dc-4e4b-9a53-db1eeec9a497-ssh-key\") pod \"install-certs-openstack-openstack-cell1-nxlp7\" (UID: \"2baf454f-86dc-4e4b-9a53-db1eeec9a497\") " pod="openstack/install-certs-openstack-openstack-cell1-nxlp7" Nov 23 16:48:48 crc kubenswrapper[5050]: I1123 16:48:48.455564 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/2baf454f-86dc-4e4b-9a53-db1eeec9a497-ceph\") pod \"install-certs-openstack-openstack-cell1-nxlp7\" (UID: \"2baf454f-86dc-4e4b-9a53-db1eeec9a497\") " pod="openstack/install-certs-openstack-openstack-cell1-nxlp7" Nov 23 16:48:48 crc kubenswrapper[5050]: I1123 16:48:48.455796 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2baf454f-86dc-4e4b-9a53-db1eeec9a497-neutron-metadata-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-nxlp7\" (UID: \"2baf454f-86dc-4e4b-9a53-db1eeec9a497\") " pod="openstack/install-certs-openstack-openstack-cell1-nxlp7" Nov 23 16:48:48 crc kubenswrapper[5050]: I1123 16:48:48.459259 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2baf454f-86dc-4e4b-9a53-db1eeec9a497-nova-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-nxlp7\" (UID: \"2baf454f-86dc-4e4b-9a53-db1eeec9a497\") " pod="openstack/install-certs-openstack-openstack-cell1-nxlp7" Nov 23 16:48:48 crc kubenswrapper[5050]: I1123 16:48:48.470525 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hg554\" (UniqueName: \"kubernetes.io/projected/2baf454f-86dc-4e4b-9a53-db1eeec9a497-kube-api-access-hg554\") pod \"install-certs-openstack-openstack-cell1-nxlp7\" (UID: \"2baf454f-86dc-4e4b-9a53-db1eeec9a497\") " pod="openstack/install-certs-openstack-openstack-cell1-nxlp7" Nov 23 16:48:48 crc kubenswrapper[5050]: I1123 16:48:48.587257 5050 util.go:30] "No sandbox for pod can be found. 
Nov 23 16:48:48 crc kubenswrapper[5050]: I1123 16:48:48.587257 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-openstack-openstack-cell1-nxlp7"
Nov 23 16:48:49 crc kubenswrapper[5050]: I1123 16:48:49.206125 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-certs-openstack-openstack-cell1-nxlp7"]
Nov 23 16:48:50 crc kubenswrapper[5050]: I1123 16:48:50.148560 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-openstack-openstack-cell1-nxlp7" event={"ID":"2baf454f-86dc-4e4b-9a53-db1eeec9a497","Type":"ContainerStarted","Data":"6c9bd94d6cc045333d6ab09cbd1062078c8b586d6cf959ee7749820f58812547"}
Nov 23 16:48:50 crc kubenswrapper[5050]: I1123 16:48:50.149322 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-openstack-openstack-cell1-nxlp7" event={"ID":"2baf454f-86dc-4e4b-9a53-db1eeec9a497","Type":"ContainerStarted","Data":"e9775996806ada748bedfc6dd4af6b4b01bbc50d91cce31ec224209d62ddc298"}
Nov 23 16:48:50 crc kubenswrapper[5050]: I1123 16:48:50.182180 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-certs-openstack-openstack-cell1-nxlp7" podStartSLOduration=1.706475762 podStartE2EDuration="2.182159598s" podCreationTimestamp="2025-11-23 16:48:48 +0000 UTC" firstStartedPulling="2025-11-23 16:48:49.223861481 +0000 UTC m=+7624.390857966" lastFinishedPulling="2025-11-23 16:48:49.699545267 +0000 UTC m=+7624.866541802" observedRunningTime="2025-11-23 16:48:50.167652958 +0000 UTC m=+7625.334649453" watchObservedRunningTime="2025-11-23 16:48:50.182159598 +0000 UTC m=+7625.349156083"
Nov 23 16:48:59 crc kubenswrapper[5050]: I1123 16:48:59.225279 5050 patch_prober.go:28] interesting pod/machine-config-daemon-hlrlq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 23 16:48:59 crc kubenswrapper[5050]: I1123 16:48:59.226929 5050 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 23 16:48:59 crc kubenswrapper[5050]: I1123 16:48:59.227061 5050 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq"
Nov 23 16:48:59 crc kubenswrapper[5050]: I1123 16:48:59.228646 5050 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"d40b7f46f9e2ee457869ccc402db524dd6b3b4db87a0fafb629086ba5d450e76"} pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 23 16:48:59 crc kubenswrapper[5050]: I1123 16:48:59.228731 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" containerID="cri-o://d40b7f46f9e2ee457869ccc402db524dd6b3b4db87a0fafb629086ba5d450e76" gracePeriod=600
Nov 23 16:48:59 crc kubenswrapper[5050]: E1123 16:48:59.368418 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f"
Nov 23 16:49:00 crc kubenswrapper[5050]: I1123 16:49:00.275725 5050 generic.go:334] "Generic (PLEG): container finished" podID="1d998909-9470-47ef-87e8-b34f0473682f" containerID="d40b7f46f9e2ee457869ccc402db524dd6b3b4db87a0fafb629086ba5d450e76" exitCode=0
Nov 23 16:49:00 crc kubenswrapper[5050]: I1123 16:49:00.275859 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" event={"ID":"1d998909-9470-47ef-87e8-b34f0473682f","Type":"ContainerDied","Data":"d40b7f46f9e2ee457869ccc402db524dd6b3b4db87a0fafb629086ba5d450e76"}
Nov 23 16:49:00 crc kubenswrapper[5050]: I1123 16:49:00.276300 5050 scope.go:117] "RemoveContainer" containerID="1bfefb3171b8cf9d17cc642f8ef9433deda5bd5290186a212a4ff2a6117146a9"
Nov 23 16:49:00 crc kubenswrapper[5050]: I1123 16:49:00.277155 5050 scope.go:117] "RemoveContainer" containerID="d40b7f46f9e2ee457869ccc402db524dd6b3b4db87a0fafb629086ba5d450e76"
Nov 23 16:49:00 crc kubenswrapper[5050]: E1123 16:49:00.277641 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f"
Nov 23 16:49:10 crc kubenswrapper[5050]: I1123 16:49:10.430709 5050 generic.go:334] "Generic (PLEG): container finished" podID="2baf454f-86dc-4e4b-9a53-db1eeec9a497" containerID="6c9bd94d6cc045333d6ab09cbd1062078c8b586d6cf959ee7749820f58812547" exitCode=0
Nov 23 16:49:10 crc kubenswrapper[5050]: I1123 16:49:10.430906 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-openstack-openstack-cell1-nxlp7" event={"ID":"2baf454f-86dc-4e4b-9a53-db1eeec9a497","Type":"ContainerDied","Data":"6c9bd94d6cc045333d6ab09cbd1062078c8b586d6cf959ee7749820f58812547"}
Nov 23 16:49:11 crc kubenswrapper[5050]: I1123 16:49:11.939982 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-openstack-openstack-cell1-nxlp7"
Nov 23 16:49:12 crc kubenswrapper[5050]: I1123 16:49:12.038340 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-sriov-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2baf454f-86dc-4e4b-9a53-db1eeec9a497-neutron-sriov-combined-ca-bundle\") pod \"2baf454f-86dc-4e4b-9a53-db1eeec9a497\" (UID: \"2baf454f-86dc-4e4b-9a53-db1eeec9a497\") "
Nov 23 16:49:12 crc kubenswrapper[5050]: I1123 16:49:12.038420 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2baf454f-86dc-4e4b-9a53-db1eeec9a497-telemetry-combined-ca-bundle\") pod \"2baf454f-86dc-4e4b-9a53-db1eeec9a497\" (UID: \"2baf454f-86dc-4e4b-9a53-db1eeec9a497\") "
Nov 23 16:49:12 crc kubenswrapper[5050]: I1123 16:49:12.038488 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2baf454f-86dc-4e4b-9a53-db1eeec9a497-neutron-metadata-combined-ca-bundle\") pod \"2baf454f-86dc-4e4b-9a53-db1eeec9a497\" (UID: \"2baf454f-86dc-4e4b-9a53-db1eeec9a497\") "
Nov 23 16:49:12 crc kubenswrapper[5050]: I1123 16:49:12.038550 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hg554\" (UniqueName: \"kubernetes.io/projected/2baf454f-86dc-4e4b-9a53-db1eeec9a497-kube-api-access-hg554\") pod \"2baf454f-86dc-4e4b-9a53-db1eeec9a497\" (UID: \"2baf454f-86dc-4e4b-9a53-db1eeec9a497\") "
Nov 23 16:49:12 crc kubenswrapper[5050]: I1123 16:49:12.038619 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2baf454f-86dc-4e4b-9a53-db1eeec9a497-inventory\") pod \"2baf454f-86dc-4e4b-9a53-db1eeec9a497\" (UID: \"2baf454f-86dc-4e4b-9a53-db1eeec9a497\") "
Nov 23 16:49:12 crc kubenswrapper[5050]: I1123 16:49:12.038646 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2baf454f-86dc-4e4b-9a53-db1eeec9a497-nova-combined-ca-bundle\") pod \"2baf454f-86dc-4e4b-9a53-db1eeec9a497\" (UID: \"2baf454f-86dc-4e4b-9a53-db1eeec9a497\") "
Nov 23 16:49:12 crc kubenswrapper[5050]: I1123 16:49:12.038692 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-dhcp-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2baf454f-86dc-4e4b-9a53-db1eeec9a497-neutron-dhcp-combined-ca-bundle\") pod \"2baf454f-86dc-4e4b-9a53-db1eeec9a497\" (UID: \"2baf454f-86dc-4e4b-9a53-db1eeec9a497\") "
Nov 23 16:49:12 crc kubenswrapper[5050]: I1123 16:49:12.038729 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2baf454f-86dc-4e4b-9a53-db1eeec9a497-ssh-key\") pod \"2baf454f-86dc-4e4b-9a53-db1eeec9a497\" (UID: \"2baf454f-86dc-4e4b-9a53-db1eeec9a497\") "
Nov 23 16:49:12 crc kubenswrapper[5050]: I1123 16:49:12.038771 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/2baf454f-86dc-4e4b-9a53-db1eeec9a497-ceph\") pod \"2baf454f-86dc-4e4b-9a53-db1eeec9a497\" (UID: \"2baf454f-86dc-4e4b-9a53-db1eeec9a497\") "
(UniqueName: \"kubernetes.io/secret/2baf454f-86dc-4e4b-9a53-db1eeec9a497-libvirt-combined-ca-bundle\") pod \"2baf454f-86dc-4e4b-9a53-db1eeec9a497\" (UID: \"2baf454f-86dc-4e4b-9a53-db1eeec9a497\") " Nov 23 16:49:12 crc kubenswrapper[5050]: I1123 16:49:12.038821 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2baf454f-86dc-4e4b-9a53-db1eeec9a497-bootstrap-combined-ca-bundle\") pod \"2baf454f-86dc-4e4b-9a53-db1eeec9a497\" (UID: \"2baf454f-86dc-4e4b-9a53-db1eeec9a497\") " Nov 23 16:49:12 crc kubenswrapper[5050]: I1123 16:49:12.038886 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2baf454f-86dc-4e4b-9a53-db1eeec9a497-ovn-combined-ca-bundle\") pod \"2baf454f-86dc-4e4b-9a53-db1eeec9a497\" (UID: \"2baf454f-86dc-4e4b-9a53-db1eeec9a497\") " Nov 23 16:49:12 crc kubenswrapper[5050]: I1123 16:49:12.049381 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2baf454f-86dc-4e4b-9a53-db1eeec9a497-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "2baf454f-86dc-4e4b-9a53-db1eeec9a497" (UID: "2baf454f-86dc-4e4b-9a53-db1eeec9a497"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:49:12 crc kubenswrapper[5050]: I1123 16:49:12.049973 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2baf454f-86dc-4e4b-9a53-db1eeec9a497-nova-combined-ca-bundle" (OuterVolumeSpecName: "nova-combined-ca-bundle") pod "2baf454f-86dc-4e4b-9a53-db1eeec9a497" (UID: "2baf454f-86dc-4e4b-9a53-db1eeec9a497"). InnerVolumeSpecName "nova-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:49:12 crc kubenswrapper[5050]: I1123 16:49:12.050846 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2baf454f-86dc-4e4b-9a53-db1eeec9a497-libvirt-combined-ca-bundle" (OuterVolumeSpecName: "libvirt-combined-ca-bundle") pod "2baf454f-86dc-4e4b-9a53-db1eeec9a497" (UID: "2baf454f-86dc-4e4b-9a53-db1eeec9a497"). InnerVolumeSpecName "libvirt-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:49:12 crc kubenswrapper[5050]: I1123 16:49:12.051365 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2baf454f-86dc-4e4b-9a53-db1eeec9a497-kube-api-access-hg554" (OuterVolumeSpecName: "kube-api-access-hg554") pod "2baf454f-86dc-4e4b-9a53-db1eeec9a497" (UID: "2baf454f-86dc-4e4b-9a53-db1eeec9a497"). InnerVolumeSpecName "kube-api-access-hg554". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 16:49:12 crc kubenswrapper[5050]: I1123 16:49:12.052960 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2baf454f-86dc-4e4b-9a53-db1eeec9a497-ceph" (OuterVolumeSpecName: "ceph") pod "2baf454f-86dc-4e4b-9a53-db1eeec9a497" (UID: "2baf454f-86dc-4e4b-9a53-db1eeec9a497"). InnerVolumeSpecName "ceph". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:49:12 crc kubenswrapper[5050]: I1123 16:49:12.054910 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2baf454f-86dc-4e4b-9a53-db1eeec9a497-neutron-dhcp-combined-ca-bundle" (OuterVolumeSpecName: "neutron-dhcp-combined-ca-bundle") pod "2baf454f-86dc-4e4b-9a53-db1eeec9a497" (UID: "2baf454f-86dc-4e4b-9a53-db1eeec9a497"). InnerVolumeSpecName "neutron-dhcp-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:49:12 crc kubenswrapper[5050]: I1123 16:49:12.055647 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2baf454f-86dc-4e4b-9a53-db1eeec9a497-telemetry-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-combined-ca-bundle") pod "2baf454f-86dc-4e4b-9a53-db1eeec9a497" (UID: "2baf454f-86dc-4e4b-9a53-db1eeec9a497"). InnerVolumeSpecName "telemetry-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:49:12 crc kubenswrapper[5050]: I1123 16:49:12.056403 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2baf454f-86dc-4e4b-9a53-db1eeec9a497-ovn-combined-ca-bundle" (OuterVolumeSpecName: "ovn-combined-ca-bundle") pod "2baf454f-86dc-4e4b-9a53-db1eeec9a497" (UID: "2baf454f-86dc-4e4b-9a53-db1eeec9a497"). InnerVolumeSpecName "ovn-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:49:12 crc kubenswrapper[5050]: I1123 16:49:12.066691 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2baf454f-86dc-4e4b-9a53-db1eeec9a497-neutron-metadata-combined-ca-bundle" (OuterVolumeSpecName: "neutron-metadata-combined-ca-bundle") pod "2baf454f-86dc-4e4b-9a53-db1eeec9a497" (UID: "2baf454f-86dc-4e4b-9a53-db1eeec9a497"). InnerVolumeSpecName "neutron-metadata-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:49:12 crc kubenswrapper[5050]: I1123 16:49:12.074590 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2baf454f-86dc-4e4b-9a53-db1eeec9a497-neutron-sriov-combined-ca-bundle" (OuterVolumeSpecName: "neutron-sriov-combined-ca-bundle") pod "2baf454f-86dc-4e4b-9a53-db1eeec9a497" (UID: "2baf454f-86dc-4e4b-9a53-db1eeec9a497"). InnerVolumeSpecName "neutron-sriov-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:49:12 crc kubenswrapper[5050]: I1123 16:49:12.108942 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2baf454f-86dc-4e4b-9a53-db1eeec9a497-inventory" (OuterVolumeSpecName: "inventory") pod "2baf454f-86dc-4e4b-9a53-db1eeec9a497" (UID: "2baf454f-86dc-4e4b-9a53-db1eeec9a497"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:49:12 crc kubenswrapper[5050]: I1123 16:49:12.109294 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2baf454f-86dc-4e4b-9a53-db1eeec9a497-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "2baf454f-86dc-4e4b-9a53-db1eeec9a497" (UID: "2baf454f-86dc-4e4b-9a53-db1eeec9a497"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:49:12 crc kubenswrapper[5050]: I1123 16:49:12.140665 5050 reconciler_common.go:293] "Volume detached for volume \"neutron-dhcp-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2baf454f-86dc-4e4b-9a53-db1eeec9a497-neutron-dhcp-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 16:49:12 crc kubenswrapper[5050]: I1123 16:49:12.140987 5050 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2baf454f-86dc-4e4b-9a53-db1eeec9a497-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 23 16:49:12 crc kubenswrapper[5050]: I1123 16:49:12.141185 5050 reconciler_common.go:293] "Volume detached for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2baf454f-86dc-4e4b-9a53-db1eeec9a497-libvirt-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 16:49:12 crc kubenswrapper[5050]: I1123 16:49:12.141317 5050 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/2baf454f-86dc-4e4b-9a53-db1eeec9a497-ceph\") on node \"crc\" DevicePath \"\"" Nov 23 16:49:12 crc kubenswrapper[5050]: I1123 16:49:12.141408 5050 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2baf454f-86dc-4e4b-9a53-db1eeec9a497-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 16:49:12 crc kubenswrapper[5050]: I1123 16:49:12.141558 5050 reconciler_common.go:293] "Volume detached for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2baf454f-86dc-4e4b-9a53-db1eeec9a497-ovn-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 16:49:12 crc kubenswrapper[5050]: I1123 16:49:12.141658 5050 reconciler_common.go:293] "Volume detached for volume \"neutron-sriov-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2baf454f-86dc-4e4b-9a53-db1eeec9a497-neutron-sriov-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 16:49:12 crc kubenswrapper[5050]: I1123 16:49:12.141739 5050 reconciler_common.go:293] "Volume detached for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2baf454f-86dc-4e4b-9a53-db1eeec9a497-telemetry-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 16:49:12 crc kubenswrapper[5050]: I1123 16:49:12.141834 5050 reconciler_common.go:293] "Volume detached for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2baf454f-86dc-4e4b-9a53-db1eeec9a497-neutron-metadata-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 16:49:12 crc kubenswrapper[5050]: I1123 16:49:12.141925 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hg554\" (UniqueName: \"kubernetes.io/projected/2baf454f-86dc-4e4b-9a53-db1eeec9a497-kube-api-access-hg554\") on node \"crc\" DevicePath \"\"" Nov 23 16:49:12 crc kubenswrapper[5050]: I1123 16:49:12.142025 5050 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2baf454f-86dc-4e4b-9a53-db1eeec9a497-inventory\") on node \"crc\" DevicePath \"\"" Nov 23 16:49:12 crc kubenswrapper[5050]: I1123 16:49:12.142117 5050 reconciler_common.go:293] "Volume detached for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2baf454f-86dc-4e4b-9a53-db1eeec9a497-nova-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 16:49:12 crc kubenswrapper[5050]: I1123 16:49:12.460909 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/install-certs-openstack-openstack-cell1-nxlp7" event={"ID":"2baf454f-86dc-4e4b-9a53-db1eeec9a497","Type":"ContainerDied","Data":"e9775996806ada748bedfc6dd4af6b4b01bbc50d91cce31ec224209d62ddc298"} Nov 23 16:49:12 crc kubenswrapper[5050]: I1123 16:49:12.461498 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e9775996806ada748bedfc6dd4af6b4b01bbc50d91cce31ec224209d62ddc298" Nov 23 16:49:12 crc kubenswrapper[5050]: I1123 16:49:12.461016 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-openstack-openstack-cell1-nxlp7" Nov 23 16:49:12 crc kubenswrapper[5050]: I1123 16:49:12.549403 5050 scope.go:117] "RemoveContainer" containerID="d40b7f46f9e2ee457869ccc402db524dd6b3b4db87a0fafb629086ba5d450e76" Nov 23 16:49:12 crc kubenswrapper[5050]: E1123 16:49:12.550470 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 16:49:12 crc kubenswrapper[5050]: I1123 16:49:12.585611 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceph-client-openstack-openstack-cell1-pshzq"] Nov 23 16:49:12 crc kubenswrapper[5050]: E1123 16:49:12.586544 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2baf454f-86dc-4e4b-9a53-db1eeec9a497" containerName="install-certs-openstack-openstack-cell1" Nov 23 16:49:12 crc kubenswrapper[5050]: I1123 16:49:12.586574 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="2baf454f-86dc-4e4b-9a53-db1eeec9a497" containerName="install-certs-openstack-openstack-cell1" Nov 23 16:49:12 crc kubenswrapper[5050]: I1123 16:49:12.586940 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="2baf454f-86dc-4e4b-9a53-db1eeec9a497" containerName="install-certs-openstack-openstack-cell1" Nov 23 16:49:12 crc kubenswrapper[5050]: I1123 16:49:12.588145 5050 util.go:30] "No sandbox for pod can be found. 
Nov 23 16:49:12 crc kubenswrapper[5050]: I1123 16:49:12.588145 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceph-client-openstack-openstack-cell1-pshzq"
Nov 23 16:49:12 crc kubenswrapper[5050]: I1123 16:49:12.592153 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret"
Nov 23 16:49:12 crc kubenswrapper[5050]: I1123 16:49:12.592348 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-ckrpf"
Nov 23 16:49:12 crc kubenswrapper[5050]: I1123 16:49:12.592517 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Nov 23 16:49:12 crc kubenswrapper[5050]: I1123 16:49:12.592517 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1"
Nov 23 16:49:12 crc kubenswrapper[5050]: I1123 16:49:12.598163 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceph-client-openstack-openstack-cell1-pshzq"]
Nov 23 16:49:12 crc kubenswrapper[5050]: I1123 16:49:12.755966 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/03839446-b063-40d2-b17a-af3b1786f3a5-inventory\") pod \"ceph-client-openstack-openstack-cell1-pshzq\" (UID: \"03839446-b063-40d2-b17a-af3b1786f3a5\") " pod="openstack/ceph-client-openstack-openstack-cell1-pshzq"
Nov 23 16:49:12 crc kubenswrapper[5050]: I1123 16:49:12.757005 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/03839446-b063-40d2-b17a-af3b1786f3a5-ceph\") pod \"ceph-client-openstack-openstack-cell1-pshzq\" (UID: \"03839446-b063-40d2-b17a-af3b1786f3a5\") " pod="openstack/ceph-client-openstack-openstack-cell1-pshzq"
Nov 23 16:49:12 crc kubenswrapper[5050]: I1123 16:49:12.757225 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/03839446-b063-40d2-b17a-af3b1786f3a5-ssh-key\") pod \"ceph-client-openstack-openstack-cell1-pshzq\" (UID: \"03839446-b063-40d2-b17a-af3b1786f3a5\") " pod="openstack/ceph-client-openstack-openstack-cell1-pshzq"
Nov 23 16:49:12 crc kubenswrapper[5050]: I1123 16:49:12.757325 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jwqcs\" (UniqueName: \"kubernetes.io/projected/03839446-b063-40d2-b17a-af3b1786f3a5-kube-api-access-jwqcs\") pod \"ceph-client-openstack-openstack-cell1-pshzq\" (UID: \"03839446-b063-40d2-b17a-af3b1786f3a5\") " pod="openstack/ceph-client-openstack-openstack-cell1-pshzq"
Nov 23 16:49:12 crc kubenswrapper[5050]: I1123 16:49:12.859266 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/03839446-b063-40d2-b17a-af3b1786f3a5-inventory\") pod \"ceph-client-openstack-openstack-cell1-pshzq\" (UID: \"03839446-b063-40d2-b17a-af3b1786f3a5\") " pod="openstack/ceph-client-openstack-openstack-cell1-pshzq"
Nov 23 16:49:12 crc kubenswrapper[5050]: I1123 16:49:12.859660 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/03839446-b063-40d2-b17a-af3b1786f3a5-ceph\") pod \"ceph-client-openstack-openstack-cell1-pshzq\" (UID: \"03839446-b063-40d2-b17a-af3b1786f3a5\") " pod="openstack/ceph-client-openstack-openstack-cell1-pshzq"
Nov 23 16:49:12 crc kubenswrapper[5050]: I1123 16:49:12.859853 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/03839446-b063-40d2-b17a-af3b1786f3a5-ssh-key\") pod \"ceph-client-openstack-openstack-cell1-pshzq\" (UID: \"03839446-b063-40d2-b17a-af3b1786f3a5\") " pod="openstack/ceph-client-openstack-openstack-cell1-pshzq"
Nov 23 16:49:12 crc kubenswrapper[5050]: I1123 16:49:12.859999 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jwqcs\" (UniqueName: \"kubernetes.io/projected/03839446-b063-40d2-b17a-af3b1786f3a5-kube-api-access-jwqcs\") pod \"ceph-client-openstack-openstack-cell1-pshzq\" (UID: \"03839446-b063-40d2-b17a-af3b1786f3a5\") " pod="openstack/ceph-client-openstack-openstack-cell1-pshzq"
Nov 23 16:49:12 crc kubenswrapper[5050]: I1123 16:49:12.865598 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/03839446-b063-40d2-b17a-af3b1786f3a5-ssh-key\") pod \"ceph-client-openstack-openstack-cell1-pshzq\" (UID: \"03839446-b063-40d2-b17a-af3b1786f3a5\") " pod="openstack/ceph-client-openstack-openstack-cell1-pshzq"
Nov 23 16:49:12 crc kubenswrapper[5050]: I1123 16:49:12.866551 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/03839446-b063-40d2-b17a-af3b1786f3a5-inventory\") pod \"ceph-client-openstack-openstack-cell1-pshzq\" (UID: \"03839446-b063-40d2-b17a-af3b1786f3a5\") " pod="openstack/ceph-client-openstack-openstack-cell1-pshzq"
Nov 23 16:49:12 crc kubenswrapper[5050]: I1123 16:49:12.867301 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/03839446-b063-40d2-b17a-af3b1786f3a5-ceph\") pod \"ceph-client-openstack-openstack-cell1-pshzq\" (UID: \"03839446-b063-40d2-b17a-af3b1786f3a5\") " pod="openstack/ceph-client-openstack-openstack-cell1-pshzq"
Nov 23 16:49:12 crc kubenswrapper[5050]: I1123 16:49:12.889884 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jwqcs\" (UniqueName: \"kubernetes.io/projected/03839446-b063-40d2-b17a-af3b1786f3a5-kube-api-access-jwqcs\") pod \"ceph-client-openstack-openstack-cell1-pshzq\" (UID: \"03839446-b063-40d2-b17a-af3b1786f3a5\") " pod="openstack/ceph-client-openstack-openstack-cell1-pshzq"
Nov 23 16:49:12 crc kubenswrapper[5050]: I1123 16:49:12.922554 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceph-client-openstack-openstack-cell1-pshzq"
Nov 23 16:49:13 crc kubenswrapper[5050]: I1123 16:49:13.597947 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceph-client-openstack-openstack-cell1-pshzq"]
Nov 23 16:49:14 crc kubenswrapper[5050]: I1123 16:49:14.491792 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-client-openstack-openstack-cell1-pshzq" event={"ID":"03839446-b063-40d2-b17a-af3b1786f3a5","Type":"ContainerStarted","Data":"dc6cbd9492c9547fd8e39b5bd50a8e89d36da0cc93b53b5632225f43dc5c5378"}
Nov 23 16:49:14 crc kubenswrapper[5050]: I1123 16:49:14.492185 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-client-openstack-openstack-cell1-pshzq" event={"ID":"03839446-b063-40d2-b17a-af3b1786f3a5","Type":"ContainerStarted","Data":"5be859c4a189c756450da5c858309734497191d05ed54c1b32c68ee500b3d1fb"}
Nov 23 16:49:14 crc kubenswrapper[5050]: I1123 16:49:14.525712 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceph-client-openstack-openstack-cell1-pshzq" podStartSLOduration=2.039427187 podStartE2EDuration="2.525683002s" podCreationTimestamp="2025-11-23 16:49:12 +0000 UTC" firstStartedPulling="2025-11-23 16:49:13.608709023 +0000 UTC m=+7648.775705548" lastFinishedPulling="2025-11-23 16:49:14.094964838 +0000 UTC m=+7649.261961363" observedRunningTime="2025-11-23 16:49:14.512265053 +0000 UTC m=+7649.679261548" watchObservedRunningTime="2025-11-23 16:49:14.525683002 +0000 UTC m=+7649.692679517"
Nov 23 16:49:19 crc kubenswrapper[5050]: I1123 16:49:19.573966 5050 generic.go:334] "Generic (PLEG): container finished" podID="03839446-b063-40d2-b17a-af3b1786f3a5" containerID="dc6cbd9492c9547fd8e39b5bd50a8e89d36da0cc93b53b5632225f43dc5c5378" exitCode=0
Nov 23 16:49:19 crc kubenswrapper[5050]: I1123 16:49:19.574821 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-client-openstack-openstack-cell1-pshzq" event={"ID":"03839446-b063-40d2-b17a-af3b1786f3a5","Type":"ContainerDied","Data":"dc6cbd9492c9547fd8e39b5bd50a8e89d36da0cc93b53b5632225f43dc5c5378"}
Nov 23 16:49:21 crc kubenswrapper[5050]: I1123 16:49:21.197490 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceph-client-openstack-openstack-cell1-pshzq"
Nov 23 16:49:21 crc kubenswrapper[5050]: I1123 16:49:21.238925 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/03839446-b063-40d2-b17a-af3b1786f3a5-ceph\") pod \"03839446-b063-40d2-b17a-af3b1786f3a5\" (UID: \"03839446-b063-40d2-b17a-af3b1786f3a5\") "
Nov 23 16:49:21 crc kubenswrapper[5050]: I1123 16:49:21.239563 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/03839446-b063-40d2-b17a-af3b1786f3a5-inventory\") pod \"03839446-b063-40d2-b17a-af3b1786f3a5\" (UID: \"03839446-b063-40d2-b17a-af3b1786f3a5\") "
Nov 23 16:49:21 crc kubenswrapper[5050]: I1123 16:49:21.248544 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/03839446-b063-40d2-b17a-af3b1786f3a5-ssh-key\") pod \"03839446-b063-40d2-b17a-af3b1786f3a5\" (UID: \"03839446-b063-40d2-b17a-af3b1786f3a5\") "
Nov 23 16:49:21 crc kubenswrapper[5050]: I1123 16:49:21.248613 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jwqcs\" (UniqueName: \"kubernetes.io/projected/03839446-b063-40d2-b17a-af3b1786f3a5-kube-api-access-jwqcs\") pod \"03839446-b063-40d2-b17a-af3b1786f3a5\" (UID: \"03839446-b063-40d2-b17a-af3b1786f3a5\") "
Nov 23 16:49:21 crc kubenswrapper[5050]: I1123 16:49:21.248947 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/03839446-b063-40d2-b17a-af3b1786f3a5-ceph" (OuterVolumeSpecName: "ceph") pod "03839446-b063-40d2-b17a-af3b1786f3a5" (UID: "03839446-b063-40d2-b17a-af3b1786f3a5"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 23 16:49:21 crc kubenswrapper[5050]: I1123 16:49:21.250848 5050 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/03839446-b063-40d2-b17a-af3b1786f3a5-ceph\") on node \"crc\" DevicePath \"\""
Nov 23 16:49:21 crc kubenswrapper[5050]: I1123 16:49:21.253302 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/03839446-b063-40d2-b17a-af3b1786f3a5-kube-api-access-jwqcs" (OuterVolumeSpecName: "kube-api-access-jwqcs") pod "03839446-b063-40d2-b17a-af3b1786f3a5" (UID: "03839446-b063-40d2-b17a-af3b1786f3a5"). InnerVolumeSpecName "kube-api-access-jwqcs". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 23 16:49:21 crc kubenswrapper[5050]: E1123 16:49:21.295154 5050 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/03839446-b063-40d2-b17a-af3b1786f3a5-inventory podName:03839446-b063-40d2-b17a-af3b1786f3a5 nodeName:}" failed. No retries permitted until 2025-11-23 16:49:21.795100578 +0000 UTC m=+7656.962097073 (durationBeforeRetry 500ms). Error: error cleaning subPath mounts for volume "inventory" (UniqueName: "kubernetes.io/secret/03839446-b063-40d2-b17a-af3b1786f3a5-inventory") pod "03839446-b063-40d2-b17a-af3b1786f3a5" (UID: "03839446-b063-40d2-b17a-af3b1786f3a5") : error deleting /var/lib/kubelet/pods/03839446-b063-40d2-b17a-af3b1786f3a5/volume-subpaths: remove /var/lib/kubelet/pods/03839446-b063-40d2-b17a-af3b1786f3a5/volume-subpaths: no such file or directory
Nov 23 16:49:21 crc kubenswrapper[5050]: I1123 16:49:21.302599 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/03839446-b063-40d2-b17a-af3b1786f3a5-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "03839446-b063-40d2-b17a-af3b1786f3a5" (UID: "03839446-b063-40d2-b17a-af3b1786f3a5"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 23 16:49:21 crc kubenswrapper[5050]: I1123 16:49:21.353601 5050 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/03839446-b063-40d2-b17a-af3b1786f3a5-ssh-key\") on node \"crc\" DevicePath \"\""
Nov 23 16:49:21 crc kubenswrapper[5050]: I1123 16:49:21.353879 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jwqcs\" (UniqueName: \"kubernetes.io/projected/03839446-b063-40d2-b17a-af3b1786f3a5-kube-api-access-jwqcs\") on node \"crc\" DevicePath \"\""
Nov 23 16:49:21 crc kubenswrapper[5050]: I1123 16:49:21.600176 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-client-openstack-openstack-cell1-pshzq" event={"ID":"03839446-b063-40d2-b17a-af3b1786f3a5","Type":"ContainerDied","Data":"5be859c4a189c756450da5c858309734497191d05ed54c1b32c68ee500b3d1fb"}
Nov 23 16:49:21 crc kubenswrapper[5050]: I1123 16:49:21.600241 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5be859c4a189c756450da5c858309734497191d05ed54c1b32c68ee500b3d1fb"
Nov 23 16:49:21 crc kubenswrapper[5050]: I1123 16:49:21.600312 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceph-client-openstack-openstack-cell1-pshzq"
Nov 23 16:49:21 crc kubenswrapper[5050]: I1123 16:49:21.741620 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-openstack-openstack-cell1-f6s64"]
Nov 23 16:49:21 crc kubenswrapper[5050]: E1123 16:49:21.742329 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="03839446-b063-40d2-b17a-af3b1786f3a5" containerName="ceph-client-openstack-openstack-cell1"
Nov 23 16:49:21 crc kubenswrapper[5050]: I1123 16:49:21.742355 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="03839446-b063-40d2-b17a-af3b1786f3a5" containerName="ceph-client-openstack-openstack-cell1"
Nov 23 16:49:21 crc kubenswrapper[5050]: I1123 16:49:21.742619 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="03839446-b063-40d2-b17a-af3b1786f3a5" containerName="ceph-client-openstack-openstack-cell1"
Nov 23 16:49:21 crc kubenswrapper[5050]: I1123 16:49:21.743684 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-openstack-openstack-cell1-f6s64"
Nov 23 16:49:21 crc kubenswrapper[5050]: I1123 16:49:21.750066 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-config"
Nov 23 16:49:21 crc kubenswrapper[5050]: I1123 16:49:21.754759 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-openstack-openstack-cell1-f6s64"]
Nov 23 16:49:21 crc kubenswrapper[5050]: I1123 16:49:21.872361 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/03839446-b063-40d2-b17a-af3b1786f3a5-inventory\") pod \"03839446-b063-40d2-b17a-af3b1786f3a5\" (UID: \"03839446-b063-40d2-b17a-af3b1786f3a5\") "
Nov 23 16:49:21 crc kubenswrapper[5050]: I1123 16:49:21.873081 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/53184db4-ff0a-431e-bbbc-cc6295558c83-ovncontroller-config-0\") pod \"ovn-openstack-openstack-cell1-f6s64\" (UID: \"53184db4-ff0a-431e-bbbc-cc6295558c83\") " pod="openstack/ovn-openstack-openstack-cell1-f6s64"
Nov 23 16:49:21 crc kubenswrapper[5050]: I1123 16:49:21.873167 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/53184db4-ff0a-431e-bbbc-cc6295558c83-ceph\") pod \"ovn-openstack-openstack-cell1-f6s64\" (UID: \"53184db4-ff0a-431e-bbbc-cc6295558c83\") " pod="openstack/ovn-openstack-openstack-cell1-f6s64"
Nov 23 16:49:21 crc kubenswrapper[5050]: I1123 16:49:21.873244 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/53184db4-ff0a-431e-bbbc-cc6295558c83-ssh-key\") pod \"ovn-openstack-openstack-cell1-f6s64\" (UID: \"53184db4-ff0a-431e-bbbc-cc6295558c83\") " pod="openstack/ovn-openstack-openstack-cell1-f6s64"
Nov 23 16:49:21 crc kubenswrapper[5050]: I1123 16:49:21.873312 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mktps\" (UniqueName: \"kubernetes.io/projected/53184db4-ff0a-431e-bbbc-cc6295558c83-kube-api-access-mktps\") pod \"ovn-openstack-openstack-cell1-f6s64\" (UID: \"53184db4-ff0a-431e-bbbc-cc6295558c83\") " pod="openstack/ovn-openstack-openstack-cell1-f6s64"
Nov 23 16:49:21 crc kubenswrapper[5050]: I1123 16:49:21.873898 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/53184db4-ff0a-431e-bbbc-cc6295558c83-inventory\") pod \"ovn-openstack-openstack-cell1-f6s64\" (UID: \"53184db4-ff0a-431e-bbbc-cc6295558c83\") " pod="openstack/ovn-openstack-openstack-cell1-f6s64"
Nov 23 16:49:21 crc kubenswrapper[5050]: I1123 16:49:21.874113 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/53184db4-ff0a-431e-bbbc-cc6295558c83-ovn-combined-ca-bundle\") pod \"ovn-openstack-openstack-cell1-f6s64\" (UID: \"53184db4-ff0a-431e-bbbc-cc6295558c83\") " pod="openstack/ovn-openstack-openstack-cell1-f6s64"
"03839446-b063-40d2-b17a-af3b1786f3a5" (UID: "03839446-b063-40d2-b17a-af3b1786f3a5"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:49:21 crc kubenswrapper[5050]: I1123 16:49:21.977737 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/53184db4-ff0a-431e-bbbc-cc6295558c83-inventory\") pod \"ovn-openstack-openstack-cell1-f6s64\" (UID: \"53184db4-ff0a-431e-bbbc-cc6295558c83\") " pod="openstack/ovn-openstack-openstack-cell1-f6s64" Nov 23 16:49:21 crc kubenswrapper[5050]: I1123 16:49:21.977911 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/53184db4-ff0a-431e-bbbc-cc6295558c83-ovn-combined-ca-bundle\") pod \"ovn-openstack-openstack-cell1-f6s64\" (UID: \"53184db4-ff0a-431e-bbbc-cc6295558c83\") " pod="openstack/ovn-openstack-openstack-cell1-f6s64" Nov 23 16:49:21 crc kubenswrapper[5050]: I1123 16:49:21.978005 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/53184db4-ff0a-431e-bbbc-cc6295558c83-ovncontroller-config-0\") pod \"ovn-openstack-openstack-cell1-f6s64\" (UID: \"53184db4-ff0a-431e-bbbc-cc6295558c83\") " pod="openstack/ovn-openstack-openstack-cell1-f6s64" Nov 23 16:49:21 crc kubenswrapper[5050]: I1123 16:49:21.978056 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/53184db4-ff0a-431e-bbbc-cc6295558c83-ceph\") pod \"ovn-openstack-openstack-cell1-f6s64\" (UID: \"53184db4-ff0a-431e-bbbc-cc6295558c83\") " pod="openstack/ovn-openstack-openstack-cell1-f6s64" Nov 23 16:49:21 crc kubenswrapper[5050]: I1123 16:49:21.978102 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/53184db4-ff0a-431e-bbbc-cc6295558c83-ssh-key\") pod \"ovn-openstack-openstack-cell1-f6s64\" (UID: \"53184db4-ff0a-431e-bbbc-cc6295558c83\") " pod="openstack/ovn-openstack-openstack-cell1-f6s64" Nov 23 16:49:21 crc kubenswrapper[5050]: I1123 16:49:21.978169 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mktps\" (UniqueName: \"kubernetes.io/projected/53184db4-ff0a-431e-bbbc-cc6295558c83-kube-api-access-mktps\") pod \"ovn-openstack-openstack-cell1-f6s64\" (UID: \"53184db4-ff0a-431e-bbbc-cc6295558c83\") " pod="openstack/ovn-openstack-openstack-cell1-f6s64" Nov 23 16:49:21 crc kubenswrapper[5050]: I1123 16:49:21.978381 5050 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/03839446-b063-40d2-b17a-af3b1786f3a5-inventory\") on node \"crc\" DevicePath \"\"" Nov 23 16:49:21 crc kubenswrapper[5050]: I1123 16:49:21.979093 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/53184db4-ff0a-431e-bbbc-cc6295558c83-ovncontroller-config-0\") pod \"ovn-openstack-openstack-cell1-f6s64\" (UID: \"53184db4-ff0a-431e-bbbc-cc6295558c83\") " pod="openstack/ovn-openstack-openstack-cell1-f6s64" Nov 23 16:49:21 crc kubenswrapper[5050]: I1123 16:49:21.982739 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/53184db4-ff0a-431e-bbbc-cc6295558c83-inventory\") pod \"ovn-openstack-openstack-cell1-f6s64\" (UID: 
\"53184db4-ff0a-431e-bbbc-cc6295558c83\") " pod="openstack/ovn-openstack-openstack-cell1-f6s64" Nov 23 16:49:21 crc kubenswrapper[5050]: I1123 16:49:21.982848 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/53184db4-ff0a-431e-bbbc-cc6295558c83-ceph\") pod \"ovn-openstack-openstack-cell1-f6s64\" (UID: \"53184db4-ff0a-431e-bbbc-cc6295558c83\") " pod="openstack/ovn-openstack-openstack-cell1-f6s64" Nov 23 16:49:21 crc kubenswrapper[5050]: I1123 16:49:21.983479 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/53184db4-ff0a-431e-bbbc-cc6295558c83-ssh-key\") pod \"ovn-openstack-openstack-cell1-f6s64\" (UID: \"53184db4-ff0a-431e-bbbc-cc6295558c83\") " pod="openstack/ovn-openstack-openstack-cell1-f6s64" Nov 23 16:49:21 crc kubenswrapper[5050]: I1123 16:49:21.983810 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/53184db4-ff0a-431e-bbbc-cc6295558c83-ovn-combined-ca-bundle\") pod \"ovn-openstack-openstack-cell1-f6s64\" (UID: \"53184db4-ff0a-431e-bbbc-cc6295558c83\") " pod="openstack/ovn-openstack-openstack-cell1-f6s64" Nov 23 16:49:21 crc kubenswrapper[5050]: I1123 16:49:21.995014 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mktps\" (UniqueName: \"kubernetes.io/projected/53184db4-ff0a-431e-bbbc-cc6295558c83-kube-api-access-mktps\") pod \"ovn-openstack-openstack-cell1-f6s64\" (UID: \"53184db4-ff0a-431e-bbbc-cc6295558c83\") " pod="openstack/ovn-openstack-openstack-cell1-f6s64" Nov 23 16:49:22 crc kubenswrapper[5050]: I1123 16:49:22.076831 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-openstack-openstack-cell1-f6s64" Nov 23 16:49:22 crc kubenswrapper[5050]: I1123 16:49:22.495570 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-openstack-openstack-cell1-f6s64"] Nov 23 16:49:22 crc kubenswrapper[5050]: I1123 16:49:22.618662 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-openstack-openstack-cell1-f6s64" event={"ID":"53184db4-ff0a-431e-bbbc-cc6295558c83","Type":"ContainerStarted","Data":"0aa58320ee448b6cdd32e5d0e297dc8bb409c95aff6b81144b7f342ed4d9818a"} Nov 23 16:49:23 crc kubenswrapper[5050]: I1123 16:49:23.633018 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-openstack-openstack-cell1-f6s64" event={"ID":"53184db4-ff0a-431e-bbbc-cc6295558c83","Type":"ContainerStarted","Data":"6b0cc7619c5fbfcce61edb4d93bcd713105f345c5141fe99183d53a4529ca8d5"} Nov 23 16:49:23 crc kubenswrapper[5050]: I1123 16:49:23.668327 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-openstack-openstack-cell1-f6s64" podStartSLOduration=2.159878107 podStartE2EDuration="2.668299839s" podCreationTimestamp="2025-11-23 16:49:21 +0000 UTC" firstStartedPulling="2025-11-23 16:49:22.504788057 +0000 UTC m=+7657.671784542" lastFinishedPulling="2025-11-23 16:49:23.013209739 +0000 UTC m=+7658.180206274" observedRunningTime="2025-11-23 16:49:23.663203094 +0000 UTC m=+7658.830199579" watchObservedRunningTime="2025-11-23 16:49:23.668299839 +0000 UTC m=+7658.835296324" Nov 23 16:49:24 crc kubenswrapper[5050]: I1123 16:49:24.549722 5050 scope.go:117] "RemoveContainer" containerID="d40b7f46f9e2ee457869ccc402db524dd6b3b4db87a0fafb629086ba5d450e76" Nov 23 16:49:24 crc kubenswrapper[5050]: E1123 16:49:24.551061 5050 
Nov 23 16:49:24 crc kubenswrapper[5050]: E1123 16:49:24.551061 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f"
Nov 23 16:49:38 crc kubenswrapper[5050]: I1123 16:49:38.573891 5050 scope.go:117] "RemoveContainer" containerID="d40b7f46f9e2ee457869ccc402db524dd6b3b4db87a0fafb629086ba5d450e76"
Nov 23 16:49:38 crc kubenswrapper[5050]: E1123 16:49:38.575599 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f"
Nov 23 16:49:53 crc kubenswrapper[5050]: I1123 16:49:53.550824 5050 scope.go:117] "RemoveContainer" containerID="d40b7f46f9e2ee457869ccc402db524dd6b3b4db87a0fafb629086ba5d450e76"
Nov 23 16:49:53 crc kubenswrapper[5050]: E1123 16:49:53.552072 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f"
Nov 23 16:50:06 crc kubenswrapper[5050]: I1123 16:50:06.550179 5050 scope.go:117] "RemoveContainer" containerID="d40b7f46f9e2ee457869ccc402db524dd6b3b4db87a0fafb629086ba5d450e76"
Nov 23 16:50:06 crc kubenswrapper[5050]: E1123 16:50:06.551400 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f"
Nov 23 16:50:21 crc kubenswrapper[5050]: I1123 16:50:21.549784 5050 scope.go:117] "RemoveContainer" containerID="d40b7f46f9e2ee457869ccc402db524dd6b3b4db87a0fafb629086ba5d450e76"
Nov 23 16:50:21 crc kubenswrapper[5050]: E1123 16:50:21.550654 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f"
Nov 23 16:50:32 crc kubenswrapper[5050]: I1123 16:50:32.614823 5050 generic.go:334] "Generic (PLEG): container finished" podID="53184db4-ff0a-431e-bbbc-cc6295558c83" containerID="6b0cc7619c5fbfcce61edb4d93bcd713105f345c5141fe99183d53a4529ca8d5" exitCode=0
event={"ID":"53184db4-ff0a-431e-bbbc-cc6295558c83","Type":"ContainerDied","Data":"6b0cc7619c5fbfcce61edb4d93bcd713105f345c5141fe99183d53a4529ca8d5"} Nov 23 16:50:34 crc kubenswrapper[5050]: I1123 16:50:34.284235 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-openstack-openstack-cell1-f6s64" Nov 23 16:50:34 crc kubenswrapper[5050]: I1123 16:50:34.320118 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mktps\" (UniqueName: \"kubernetes.io/projected/53184db4-ff0a-431e-bbbc-cc6295558c83-kube-api-access-mktps\") pod \"53184db4-ff0a-431e-bbbc-cc6295558c83\" (UID: \"53184db4-ff0a-431e-bbbc-cc6295558c83\") " Nov 23 16:50:34 crc kubenswrapper[5050]: I1123 16:50:34.320179 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/53184db4-ff0a-431e-bbbc-cc6295558c83-ovncontroller-config-0\") pod \"53184db4-ff0a-431e-bbbc-cc6295558c83\" (UID: \"53184db4-ff0a-431e-bbbc-cc6295558c83\") " Nov 23 16:50:34 crc kubenswrapper[5050]: I1123 16:50:34.320307 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/53184db4-ff0a-431e-bbbc-cc6295558c83-ceph\") pod \"53184db4-ff0a-431e-bbbc-cc6295558c83\" (UID: \"53184db4-ff0a-431e-bbbc-cc6295558c83\") " Nov 23 16:50:34 crc kubenswrapper[5050]: I1123 16:50:34.320376 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/53184db4-ff0a-431e-bbbc-cc6295558c83-ovn-combined-ca-bundle\") pod \"53184db4-ff0a-431e-bbbc-cc6295558c83\" (UID: \"53184db4-ff0a-431e-bbbc-cc6295558c83\") " Nov 23 16:50:34 crc kubenswrapper[5050]: I1123 16:50:34.320437 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/53184db4-ff0a-431e-bbbc-cc6295558c83-ssh-key\") pod \"53184db4-ff0a-431e-bbbc-cc6295558c83\" (UID: \"53184db4-ff0a-431e-bbbc-cc6295558c83\") " Nov 23 16:50:34 crc kubenswrapper[5050]: I1123 16:50:34.320476 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/53184db4-ff0a-431e-bbbc-cc6295558c83-inventory\") pod \"53184db4-ff0a-431e-bbbc-cc6295558c83\" (UID: \"53184db4-ff0a-431e-bbbc-cc6295558c83\") " Nov 23 16:50:34 crc kubenswrapper[5050]: I1123 16:50:34.339497 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/53184db4-ff0a-431e-bbbc-cc6295558c83-ceph" (OuterVolumeSpecName: "ceph") pod "53184db4-ff0a-431e-bbbc-cc6295558c83" (UID: "53184db4-ff0a-431e-bbbc-cc6295558c83"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:50:34 crc kubenswrapper[5050]: I1123 16:50:34.340359 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/53184db4-ff0a-431e-bbbc-cc6295558c83-kube-api-access-mktps" (OuterVolumeSpecName: "kube-api-access-mktps") pod "53184db4-ff0a-431e-bbbc-cc6295558c83" (UID: "53184db4-ff0a-431e-bbbc-cc6295558c83"). InnerVolumeSpecName "kube-api-access-mktps". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 16:50:34 crc kubenswrapper[5050]: I1123 16:50:34.352148 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/53184db4-ff0a-431e-bbbc-cc6295558c83-ovn-combined-ca-bundle" (OuterVolumeSpecName: "ovn-combined-ca-bundle") pod "53184db4-ff0a-431e-bbbc-cc6295558c83" (UID: "53184db4-ff0a-431e-bbbc-cc6295558c83"). InnerVolumeSpecName "ovn-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:50:34 crc kubenswrapper[5050]: I1123 16:50:34.359921 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/53184db4-ff0a-431e-bbbc-cc6295558c83-ovncontroller-config-0" (OuterVolumeSpecName: "ovncontroller-config-0") pod "53184db4-ff0a-431e-bbbc-cc6295558c83" (UID: "53184db4-ff0a-431e-bbbc-cc6295558c83"). InnerVolumeSpecName "ovncontroller-config-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 16:50:34 crc kubenswrapper[5050]: I1123 16:50:34.378260 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/53184db4-ff0a-431e-bbbc-cc6295558c83-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "53184db4-ff0a-431e-bbbc-cc6295558c83" (UID: "53184db4-ff0a-431e-bbbc-cc6295558c83"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:50:34 crc kubenswrapper[5050]: I1123 16:50:34.378590 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/53184db4-ff0a-431e-bbbc-cc6295558c83-inventory" (OuterVolumeSpecName: "inventory") pod "53184db4-ff0a-431e-bbbc-cc6295558c83" (UID: "53184db4-ff0a-431e-bbbc-cc6295558c83"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:50:34 crc kubenswrapper[5050]: I1123 16:50:34.424741 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mktps\" (UniqueName: \"kubernetes.io/projected/53184db4-ff0a-431e-bbbc-cc6295558c83-kube-api-access-mktps\") on node \"crc\" DevicePath \"\"" Nov 23 16:50:34 crc kubenswrapper[5050]: I1123 16:50:34.424794 5050 reconciler_common.go:293] "Volume detached for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/53184db4-ff0a-431e-bbbc-cc6295558c83-ovncontroller-config-0\") on node \"crc\" DevicePath \"\"" Nov 23 16:50:34 crc kubenswrapper[5050]: I1123 16:50:34.425120 5050 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/53184db4-ff0a-431e-bbbc-cc6295558c83-ceph\") on node \"crc\" DevicePath \"\"" Nov 23 16:50:34 crc kubenswrapper[5050]: I1123 16:50:34.425143 5050 reconciler_common.go:293] "Volume detached for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/53184db4-ff0a-431e-bbbc-cc6295558c83-ovn-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 16:50:34 crc kubenswrapper[5050]: I1123 16:50:34.425152 5050 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/53184db4-ff0a-431e-bbbc-cc6295558c83-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 23 16:50:34 crc kubenswrapper[5050]: I1123 16:50:34.425396 5050 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/53184db4-ff0a-431e-bbbc-cc6295558c83-inventory\") on node \"crc\" DevicePath \"\"" Nov 23 16:50:34 crc kubenswrapper[5050]: I1123 16:50:34.652598 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/ovn-openstack-openstack-cell1-f6s64" event={"ID":"53184db4-ff0a-431e-bbbc-cc6295558c83","Type":"ContainerDied","Data":"0aa58320ee448b6cdd32e5d0e297dc8bb409c95aff6b81144b7f342ed4d9818a"} Nov 23 16:50:34 crc kubenswrapper[5050]: I1123 16:50:34.652670 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0aa58320ee448b6cdd32e5d0e297dc8bb409c95aff6b81144b7f342ed4d9818a" Nov 23 16:50:34 crc kubenswrapper[5050]: I1123 16:50:34.652708 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-openstack-openstack-cell1-f6s64" Nov 23 16:50:34 crc kubenswrapper[5050]: I1123 16:50:34.784584 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-metadata-openstack-openstack-cell1-zp8dl"] Nov 23 16:50:34 crc kubenswrapper[5050]: E1123 16:50:34.785345 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="53184db4-ff0a-431e-bbbc-cc6295558c83" containerName="ovn-openstack-openstack-cell1" Nov 23 16:50:34 crc kubenswrapper[5050]: I1123 16:50:34.785378 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="53184db4-ff0a-431e-bbbc-cc6295558c83" containerName="ovn-openstack-openstack-cell1" Nov 23 16:50:34 crc kubenswrapper[5050]: I1123 16:50:34.785719 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="53184db4-ff0a-431e-bbbc-cc6295558c83" containerName="ovn-openstack-openstack-cell1" Nov 23 16:50:34 crc kubenswrapper[5050]: I1123 16:50:34.786645 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-openstack-openstack-cell1-zp8dl" Nov 23 16:50:34 crc kubenswrapper[5050]: I1123 16:50:34.790406 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-ckrpf" Nov 23 16:50:34 crc kubenswrapper[5050]: I1123 16:50:34.791032 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Nov 23 16:50:34 crc kubenswrapper[5050]: I1123 16:50:34.794054 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-ovn-metadata-agent-neutron-config" Nov 23 16:50:34 crc kubenswrapper[5050]: I1123 16:50:34.794939 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 23 16:50:34 crc kubenswrapper[5050]: I1123 16:50:34.795485 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 23 16:50:34 crc kubenswrapper[5050]: I1123 16:50:34.798277 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-neutron-config" Nov 23 16:50:34 crc kubenswrapper[5050]: I1123 16:50:34.821241 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-metadata-openstack-openstack-cell1-zp8dl"] Nov 23 16:50:34 crc kubenswrapper[5050]: I1123 16:50:34.938520 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m8hn6\" (UniqueName: \"kubernetes.io/projected/1b78d9c7-ae76-471e-a3a5-04939c991c97-kube-api-access-m8hn6\") pod \"neutron-metadata-openstack-openstack-cell1-zp8dl\" (UID: \"1b78d9c7-ae76-471e-a3a5-04939c991c97\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-zp8dl" Nov 23 16:50:34 crc kubenswrapper[5050]: I1123 16:50:34.938630 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: 
\"kubernetes.io/secret/1b78d9c7-ae76-471e-a3a5-04939c991c97-inventory\") pod \"neutron-metadata-openstack-openstack-cell1-zp8dl\" (UID: \"1b78d9c7-ae76-471e-a3a5-04939c991c97\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-zp8dl" Nov 23 16:50:34 crc kubenswrapper[5050]: I1123 16:50:34.938690 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/1b78d9c7-ae76-471e-a3a5-04939c991c97-nova-metadata-neutron-config-0\") pod \"neutron-metadata-openstack-openstack-cell1-zp8dl\" (UID: \"1b78d9c7-ae76-471e-a3a5-04939c991c97\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-zp8dl" Nov 23 16:50:34 crc kubenswrapper[5050]: I1123 16:50:34.938783 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/1b78d9c7-ae76-471e-a3a5-04939c991c97-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-openstack-openstack-cell1-zp8dl\" (UID: \"1b78d9c7-ae76-471e-a3a5-04939c991c97\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-zp8dl" Nov 23 16:50:34 crc kubenswrapper[5050]: I1123 16:50:34.938916 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1b78d9c7-ae76-471e-a3a5-04939c991c97-ssh-key\") pod \"neutron-metadata-openstack-openstack-cell1-zp8dl\" (UID: \"1b78d9c7-ae76-471e-a3a5-04939c991c97\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-zp8dl" Nov 23 16:50:34 crc kubenswrapper[5050]: I1123 16:50:34.938964 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1b78d9c7-ae76-471e-a3a5-04939c991c97-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-openstack-openstack-cell1-zp8dl\" (UID: \"1b78d9c7-ae76-471e-a3a5-04939c991c97\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-zp8dl" Nov 23 16:50:34 crc kubenswrapper[5050]: I1123 16:50:34.939016 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/1b78d9c7-ae76-471e-a3a5-04939c991c97-ceph\") pod \"neutron-metadata-openstack-openstack-cell1-zp8dl\" (UID: \"1b78d9c7-ae76-471e-a3a5-04939c991c97\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-zp8dl" Nov 23 16:50:35 crc kubenswrapper[5050]: I1123 16:50:35.042626 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1b78d9c7-ae76-471e-a3a5-04939c991c97-ssh-key\") pod \"neutron-metadata-openstack-openstack-cell1-zp8dl\" (UID: \"1b78d9c7-ae76-471e-a3a5-04939c991c97\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-zp8dl" Nov 23 16:50:35 crc kubenswrapper[5050]: I1123 16:50:35.043174 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1b78d9c7-ae76-471e-a3a5-04939c991c97-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-openstack-openstack-cell1-zp8dl\" (UID: \"1b78d9c7-ae76-471e-a3a5-04939c991c97\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-zp8dl" Nov 23 16:50:35 crc kubenswrapper[5050]: I1123 16:50:35.043293 5050 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/1b78d9c7-ae76-471e-a3a5-04939c991c97-ceph\") pod \"neutron-metadata-openstack-openstack-cell1-zp8dl\" (UID: \"1b78d9c7-ae76-471e-a3a5-04939c991c97\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-zp8dl" Nov 23 16:50:35 crc kubenswrapper[5050]: I1123 16:50:35.043512 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m8hn6\" (UniqueName: \"kubernetes.io/projected/1b78d9c7-ae76-471e-a3a5-04939c991c97-kube-api-access-m8hn6\") pod \"neutron-metadata-openstack-openstack-cell1-zp8dl\" (UID: \"1b78d9c7-ae76-471e-a3a5-04939c991c97\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-zp8dl" Nov 23 16:50:35 crc kubenswrapper[5050]: I1123 16:50:35.043657 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1b78d9c7-ae76-471e-a3a5-04939c991c97-inventory\") pod \"neutron-metadata-openstack-openstack-cell1-zp8dl\" (UID: \"1b78d9c7-ae76-471e-a3a5-04939c991c97\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-zp8dl" Nov 23 16:50:35 crc kubenswrapper[5050]: I1123 16:50:35.043752 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/1b78d9c7-ae76-471e-a3a5-04939c991c97-nova-metadata-neutron-config-0\") pod \"neutron-metadata-openstack-openstack-cell1-zp8dl\" (UID: \"1b78d9c7-ae76-471e-a3a5-04939c991c97\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-zp8dl" Nov 23 16:50:35 crc kubenswrapper[5050]: I1123 16:50:35.043859 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/1b78d9c7-ae76-471e-a3a5-04939c991c97-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-openstack-openstack-cell1-zp8dl\" (UID: \"1b78d9c7-ae76-471e-a3a5-04939c991c97\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-zp8dl" Nov 23 16:50:35 crc kubenswrapper[5050]: I1123 16:50:35.048436 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1b78d9c7-ae76-471e-a3a5-04939c991c97-ssh-key\") pod \"neutron-metadata-openstack-openstack-cell1-zp8dl\" (UID: \"1b78d9c7-ae76-471e-a3a5-04939c991c97\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-zp8dl" Nov 23 16:50:35 crc kubenswrapper[5050]: I1123 16:50:35.050392 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1b78d9c7-ae76-471e-a3a5-04939c991c97-inventory\") pod \"neutron-metadata-openstack-openstack-cell1-zp8dl\" (UID: \"1b78d9c7-ae76-471e-a3a5-04939c991c97\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-zp8dl" Nov 23 16:50:35 crc kubenswrapper[5050]: I1123 16:50:35.050695 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1b78d9c7-ae76-471e-a3a5-04939c991c97-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-openstack-openstack-cell1-zp8dl\" (UID: \"1b78d9c7-ae76-471e-a3a5-04939c991c97\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-zp8dl" Nov 23 16:50:35 crc kubenswrapper[5050]: I1123 16:50:35.054670 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/1b78d9c7-ae76-471e-a3a5-04939c991c97-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-openstack-openstack-cell1-zp8dl\" (UID: \"1b78d9c7-ae76-471e-a3a5-04939c991c97\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-zp8dl" Nov 23 16:50:35 crc kubenswrapper[5050]: I1123 16:50:35.057867 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/1b78d9c7-ae76-471e-a3a5-04939c991c97-ceph\") pod \"neutron-metadata-openstack-openstack-cell1-zp8dl\" (UID: \"1b78d9c7-ae76-471e-a3a5-04939c991c97\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-zp8dl" Nov 23 16:50:35 crc kubenswrapper[5050]: I1123 16:50:35.065743 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/1b78d9c7-ae76-471e-a3a5-04939c991c97-nova-metadata-neutron-config-0\") pod \"neutron-metadata-openstack-openstack-cell1-zp8dl\" (UID: \"1b78d9c7-ae76-471e-a3a5-04939c991c97\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-zp8dl" Nov 23 16:50:35 crc kubenswrapper[5050]: I1123 16:50:35.075924 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m8hn6\" (UniqueName: \"kubernetes.io/projected/1b78d9c7-ae76-471e-a3a5-04939c991c97-kube-api-access-m8hn6\") pod \"neutron-metadata-openstack-openstack-cell1-zp8dl\" (UID: \"1b78d9c7-ae76-471e-a3a5-04939c991c97\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-zp8dl" Nov 23 16:50:35 crc kubenswrapper[5050]: I1123 16:50:35.125698 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-openstack-openstack-cell1-zp8dl" Nov 23 16:50:35 crc kubenswrapper[5050]: I1123 16:50:35.564970 5050 scope.go:117] "RemoveContainer" containerID="d40b7f46f9e2ee457869ccc402db524dd6b3b4db87a0fafb629086ba5d450e76" Nov 23 16:50:35 crc kubenswrapper[5050]: E1123 16:50:35.565848 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 16:50:35 crc kubenswrapper[5050]: I1123 16:50:35.759523 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-metadata-openstack-openstack-cell1-zp8dl"] Nov 23 16:50:36 crc kubenswrapper[5050]: I1123 16:50:36.710229 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-openstack-openstack-cell1-zp8dl" event={"ID":"1b78d9c7-ae76-471e-a3a5-04939c991c97","Type":"ContainerStarted","Data":"aaaada40f7f35c24477ab8df6fdb47d4b31b6905c7442d809cf5c8cb016be43e"} Nov 23 16:50:36 crc kubenswrapper[5050]: I1123 16:50:36.710746 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-openstack-openstack-cell1-zp8dl" event={"ID":"1b78d9c7-ae76-471e-a3a5-04939c991c97","Type":"ContainerStarted","Data":"12fb4ddcbd1536aa1bf9d800afbcf818bf65abf1f6cbc0ead46e46438354ca8b"} Nov 23 16:50:36 crc kubenswrapper[5050]: I1123 16:50:36.743811 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-metadata-openstack-openstack-cell1-zp8dl" 
podStartSLOduration=2.244805131 podStartE2EDuration="2.743788926s" podCreationTimestamp="2025-11-23 16:50:34 +0000 UTC" firstStartedPulling="2025-11-23 16:50:35.796414497 +0000 UTC m=+7730.963410982" lastFinishedPulling="2025-11-23 16:50:36.295398282 +0000 UTC m=+7731.462394777" observedRunningTime="2025-11-23 16:50:36.734531514 +0000 UTC m=+7731.901528009" watchObservedRunningTime="2025-11-23 16:50:36.743788926 +0000 UTC m=+7731.910785421" Nov 23 16:50:37 crc kubenswrapper[5050]: I1123 16:50:37.674883 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-8xj47"] Nov 23 16:50:37 crc kubenswrapper[5050]: I1123 16:50:37.678836 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8xj47" Nov 23 16:50:37 crc kubenswrapper[5050]: I1123 16:50:37.682470 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-8xj47"] Nov 23 16:50:37 crc kubenswrapper[5050]: I1123 16:50:37.727256 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x2wp8\" (UniqueName: \"kubernetes.io/projected/7edaa5c7-3b73-4d30-9f07-2d1f347ea473-kube-api-access-x2wp8\") pod \"community-operators-8xj47\" (UID: \"7edaa5c7-3b73-4d30-9f07-2d1f347ea473\") " pod="openshift-marketplace/community-operators-8xj47" Nov 23 16:50:37 crc kubenswrapper[5050]: I1123 16:50:37.727860 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7edaa5c7-3b73-4d30-9f07-2d1f347ea473-utilities\") pod \"community-operators-8xj47\" (UID: \"7edaa5c7-3b73-4d30-9f07-2d1f347ea473\") " pod="openshift-marketplace/community-operators-8xj47" Nov 23 16:50:37 crc kubenswrapper[5050]: I1123 16:50:37.727948 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7edaa5c7-3b73-4d30-9f07-2d1f347ea473-catalog-content\") pod \"community-operators-8xj47\" (UID: \"7edaa5c7-3b73-4d30-9f07-2d1f347ea473\") " pod="openshift-marketplace/community-operators-8xj47" Nov 23 16:50:37 crc kubenswrapper[5050]: I1123 16:50:37.831083 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7edaa5c7-3b73-4d30-9f07-2d1f347ea473-utilities\") pod \"community-operators-8xj47\" (UID: \"7edaa5c7-3b73-4d30-9f07-2d1f347ea473\") " pod="openshift-marketplace/community-operators-8xj47" Nov 23 16:50:37 crc kubenswrapper[5050]: I1123 16:50:37.831192 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7edaa5c7-3b73-4d30-9f07-2d1f347ea473-catalog-content\") pod \"community-operators-8xj47\" (UID: \"7edaa5c7-3b73-4d30-9f07-2d1f347ea473\") " pod="openshift-marketplace/community-operators-8xj47" Nov 23 16:50:37 crc kubenswrapper[5050]: I1123 16:50:37.831317 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x2wp8\" (UniqueName: \"kubernetes.io/projected/7edaa5c7-3b73-4d30-9f07-2d1f347ea473-kube-api-access-x2wp8\") pod \"community-operators-8xj47\" (UID: \"7edaa5c7-3b73-4d30-9f07-2d1f347ea473\") " pod="openshift-marketplace/community-operators-8xj47" Nov 23 16:50:37 crc kubenswrapper[5050]: I1123 16:50:37.832409 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7edaa5c7-3b73-4d30-9f07-2d1f347ea473-catalog-content\") pod \"community-operators-8xj47\" (UID: \"7edaa5c7-3b73-4d30-9f07-2d1f347ea473\") " pod="openshift-marketplace/community-operators-8xj47" Nov 23 16:50:37 crc kubenswrapper[5050]: I1123 16:50:37.832475 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7edaa5c7-3b73-4d30-9f07-2d1f347ea473-utilities\") pod \"community-operators-8xj47\" (UID: \"7edaa5c7-3b73-4d30-9f07-2d1f347ea473\") " pod="openshift-marketplace/community-operators-8xj47" Nov 23 16:50:37 crc kubenswrapper[5050]: I1123 16:50:37.869549 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x2wp8\" (UniqueName: \"kubernetes.io/projected/7edaa5c7-3b73-4d30-9f07-2d1f347ea473-kube-api-access-x2wp8\") pod \"community-operators-8xj47\" (UID: \"7edaa5c7-3b73-4d30-9f07-2d1f347ea473\") " pod="openshift-marketplace/community-operators-8xj47" Nov 23 16:50:38 crc kubenswrapper[5050]: I1123 16:50:38.014687 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8xj47" Nov 23 16:50:38 crc kubenswrapper[5050]: I1123 16:50:38.593131 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-8xj47"] Nov 23 16:50:38 crc kubenswrapper[5050]: W1123 16:50:38.597781 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7edaa5c7_3b73_4d30_9f07_2d1f347ea473.slice/crio-8cd278cf32093e8c1d31e560104b631fb6b65e5d998abb6aaa496c49bce01692 WatchSource:0}: Error finding container 8cd278cf32093e8c1d31e560104b631fb6b65e5d998abb6aaa496c49bce01692: Status 404 returned error can't find the container with id 8cd278cf32093e8c1d31e560104b631fb6b65e5d998abb6aaa496c49bce01692 Nov 23 16:50:38 crc kubenswrapper[5050]: I1123 16:50:38.740533 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8xj47" event={"ID":"7edaa5c7-3b73-4d30-9f07-2d1f347ea473","Type":"ContainerStarted","Data":"8cd278cf32093e8c1d31e560104b631fb6b65e5d998abb6aaa496c49bce01692"} Nov 23 16:50:39 crc kubenswrapper[5050]: I1123 16:50:39.760107 5050 generic.go:334] "Generic (PLEG): container finished" podID="7edaa5c7-3b73-4d30-9f07-2d1f347ea473" containerID="4847c169740162ebf5a27a054f906b496d35b6281be5b051abbaa51bd131b96e" exitCode=0 Nov 23 16:50:39 crc kubenswrapper[5050]: I1123 16:50:39.760188 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8xj47" event={"ID":"7edaa5c7-3b73-4d30-9f07-2d1f347ea473","Type":"ContainerDied","Data":"4847c169740162ebf5a27a054f906b496d35b6281be5b051abbaa51bd131b96e"} Nov 23 16:50:40 crc kubenswrapper[5050]: I1123 16:50:40.777661 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8xj47" event={"ID":"7edaa5c7-3b73-4d30-9f07-2d1f347ea473","Type":"ContainerStarted","Data":"533b640b5e3a4a10e9f7ed965caed25630b5a4c8e062f0d309a1345cbd6f2283"} Nov 23 16:50:41 crc kubenswrapper[5050]: I1123 16:50:41.796916 5050 generic.go:334] "Generic (PLEG): container finished" podID="7edaa5c7-3b73-4d30-9f07-2d1f347ea473" containerID="533b640b5e3a4a10e9f7ed965caed25630b5a4c8e062f0d309a1345cbd6f2283" exitCode=0 Nov 23 16:50:41 crc kubenswrapper[5050]: I1123 16:50:41.797369 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/community-operators-8xj47" event={"ID":"7edaa5c7-3b73-4d30-9f07-2d1f347ea473","Type":"ContainerDied","Data":"533b640b5e3a4a10e9f7ed965caed25630b5a4c8e062f0d309a1345cbd6f2283"} Nov 23 16:50:42 crc kubenswrapper[5050]: I1123 16:50:42.811963 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8xj47" event={"ID":"7edaa5c7-3b73-4d30-9f07-2d1f347ea473","Type":"ContainerStarted","Data":"17ee25e05e5b9804fb5f4b5b1dcfdd51d856097eab21ec8175d3438003f4feb9"} Nov 23 16:50:42 crc kubenswrapper[5050]: I1123 16:50:42.837347 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-8xj47" podStartSLOduration=3.344609562 podStartE2EDuration="5.837319022s" podCreationTimestamp="2025-11-23 16:50:37 +0000 UTC" firstStartedPulling="2025-11-23 16:50:39.76230304 +0000 UTC m=+7734.929299525" lastFinishedPulling="2025-11-23 16:50:42.2550125 +0000 UTC m=+7737.422008985" observedRunningTime="2025-11-23 16:50:42.832384223 +0000 UTC m=+7737.999380748" watchObservedRunningTime="2025-11-23 16:50:42.837319022 +0000 UTC m=+7738.004315517" Nov 23 16:50:46 crc kubenswrapper[5050]: I1123 16:50:46.549776 5050 scope.go:117] "RemoveContainer" containerID="d40b7f46f9e2ee457869ccc402db524dd6b3b4db87a0fafb629086ba5d450e76" Nov 23 16:50:46 crc kubenswrapper[5050]: E1123 16:50:46.550926 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 16:50:48 crc kubenswrapper[5050]: I1123 16:50:48.015437 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-8xj47" Nov 23 16:50:48 crc kubenswrapper[5050]: I1123 16:50:48.016134 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-8xj47" Nov 23 16:50:48 crc kubenswrapper[5050]: I1123 16:50:48.102584 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-8xj47" Nov 23 16:50:48 crc kubenswrapper[5050]: I1123 16:50:48.994097 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-8xj47" Nov 23 16:50:49 crc kubenswrapper[5050]: I1123 16:50:49.072918 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-8xj47"] Nov 23 16:50:50 crc kubenswrapper[5050]: I1123 16:50:50.921420 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-8xj47" podUID="7edaa5c7-3b73-4d30-9f07-2d1f347ea473" containerName="registry-server" containerID="cri-o://17ee25e05e5b9804fb5f4b5b1dcfdd51d856097eab21ec8175d3438003f4feb9" gracePeriod=2 Nov 23 16:50:51 crc kubenswrapper[5050]: I1123 16:50:51.517858 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-8xj47" Nov 23 16:50:51 crc kubenswrapper[5050]: I1123 16:50:51.646833 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7edaa5c7-3b73-4d30-9f07-2d1f347ea473-utilities\") pod \"7edaa5c7-3b73-4d30-9f07-2d1f347ea473\" (UID: \"7edaa5c7-3b73-4d30-9f07-2d1f347ea473\") " Nov 23 16:50:51 crc kubenswrapper[5050]: I1123 16:50:51.646949 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7edaa5c7-3b73-4d30-9f07-2d1f347ea473-catalog-content\") pod \"7edaa5c7-3b73-4d30-9f07-2d1f347ea473\" (UID: \"7edaa5c7-3b73-4d30-9f07-2d1f347ea473\") " Nov 23 16:50:51 crc kubenswrapper[5050]: I1123 16:50:51.647487 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x2wp8\" (UniqueName: \"kubernetes.io/projected/7edaa5c7-3b73-4d30-9f07-2d1f347ea473-kube-api-access-x2wp8\") pod \"7edaa5c7-3b73-4d30-9f07-2d1f347ea473\" (UID: \"7edaa5c7-3b73-4d30-9f07-2d1f347ea473\") " Nov 23 16:50:51 crc kubenswrapper[5050]: I1123 16:50:51.648499 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7edaa5c7-3b73-4d30-9f07-2d1f347ea473-utilities" (OuterVolumeSpecName: "utilities") pod "7edaa5c7-3b73-4d30-9f07-2d1f347ea473" (UID: "7edaa5c7-3b73-4d30-9f07-2d1f347ea473"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 16:50:51 crc kubenswrapper[5050]: I1123 16:50:51.660936 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7edaa5c7-3b73-4d30-9f07-2d1f347ea473-kube-api-access-x2wp8" (OuterVolumeSpecName: "kube-api-access-x2wp8") pod "7edaa5c7-3b73-4d30-9f07-2d1f347ea473" (UID: "7edaa5c7-3b73-4d30-9f07-2d1f347ea473"). InnerVolumeSpecName "kube-api-access-x2wp8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 16:50:51 crc kubenswrapper[5050]: I1123 16:50:51.752219 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7edaa5c7-3b73-4d30-9f07-2d1f347ea473-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "7edaa5c7-3b73-4d30-9f07-2d1f347ea473" (UID: "7edaa5c7-3b73-4d30-9f07-2d1f347ea473"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 16:50:51 crc kubenswrapper[5050]: I1123 16:50:51.753316 5050 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7edaa5c7-3b73-4d30-9f07-2d1f347ea473-utilities\") on node \"crc\" DevicePath \"\"" Nov 23 16:50:51 crc kubenswrapper[5050]: I1123 16:50:51.753625 5050 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7edaa5c7-3b73-4d30-9f07-2d1f347ea473-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 23 16:50:51 crc kubenswrapper[5050]: I1123 16:50:51.753811 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x2wp8\" (UniqueName: \"kubernetes.io/projected/7edaa5c7-3b73-4d30-9f07-2d1f347ea473-kube-api-access-x2wp8\") on node \"crc\" DevicePath \"\"" Nov 23 16:50:51 crc kubenswrapper[5050]: I1123 16:50:51.936973 5050 generic.go:334] "Generic (PLEG): container finished" podID="7edaa5c7-3b73-4d30-9f07-2d1f347ea473" containerID="17ee25e05e5b9804fb5f4b5b1dcfdd51d856097eab21ec8175d3438003f4feb9" exitCode=0 Nov 23 16:50:51 crc kubenswrapper[5050]: I1123 16:50:51.937045 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8xj47" event={"ID":"7edaa5c7-3b73-4d30-9f07-2d1f347ea473","Type":"ContainerDied","Data":"17ee25e05e5b9804fb5f4b5b1dcfdd51d856097eab21ec8175d3438003f4feb9"} Nov 23 16:50:51 crc kubenswrapper[5050]: I1123 16:50:51.937089 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8xj47" event={"ID":"7edaa5c7-3b73-4d30-9f07-2d1f347ea473","Type":"ContainerDied","Data":"8cd278cf32093e8c1d31e560104b631fb6b65e5d998abb6aaa496c49bce01692"} Nov 23 16:50:51 crc kubenswrapper[5050]: I1123 16:50:51.937123 5050 scope.go:117] "RemoveContainer" containerID="17ee25e05e5b9804fb5f4b5b1dcfdd51d856097eab21ec8175d3438003f4feb9" Nov 23 16:50:51 crc kubenswrapper[5050]: I1123 16:50:51.937369 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-8xj47" Nov 23 16:50:51 crc kubenswrapper[5050]: I1123 16:50:51.973803 5050 scope.go:117] "RemoveContainer" containerID="533b640b5e3a4a10e9f7ed965caed25630b5a4c8e062f0d309a1345cbd6f2283" Nov 23 16:50:51 crc kubenswrapper[5050]: I1123 16:50:51.996891 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-8xj47"] Nov 23 16:50:52 crc kubenswrapper[5050]: I1123 16:50:52.012245 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-8xj47"] Nov 23 16:50:52 crc kubenswrapper[5050]: I1123 16:50:52.028071 5050 scope.go:117] "RemoveContainer" containerID="4847c169740162ebf5a27a054f906b496d35b6281be5b051abbaa51bd131b96e" Nov 23 16:50:52 crc kubenswrapper[5050]: I1123 16:50:52.097321 5050 scope.go:117] "RemoveContainer" containerID="17ee25e05e5b9804fb5f4b5b1dcfdd51d856097eab21ec8175d3438003f4feb9" Nov 23 16:50:52 crc kubenswrapper[5050]: E1123 16:50:52.099686 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"17ee25e05e5b9804fb5f4b5b1dcfdd51d856097eab21ec8175d3438003f4feb9\": container with ID starting with 17ee25e05e5b9804fb5f4b5b1dcfdd51d856097eab21ec8175d3438003f4feb9 not found: ID does not exist" containerID="17ee25e05e5b9804fb5f4b5b1dcfdd51d856097eab21ec8175d3438003f4feb9" Nov 23 16:50:52 crc kubenswrapper[5050]: I1123 16:50:52.099772 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"17ee25e05e5b9804fb5f4b5b1dcfdd51d856097eab21ec8175d3438003f4feb9"} err="failed to get container status \"17ee25e05e5b9804fb5f4b5b1dcfdd51d856097eab21ec8175d3438003f4feb9\": rpc error: code = NotFound desc = could not find container \"17ee25e05e5b9804fb5f4b5b1dcfdd51d856097eab21ec8175d3438003f4feb9\": container with ID starting with 17ee25e05e5b9804fb5f4b5b1dcfdd51d856097eab21ec8175d3438003f4feb9 not found: ID does not exist" Nov 23 16:50:52 crc kubenswrapper[5050]: I1123 16:50:52.099829 5050 scope.go:117] "RemoveContainer" containerID="533b640b5e3a4a10e9f7ed965caed25630b5a4c8e062f0d309a1345cbd6f2283" Nov 23 16:50:52 crc kubenswrapper[5050]: E1123 16:50:52.100396 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"533b640b5e3a4a10e9f7ed965caed25630b5a4c8e062f0d309a1345cbd6f2283\": container with ID starting with 533b640b5e3a4a10e9f7ed965caed25630b5a4c8e062f0d309a1345cbd6f2283 not found: ID does not exist" containerID="533b640b5e3a4a10e9f7ed965caed25630b5a4c8e062f0d309a1345cbd6f2283" Nov 23 16:50:52 crc kubenswrapper[5050]: I1123 16:50:52.100486 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"533b640b5e3a4a10e9f7ed965caed25630b5a4c8e062f0d309a1345cbd6f2283"} err="failed to get container status \"533b640b5e3a4a10e9f7ed965caed25630b5a4c8e062f0d309a1345cbd6f2283\": rpc error: code = NotFound desc = could not find container \"533b640b5e3a4a10e9f7ed965caed25630b5a4c8e062f0d309a1345cbd6f2283\": container with ID starting with 533b640b5e3a4a10e9f7ed965caed25630b5a4c8e062f0d309a1345cbd6f2283 not found: ID does not exist" Nov 23 16:50:52 crc kubenswrapper[5050]: I1123 16:50:52.100521 5050 scope.go:117] "RemoveContainer" containerID="4847c169740162ebf5a27a054f906b496d35b6281be5b051abbaa51bd131b96e" Nov 23 16:50:52 crc kubenswrapper[5050]: E1123 16:50:52.100903 5050 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"4847c169740162ebf5a27a054f906b496d35b6281be5b051abbaa51bd131b96e\": container with ID starting with 4847c169740162ebf5a27a054f906b496d35b6281be5b051abbaa51bd131b96e not found: ID does not exist" containerID="4847c169740162ebf5a27a054f906b496d35b6281be5b051abbaa51bd131b96e" Nov 23 16:50:52 crc kubenswrapper[5050]: I1123 16:50:52.100955 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4847c169740162ebf5a27a054f906b496d35b6281be5b051abbaa51bd131b96e"} err="failed to get container status \"4847c169740162ebf5a27a054f906b496d35b6281be5b051abbaa51bd131b96e\": rpc error: code = NotFound desc = could not find container \"4847c169740162ebf5a27a054f906b496d35b6281be5b051abbaa51bd131b96e\": container with ID starting with 4847c169740162ebf5a27a054f906b496d35b6281be5b051abbaa51bd131b96e not found: ID does not exist" Nov 23 16:50:53 crc kubenswrapper[5050]: I1123 16:50:53.576275 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7edaa5c7-3b73-4d30-9f07-2d1f347ea473" path="/var/lib/kubelet/pods/7edaa5c7-3b73-4d30-9f07-2d1f347ea473/volumes" Nov 23 16:51:01 crc kubenswrapper[5050]: I1123 16:51:01.550073 5050 scope.go:117] "RemoveContainer" containerID="d40b7f46f9e2ee457869ccc402db524dd6b3b4db87a0fafb629086ba5d450e76" Nov 23 16:51:01 crc kubenswrapper[5050]: E1123 16:51:01.554104 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 16:51:15 crc kubenswrapper[5050]: I1123 16:51:15.558194 5050 scope.go:117] "RemoveContainer" containerID="d40b7f46f9e2ee457869ccc402db524dd6b3b4db87a0fafb629086ba5d450e76" Nov 23 16:51:15 crc kubenswrapper[5050]: E1123 16:51:15.558904 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 16:51:26 crc kubenswrapper[5050]: I1123 16:51:26.549289 5050 scope.go:117] "RemoveContainer" containerID="d40b7f46f9e2ee457869ccc402db524dd6b3b4db87a0fafb629086ba5d450e76" Nov 23 16:51:26 crc kubenswrapper[5050]: E1123 16:51:26.550529 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 16:51:32 crc kubenswrapper[5050]: I1123 16:51:32.611207 5050 generic.go:334] "Generic (PLEG): container finished" podID="1b78d9c7-ae76-471e-a3a5-04939c991c97" containerID="aaaada40f7f35c24477ab8df6fdb47d4b31b6905c7442d809cf5c8cb016be43e" exitCode=0 Nov 23 16:51:32 crc kubenswrapper[5050]: I1123 16:51:32.612266 5050 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-openstack-openstack-cell1-zp8dl" event={"ID":"1b78d9c7-ae76-471e-a3a5-04939c991c97","Type":"ContainerDied","Data":"aaaada40f7f35c24477ab8df6fdb47d4b31b6905c7442d809cf5c8cb016be43e"} Nov 23 16:51:34 crc kubenswrapper[5050]: I1123 16:51:34.136403 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-openstack-openstack-cell1-zp8dl" Nov 23 16:51:34 crc kubenswrapper[5050]: I1123 16:51:34.233784 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/1b78d9c7-ae76-471e-a3a5-04939c991c97-ceph\") pod \"1b78d9c7-ae76-471e-a3a5-04939c991c97\" (UID: \"1b78d9c7-ae76-471e-a3a5-04939c991c97\") " Nov 23 16:51:34 crc kubenswrapper[5050]: I1123 16:51:34.233838 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1b78d9c7-ae76-471e-a3a5-04939c991c97-neutron-metadata-combined-ca-bundle\") pod \"1b78d9c7-ae76-471e-a3a5-04939c991c97\" (UID: \"1b78d9c7-ae76-471e-a3a5-04939c991c97\") " Nov 23 16:51:34 crc kubenswrapper[5050]: I1123 16:51:34.233979 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m8hn6\" (UniqueName: \"kubernetes.io/projected/1b78d9c7-ae76-471e-a3a5-04939c991c97-kube-api-access-m8hn6\") pod \"1b78d9c7-ae76-471e-a3a5-04939c991c97\" (UID: \"1b78d9c7-ae76-471e-a3a5-04939c991c97\") " Nov 23 16:51:34 crc kubenswrapper[5050]: I1123 16:51:34.234014 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/1b78d9c7-ae76-471e-a3a5-04939c991c97-neutron-ovn-metadata-agent-neutron-config-0\") pod \"1b78d9c7-ae76-471e-a3a5-04939c991c97\" (UID: \"1b78d9c7-ae76-471e-a3a5-04939c991c97\") " Nov 23 16:51:34 crc kubenswrapper[5050]: I1123 16:51:34.234105 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1b78d9c7-ae76-471e-a3a5-04939c991c97-inventory\") pod \"1b78d9c7-ae76-471e-a3a5-04939c991c97\" (UID: \"1b78d9c7-ae76-471e-a3a5-04939c991c97\") " Nov 23 16:51:34 crc kubenswrapper[5050]: I1123 16:51:34.234239 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1b78d9c7-ae76-471e-a3a5-04939c991c97-ssh-key\") pod \"1b78d9c7-ae76-471e-a3a5-04939c991c97\" (UID: \"1b78d9c7-ae76-471e-a3a5-04939c991c97\") " Nov 23 16:51:34 crc kubenswrapper[5050]: I1123 16:51:34.234272 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/1b78d9c7-ae76-471e-a3a5-04939c991c97-nova-metadata-neutron-config-0\") pod \"1b78d9c7-ae76-471e-a3a5-04939c991c97\" (UID: \"1b78d9c7-ae76-471e-a3a5-04939c991c97\") " Nov 23 16:51:34 crc kubenswrapper[5050]: I1123 16:51:34.242869 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1b78d9c7-ae76-471e-a3a5-04939c991c97-neutron-metadata-combined-ca-bundle" (OuterVolumeSpecName: "neutron-metadata-combined-ca-bundle") pod "1b78d9c7-ae76-471e-a3a5-04939c991c97" (UID: "1b78d9c7-ae76-471e-a3a5-04939c991c97"). InnerVolumeSpecName "neutron-metadata-combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:51:34 crc kubenswrapper[5050]: I1123 16:51:34.243082 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1b78d9c7-ae76-471e-a3a5-04939c991c97-ceph" (OuterVolumeSpecName: "ceph") pod "1b78d9c7-ae76-471e-a3a5-04939c991c97" (UID: "1b78d9c7-ae76-471e-a3a5-04939c991c97"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:51:34 crc kubenswrapper[5050]: I1123 16:51:34.249071 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1b78d9c7-ae76-471e-a3a5-04939c991c97-kube-api-access-m8hn6" (OuterVolumeSpecName: "kube-api-access-m8hn6") pod "1b78d9c7-ae76-471e-a3a5-04939c991c97" (UID: "1b78d9c7-ae76-471e-a3a5-04939c991c97"). InnerVolumeSpecName "kube-api-access-m8hn6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 16:51:34 crc kubenswrapper[5050]: I1123 16:51:34.272709 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1b78d9c7-ae76-471e-a3a5-04939c991c97-inventory" (OuterVolumeSpecName: "inventory") pod "1b78d9c7-ae76-471e-a3a5-04939c991c97" (UID: "1b78d9c7-ae76-471e-a3a5-04939c991c97"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:51:34 crc kubenswrapper[5050]: I1123 16:51:34.275300 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1b78d9c7-ae76-471e-a3a5-04939c991c97-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "1b78d9c7-ae76-471e-a3a5-04939c991c97" (UID: "1b78d9c7-ae76-471e-a3a5-04939c991c97"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:51:34 crc kubenswrapper[5050]: I1123 16:51:34.286340 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1b78d9c7-ae76-471e-a3a5-04939c991c97-neutron-ovn-metadata-agent-neutron-config-0" (OuterVolumeSpecName: "neutron-ovn-metadata-agent-neutron-config-0") pod "1b78d9c7-ae76-471e-a3a5-04939c991c97" (UID: "1b78d9c7-ae76-471e-a3a5-04939c991c97"). InnerVolumeSpecName "neutron-ovn-metadata-agent-neutron-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:51:34 crc kubenswrapper[5050]: I1123 16:51:34.299871 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1b78d9c7-ae76-471e-a3a5-04939c991c97-nova-metadata-neutron-config-0" (OuterVolumeSpecName: "nova-metadata-neutron-config-0") pod "1b78d9c7-ae76-471e-a3a5-04939c991c97" (UID: "1b78d9c7-ae76-471e-a3a5-04939c991c97"). InnerVolumeSpecName "nova-metadata-neutron-config-0". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:51:34 crc kubenswrapper[5050]: I1123 16:51:34.340094 5050 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1b78d9c7-ae76-471e-a3a5-04939c991c97-inventory\") on node \"crc\" DevicePath \"\"" Nov 23 16:51:34 crc kubenswrapper[5050]: I1123 16:51:34.340143 5050 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1b78d9c7-ae76-471e-a3a5-04939c991c97-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 23 16:51:34 crc kubenswrapper[5050]: I1123 16:51:34.340164 5050 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/1b78d9c7-ae76-471e-a3a5-04939c991c97-nova-metadata-neutron-config-0\") on node \"crc\" DevicePath \"\"" Nov 23 16:51:34 crc kubenswrapper[5050]: I1123 16:51:34.340186 5050 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/1b78d9c7-ae76-471e-a3a5-04939c991c97-ceph\") on node \"crc\" DevicePath \"\"" Nov 23 16:51:34 crc kubenswrapper[5050]: I1123 16:51:34.340204 5050 reconciler_common.go:293] "Volume detached for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1b78d9c7-ae76-471e-a3a5-04939c991c97-neutron-metadata-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 16:51:34 crc kubenswrapper[5050]: I1123 16:51:34.340227 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m8hn6\" (UniqueName: \"kubernetes.io/projected/1b78d9c7-ae76-471e-a3a5-04939c991c97-kube-api-access-m8hn6\") on node \"crc\" DevicePath \"\"" Nov 23 16:51:34 crc kubenswrapper[5050]: I1123 16:51:34.340247 5050 reconciler_common.go:293] "Volume detached for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/1b78d9c7-ae76-471e-a3a5-04939c991c97-neutron-ovn-metadata-agent-neutron-config-0\") on node \"crc\" DevicePath \"\"" Nov 23 16:51:34 crc kubenswrapper[5050]: I1123 16:51:34.637930 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-openstack-openstack-cell1-zp8dl" event={"ID":"1b78d9c7-ae76-471e-a3a5-04939c991c97","Type":"ContainerDied","Data":"12fb4ddcbd1536aa1bf9d800afbcf818bf65abf1f6cbc0ead46e46438354ca8b"} Nov 23 16:51:34 crc kubenswrapper[5050]: I1123 16:51:34.637991 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="12fb4ddcbd1536aa1bf9d800afbcf818bf65abf1f6cbc0ead46e46438354ca8b" Nov 23 16:51:34 crc kubenswrapper[5050]: I1123 16:51:34.638053 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-metadata-openstack-openstack-cell1-zp8dl"
Nov 23 16:51:34 crc kubenswrapper[5050]: I1123 16:51:34.914804 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/libvirt-openstack-openstack-cell1-r7z7f"]
Nov 23 16:51:34 crc kubenswrapper[5050]: E1123 16:51:34.915427 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7edaa5c7-3b73-4d30-9f07-2d1f347ea473" containerName="registry-server"
Nov 23 16:51:34 crc kubenswrapper[5050]: I1123 16:51:34.915469 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="7edaa5c7-3b73-4d30-9f07-2d1f347ea473" containerName="registry-server"
Nov 23 16:51:34 crc kubenswrapper[5050]: E1123 16:51:34.915508 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7edaa5c7-3b73-4d30-9f07-2d1f347ea473" containerName="extract-content"
Nov 23 16:51:34 crc kubenswrapper[5050]: I1123 16:51:34.915516 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="7edaa5c7-3b73-4d30-9f07-2d1f347ea473" containerName="extract-content"
Nov 23 16:51:34 crc kubenswrapper[5050]: E1123 16:51:34.915550 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7edaa5c7-3b73-4d30-9f07-2d1f347ea473" containerName="extract-utilities"
Nov 23 16:51:34 crc kubenswrapper[5050]: I1123 16:51:34.915562 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="7edaa5c7-3b73-4d30-9f07-2d1f347ea473" containerName="extract-utilities"
Nov 23 16:51:34 crc kubenswrapper[5050]: E1123 16:51:34.915573 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1b78d9c7-ae76-471e-a3a5-04939c991c97" containerName="neutron-metadata-openstack-openstack-cell1"
Nov 23 16:51:34 crc kubenswrapper[5050]: I1123 16:51:34.915584 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="1b78d9c7-ae76-471e-a3a5-04939c991c97" containerName="neutron-metadata-openstack-openstack-cell1"
Nov 23 16:51:34 crc kubenswrapper[5050]: I1123 16:51:34.915853 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="7edaa5c7-3b73-4d30-9f07-2d1f347ea473" containerName="registry-server"
Nov 23 16:51:34 crc kubenswrapper[5050]: I1123 16:51:34.915885 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="1b78d9c7-ae76-471e-a3a5-04939c991c97" containerName="neutron-metadata-openstack-openstack-cell1"
Nov 23 16:51:34 crc kubenswrapper[5050]: I1123 16:51:34.917265 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/libvirt-openstack-openstack-cell1-r7z7f"
Nov 23 16:51:34 crc kubenswrapper[5050]: I1123 16:51:34.920585 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Nov 23 16:51:34 crc kubenswrapper[5050]: I1123 16:51:34.920668 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret"
Nov 23 16:51:34 crc kubenswrapper[5050]: I1123 16:51:34.920822 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"libvirt-secret"
Nov 23 16:51:34 crc kubenswrapper[5050]: I1123 16:51:34.923462 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1"
Nov 23 16:51:34 crc kubenswrapper[5050]: I1123 16:51:34.924076 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-ckrpf"
Nov 23 16:51:34 crc kubenswrapper[5050]: I1123 16:51:34.940875 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/libvirt-openstack-openstack-cell1-r7z7f"]
Nov 23 16:51:34 crc kubenswrapper[5050]: I1123 16:51:34.956935 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/3653bc64-477f-4adc-935a-94f8264ee939-ceph\") pod \"libvirt-openstack-openstack-cell1-r7z7f\" (UID: \"3653bc64-477f-4adc-935a-94f8264ee939\") " pod="openstack/libvirt-openstack-openstack-cell1-r7z7f"
Nov 23 16:51:34 crc kubenswrapper[5050]: I1123 16:51:34.957015 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mj4t8\" (UniqueName: \"kubernetes.io/projected/3653bc64-477f-4adc-935a-94f8264ee939-kube-api-access-mj4t8\") pod \"libvirt-openstack-openstack-cell1-r7z7f\" (UID: \"3653bc64-477f-4adc-935a-94f8264ee939\") " pod="openstack/libvirt-openstack-openstack-cell1-r7z7f"
Nov 23 16:51:34 crc kubenswrapper[5050]: I1123 16:51:34.957262 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3653bc64-477f-4adc-935a-94f8264ee939-inventory\") pod \"libvirt-openstack-openstack-cell1-r7z7f\" (UID: \"3653bc64-477f-4adc-935a-94f8264ee939\") " pod="openstack/libvirt-openstack-openstack-cell1-r7z7f"
Nov 23 16:51:34 crc kubenswrapper[5050]: I1123 16:51:34.957645 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/3653bc64-477f-4adc-935a-94f8264ee939-libvirt-secret-0\") pod \"libvirt-openstack-openstack-cell1-r7z7f\" (UID: \"3653bc64-477f-4adc-935a-94f8264ee939\") " pod="openstack/libvirt-openstack-openstack-cell1-r7z7f"
Nov 23 16:51:34 crc kubenswrapper[5050]: I1123 16:51:34.957858 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/3653bc64-477f-4adc-935a-94f8264ee939-ssh-key\") pod \"libvirt-openstack-openstack-cell1-r7z7f\" (UID: \"3653bc64-477f-4adc-935a-94f8264ee939\") " pod="openstack/libvirt-openstack-openstack-cell1-r7z7f"
Nov 23 16:51:34 crc kubenswrapper[5050]: I1123 16:51:34.958023 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3653bc64-477f-4adc-935a-94f8264ee939-libvirt-combined-ca-bundle\") pod \"libvirt-openstack-openstack-cell1-r7z7f\" (UID: \"3653bc64-477f-4adc-935a-94f8264ee939\") " pod="openstack/libvirt-openstack-openstack-cell1-r7z7f"
Nov 23 16:51:35 crc kubenswrapper[5050]: I1123 16:51:35.062125 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/3653bc64-477f-4adc-935a-94f8264ee939-ceph\") pod \"libvirt-openstack-openstack-cell1-r7z7f\" (UID: \"3653bc64-477f-4adc-935a-94f8264ee939\") " pod="openstack/libvirt-openstack-openstack-cell1-r7z7f"
Nov 23 16:51:35 crc kubenswrapper[5050]: I1123 16:51:35.062215 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mj4t8\" (UniqueName: \"kubernetes.io/projected/3653bc64-477f-4adc-935a-94f8264ee939-kube-api-access-mj4t8\") pod \"libvirt-openstack-openstack-cell1-r7z7f\" (UID: \"3653bc64-477f-4adc-935a-94f8264ee939\") " pod="openstack/libvirt-openstack-openstack-cell1-r7z7f"
Nov 23 16:51:35 crc kubenswrapper[5050]: I1123 16:51:35.062317 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3653bc64-477f-4adc-935a-94f8264ee939-inventory\") pod \"libvirt-openstack-openstack-cell1-r7z7f\" (UID: \"3653bc64-477f-4adc-935a-94f8264ee939\") " pod="openstack/libvirt-openstack-openstack-cell1-r7z7f"
Nov 23 16:51:35 crc kubenswrapper[5050]: I1123 16:51:35.062502 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/3653bc64-477f-4adc-935a-94f8264ee939-libvirt-secret-0\") pod \"libvirt-openstack-openstack-cell1-r7z7f\" (UID: \"3653bc64-477f-4adc-935a-94f8264ee939\") " pod="openstack/libvirt-openstack-openstack-cell1-r7z7f"
Nov 23 16:51:35 crc kubenswrapper[5050]: I1123 16:51:35.062579 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/3653bc64-477f-4adc-935a-94f8264ee939-ssh-key\") pod \"libvirt-openstack-openstack-cell1-r7z7f\" (UID: \"3653bc64-477f-4adc-935a-94f8264ee939\") " pod="openstack/libvirt-openstack-openstack-cell1-r7z7f"
Nov 23 16:51:35 crc kubenswrapper[5050]: I1123 16:51:35.062647 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3653bc64-477f-4adc-935a-94f8264ee939-libvirt-combined-ca-bundle\") pod \"libvirt-openstack-openstack-cell1-r7z7f\" (UID: \"3653bc64-477f-4adc-935a-94f8264ee939\") " pod="openstack/libvirt-openstack-openstack-cell1-r7z7f"
Nov 23 16:51:35 crc kubenswrapper[5050]: I1123 16:51:35.068540 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/3653bc64-477f-4adc-935a-94f8264ee939-libvirt-secret-0\") pod \"libvirt-openstack-openstack-cell1-r7z7f\" (UID: \"3653bc64-477f-4adc-935a-94f8264ee939\") " pod="openstack/libvirt-openstack-openstack-cell1-r7z7f"
Nov 23 16:51:35 crc kubenswrapper[5050]: I1123 16:51:35.069624 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3653bc64-477f-4adc-935a-94f8264ee939-libvirt-combined-ca-bundle\") pod \"libvirt-openstack-openstack-cell1-r7z7f\" (UID: \"3653bc64-477f-4adc-935a-94f8264ee939\") " pod="openstack/libvirt-openstack-openstack-cell1-r7z7f"
Nov 23 16:51:35 crc kubenswrapper[5050]: I1123 16:51:35.070423 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3653bc64-477f-4adc-935a-94f8264ee939-inventory\") pod \"libvirt-openstack-openstack-cell1-r7z7f\" (UID: \"3653bc64-477f-4adc-935a-94f8264ee939\") " pod="openstack/libvirt-openstack-openstack-cell1-r7z7f"
Nov 23 16:51:35 crc kubenswrapper[5050]: I1123 16:51:35.072572 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/3653bc64-477f-4adc-935a-94f8264ee939-ceph\") pod \"libvirt-openstack-openstack-cell1-r7z7f\" (UID: \"3653bc64-477f-4adc-935a-94f8264ee939\") " pod="openstack/libvirt-openstack-openstack-cell1-r7z7f"
Nov 23 16:51:35 crc kubenswrapper[5050]: I1123 16:51:35.073485 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/3653bc64-477f-4adc-935a-94f8264ee939-ssh-key\") pod \"libvirt-openstack-openstack-cell1-r7z7f\" (UID: \"3653bc64-477f-4adc-935a-94f8264ee939\") " pod="openstack/libvirt-openstack-openstack-cell1-r7z7f"
Nov 23 16:51:35 crc kubenswrapper[5050]: I1123 16:51:35.092163 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mj4t8\" (UniqueName: \"kubernetes.io/projected/3653bc64-477f-4adc-935a-94f8264ee939-kube-api-access-mj4t8\") pod \"libvirt-openstack-openstack-cell1-r7z7f\" (UID: \"3653bc64-477f-4adc-935a-94f8264ee939\") " pod="openstack/libvirt-openstack-openstack-cell1-r7z7f"
Nov 23 16:51:35 crc kubenswrapper[5050]: I1123 16:51:35.243617 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/libvirt-openstack-openstack-cell1-r7z7f"
Nov 23 16:51:35 crc kubenswrapper[5050]: I1123 16:51:35.869854 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/libvirt-openstack-openstack-cell1-r7z7f"]
Nov 23 16:51:36 crc kubenswrapper[5050]: I1123 16:51:36.683204 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-openstack-openstack-cell1-r7z7f" event={"ID":"3653bc64-477f-4adc-935a-94f8264ee939","Type":"ContainerStarted","Data":"5933d08c682cdd778be8ff652e3c6a5ca2a7bd5301f31b0704a7bf158bcb8970"}
Nov 23 16:51:37 crc kubenswrapper[5050]: I1123 16:51:37.725917 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-openstack-openstack-cell1-r7z7f" event={"ID":"3653bc64-477f-4adc-935a-94f8264ee939","Type":"ContainerStarted","Data":"40d768620db1c20e5af6e9b08ecec701617b44508b011bb07f939cca729d9ce7"}
Nov 23 16:51:37 crc kubenswrapper[5050]: I1123 16:51:37.753646 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/libvirt-openstack-openstack-cell1-r7z7f" podStartSLOduration=3.226827338 podStartE2EDuration="3.753612589s" podCreationTimestamp="2025-11-23 16:51:34 +0000 UTC" firstStartedPulling="2025-11-23 16:51:35.876365747 +0000 UTC m=+7791.043362232" lastFinishedPulling="2025-11-23 16:51:36.403150988 +0000 UTC m=+7791.570147483" observedRunningTime="2025-11-23 16:51:37.752792256 +0000 UTC m=+7792.919788741" watchObservedRunningTime="2025-11-23 16:51:37.753612589 +0000 UTC m=+7792.920609114"
Nov 23 16:51:39 crc kubenswrapper[5050]: I1123 16:51:39.550487 5050 scope.go:117] "RemoveContainer" containerID="d40b7f46f9e2ee457869ccc402db524dd6b3b4db87a0fafb629086ba5d450e76"
Nov 23 16:51:39 crc kubenswrapper[5050]: E1123 16:51:39.551828 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f"
Nov 23 16:51:51 crc kubenswrapper[5050]: I1123 16:51:51.551138 5050 scope.go:117] "RemoveContainer" containerID="d40b7f46f9e2ee457869ccc402db524dd6b3b4db87a0fafb629086ba5d450e76"
Nov 23 16:51:51 crc kubenswrapper[5050]: E1123 16:51:51.552323 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f"
Nov 23 16:52:03 crc kubenswrapper[5050]: I1123 16:52:03.548865 5050 scope.go:117] "RemoveContainer" containerID="d40b7f46f9e2ee457869ccc402db524dd6b3b4db87a0fafb629086ba5d450e76"
Nov 23 16:52:03 crc kubenswrapper[5050]: E1123 16:52:03.549967 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f"
Nov 23 16:52:17 crc kubenswrapper[5050]: I1123 16:52:17.549661 5050 scope.go:117] "RemoveContainer" containerID="d40b7f46f9e2ee457869ccc402db524dd6b3b4db87a0fafb629086ba5d450e76"
Nov 23 16:52:17 crc kubenswrapper[5050]: E1123 16:52:17.551953 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f"
Nov 23 16:52:29 crc kubenswrapper[5050]: I1123 16:52:29.920270 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-l2m4q"]
Nov 23 16:52:29 crc kubenswrapper[5050]: I1123 16:52:29.927093 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-l2m4q"
Nov 23 16:52:29 crc kubenswrapper[5050]: I1123 16:52:29.954107 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-l2m4q"]
Nov 23 16:52:30 crc kubenswrapper[5050]: I1123 16:52:30.016072 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tqrnj\" (UniqueName: \"kubernetes.io/projected/475faf5c-a709-4b9a-8c36-b3dc9a1989e8-kube-api-access-tqrnj\") pod \"certified-operators-l2m4q\" (UID: \"475faf5c-a709-4b9a-8c36-b3dc9a1989e8\") " pod="openshift-marketplace/certified-operators-l2m4q"
Nov 23 16:52:30 crc kubenswrapper[5050]: I1123 16:52:30.016841 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/475faf5c-a709-4b9a-8c36-b3dc9a1989e8-catalog-content\") pod \"certified-operators-l2m4q\" (UID: \"475faf5c-a709-4b9a-8c36-b3dc9a1989e8\") " pod="openshift-marketplace/certified-operators-l2m4q"
Nov 23 16:52:30 crc kubenswrapper[5050]: I1123 16:52:30.016913 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/475faf5c-a709-4b9a-8c36-b3dc9a1989e8-utilities\") pod \"certified-operators-l2m4q\" (UID: \"475faf5c-a709-4b9a-8c36-b3dc9a1989e8\") " pod="openshift-marketplace/certified-operators-l2m4q"
Nov 23 16:52:30 crc kubenswrapper[5050]: I1123 16:52:30.119992 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/475faf5c-a709-4b9a-8c36-b3dc9a1989e8-catalog-content\") pod \"certified-operators-l2m4q\" (UID: \"475faf5c-a709-4b9a-8c36-b3dc9a1989e8\") " pod="openshift-marketplace/certified-operators-l2m4q"
Nov 23 16:52:30 crc kubenswrapper[5050]: I1123 16:52:30.120068 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/475faf5c-a709-4b9a-8c36-b3dc9a1989e8-utilities\") pod \"certified-operators-l2m4q\" (UID: \"475faf5c-a709-4b9a-8c36-b3dc9a1989e8\") " pod="openshift-marketplace/certified-operators-l2m4q"
Nov 23 16:52:30 crc kubenswrapper[5050]: I1123 16:52:30.120179 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tqrnj\" (UniqueName: \"kubernetes.io/projected/475faf5c-a709-4b9a-8c36-b3dc9a1989e8-kube-api-access-tqrnj\") pod \"certified-operators-l2m4q\" (UID: \"475faf5c-a709-4b9a-8c36-b3dc9a1989e8\") " pod="openshift-marketplace/certified-operators-l2m4q"
Nov 23 16:52:30 crc kubenswrapper[5050]: I1123 16:52:30.121632 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/475faf5c-a709-4b9a-8c36-b3dc9a1989e8-catalog-content\") pod \"certified-operators-l2m4q\" (UID: \"475faf5c-a709-4b9a-8c36-b3dc9a1989e8\") " pod="openshift-marketplace/certified-operators-l2m4q"
Nov 23 16:52:30 crc kubenswrapper[5050]: I1123 16:52:30.121946 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/475faf5c-a709-4b9a-8c36-b3dc9a1989e8-utilities\") pod \"certified-operators-l2m4q\" (UID: \"475faf5c-a709-4b9a-8c36-b3dc9a1989e8\") " pod="openshift-marketplace/certified-operators-l2m4q"
Nov 23 16:52:30 crc kubenswrapper[5050]: I1123 16:52:30.160433 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tqrnj\" (UniqueName: \"kubernetes.io/projected/475faf5c-a709-4b9a-8c36-b3dc9a1989e8-kube-api-access-tqrnj\") pod \"certified-operators-l2m4q\" (UID: \"475faf5c-a709-4b9a-8c36-b3dc9a1989e8\") " pod="openshift-marketplace/certified-operators-l2m4q"
Nov 23 16:52:30 crc kubenswrapper[5050]: I1123 16:52:30.271636 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-l2m4q"
Nov 23 16:52:30 crc kubenswrapper[5050]: I1123 16:52:30.549074 5050 scope.go:117] "RemoveContainer" containerID="d40b7f46f9e2ee457869ccc402db524dd6b3b4db87a0fafb629086ba5d450e76"
Nov 23 16:52:30 crc kubenswrapper[5050]: E1123 16:52:30.549654 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f"
Nov 23 16:52:30 crc kubenswrapper[5050]: I1123 16:52:30.826834 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-l2m4q"]
Nov 23 16:52:31 crc kubenswrapper[5050]: I1123 16:52:31.436282 5050 generic.go:334] "Generic (PLEG): container finished" podID="475faf5c-a709-4b9a-8c36-b3dc9a1989e8" containerID="b27bc6b46f337e516d53b9c81ebd4e4109befa748b1125065ad6736dee37c3f8" exitCode=0
Nov 23 16:52:31 crc kubenswrapper[5050]: I1123 16:52:31.436397 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-l2m4q" event={"ID":"475faf5c-a709-4b9a-8c36-b3dc9a1989e8","Type":"ContainerDied","Data":"b27bc6b46f337e516d53b9c81ebd4e4109befa748b1125065ad6736dee37c3f8"}
Nov 23 16:52:31 crc kubenswrapper[5050]: I1123 16:52:31.436764 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-l2m4q" event={"ID":"475faf5c-a709-4b9a-8c36-b3dc9a1989e8","Type":"ContainerStarted","Data":"8e085133a83a17cbea46db1370dd55d28c5dbd5f4f70d3002306da81e3ff817e"}
Nov 23 16:52:33 crc kubenswrapper[5050]: I1123 16:52:33.464919 5050 generic.go:334] "Generic (PLEG): container finished" podID="475faf5c-a709-4b9a-8c36-b3dc9a1989e8" containerID="7aeb3ce798a072489aeed54f7230258524affb8722c8636d6b00cb8203b7054e" exitCode=0
Nov 23 16:52:33 crc kubenswrapper[5050]: I1123 16:52:33.465027 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-l2m4q" event={"ID":"475faf5c-a709-4b9a-8c36-b3dc9a1989e8","Type":"ContainerDied","Data":"7aeb3ce798a072489aeed54f7230258524affb8722c8636d6b00cb8203b7054e"}
Nov 23 16:52:34 crc kubenswrapper[5050]: I1123 16:52:34.489430 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-l2m4q" event={"ID":"475faf5c-a709-4b9a-8c36-b3dc9a1989e8","Type":"ContainerStarted","Data":"7aa8ccd713d6e46640d3469e4b052a2d647ebc3220c5f9f70cb3566ea21b77f2"}
Nov 23 16:52:34 crc kubenswrapper[5050]: I1123 16:52:34.526076 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-l2m4q" podStartSLOduration=2.867314185 podStartE2EDuration="5.526044178s" podCreationTimestamp="2025-11-23 16:52:29 +0000 UTC" firstStartedPulling="2025-11-23 16:52:31.439207419 +0000 UTC m=+7846.606203924" lastFinishedPulling="2025-11-23 16:52:34.097937432 +0000 UTC m=+7849.264933917" observedRunningTime="2025-11-23 16:52:34.525149333 +0000 UTC m=+7849.692145868" watchObservedRunningTime="2025-11-23 16:52:34.526044178 +0000 UTC m=+7849.693040703"
Nov 23 16:52:40 crc kubenswrapper[5050]: I1123 16:52:40.272701 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-l2m4q"
Nov 23 16:52:40 crc kubenswrapper[5050]: I1123 16:52:40.273622 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-l2m4q"
Nov 23 16:52:40 crc kubenswrapper[5050]: I1123 16:52:40.355226 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-l2m4q"
Nov 23 16:52:40 crc kubenswrapper[5050]: I1123 16:52:40.623234 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-l2m4q"
Nov 23 16:52:40 crc kubenswrapper[5050]: I1123 16:52:40.687699 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-l2m4q"]
Nov 23 16:52:42 crc kubenswrapper[5050]: I1123 16:52:42.598774 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-l2m4q" podUID="475faf5c-a709-4b9a-8c36-b3dc9a1989e8" containerName="registry-server" containerID="cri-o://7aa8ccd713d6e46640d3469e4b052a2d647ebc3220c5f9f70cb3566ea21b77f2" gracePeriod=2
Nov 23 16:52:43 crc kubenswrapper[5050]: I1123 16:52:43.216775 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-l2m4q"
Nov 23 16:52:43 crc kubenswrapper[5050]: I1123 16:52:43.346290 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/475faf5c-a709-4b9a-8c36-b3dc9a1989e8-utilities\") pod \"475faf5c-a709-4b9a-8c36-b3dc9a1989e8\" (UID: \"475faf5c-a709-4b9a-8c36-b3dc9a1989e8\") "
Nov 23 16:52:43 crc kubenswrapper[5050]: I1123 16:52:43.346542 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/475faf5c-a709-4b9a-8c36-b3dc9a1989e8-catalog-content\") pod \"475faf5c-a709-4b9a-8c36-b3dc9a1989e8\" (UID: \"475faf5c-a709-4b9a-8c36-b3dc9a1989e8\") "
Nov 23 16:52:43 crc kubenswrapper[5050]: I1123 16:52:43.346633 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tqrnj\" (UniqueName: \"kubernetes.io/projected/475faf5c-a709-4b9a-8c36-b3dc9a1989e8-kube-api-access-tqrnj\") pod \"475faf5c-a709-4b9a-8c36-b3dc9a1989e8\" (UID: \"475faf5c-a709-4b9a-8c36-b3dc9a1989e8\") "
Nov 23 16:52:43 crc kubenswrapper[5050]: I1123 16:52:43.348224 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/475faf5c-a709-4b9a-8c36-b3dc9a1989e8-utilities" (OuterVolumeSpecName: "utilities") pod "475faf5c-a709-4b9a-8c36-b3dc9a1989e8" (UID: "475faf5c-a709-4b9a-8c36-b3dc9a1989e8"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 23 16:52:43 crc kubenswrapper[5050]: I1123 16:52:43.359422 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/475faf5c-a709-4b9a-8c36-b3dc9a1989e8-kube-api-access-tqrnj" (OuterVolumeSpecName: "kube-api-access-tqrnj") pod "475faf5c-a709-4b9a-8c36-b3dc9a1989e8" (UID: "475faf5c-a709-4b9a-8c36-b3dc9a1989e8"). InnerVolumeSpecName "kube-api-access-tqrnj". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 23 16:52:43 crc kubenswrapper[5050]: I1123 16:52:43.407565 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/475faf5c-a709-4b9a-8c36-b3dc9a1989e8-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "475faf5c-a709-4b9a-8c36-b3dc9a1989e8" (UID: "475faf5c-a709-4b9a-8c36-b3dc9a1989e8"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 23 16:52:43 crc kubenswrapper[5050]: I1123 16:52:43.449886 5050 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/475faf5c-a709-4b9a-8c36-b3dc9a1989e8-utilities\") on node \"crc\" DevicePath \"\""
Nov 23 16:52:43 crc kubenswrapper[5050]: I1123 16:52:43.449931 5050 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/475faf5c-a709-4b9a-8c36-b3dc9a1989e8-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 23 16:52:43 crc kubenswrapper[5050]: I1123 16:52:43.449952 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tqrnj\" (UniqueName: \"kubernetes.io/projected/475faf5c-a709-4b9a-8c36-b3dc9a1989e8-kube-api-access-tqrnj\") on node \"crc\" DevicePath \"\""
Nov 23 16:52:43 crc kubenswrapper[5050]: I1123 16:52:43.635095 5050 generic.go:334] "Generic (PLEG): container finished" podID="475faf5c-a709-4b9a-8c36-b3dc9a1989e8" containerID="7aa8ccd713d6e46640d3469e4b052a2d647ebc3220c5f9f70cb3566ea21b77f2" exitCode=0
Nov 23 16:52:43 crc kubenswrapper[5050]: I1123 16:52:43.635173 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-l2m4q" event={"ID":"475faf5c-a709-4b9a-8c36-b3dc9a1989e8","Type":"ContainerDied","Data":"7aa8ccd713d6e46640d3469e4b052a2d647ebc3220c5f9f70cb3566ea21b77f2"}
Nov 23 16:52:43 crc kubenswrapper[5050]: I1123 16:52:43.635237 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-l2m4q" event={"ID":"475faf5c-a709-4b9a-8c36-b3dc9a1989e8","Type":"ContainerDied","Data":"8e085133a83a17cbea46db1370dd55d28c5dbd5f4f70d3002306da81e3ff817e"}
Nov 23 16:52:43 crc kubenswrapper[5050]: I1123 16:52:43.635231 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-l2m4q"
Nov 23 16:52:43 crc kubenswrapper[5050]: I1123 16:52:43.635271 5050 scope.go:117] "RemoveContainer" containerID="7aa8ccd713d6e46640d3469e4b052a2d647ebc3220c5f9f70cb3566ea21b77f2"
Nov 23 16:52:43 crc kubenswrapper[5050]: I1123 16:52:43.693634 5050 scope.go:117] "RemoveContainer" containerID="7aeb3ce798a072489aeed54f7230258524affb8722c8636d6b00cb8203b7054e"
Nov 23 16:52:43 crc kubenswrapper[5050]: I1123 16:52:43.695527 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-l2m4q"]
Nov 23 16:52:43 crc kubenswrapper[5050]: I1123 16:52:43.708682 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-l2m4q"]
Nov 23 16:52:43 crc kubenswrapper[5050]: I1123 16:52:43.740026 5050 scope.go:117] "RemoveContainer" containerID="b27bc6b46f337e516d53b9c81ebd4e4109befa748b1125065ad6736dee37c3f8"
Nov 23 16:52:43 crc kubenswrapper[5050]: I1123 16:52:43.799191 5050 scope.go:117] "RemoveContainer" containerID="7aa8ccd713d6e46640d3469e4b052a2d647ebc3220c5f9f70cb3566ea21b77f2"
Nov 23 16:52:43 crc kubenswrapper[5050]: E1123 16:52:43.800253 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7aa8ccd713d6e46640d3469e4b052a2d647ebc3220c5f9f70cb3566ea21b77f2\": container with ID starting with 7aa8ccd713d6e46640d3469e4b052a2d647ebc3220c5f9f70cb3566ea21b77f2 not found: ID does not exist" containerID="7aa8ccd713d6e46640d3469e4b052a2d647ebc3220c5f9f70cb3566ea21b77f2"
Nov 23 16:52:43 crc kubenswrapper[5050]: I1123 16:52:43.800343 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7aa8ccd713d6e46640d3469e4b052a2d647ebc3220c5f9f70cb3566ea21b77f2"} err="failed to get container status \"7aa8ccd713d6e46640d3469e4b052a2d647ebc3220c5f9f70cb3566ea21b77f2\": rpc error: code = NotFound desc = could not find container \"7aa8ccd713d6e46640d3469e4b052a2d647ebc3220c5f9f70cb3566ea21b77f2\": container with ID starting with 7aa8ccd713d6e46640d3469e4b052a2d647ebc3220c5f9f70cb3566ea21b77f2 not found: ID does not exist"
Nov 23 16:52:43 crc kubenswrapper[5050]: I1123 16:52:43.800388 5050 scope.go:117] "RemoveContainer" containerID="7aeb3ce798a072489aeed54f7230258524affb8722c8636d6b00cb8203b7054e"
Nov 23 16:52:43 crc kubenswrapper[5050]: E1123 16:52:43.800855 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7aeb3ce798a072489aeed54f7230258524affb8722c8636d6b00cb8203b7054e\": container with ID starting with 7aeb3ce798a072489aeed54f7230258524affb8722c8636d6b00cb8203b7054e not found: ID does not exist" containerID="7aeb3ce798a072489aeed54f7230258524affb8722c8636d6b00cb8203b7054e"
Nov 23 16:52:43 crc kubenswrapper[5050]: I1123 16:52:43.800905 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7aeb3ce798a072489aeed54f7230258524affb8722c8636d6b00cb8203b7054e"} err="failed to get container status \"7aeb3ce798a072489aeed54f7230258524affb8722c8636d6b00cb8203b7054e\": rpc error: code = NotFound desc = could not find container \"7aeb3ce798a072489aeed54f7230258524affb8722c8636d6b00cb8203b7054e\": container with ID starting with 7aeb3ce798a072489aeed54f7230258524affb8722c8636d6b00cb8203b7054e not found: ID does not exist"
Nov 23 16:52:43 crc kubenswrapper[5050]: I1123 16:52:43.800940 5050 scope.go:117] "RemoveContainer" containerID="b27bc6b46f337e516d53b9c81ebd4e4109befa748b1125065ad6736dee37c3f8"
Nov 23 16:52:43 crc kubenswrapper[5050]: E1123 16:52:43.801399 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b27bc6b46f337e516d53b9c81ebd4e4109befa748b1125065ad6736dee37c3f8\": container with ID starting with b27bc6b46f337e516d53b9c81ebd4e4109befa748b1125065ad6736dee37c3f8 not found: ID does not exist" containerID="b27bc6b46f337e516d53b9c81ebd4e4109befa748b1125065ad6736dee37c3f8"
Nov 23 16:52:43 crc kubenswrapper[5050]: I1123 16:52:43.801489 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b27bc6b46f337e516d53b9c81ebd4e4109befa748b1125065ad6736dee37c3f8"} err="failed to get container status \"b27bc6b46f337e516d53b9c81ebd4e4109befa748b1125065ad6736dee37c3f8\": rpc error: code = NotFound desc = could not find container \"b27bc6b46f337e516d53b9c81ebd4e4109befa748b1125065ad6736dee37c3f8\": container with ID starting with b27bc6b46f337e516d53b9c81ebd4e4109befa748b1125065ad6736dee37c3f8 not found: ID does not exist"
Nov 23 16:52:45 crc kubenswrapper[5050]: I1123 16:52:45.560769 5050 scope.go:117] "RemoveContainer" containerID="d40b7f46f9e2ee457869ccc402db524dd6b3b4db87a0fafb629086ba5d450e76"
Nov 23 16:52:45 crc kubenswrapper[5050]: E1123 16:52:45.561787 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f"
Nov 23 16:52:45 crc kubenswrapper[5050]: I1123 16:52:45.573813 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="475faf5c-a709-4b9a-8c36-b3dc9a1989e8" path="/var/lib/kubelet/pods/475faf5c-a709-4b9a-8c36-b3dc9a1989e8/volumes"
Nov 23 16:52:58 crc kubenswrapper[5050]: I1123 16:52:58.549807 5050 scope.go:117] "RemoveContainer" containerID="d40b7f46f9e2ee457869ccc402db524dd6b3b4db87a0fafb629086ba5d450e76"
Nov 23 16:52:58 crc kubenswrapper[5050]: E1123 16:52:58.550835 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f"
Nov 23 16:53:12 crc kubenswrapper[5050]: I1123 16:53:12.549116 5050 scope.go:117] "RemoveContainer" containerID="d40b7f46f9e2ee457869ccc402db524dd6b3b4db87a0fafb629086ba5d450e76"
Nov 23 16:53:12 crc kubenswrapper[5050]: E1123 16:53:12.550264 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f"
Nov 23 16:53:24 crc kubenswrapper[5050]: I1123 16:53:24.549938 5050 scope.go:117] "RemoveContainer" containerID="d40b7f46f9e2ee457869ccc402db524dd6b3b4db87a0fafb629086ba5d450e76"
Nov 23 16:53:24 crc kubenswrapper[5050]: E1123 16:53:24.551888 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f"
Nov 23 16:53:36 crc kubenswrapper[5050]: I1123 16:53:36.550719 5050 scope.go:117] "RemoveContainer" containerID="d40b7f46f9e2ee457869ccc402db524dd6b3b4db87a0fafb629086ba5d450e76"
Nov 23 16:53:36 crc kubenswrapper[5050]: E1123 16:53:36.552306 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f"
Nov 23 16:53:51 crc kubenswrapper[5050]: I1123 16:53:51.549214 5050 scope.go:117] "RemoveContainer" containerID="d40b7f46f9e2ee457869ccc402db524dd6b3b4db87a0fafb629086ba5d450e76"
Nov 23 16:53:51 crc kubenswrapper[5050]: E1123 16:53:51.550308 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f"
Nov 23 16:54:03 crc kubenswrapper[5050]: I1123 16:54:03.549269 5050 scope.go:117] "RemoveContainer" containerID="d40b7f46f9e2ee457869ccc402db524dd6b3b4db87a0fafb629086ba5d450e76"
Nov 23 16:54:04 crc kubenswrapper[5050]: I1123 16:54:04.487793 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" event={"ID":"1d998909-9470-47ef-87e8-b34f0473682f","Type":"ContainerStarted","Data":"697b9ae232bf75544badaa8e6059a4b2bd0f542e31a482e7418cd61ae786a3fd"}
Nov 23 16:56:22 crc kubenswrapper[5050]: I1123 16:56:22.511441 5050 generic.go:334] "Generic (PLEG): container finished" podID="3653bc64-477f-4adc-935a-94f8264ee939" containerID="40d768620db1c20e5af6e9b08ecec701617b44508b011bb07f939cca729d9ce7" exitCode=0
Nov 23 16:56:22 crc kubenswrapper[5050]: I1123 16:56:22.511908 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-openstack-openstack-cell1-r7z7f" event={"ID":"3653bc64-477f-4adc-935a-94f8264ee939","Type":"ContainerDied","Data":"40d768620db1c20e5af6e9b08ecec701617b44508b011bb07f939cca729d9ce7"}
Nov 23 16:56:24 crc kubenswrapper[5050]: I1123 16:56:24.054313 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/libvirt-openstack-openstack-cell1-r7z7f"
Nov 23 16:56:24 crc kubenswrapper[5050]: I1123 16:56:24.116535 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/3653bc64-477f-4adc-935a-94f8264ee939-ssh-key\") pod \"3653bc64-477f-4adc-935a-94f8264ee939\" (UID: \"3653bc64-477f-4adc-935a-94f8264ee939\") "
Nov 23 16:56:24 crc kubenswrapper[5050]: I1123 16:56:24.116605 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/3653bc64-477f-4adc-935a-94f8264ee939-ceph\") pod \"3653bc64-477f-4adc-935a-94f8264ee939\" (UID: \"3653bc64-477f-4adc-935a-94f8264ee939\") "
Nov 23 16:56:24 crc kubenswrapper[5050]: I1123 16:56:24.116920 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/3653bc64-477f-4adc-935a-94f8264ee939-libvirt-secret-0\") pod \"3653bc64-477f-4adc-935a-94f8264ee939\" (UID: \"3653bc64-477f-4adc-935a-94f8264ee939\") "
Nov 23 16:56:24 crc kubenswrapper[5050]: I1123 16:56:24.117101 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3653bc64-477f-4adc-935a-94f8264ee939-inventory\") pod \"3653bc64-477f-4adc-935a-94f8264ee939\" (UID: \"3653bc64-477f-4adc-935a-94f8264ee939\") "
Nov 23 16:56:24 crc kubenswrapper[5050]: I1123 16:56:24.117194 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3653bc64-477f-4adc-935a-94f8264ee939-libvirt-combined-ca-bundle\") pod \"3653bc64-477f-4adc-935a-94f8264ee939\" (UID: \"3653bc64-477f-4adc-935a-94f8264ee939\") "
Nov 23 16:56:24 crc kubenswrapper[5050]: I1123 16:56:24.117234 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mj4t8\" (UniqueName: \"kubernetes.io/projected/3653bc64-477f-4adc-935a-94f8264ee939-kube-api-access-mj4t8\") pod \"3653bc64-477f-4adc-935a-94f8264ee939\" (UID: \"3653bc64-477f-4adc-935a-94f8264ee939\") "
Nov 23 16:56:24 crc kubenswrapper[5050]: I1123 16:56:24.124295 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3653bc64-477f-4adc-935a-94f8264ee939-ceph" (OuterVolumeSpecName: "ceph") pod "3653bc64-477f-4adc-935a-94f8264ee939" (UID: "3653bc64-477f-4adc-935a-94f8264ee939"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 23 16:56:24 crc kubenswrapper[5050]: I1123 16:56:24.124332 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3653bc64-477f-4adc-935a-94f8264ee939-libvirt-combined-ca-bundle" (OuterVolumeSpecName: "libvirt-combined-ca-bundle") pod "3653bc64-477f-4adc-935a-94f8264ee939" (UID: "3653bc64-477f-4adc-935a-94f8264ee939"). InnerVolumeSpecName "libvirt-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 23 16:56:24 crc kubenswrapper[5050]: I1123 16:56:24.124466 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3653bc64-477f-4adc-935a-94f8264ee939-kube-api-access-mj4t8" (OuterVolumeSpecName: "kube-api-access-mj4t8") pod "3653bc64-477f-4adc-935a-94f8264ee939" (UID: "3653bc64-477f-4adc-935a-94f8264ee939"). InnerVolumeSpecName "kube-api-access-mj4t8". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 23 16:56:24 crc kubenswrapper[5050]: I1123 16:56:24.153763 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3653bc64-477f-4adc-935a-94f8264ee939-inventory" (OuterVolumeSpecName: "inventory") pod "3653bc64-477f-4adc-935a-94f8264ee939" (UID: "3653bc64-477f-4adc-935a-94f8264ee939"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 23 16:56:24 crc kubenswrapper[5050]: I1123 16:56:24.157540 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3653bc64-477f-4adc-935a-94f8264ee939-libvirt-secret-0" (OuterVolumeSpecName: "libvirt-secret-0") pod "3653bc64-477f-4adc-935a-94f8264ee939" (UID: "3653bc64-477f-4adc-935a-94f8264ee939"). InnerVolumeSpecName "libvirt-secret-0". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 23 16:56:24 crc kubenswrapper[5050]: I1123 16:56:24.164641 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3653bc64-477f-4adc-935a-94f8264ee939-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "3653bc64-477f-4adc-935a-94f8264ee939" (UID: "3653bc64-477f-4adc-935a-94f8264ee939"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 23 16:56:24 crc kubenswrapper[5050]: I1123 16:56:24.221038 5050 reconciler_common.go:293] "Volume detached for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/3653bc64-477f-4adc-935a-94f8264ee939-libvirt-secret-0\") on node \"crc\" DevicePath \"\""
Nov 23 16:56:24 crc kubenswrapper[5050]: I1123 16:56:24.221088 5050 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3653bc64-477f-4adc-935a-94f8264ee939-inventory\") on node \"crc\" DevicePath \"\""
Nov 23 16:56:24 crc kubenswrapper[5050]: I1123 16:56:24.221102 5050 reconciler_common.go:293] "Volume detached for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3653bc64-477f-4adc-935a-94f8264ee939-libvirt-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 23 16:56:24 crc kubenswrapper[5050]: I1123 16:56:24.221118 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mj4t8\" (UniqueName: \"kubernetes.io/projected/3653bc64-477f-4adc-935a-94f8264ee939-kube-api-access-mj4t8\") on node \"crc\" DevicePath \"\""
Nov 23 16:56:24 crc kubenswrapper[5050]: I1123 16:56:24.221135 5050 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/3653bc64-477f-4adc-935a-94f8264ee939-ssh-key\") on node \"crc\" DevicePath \"\""
Nov 23 16:56:24 crc kubenswrapper[5050]: I1123 16:56:24.221146 5050 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/3653bc64-477f-4adc-935a-94f8264ee939-ceph\") on node \"crc\" DevicePath \"\""
Nov 23 16:56:24 crc kubenswrapper[5050]: I1123 16:56:24.538468 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-openstack-openstack-cell1-r7z7f" event={"ID":"3653bc64-477f-4adc-935a-94f8264ee939","Type":"ContainerDied","Data":"5933d08c682cdd778be8ff652e3c6a5ca2a7bd5301f31b0704a7bf158bcb8970"}
Nov 23 16:56:24 crc kubenswrapper[5050]: I1123 16:56:24.538532 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5933d08c682cdd778be8ff652e3c6a5ca2a7bd5301f31b0704a7bf158bcb8970"
Nov 23 16:56:24 crc kubenswrapper[5050]: I1123 16:56:24.538620 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/libvirt-openstack-openstack-cell1-r7z7f"
Nov 23 16:56:24 crc kubenswrapper[5050]: I1123 16:56:24.685604 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-openstack-openstack-cell1-28qz2"]
Nov 23 16:56:24 crc kubenswrapper[5050]: E1123 16:56:24.686306 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="475faf5c-a709-4b9a-8c36-b3dc9a1989e8" containerName="extract-utilities"
Nov 23 16:56:24 crc kubenswrapper[5050]: I1123 16:56:24.686355 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="475faf5c-a709-4b9a-8c36-b3dc9a1989e8" containerName="extract-utilities"
Nov 23 16:56:24 crc kubenswrapper[5050]: E1123 16:56:24.686374 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="475faf5c-a709-4b9a-8c36-b3dc9a1989e8" containerName="registry-server"
Nov 23 16:56:24 crc kubenswrapper[5050]: I1123 16:56:24.686383 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="475faf5c-a709-4b9a-8c36-b3dc9a1989e8" containerName="registry-server"
Nov 23 16:56:24 crc kubenswrapper[5050]: E1123 16:56:24.686406 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3653bc64-477f-4adc-935a-94f8264ee939" containerName="libvirt-openstack-openstack-cell1"
Nov 23 16:56:24 crc kubenswrapper[5050]: I1123 16:56:24.686420 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="3653bc64-477f-4adc-935a-94f8264ee939" containerName="libvirt-openstack-openstack-cell1"
Nov 23 16:56:24 crc kubenswrapper[5050]: E1123 16:56:24.686466 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="475faf5c-a709-4b9a-8c36-b3dc9a1989e8" containerName="extract-content"
Nov 23 16:56:24 crc kubenswrapper[5050]: I1123 16:56:24.686476 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="475faf5c-a709-4b9a-8c36-b3dc9a1989e8" containerName="extract-content"
Nov 23 16:56:24 crc kubenswrapper[5050]: I1123 16:56:24.686829 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="475faf5c-a709-4b9a-8c36-b3dc9a1989e8" containerName="registry-server"
Nov 23 16:56:24 crc kubenswrapper[5050]: I1123 16:56:24.686872 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="3653bc64-477f-4adc-935a-94f8264ee939" containerName="libvirt-openstack-openstack-cell1"
Nov 23 16:56:24 crc kubenswrapper[5050]: I1123 16:56:24.688246 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-openstack-openstack-cell1-28qz2"
Nov 23 16:56:24 crc kubenswrapper[5050]: I1123 16:56:24.691263 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-compute-config"
Nov 23 16:56:24 crc kubenswrapper[5050]: I1123 16:56:24.692343 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-migration-ssh-key"
Nov 23 16:56:24 crc kubenswrapper[5050]: I1123 16:56:24.693176 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Nov 23 16:56:24 crc kubenswrapper[5050]: I1123 16:56:24.693391 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"nova-cells-global-config"
Nov 23 16:56:24 crc kubenswrapper[5050]: I1123 16:56:24.694321 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1"
Nov 23 16:56:24 crc kubenswrapper[5050]: I1123 16:56:24.694473 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-ckrpf"
Nov 23 16:56:24 crc kubenswrapper[5050]: I1123 16:56:24.696714 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret"
Nov 23 16:56:24 crc kubenswrapper[5050]: I1123 16:56:24.718080 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-openstack-openstack-cell1-28qz2"]
Nov 23 16:56:24 crc kubenswrapper[5050]: I1123 16:56:24.733605 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c83fc0ae-3d5e-4abe-9654-df9afd83c9a0-ssh-key\") pod \"nova-cell1-openstack-openstack-cell1-28qz2\" (UID: \"c83fc0ae-3d5e-4abe-9654-df9afd83c9a0\") " pod="openstack/nova-cell1-openstack-openstack-cell1-28qz2"
Nov 23 16:56:24 crc kubenswrapper[5050]: I1123 16:56:24.733961 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c83fc0ae-3d5e-4abe-9654-df9afd83c9a0-inventory\") pod \"nova-cell1-openstack-openstack-cell1-28qz2\" (UID: \"c83fc0ae-3d5e-4abe-9654-df9afd83c9a0\") " pod="openstack/nova-cell1-openstack-openstack-cell1-28qz2"
Nov 23 16:56:24 crc kubenswrapper[5050]: I1123 16:56:24.734170 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/c83fc0ae-3d5e-4abe-9654-df9afd83c9a0-ceph\") pod \"nova-cell1-openstack-openstack-cell1-28qz2\" (UID: \"c83fc0ae-3d5e-4abe-9654-df9afd83c9a0\") " pod="openstack/nova-cell1-openstack-openstack-cell1-28qz2"
Nov 23 16:56:24 crc kubenswrapper[5050]: I1123 16:56:24.734315 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x4clf\" (UniqueName: \"kubernetes.io/projected/c83fc0ae-3d5e-4abe-9654-df9afd83c9a0-kube-api-access-x4clf\") pod \"nova-cell1-openstack-openstack-cell1-28qz2\" (UID: \"c83fc0ae-3d5e-4abe-9654-df9afd83c9a0\") " pod="openstack/nova-cell1-openstack-openstack-cell1-28qz2"
Nov 23 16:56:24 crc kubenswrapper[5050]: I1123 16:56:24.734576 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cells-global-config-0\" (UniqueName: \"kubernetes.io/configmap/c83fc0ae-3d5e-4abe-9654-df9afd83c9a0-nova-cells-global-config-0\") pod \"nova-cell1-openstack-openstack-cell1-28qz2\" (UID: \"c83fc0ae-3d5e-4abe-9654-df9afd83c9a0\") " pod="openstack/nova-cell1-openstack-openstack-cell1-28qz2"
Nov 23 16:56:24 crc kubenswrapper[5050]: I1123 16:56:24.734840 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/c83fc0ae-3d5e-4abe-9654-df9afd83c9a0-nova-cell1-compute-config-1\") pod \"nova-cell1-openstack-openstack-cell1-28qz2\" (UID: \"c83fc0ae-3d5e-4abe-9654-df9afd83c9a0\") " pod="openstack/nova-cell1-openstack-openstack-cell1-28qz2"
Nov 23 16:56:24 crc kubenswrapper[5050]: I1123 16:56:24.735037 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/c83fc0ae-3d5e-4abe-9654-df9afd83c9a0-nova-migration-ssh-key-0\") pod \"nova-cell1-openstack-openstack-cell1-28qz2\" (UID: \"c83fc0ae-3d5e-4abe-9654-df9afd83c9a0\") " pod="openstack/nova-cell1-openstack-openstack-cell1-28qz2"
Nov 23 16:56:24 crc kubenswrapper[5050]: I1123 16:56:24.735165 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cells-global-config-1\" (UniqueName: \"kubernetes.io/configmap/c83fc0ae-3d5e-4abe-9654-df9afd83c9a0-nova-cells-global-config-1\") pod \"nova-cell1-openstack-openstack-cell1-28qz2\" (UID: \"c83fc0ae-3d5e-4abe-9654-df9afd83c9a0\") " pod="openstack/nova-cell1-openstack-openstack-cell1-28qz2"
Nov 23 16:56:24 crc kubenswrapper[5050]: I1123 16:56:24.735223 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/c83fc0ae-3d5e-4abe-9654-df9afd83c9a0-nova-migration-ssh-key-1\") pod \"nova-cell1-openstack-openstack-cell1-28qz2\" (UID: \"c83fc0ae-3d5e-4abe-9654-df9afd83c9a0\") " pod="openstack/nova-cell1-openstack-openstack-cell1-28qz2"
Nov 23 16:56:24 crc kubenswrapper[5050]: I1123 16:56:24.735396 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/c83fc0ae-3d5e-4abe-9654-df9afd83c9a0-nova-cell1-compute-config-0\") pod \"nova-cell1-openstack-openstack-cell1-28qz2\" (UID: \"c83fc0ae-3d5e-4abe-9654-df9afd83c9a0\") " pod="openstack/nova-cell1-openstack-openstack-cell1-28qz2"
Nov 23 16:56:24 crc kubenswrapper[5050]: I1123 16:56:24.735549 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c83fc0ae-3d5e-4abe-9654-df9afd83c9a0-nova-cell1-combined-ca-bundle\") pod \"nova-cell1-openstack-openstack-cell1-28qz2\" (UID: \"c83fc0ae-3d5e-4abe-9654-df9afd83c9a0\") " pod="openstack/nova-cell1-openstack-openstack-cell1-28qz2"
Nov 23 16:56:24 crc kubenswrapper[5050]: I1123 16:56:24.839099 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/c83fc0ae-3d5e-4abe-9654-df9afd83c9a0-nova-cell1-compute-config-1\") pod \"nova-cell1-openstack-openstack-cell1-28qz2\" (UID: \"c83fc0ae-3d5e-4abe-9654-df9afd83c9a0\") " pod="openstack/nova-cell1-openstack-openstack-cell1-28qz2"
Nov 23 16:56:24 crc kubenswrapper[5050]: I1123 16:56:24.839177 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/c83fc0ae-3d5e-4abe-9654-df9afd83c9a0-nova-migration-ssh-key-0\") pod \"nova-cell1-openstack-openstack-cell1-28qz2\" (UID: \"c83fc0ae-3d5e-4abe-9654-df9afd83c9a0\") " pod="openstack/nova-cell1-openstack-openstack-cell1-28qz2"
Nov 23 16:56:24 crc kubenswrapper[5050]: I1123 16:56:24.839215 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cells-global-config-1\" (UniqueName: \"kubernetes.io/configmap/c83fc0ae-3d5e-4abe-9654-df9afd83c9a0-nova-cells-global-config-1\") pod \"nova-cell1-openstack-openstack-cell1-28qz2\" (UID: \"c83fc0ae-3d5e-4abe-9654-df9afd83c9a0\") " pod="openstack/nova-cell1-openstack-openstack-cell1-28qz2"
Nov 23 16:56:24 crc kubenswrapper[5050]: I1123 16:56:24.839242 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/c83fc0ae-3d5e-4abe-9654-df9afd83c9a0-nova-migration-ssh-key-1\") pod \"nova-cell1-openstack-openstack-cell1-28qz2\" (UID: \"c83fc0ae-3d5e-4abe-9654-df9afd83c9a0\") " pod="openstack/nova-cell1-openstack-openstack-cell1-28qz2"
Nov 23 16:56:24 crc kubenswrapper[5050]: I1123 16:56:24.839287 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/c83fc0ae-3d5e-4abe-9654-df9afd83c9a0-nova-cell1-compute-config-0\") pod \"nova-cell1-openstack-openstack-cell1-28qz2\" (UID: \"c83fc0ae-3d5e-4abe-9654-df9afd83c9a0\") " pod="openstack/nova-cell1-openstack-openstack-cell1-28qz2"
Nov 23 16:56:24 crc kubenswrapper[5050]: I1123 16:56:24.839319 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c83fc0ae-3d5e-4abe-9654-df9afd83c9a0-nova-cell1-combined-ca-bundle\") pod \"nova-cell1-openstack-openstack-cell1-28qz2\" (UID: \"c83fc0ae-3d5e-4abe-9654-df9afd83c9a0\") " pod="openstack/nova-cell1-openstack-openstack-cell1-28qz2"
Nov 23 16:56:24 crc kubenswrapper[5050]: I1123 16:56:24.839379 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c83fc0ae-3d5e-4abe-9654-df9afd83c9a0-ssh-key\") pod \"nova-cell1-openstack-openstack-cell1-28qz2\" (UID: \"c83fc0ae-3d5e-4abe-9654-df9afd83c9a0\") " pod="openstack/nova-cell1-openstack-openstack-cell1-28qz2"
Nov 23 16:56:24 crc kubenswrapper[5050]: I1123 16:56:24.839434 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c83fc0ae-3d5e-4abe-9654-df9afd83c9a0-inventory\") pod \"nova-cell1-openstack-openstack-cell1-28qz2\" (UID: \"c83fc0ae-3d5e-4abe-9654-df9afd83c9a0\") " pod="openstack/nova-cell1-openstack-openstack-cell1-28qz2"
Nov 23 16:56:24 crc kubenswrapper[5050]: I1123 16:56:24.839499 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/c83fc0ae-3d5e-4abe-9654-df9afd83c9a0-ceph\") pod \"nova-cell1-openstack-openstack-cell1-28qz2\" (UID: \"c83fc0ae-3d5e-4abe-9654-df9afd83c9a0\") " pod="openstack/nova-cell1-openstack-openstack-cell1-28qz2"
Nov 23 16:56:24 crc kubenswrapper[5050]: I1123 16:56:24.839533 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x4clf\" (UniqueName: \"kubernetes.io/projected/c83fc0ae-3d5e-4abe-9654-df9afd83c9a0-kube-api-access-x4clf\") pod \"nova-cell1-openstack-openstack-cell1-28qz2\" (UID: \"c83fc0ae-3d5e-4abe-9654-df9afd83c9a0\") " pod="openstack/nova-cell1-openstack-openstack-cell1-28qz2"
Nov 23 16:56:24 crc kubenswrapper[5050]: I1123 16:56:24.839578 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cells-global-config-0\" (UniqueName: \"kubernetes.io/configmap/c83fc0ae-3d5e-4abe-9654-df9afd83c9a0-nova-cells-global-config-0\") pod \"nova-cell1-openstack-openstack-cell1-28qz2\" (UID: \"c83fc0ae-3d5e-4abe-9654-df9afd83c9a0\") " pod="openstack/nova-cell1-openstack-openstack-cell1-28qz2"
Nov 23 16:56:24 crc kubenswrapper[5050]: I1123 16:56:24.840763 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cells-global-config-0\" (UniqueName: \"kubernetes.io/configmap/c83fc0ae-3d5e-4abe-9654-df9afd83c9a0-nova-cells-global-config-0\") pod \"nova-cell1-openstack-openstack-cell1-28qz2\" (UID: \"c83fc0ae-3d5e-4abe-9654-df9afd83c9a0\") " pod="openstack/nova-cell1-openstack-openstack-cell1-28qz2"
Nov 23 16:56:24 crc kubenswrapper[5050]: I1123 16:56:24.842107 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cells-global-config-1\" (UniqueName: \"kubernetes.io/configmap/c83fc0ae-3d5e-4abe-9654-df9afd83c9a0-nova-cells-global-config-1\") pod \"nova-cell1-openstack-openstack-cell1-28qz2\" (UID: \"c83fc0ae-3d5e-4abe-9654-df9afd83c9a0\") " pod="openstack/nova-cell1-openstack-openstack-cell1-28qz2"
Nov 23 16:56:24 crc kubenswrapper[5050]: I1123 16:56:24.847126 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/c83fc0ae-3d5e-4abe-9654-df9afd83c9a0-ceph\") pod \"nova-cell1-openstack-openstack-cell1-28qz2\" (UID: \"c83fc0ae-3d5e-4abe-9654-df9afd83c9a0\") " pod="openstack/nova-cell1-openstack-openstack-cell1-28qz2"
Nov 23 16:56:24 crc kubenswrapper[5050]: I1123 16:56:24.849035 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/c83fc0ae-3d5e-4abe-9654-df9afd83c9a0-nova-cell1-compute-config-1\") pod \"nova-cell1-openstack-openstack-cell1-28qz2\" (UID: \"c83fc0ae-3d5e-4abe-9654-df9afd83c9a0\") " pod="openstack/nova-cell1-openstack-openstack-cell1-28qz2"
Nov 23 16:56:24 crc kubenswrapper[5050]: I1123 16:56:24.849249 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c83fc0ae-3d5e-4abe-9654-df9afd83c9a0-ssh-key\") pod \"nova-cell1-openstack-openstack-cell1-28qz2\" (UID: \"c83fc0ae-3d5e-4abe-9654-df9afd83c9a0\") " pod="openstack/nova-cell1-openstack-openstack-cell1-28qz2"
Nov 23 16:56:24 crc kubenswrapper[5050]: I1123 16:56:24.849629 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/c83fc0ae-3d5e-4abe-9654-df9afd83c9a0-nova-migration-ssh-key-1\") pod \"nova-cell1-openstack-openstack-cell1-28qz2\" (UID: \"c83fc0ae-3d5e-4abe-9654-df9afd83c9a0\") " pod="openstack/nova-cell1-openstack-openstack-cell1-28qz2"
Nov 23 16:56:24 crc kubenswrapper[5050]: I1123 16:56:24.849753 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/c83fc0ae-3d5e-4abe-9654-df9afd83c9a0-nova-migration-ssh-key-0\") pod \"nova-cell1-openstack-openstack-cell1-28qz2\" (UID: \"c83fc0ae-3d5e-4abe-9654-df9afd83c9a0\") " pod="openstack/nova-cell1-openstack-openstack-cell1-28qz2"
Nov 23 16:56:24 crc kubenswrapper[5050]: I1123 16:56:24.850851 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c83fc0ae-3d5e-4abe-9654-df9afd83c9a0-inventory\") pod \"nova-cell1-openstack-openstack-cell1-28qz2\" (UID: \"c83fc0ae-3d5e-4abe-9654-df9afd83c9a0\") " pod="openstack/nova-cell1-openstack-openstack-cell1-28qz2"
Nov 23 16:56:24 crc kubenswrapper[5050]: I1123 16:56:24.855304 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c83fc0ae-3d5e-4abe-9654-df9afd83c9a0-nova-cell1-combined-ca-bundle\") pod \"nova-cell1-openstack-openstack-cell1-28qz2\" (UID: \"c83fc0ae-3d5e-4abe-9654-df9afd83c9a0\") " pod="openstack/nova-cell1-openstack-openstack-cell1-28qz2"
Nov 23 16:56:24 crc kubenswrapper[5050]: I1123 16:56:24.855833 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/c83fc0ae-3d5e-4abe-9654-df9afd83c9a0-nova-cell1-compute-config-0\") pod \"nova-cell1-openstack-openstack-cell1-28qz2\" (UID: \"c83fc0ae-3d5e-4abe-9654-df9afd83c9a0\") " pod="openstack/nova-cell1-openstack-openstack-cell1-28qz2"
Nov 23 16:56:24 crc kubenswrapper[5050]: I1123 16:56:24.860936 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x4clf\" (UniqueName: \"kubernetes.io/projected/c83fc0ae-3d5e-4abe-9654-df9afd83c9a0-kube-api-access-x4clf\") pod \"nova-cell1-openstack-openstack-cell1-28qz2\" (UID: \"c83fc0ae-3d5e-4abe-9654-df9afd83c9a0\") " pod="openstack/nova-cell1-openstack-openstack-cell1-28qz2"
Nov 23 16:56:25 crc kubenswrapper[5050]: I1123 16:56:25.012337 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-openstack-openstack-cell1-28qz2"
Nov 23 16:56:25 crc kubenswrapper[5050]: I1123 16:56:25.642679 5050 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Nov 23 16:56:25 crc kubenswrapper[5050]: I1123 16:56:25.645965 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-openstack-openstack-cell1-28qz2"]
Nov 23 16:56:26 crc kubenswrapper[5050]: I1123 16:56:26.576657 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-openstack-openstack-cell1-28qz2" event={"ID":"c83fc0ae-3d5e-4abe-9654-df9afd83c9a0","Type":"ContainerStarted","Data":"00a185857804964f81f79ab7f4323ab9e11520c4e6d3d819ef2c819d7659bd06"}
Nov 23 16:56:26 crc kubenswrapper[5050]: I1123 16:56:26.577060 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-openstack-openstack-cell1-28qz2" event={"ID":"c83fc0ae-3d5e-4abe-9654-df9afd83c9a0","Type":"ContainerStarted","Data":"f4be6308efa0060d0660c918fe44bbe20e6b1a56d7ba35f8391ad6109d23c84a"}
Nov 23 16:56:26 crc kubenswrapper[5050]: I1123 16:56:26.613546 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-openstack-openstack-cell1-28qz2" podStartSLOduration=2.060519426 podStartE2EDuration="2.613524478s" podCreationTimestamp="2025-11-23 16:56:24 +0000 UTC" firstStartedPulling="2025-11-23 16:56:25.642322599 +0000 UTC m=+8080.809319084" lastFinishedPulling="2025-11-23 16:56:26.195327661 +0000 UTC m=+8081.362324136" observedRunningTime="2025-11-23 16:56:26.605238404 +0000 UTC m=+8081.772234929" watchObservedRunningTime="2025-11-23 16:56:26.613524478 +0000 UTC m=+8081.780520963"
Nov 23 16:56:29 crc kubenswrapper[5050]: I1123 16:56:29.225144 5050 patch_prober.go:28] interesting pod/machine-config-daemon-hlrlq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 23 16:56:29 crc kubenswrapper[5050]: I1123 16:56:29.225916 5050 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 23 16:56:59 crc kubenswrapper[5050]: I1123 16:56:59.224214 5050 patch_prober.go:28] interesting pod/machine-config-daemon-hlrlq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 23 16:56:59 crc kubenswrapper[5050]: I1123 16:56:59.225197 5050 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 23 16:57:29 crc kubenswrapper[5050]: I1123 16:57:29.224349 5050 patch_prober.go:28] interesting pod/machine-config-daemon-hlrlq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 23 16:57:29 crc kubenswrapper[5050]: I1123 16:57:29.224989 5050 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 23 16:57:29 crc kubenswrapper[5050]: I1123 16:57:29.225043 5050 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq"
Nov 23 16:57:29 crc kubenswrapper[5050]: I1123 16:57:29.226240 5050 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"697b9ae232bf75544badaa8e6059a4b2bd0f542e31a482e7418cd61ae786a3fd"} pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 23 16:57:29 crc kubenswrapper[5050]: I1123 16:57:29.226302 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" containerID="cri-o://697b9ae232bf75544badaa8e6059a4b2bd0f542e31a482e7418cd61ae786a3fd" gracePeriod=600
Nov 23 16:57:29 crc kubenswrapper[5050]: I1123 16:57:29.385976 5050 generic.go:334] "Generic (PLEG): container finished" podID="1d998909-9470-47ef-87e8-b34f0473682f" containerID="697b9ae232bf75544badaa8e6059a4b2bd0f542e31a482e7418cd61ae786a3fd" exitCode=0
Nov 23 16:57:29 crc kubenswrapper[5050]: I1123 16:57:29.386052 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" event={"ID":"1d998909-9470-47ef-87e8-b34f0473682f","Type":"ContainerDied","Data":"697b9ae232bf75544badaa8e6059a4b2bd0f542e31a482e7418cd61ae786a3fd"}
Nov 23 16:57:29 crc kubenswrapper[5050]: I1123 16:57:29.386111 5050 scope.go:117] "RemoveContainer" containerID="d40b7f46f9e2ee457869ccc402db524dd6b3b4db87a0fafb629086ba5d450e76"
Nov 23 16:57:30 crc kubenswrapper[5050]: I1123 16:57:30.400682 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" event={"ID":"1d998909-9470-47ef-87e8-b34f0473682f","Type":"ContainerStarted","Data":"d4e53edfc30e031f9873726e3c9f60ebf48715860feb40686c4c363c778e0689"}
Nov 23 16:58:15 crc kubenswrapper[5050]: I1123 16:58:15.534799 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-pwnzp"]
Nov 23 16:58:15 crc kubenswrapper[5050]: I1123 16:58:15.551298 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-pwnzp"
Nov 23 16:58:15 crc kubenswrapper[5050]: I1123 16:58:15.614806 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-pwnzp"]
Nov 23 16:58:15 crc kubenswrapper[5050]: I1123 16:58:15.655176 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4a2c5f35-665c-4709-a7fa-03ed8119abc1-utilities\") pod \"redhat-marketplace-pwnzp\" (UID: \"4a2c5f35-665c-4709-a7fa-03ed8119abc1\") " pod="openshift-marketplace/redhat-marketplace-pwnzp"
Nov 23 16:58:15 crc kubenswrapper[5050]: I1123 16:58:15.655375 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bx8h4\" (UniqueName: \"kubernetes.io/projected/4a2c5f35-665c-4709-a7fa-03ed8119abc1-kube-api-access-bx8h4\") pod \"redhat-marketplace-pwnzp\" (UID: \"4a2c5f35-665c-4709-a7fa-03ed8119abc1\") " pod="openshift-marketplace/redhat-marketplace-pwnzp"
Nov 23 16:58:15 crc kubenswrapper[5050]: I1123 16:58:15.655468 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4a2c5f35-665c-4709-a7fa-03ed8119abc1-catalog-content\") pod \"redhat-marketplace-pwnzp\" (UID: \"4a2c5f35-665c-4709-a7fa-03ed8119abc1\") " pod="openshift-marketplace/redhat-marketplace-pwnzp"
Nov 23 16:58:15 crc kubenswrapper[5050]: I1123 16:58:15.759087 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4a2c5f35-665c-4709-a7fa-03ed8119abc1-utilities\") pod \"redhat-marketplace-pwnzp\" (UID: \"4a2c5f35-665c-4709-a7fa-03ed8119abc1\") " pod="openshift-marketplace/redhat-marketplace-pwnzp"
Nov 23 16:58:15 crc kubenswrapper[5050]: I1123 16:58:15.759228 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bx8h4\" (UniqueName: \"kubernetes.io/projected/4a2c5f35-665c-4709-a7fa-03ed8119abc1-kube-api-access-bx8h4\") pod \"redhat-marketplace-pwnzp\" (UID: \"4a2c5f35-665c-4709-a7fa-03ed8119abc1\") " pod="openshift-marketplace/redhat-marketplace-pwnzp"
Nov 23 16:58:15 crc kubenswrapper[5050]: I1123 16:58:15.759288 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName:
\"kubernetes.io/empty-dir/4a2c5f35-665c-4709-a7fa-03ed8119abc1-catalog-content\") pod \"redhat-marketplace-pwnzp\" (UID: \"4a2c5f35-665c-4709-a7fa-03ed8119abc1\") " pod="openshift-marketplace/redhat-marketplace-pwnzp" Nov 23 16:58:15 crc kubenswrapper[5050]: I1123 16:58:15.759905 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4a2c5f35-665c-4709-a7fa-03ed8119abc1-catalog-content\") pod \"redhat-marketplace-pwnzp\" (UID: \"4a2c5f35-665c-4709-a7fa-03ed8119abc1\") " pod="openshift-marketplace/redhat-marketplace-pwnzp" Nov 23 16:58:15 crc kubenswrapper[5050]: I1123 16:58:15.762193 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4a2c5f35-665c-4709-a7fa-03ed8119abc1-utilities\") pod \"redhat-marketplace-pwnzp\" (UID: \"4a2c5f35-665c-4709-a7fa-03ed8119abc1\") " pod="openshift-marketplace/redhat-marketplace-pwnzp" Nov 23 16:58:15 crc kubenswrapper[5050]: I1123 16:58:15.805091 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bx8h4\" (UniqueName: \"kubernetes.io/projected/4a2c5f35-665c-4709-a7fa-03ed8119abc1-kube-api-access-bx8h4\") pod \"redhat-marketplace-pwnzp\" (UID: \"4a2c5f35-665c-4709-a7fa-03ed8119abc1\") " pod="openshift-marketplace/redhat-marketplace-pwnzp" Nov 23 16:58:15 crc kubenswrapper[5050]: I1123 16:58:15.950073 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-pwnzp" Nov 23 16:58:16 crc kubenswrapper[5050]: I1123 16:58:16.496900 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-pwnzp"] Nov 23 16:58:17 crc kubenswrapper[5050]: I1123 16:58:17.037560 5050 generic.go:334] "Generic (PLEG): container finished" podID="4a2c5f35-665c-4709-a7fa-03ed8119abc1" containerID="05e900748dee4d8fbf261716b1553b53e9efc9bc8a1934a955187b71b8796f33" exitCode=0 Nov 23 16:58:17 crc kubenswrapper[5050]: I1123 16:58:17.038039 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pwnzp" event={"ID":"4a2c5f35-665c-4709-a7fa-03ed8119abc1","Type":"ContainerDied","Data":"05e900748dee4d8fbf261716b1553b53e9efc9bc8a1934a955187b71b8796f33"} Nov 23 16:58:17 crc kubenswrapper[5050]: I1123 16:58:17.038075 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pwnzp" event={"ID":"4a2c5f35-665c-4709-a7fa-03ed8119abc1","Type":"ContainerStarted","Data":"5708ca38d7ddebc77a6b39207840279a0a07ea9b317a8cb26511801c3f40aa53"} Nov 23 16:58:19 crc kubenswrapper[5050]: I1123 16:58:19.071883 5050 generic.go:334] "Generic (PLEG): container finished" podID="4a2c5f35-665c-4709-a7fa-03ed8119abc1" containerID="36c800f68248c75f5cecb85de3aedadecbc48f2b4b2b9b75652d62942558ee39" exitCode=0 Nov 23 16:58:19 crc kubenswrapper[5050]: I1123 16:58:19.072064 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pwnzp" event={"ID":"4a2c5f35-665c-4709-a7fa-03ed8119abc1","Type":"ContainerDied","Data":"36c800f68248c75f5cecb85de3aedadecbc48f2b4b2b9b75652d62942558ee39"} Nov 23 16:58:20 crc kubenswrapper[5050]: I1123 16:58:20.089497 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pwnzp" event={"ID":"4a2c5f35-665c-4709-a7fa-03ed8119abc1","Type":"ContainerStarted","Data":"712a5572aa151c42c74afd186aeb2e0a5bc5b8de687162db3f0139d2464b4174"} Nov 23 
16:58:20 crc kubenswrapper[5050]: I1123 16:58:20.112437 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-pwnzp" podStartSLOduration=2.642215721 podStartE2EDuration="5.112415682s" podCreationTimestamp="2025-11-23 16:58:15 +0000 UTC" firstStartedPulling="2025-11-23 16:58:17.04039372 +0000 UTC m=+8192.207390225" lastFinishedPulling="2025-11-23 16:58:19.510593691 +0000 UTC m=+8194.677590186" observedRunningTime="2025-11-23 16:58:20.108264065 +0000 UTC m=+8195.275260550" watchObservedRunningTime="2025-11-23 16:58:20.112415682 +0000 UTC m=+8195.279412157" Nov 23 16:58:25 crc kubenswrapper[5050]: I1123 16:58:25.950976 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-pwnzp" Nov 23 16:58:25 crc kubenswrapper[5050]: I1123 16:58:25.953981 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-pwnzp" Nov 23 16:58:26 crc kubenswrapper[5050]: I1123 16:58:26.024102 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-pwnzp" Nov 23 16:58:26 crc kubenswrapper[5050]: I1123 16:58:26.230239 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-pwnzp" Nov 23 16:58:26 crc kubenswrapper[5050]: I1123 16:58:26.290034 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-pwnzp"] Nov 23 16:58:28 crc kubenswrapper[5050]: I1123 16:58:28.199139 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-pwnzp" podUID="4a2c5f35-665c-4709-a7fa-03ed8119abc1" containerName="registry-server" containerID="cri-o://712a5572aa151c42c74afd186aeb2e0a5bc5b8de687162db3f0139d2464b4174" gracePeriod=2 Nov 23 16:58:28 crc kubenswrapper[5050]: I1123 16:58:28.789224 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-pwnzp" Nov 23 16:58:28 crc kubenswrapper[5050]: I1123 16:58:28.879084 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4a2c5f35-665c-4709-a7fa-03ed8119abc1-catalog-content\") pod \"4a2c5f35-665c-4709-a7fa-03ed8119abc1\" (UID: \"4a2c5f35-665c-4709-a7fa-03ed8119abc1\") " Nov 23 16:58:28 crc kubenswrapper[5050]: I1123 16:58:28.879541 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bx8h4\" (UniqueName: \"kubernetes.io/projected/4a2c5f35-665c-4709-a7fa-03ed8119abc1-kube-api-access-bx8h4\") pod \"4a2c5f35-665c-4709-a7fa-03ed8119abc1\" (UID: \"4a2c5f35-665c-4709-a7fa-03ed8119abc1\") " Nov 23 16:58:28 crc kubenswrapper[5050]: I1123 16:58:28.879634 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4a2c5f35-665c-4709-a7fa-03ed8119abc1-utilities\") pod \"4a2c5f35-665c-4709-a7fa-03ed8119abc1\" (UID: \"4a2c5f35-665c-4709-a7fa-03ed8119abc1\") " Nov 23 16:58:28 crc kubenswrapper[5050]: I1123 16:58:28.880484 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4a2c5f35-665c-4709-a7fa-03ed8119abc1-utilities" (OuterVolumeSpecName: "utilities") pod "4a2c5f35-665c-4709-a7fa-03ed8119abc1" (UID: "4a2c5f35-665c-4709-a7fa-03ed8119abc1"). 
InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 16:58:28 crc kubenswrapper[5050]: I1123 16:58:28.895862 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4a2c5f35-665c-4709-a7fa-03ed8119abc1-kube-api-access-bx8h4" (OuterVolumeSpecName: "kube-api-access-bx8h4") pod "4a2c5f35-665c-4709-a7fa-03ed8119abc1" (UID: "4a2c5f35-665c-4709-a7fa-03ed8119abc1"). InnerVolumeSpecName "kube-api-access-bx8h4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 16:58:28 crc kubenswrapper[5050]: I1123 16:58:28.909646 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4a2c5f35-665c-4709-a7fa-03ed8119abc1-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4a2c5f35-665c-4709-a7fa-03ed8119abc1" (UID: "4a2c5f35-665c-4709-a7fa-03ed8119abc1"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 16:58:28 crc kubenswrapper[5050]: I1123 16:58:28.983794 5050 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4a2c5f35-665c-4709-a7fa-03ed8119abc1-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 23 16:58:28 crc kubenswrapper[5050]: I1123 16:58:28.983843 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bx8h4\" (UniqueName: \"kubernetes.io/projected/4a2c5f35-665c-4709-a7fa-03ed8119abc1-kube-api-access-bx8h4\") on node \"crc\" DevicePath \"\"" Nov 23 16:58:28 crc kubenswrapper[5050]: I1123 16:58:28.983868 5050 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4a2c5f35-665c-4709-a7fa-03ed8119abc1-utilities\") on node \"crc\" DevicePath \"\"" Nov 23 16:58:29 crc kubenswrapper[5050]: I1123 16:58:29.216798 5050 generic.go:334] "Generic (PLEG): container finished" podID="4a2c5f35-665c-4709-a7fa-03ed8119abc1" containerID="712a5572aa151c42c74afd186aeb2e0a5bc5b8de687162db3f0139d2464b4174" exitCode=0 Nov 23 16:58:29 crc kubenswrapper[5050]: I1123 16:58:29.216902 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-pwnzp" Nov 23 16:58:29 crc kubenswrapper[5050]: I1123 16:58:29.216887 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pwnzp" event={"ID":"4a2c5f35-665c-4709-a7fa-03ed8119abc1","Type":"ContainerDied","Data":"712a5572aa151c42c74afd186aeb2e0a5bc5b8de687162db3f0139d2464b4174"} Nov 23 16:58:29 crc kubenswrapper[5050]: I1123 16:58:29.219131 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pwnzp" event={"ID":"4a2c5f35-665c-4709-a7fa-03ed8119abc1","Type":"ContainerDied","Data":"5708ca38d7ddebc77a6b39207840279a0a07ea9b317a8cb26511801c3f40aa53"} Nov 23 16:58:29 crc kubenswrapper[5050]: I1123 16:58:29.219207 5050 scope.go:117] "RemoveContainer" containerID="712a5572aa151c42c74afd186aeb2e0a5bc5b8de687162db3f0139d2464b4174" Nov 23 16:58:29 crc kubenswrapper[5050]: I1123 16:58:29.242247 5050 scope.go:117] "RemoveContainer" containerID="36c800f68248c75f5cecb85de3aedadecbc48f2b4b2b9b75652d62942558ee39" Nov 23 16:58:29 crc kubenswrapper[5050]: I1123 16:58:29.278619 5050 scope.go:117] "RemoveContainer" containerID="05e900748dee4d8fbf261716b1553b53e9efc9bc8a1934a955187b71b8796f33" Nov 23 16:58:29 crc kubenswrapper[5050]: I1123 16:58:29.291722 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-pwnzp"] Nov 23 16:58:29 crc kubenswrapper[5050]: I1123 16:58:29.312603 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-pwnzp"] Nov 23 16:58:29 crc kubenswrapper[5050]: I1123 16:58:29.347180 5050 scope.go:117] "RemoveContainer" containerID="712a5572aa151c42c74afd186aeb2e0a5bc5b8de687162db3f0139d2464b4174" Nov 23 16:58:29 crc kubenswrapper[5050]: E1123 16:58:29.347891 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"712a5572aa151c42c74afd186aeb2e0a5bc5b8de687162db3f0139d2464b4174\": container with ID starting with 712a5572aa151c42c74afd186aeb2e0a5bc5b8de687162db3f0139d2464b4174 not found: ID does not exist" containerID="712a5572aa151c42c74afd186aeb2e0a5bc5b8de687162db3f0139d2464b4174" Nov 23 16:58:29 crc kubenswrapper[5050]: I1123 16:58:29.347960 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"712a5572aa151c42c74afd186aeb2e0a5bc5b8de687162db3f0139d2464b4174"} err="failed to get container status \"712a5572aa151c42c74afd186aeb2e0a5bc5b8de687162db3f0139d2464b4174\": rpc error: code = NotFound desc = could not find container \"712a5572aa151c42c74afd186aeb2e0a5bc5b8de687162db3f0139d2464b4174\": container with ID starting with 712a5572aa151c42c74afd186aeb2e0a5bc5b8de687162db3f0139d2464b4174 not found: ID does not exist" Nov 23 16:58:29 crc kubenswrapper[5050]: I1123 16:58:29.348001 5050 scope.go:117] "RemoveContainer" containerID="36c800f68248c75f5cecb85de3aedadecbc48f2b4b2b9b75652d62942558ee39" Nov 23 16:58:29 crc kubenswrapper[5050]: E1123 16:58:29.348376 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"36c800f68248c75f5cecb85de3aedadecbc48f2b4b2b9b75652d62942558ee39\": container with ID starting with 36c800f68248c75f5cecb85de3aedadecbc48f2b4b2b9b75652d62942558ee39 not found: ID does not exist" containerID="36c800f68248c75f5cecb85de3aedadecbc48f2b4b2b9b75652d62942558ee39" Nov 23 16:58:29 crc kubenswrapper[5050]: I1123 16:58:29.348433 5050 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"36c800f68248c75f5cecb85de3aedadecbc48f2b4b2b9b75652d62942558ee39"} err="failed to get container status \"36c800f68248c75f5cecb85de3aedadecbc48f2b4b2b9b75652d62942558ee39\": rpc error: code = NotFound desc = could not find container \"36c800f68248c75f5cecb85de3aedadecbc48f2b4b2b9b75652d62942558ee39\": container with ID starting with 36c800f68248c75f5cecb85de3aedadecbc48f2b4b2b9b75652d62942558ee39 not found: ID does not exist" Nov 23 16:58:29 crc kubenswrapper[5050]: I1123 16:58:29.348479 5050 scope.go:117] "RemoveContainer" containerID="05e900748dee4d8fbf261716b1553b53e9efc9bc8a1934a955187b71b8796f33" Nov 23 16:58:29 crc kubenswrapper[5050]: E1123 16:58:29.348739 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"05e900748dee4d8fbf261716b1553b53e9efc9bc8a1934a955187b71b8796f33\": container with ID starting with 05e900748dee4d8fbf261716b1553b53e9efc9bc8a1934a955187b71b8796f33 not found: ID does not exist" containerID="05e900748dee4d8fbf261716b1553b53e9efc9bc8a1934a955187b71b8796f33" Nov 23 16:58:29 crc kubenswrapper[5050]: I1123 16:58:29.348763 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"05e900748dee4d8fbf261716b1553b53e9efc9bc8a1934a955187b71b8796f33"} err="failed to get container status \"05e900748dee4d8fbf261716b1553b53e9efc9bc8a1934a955187b71b8796f33\": rpc error: code = NotFound desc = could not find container \"05e900748dee4d8fbf261716b1553b53e9efc9bc8a1934a955187b71b8796f33\": container with ID starting with 05e900748dee4d8fbf261716b1553b53e9efc9bc8a1934a955187b71b8796f33 not found: ID does not exist" Nov 23 16:58:29 crc kubenswrapper[5050]: I1123 16:58:29.573123 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4a2c5f35-665c-4709-a7fa-03ed8119abc1" path="/var/lib/kubelet/pods/4a2c5f35-665c-4709-a7fa-03ed8119abc1/volumes" Nov 23 16:58:43 crc kubenswrapper[5050]: I1123 16:58:43.621553 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-svj6h"] Nov 23 16:58:43 crc kubenswrapper[5050]: E1123 16:58:43.623087 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4a2c5f35-665c-4709-a7fa-03ed8119abc1" containerName="extract-utilities" Nov 23 16:58:43 crc kubenswrapper[5050]: I1123 16:58:43.623112 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="4a2c5f35-665c-4709-a7fa-03ed8119abc1" containerName="extract-utilities" Nov 23 16:58:43 crc kubenswrapper[5050]: E1123 16:58:43.623168 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4a2c5f35-665c-4709-a7fa-03ed8119abc1" containerName="registry-server" Nov 23 16:58:43 crc kubenswrapper[5050]: I1123 16:58:43.623186 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="4a2c5f35-665c-4709-a7fa-03ed8119abc1" containerName="registry-server" Nov 23 16:58:43 crc kubenswrapper[5050]: E1123 16:58:43.623249 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4a2c5f35-665c-4709-a7fa-03ed8119abc1" containerName="extract-content" Nov 23 16:58:43 crc kubenswrapper[5050]: I1123 16:58:43.623263 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="4a2c5f35-665c-4709-a7fa-03ed8119abc1" containerName="extract-content" Nov 23 16:58:43 crc kubenswrapper[5050]: I1123 16:58:43.623716 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="4a2c5f35-665c-4709-a7fa-03ed8119abc1" 
containerName="registry-server" Nov 23 16:58:43 crc kubenswrapper[5050]: I1123 16:58:43.628413 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-svj6h" Nov 23 16:58:43 crc kubenswrapper[5050]: I1123 16:58:43.660176 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-svj6h"] Nov 23 16:58:43 crc kubenswrapper[5050]: I1123 16:58:43.801365 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fa504388-e223-4301-8c56-f33ba4d23654-catalog-content\") pod \"redhat-operators-svj6h\" (UID: \"fa504388-e223-4301-8c56-f33ba4d23654\") " pod="openshift-marketplace/redhat-operators-svj6h" Nov 23 16:58:43 crc kubenswrapper[5050]: I1123 16:58:43.801440 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fa504388-e223-4301-8c56-f33ba4d23654-utilities\") pod \"redhat-operators-svj6h\" (UID: \"fa504388-e223-4301-8c56-f33ba4d23654\") " pod="openshift-marketplace/redhat-operators-svj6h" Nov 23 16:58:43 crc kubenswrapper[5050]: I1123 16:58:43.802100 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ns876\" (UniqueName: \"kubernetes.io/projected/fa504388-e223-4301-8c56-f33ba4d23654-kube-api-access-ns876\") pod \"redhat-operators-svj6h\" (UID: \"fa504388-e223-4301-8c56-f33ba4d23654\") " pod="openshift-marketplace/redhat-operators-svj6h" Nov 23 16:58:43 crc kubenswrapper[5050]: I1123 16:58:43.905970 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ns876\" (UniqueName: \"kubernetes.io/projected/fa504388-e223-4301-8c56-f33ba4d23654-kube-api-access-ns876\") pod \"redhat-operators-svj6h\" (UID: \"fa504388-e223-4301-8c56-f33ba4d23654\") " pod="openshift-marketplace/redhat-operators-svj6h" Nov 23 16:58:43 crc kubenswrapper[5050]: I1123 16:58:43.906336 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fa504388-e223-4301-8c56-f33ba4d23654-catalog-content\") pod \"redhat-operators-svj6h\" (UID: \"fa504388-e223-4301-8c56-f33ba4d23654\") " pod="openshift-marketplace/redhat-operators-svj6h" Nov 23 16:58:43 crc kubenswrapper[5050]: I1123 16:58:43.906424 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fa504388-e223-4301-8c56-f33ba4d23654-utilities\") pod \"redhat-operators-svj6h\" (UID: \"fa504388-e223-4301-8c56-f33ba4d23654\") " pod="openshift-marketplace/redhat-operators-svj6h" Nov 23 16:58:43 crc kubenswrapper[5050]: I1123 16:58:43.907424 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fa504388-e223-4301-8c56-f33ba4d23654-catalog-content\") pod \"redhat-operators-svj6h\" (UID: \"fa504388-e223-4301-8c56-f33ba4d23654\") " pod="openshift-marketplace/redhat-operators-svj6h" Nov 23 16:58:43 crc kubenswrapper[5050]: I1123 16:58:43.907456 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fa504388-e223-4301-8c56-f33ba4d23654-utilities\") pod \"redhat-operators-svj6h\" (UID: \"fa504388-e223-4301-8c56-f33ba4d23654\") " pod="openshift-marketplace/redhat-operators-svj6h" Nov 
23 16:58:43 crc kubenswrapper[5050]: I1123 16:58:43.934804 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ns876\" (UniqueName: \"kubernetes.io/projected/fa504388-e223-4301-8c56-f33ba4d23654-kube-api-access-ns876\") pod \"redhat-operators-svj6h\" (UID: \"fa504388-e223-4301-8c56-f33ba4d23654\") " pod="openshift-marketplace/redhat-operators-svj6h" Nov 23 16:58:43 crc kubenswrapper[5050]: I1123 16:58:43.962594 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-svj6h" Nov 23 16:58:44 crc kubenswrapper[5050]: I1123 16:58:44.546854 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-svj6h"] Nov 23 16:58:45 crc kubenswrapper[5050]: I1123 16:58:45.441202 5050 generic.go:334] "Generic (PLEG): container finished" podID="fa504388-e223-4301-8c56-f33ba4d23654" containerID="ddd5f6189a76582f083c17328ef22dbad20cd3bfc7a52f233c7cafafdadbab2b" exitCode=0 Nov 23 16:58:45 crc kubenswrapper[5050]: I1123 16:58:45.441395 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-svj6h" event={"ID":"fa504388-e223-4301-8c56-f33ba4d23654","Type":"ContainerDied","Data":"ddd5f6189a76582f083c17328ef22dbad20cd3bfc7a52f233c7cafafdadbab2b"} Nov 23 16:58:45 crc kubenswrapper[5050]: I1123 16:58:45.441664 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-svj6h" event={"ID":"fa504388-e223-4301-8c56-f33ba4d23654","Type":"ContainerStarted","Data":"4c3afa662d54d1ead2cbec9845dec1968ebd732f13fee2bd33ed0a4f4f1903eb"} Nov 23 16:58:47 crc kubenswrapper[5050]: I1123 16:58:47.472012 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-svj6h" event={"ID":"fa504388-e223-4301-8c56-f33ba4d23654","Type":"ContainerStarted","Data":"bd8e3de18dae78d18c14949bdd9eb9e84ffbb529bbeac8dc7284ab1147c7cfaa"} Nov 23 16:58:48 crc kubenswrapper[5050]: I1123 16:58:48.486088 5050 generic.go:334] "Generic (PLEG): container finished" podID="fa504388-e223-4301-8c56-f33ba4d23654" containerID="bd8e3de18dae78d18c14949bdd9eb9e84ffbb529bbeac8dc7284ab1147c7cfaa" exitCode=0 Nov 23 16:58:48 crc kubenswrapper[5050]: I1123 16:58:48.486592 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-svj6h" event={"ID":"fa504388-e223-4301-8c56-f33ba4d23654","Type":"ContainerDied","Data":"bd8e3de18dae78d18c14949bdd9eb9e84ffbb529bbeac8dc7284ab1147c7cfaa"} Nov 23 16:58:49 crc kubenswrapper[5050]: I1123 16:58:49.504348 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-svj6h" event={"ID":"fa504388-e223-4301-8c56-f33ba4d23654","Type":"ContainerStarted","Data":"9452117433fff6a40fc00558b29620efdd668191009dd64d79119e39e927f063"} Nov 23 16:58:49 crc kubenswrapper[5050]: I1123 16:58:49.541875 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-svj6h" podStartSLOduration=3.060207414 podStartE2EDuration="6.54184506s" podCreationTimestamp="2025-11-23 16:58:43 +0000 UTC" firstStartedPulling="2025-11-23 16:58:45.44376083 +0000 UTC m=+8220.610757315" lastFinishedPulling="2025-11-23 16:58:48.925398466 +0000 UTC m=+8224.092394961" observedRunningTime="2025-11-23 16:58:49.535195132 +0000 UTC m=+8224.702191647" watchObservedRunningTime="2025-11-23 16:58:49.54184506 +0000 UTC m=+8224.708841555" Nov 23 16:58:53 crc kubenswrapper[5050]: I1123 16:58:53.963541 
5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-svj6h" Nov 23 16:58:53 crc kubenswrapper[5050]: I1123 16:58:53.964353 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-svj6h" Nov 23 16:58:55 crc kubenswrapper[5050]: I1123 16:58:55.019649 5050 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-svj6h" podUID="fa504388-e223-4301-8c56-f33ba4d23654" containerName="registry-server" probeResult="failure" output=< Nov 23 16:58:55 crc kubenswrapper[5050]: timeout: failed to connect service ":50051" within 1s Nov 23 16:58:55 crc kubenswrapper[5050]: > Nov 23 16:59:04 crc kubenswrapper[5050]: I1123 16:59:04.034475 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-svj6h" Nov 23 16:59:04 crc kubenswrapper[5050]: I1123 16:59:04.121661 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-svj6h" Nov 23 16:59:04 crc kubenswrapper[5050]: I1123 16:59:04.295847 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-svj6h"] Nov 23 16:59:05 crc kubenswrapper[5050]: I1123 16:59:05.716110 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-svj6h" podUID="fa504388-e223-4301-8c56-f33ba4d23654" containerName="registry-server" containerID="cri-o://9452117433fff6a40fc00558b29620efdd668191009dd64d79119e39e927f063" gracePeriod=2 Nov 23 16:59:06 crc kubenswrapper[5050]: I1123 16:59:06.303283 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-svj6h" Nov 23 16:59:06 crc kubenswrapper[5050]: I1123 16:59:06.473189 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fa504388-e223-4301-8c56-f33ba4d23654-utilities\") pod \"fa504388-e223-4301-8c56-f33ba4d23654\" (UID: \"fa504388-e223-4301-8c56-f33ba4d23654\") " Nov 23 16:59:06 crc kubenswrapper[5050]: I1123 16:59:06.473695 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ns876\" (UniqueName: \"kubernetes.io/projected/fa504388-e223-4301-8c56-f33ba4d23654-kube-api-access-ns876\") pod \"fa504388-e223-4301-8c56-f33ba4d23654\" (UID: \"fa504388-e223-4301-8c56-f33ba4d23654\") " Nov 23 16:59:06 crc kubenswrapper[5050]: I1123 16:59:06.473798 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fa504388-e223-4301-8c56-f33ba4d23654-catalog-content\") pod \"fa504388-e223-4301-8c56-f33ba4d23654\" (UID: \"fa504388-e223-4301-8c56-f33ba4d23654\") " Nov 23 16:59:06 crc kubenswrapper[5050]: I1123 16:59:06.475332 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fa504388-e223-4301-8c56-f33ba4d23654-utilities" (OuterVolumeSpecName: "utilities") pod "fa504388-e223-4301-8c56-f33ba4d23654" (UID: "fa504388-e223-4301-8c56-f33ba4d23654"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 16:59:06 crc kubenswrapper[5050]: I1123 16:59:06.486551 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fa504388-e223-4301-8c56-f33ba4d23654-kube-api-access-ns876" (OuterVolumeSpecName: "kube-api-access-ns876") pod "fa504388-e223-4301-8c56-f33ba4d23654" (UID: "fa504388-e223-4301-8c56-f33ba4d23654"). InnerVolumeSpecName "kube-api-access-ns876". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 16:59:06 crc kubenswrapper[5050]: I1123 16:59:06.594930 5050 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fa504388-e223-4301-8c56-f33ba4d23654-utilities\") on node \"crc\" DevicePath \"\"" Nov 23 16:59:06 crc kubenswrapper[5050]: I1123 16:59:06.594985 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ns876\" (UniqueName: \"kubernetes.io/projected/fa504388-e223-4301-8c56-f33ba4d23654-kube-api-access-ns876\") on node \"crc\" DevicePath \"\"" Nov 23 16:59:06 crc kubenswrapper[5050]: I1123 16:59:06.598405 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fa504388-e223-4301-8c56-f33ba4d23654-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "fa504388-e223-4301-8c56-f33ba4d23654" (UID: "fa504388-e223-4301-8c56-f33ba4d23654"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 16:59:06 crc kubenswrapper[5050]: I1123 16:59:06.698575 5050 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fa504388-e223-4301-8c56-f33ba4d23654-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 23 16:59:06 crc kubenswrapper[5050]: I1123 16:59:06.729511 5050 generic.go:334] "Generic (PLEG): container finished" podID="fa504388-e223-4301-8c56-f33ba4d23654" containerID="9452117433fff6a40fc00558b29620efdd668191009dd64d79119e39e927f063" exitCode=0 Nov 23 16:59:06 crc kubenswrapper[5050]: I1123 16:59:06.729568 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-svj6h" event={"ID":"fa504388-e223-4301-8c56-f33ba4d23654","Type":"ContainerDied","Data":"9452117433fff6a40fc00558b29620efdd668191009dd64d79119e39e927f063"} Nov 23 16:59:06 crc kubenswrapper[5050]: I1123 16:59:06.729704 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-svj6h" Nov 23 16:59:06 crc kubenswrapper[5050]: I1123 16:59:06.729842 5050 scope.go:117] "RemoveContainer" containerID="9452117433fff6a40fc00558b29620efdd668191009dd64d79119e39e927f063" Nov 23 16:59:06 crc kubenswrapper[5050]: I1123 16:59:06.731378 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-svj6h" event={"ID":"fa504388-e223-4301-8c56-f33ba4d23654","Type":"ContainerDied","Data":"4c3afa662d54d1ead2cbec9845dec1968ebd732f13fee2bd33ed0a4f4f1903eb"} Nov 23 16:59:06 crc kubenswrapper[5050]: I1123 16:59:06.776580 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-svj6h"] Nov 23 16:59:06 crc kubenswrapper[5050]: I1123 16:59:06.786345 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-svj6h"] Nov 23 16:59:06 crc kubenswrapper[5050]: I1123 16:59:06.786974 5050 scope.go:117] "RemoveContainer" containerID="bd8e3de18dae78d18c14949bdd9eb9e84ffbb529bbeac8dc7284ab1147c7cfaa" Nov 23 16:59:06 crc kubenswrapper[5050]: I1123 16:59:06.825414 5050 scope.go:117] "RemoveContainer" containerID="ddd5f6189a76582f083c17328ef22dbad20cd3bfc7a52f233c7cafafdadbab2b" Nov 23 16:59:06 crc kubenswrapper[5050]: I1123 16:59:06.898561 5050 scope.go:117] "RemoveContainer" containerID="9452117433fff6a40fc00558b29620efdd668191009dd64d79119e39e927f063" Nov 23 16:59:06 crc kubenswrapper[5050]: E1123 16:59:06.899215 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9452117433fff6a40fc00558b29620efdd668191009dd64d79119e39e927f063\": container with ID starting with 9452117433fff6a40fc00558b29620efdd668191009dd64d79119e39e927f063 not found: ID does not exist" containerID="9452117433fff6a40fc00558b29620efdd668191009dd64d79119e39e927f063" Nov 23 16:59:06 crc kubenswrapper[5050]: I1123 16:59:06.899279 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9452117433fff6a40fc00558b29620efdd668191009dd64d79119e39e927f063"} err="failed to get container status \"9452117433fff6a40fc00558b29620efdd668191009dd64d79119e39e927f063\": rpc error: code = NotFound desc = could not find container \"9452117433fff6a40fc00558b29620efdd668191009dd64d79119e39e927f063\": container with ID starting with 9452117433fff6a40fc00558b29620efdd668191009dd64d79119e39e927f063 not found: ID does not exist" Nov 23 16:59:06 crc kubenswrapper[5050]: I1123 16:59:06.899317 5050 scope.go:117] "RemoveContainer" containerID="bd8e3de18dae78d18c14949bdd9eb9e84ffbb529bbeac8dc7284ab1147c7cfaa" Nov 23 16:59:06 crc kubenswrapper[5050]: E1123 16:59:06.899878 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bd8e3de18dae78d18c14949bdd9eb9e84ffbb529bbeac8dc7284ab1147c7cfaa\": container with ID starting with bd8e3de18dae78d18c14949bdd9eb9e84ffbb529bbeac8dc7284ab1147c7cfaa not found: ID does not exist" containerID="bd8e3de18dae78d18c14949bdd9eb9e84ffbb529bbeac8dc7284ab1147c7cfaa" Nov 23 16:59:06 crc kubenswrapper[5050]: I1123 16:59:06.899947 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bd8e3de18dae78d18c14949bdd9eb9e84ffbb529bbeac8dc7284ab1147c7cfaa"} err="failed to get container status \"bd8e3de18dae78d18c14949bdd9eb9e84ffbb529bbeac8dc7284ab1147c7cfaa\": rpc error: code = NotFound desc = could not find container 
\"bd8e3de18dae78d18c14949bdd9eb9e84ffbb529bbeac8dc7284ab1147c7cfaa\": container with ID starting with bd8e3de18dae78d18c14949bdd9eb9e84ffbb529bbeac8dc7284ab1147c7cfaa not found: ID does not exist" Nov 23 16:59:06 crc kubenswrapper[5050]: I1123 16:59:06.899995 5050 scope.go:117] "RemoveContainer" containerID="ddd5f6189a76582f083c17328ef22dbad20cd3bfc7a52f233c7cafafdadbab2b" Nov 23 16:59:06 crc kubenswrapper[5050]: E1123 16:59:06.900543 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ddd5f6189a76582f083c17328ef22dbad20cd3bfc7a52f233c7cafafdadbab2b\": container with ID starting with ddd5f6189a76582f083c17328ef22dbad20cd3bfc7a52f233c7cafafdadbab2b not found: ID does not exist" containerID="ddd5f6189a76582f083c17328ef22dbad20cd3bfc7a52f233c7cafafdadbab2b" Nov 23 16:59:06 crc kubenswrapper[5050]: I1123 16:59:06.900610 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ddd5f6189a76582f083c17328ef22dbad20cd3bfc7a52f233c7cafafdadbab2b"} err="failed to get container status \"ddd5f6189a76582f083c17328ef22dbad20cd3bfc7a52f233c7cafafdadbab2b\": rpc error: code = NotFound desc = could not find container \"ddd5f6189a76582f083c17328ef22dbad20cd3bfc7a52f233c7cafafdadbab2b\": container with ID starting with ddd5f6189a76582f083c17328ef22dbad20cd3bfc7a52f233c7cafafdadbab2b not found: ID does not exist" Nov 23 16:59:07 crc kubenswrapper[5050]: I1123 16:59:07.574758 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fa504388-e223-4301-8c56-f33ba4d23654" path="/var/lib/kubelet/pods/fa504388-e223-4301-8c56-f33ba4d23654/volumes" Nov 23 16:59:29 crc kubenswrapper[5050]: I1123 16:59:29.231092 5050 patch_prober.go:28] interesting pod/machine-config-daemon-hlrlq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 23 16:59:29 crc kubenswrapper[5050]: I1123 16:59:29.232336 5050 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 23 16:59:47 crc kubenswrapper[5050]: I1123 16:59:47.349214 5050 generic.go:334] "Generic (PLEG): container finished" podID="c83fc0ae-3d5e-4abe-9654-df9afd83c9a0" containerID="00a185857804964f81f79ab7f4323ab9e11520c4e6d3d819ef2c819d7659bd06" exitCode=0 Nov 23 16:59:47 crc kubenswrapper[5050]: I1123 16:59:47.349284 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-openstack-openstack-cell1-28qz2" event={"ID":"c83fc0ae-3d5e-4abe-9654-df9afd83c9a0","Type":"ContainerDied","Data":"00a185857804964f81f79ab7f4323ab9e11520c4e6d3d819ef2c819d7659bd06"} Nov 23 16:59:48 crc kubenswrapper[5050]: I1123 16:59:48.948545 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-openstack-openstack-cell1-28qz2" Nov 23 16:59:49 crc kubenswrapper[5050]: I1123 16:59:49.066528 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/c83fc0ae-3d5e-4abe-9654-df9afd83c9a0-nova-migration-ssh-key-1\") pod \"c83fc0ae-3d5e-4abe-9654-df9afd83c9a0\" (UID: \"c83fc0ae-3d5e-4abe-9654-df9afd83c9a0\") " Nov 23 16:59:49 crc kubenswrapper[5050]: I1123 16:59:49.067120 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/c83fc0ae-3d5e-4abe-9654-df9afd83c9a0-ceph\") pod \"c83fc0ae-3d5e-4abe-9654-df9afd83c9a0\" (UID: \"c83fc0ae-3d5e-4abe-9654-df9afd83c9a0\") " Nov 23 16:59:49 crc kubenswrapper[5050]: I1123 16:59:49.067277 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cells-global-config-0\" (UniqueName: \"kubernetes.io/configmap/c83fc0ae-3d5e-4abe-9654-df9afd83c9a0-nova-cells-global-config-0\") pod \"c83fc0ae-3d5e-4abe-9654-df9afd83c9a0\" (UID: \"c83fc0ae-3d5e-4abe-9654-df9afd83c9a0\") " Nov 23 16:59:49 crc kubenswrapper[5050]: I1123 16:59:49.067409 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c83fc0ae-3d5e-4abe-9654-df9afd83c9a0-inventory\") pod \"c83fc0ae-3d5e-4abe-9654-df9afd83c9a0\" (UID: \"c83fc0ae-3d5e-4abe-9654-df9afd83c9a0\") " Nov 23 16:59:49 crc kubenswrapper[5050]: I1123 16:59:49.067540 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/c83fc0ae-3d5e-4abe-9654-df9afd83c9a0-nova-migration-ssh-key-0\") pod \"c83fc0ae-3d5e-4abe-9654-df9afd83c9a0\" (UID: \"c83fc0ae-3d5e-4abe-9654-df9afd83c9a0\") " Nov 23 16:59:49 crc kubenswrapper[5050]: I1123 16:59:49.067662 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cells-global-config-1\" (UniqueName: \"kubernetes.io/configmap/c83fc0ae-3d5e-4abe-9654-df9afd83c9a0-nova-cells-global-config-1\") pod \"c83fc0ae-3d5e-4abe-9654-df9afd83c9a0\" (UID: \"c83fc0ae-3d5e-4abe-9654-df9afd83c9a0\") " Nov 23 16:59:49 crc kubenswrapper[5050]: I1123 16:59:49.067824 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c83fc0ae-3d5e-4abe-9654-df9afd83c9a0-ssh-key\") pod \"c83fc0ae-3d5e-4abe-9654-df9afd83c9a0\" (UID: \"c83fc0ae-3d5e-4abe-9654-df9afd83c9a0\") " Nov 23 16:59:49 crc kubenswrapper[5050]: I1123 16:59:49.067895 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c83fc0ae-3d5e-4abe-9654-df9afd83c9a0-nova-cell1-combined-ca-bundle\") pod \"c83fc0ae-3d5e-4abe-9654-df9afd83c9a0\" (UID: \"c83fc0ae-3d5e-4abe-9654-df9afd83c9a0\") " Nov 23 16:59:49 crc kubenswrapper[5050]: I1123 16:59:49.067962 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/c83fc0ae-3d5e-4abe-9654-df9afd83c9a0-nova-cell1-compute-config-1\") pod \"c83fc0ae-3d5e-4abe-9654-df9afd83c9a0\" (UID: \"c83fc0ae-3d5e-4abe-9654-df9afd83c9a0\") " Nov 23 16:59:49 crc kubenswrapper[5050]: I1123 16:59:49.068096 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x4clf\" (UniqueName: 
\"kubernetes.io/projected/c83fc0ae-3d5e-4abe-9654-df9afd83c9a0-kube-api-access-x4clf\") pod \"c83fc0ae-3d5e-4abe-9654-df9afd83c9a0\" (UID: \"c83fc0ae-3d5e-4abe-9654-df9afd83c9a0\") " Nov 23 16:59:49 crc kubenswrapper[5050]: I1123 16:59:49.068165 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/c83fc0ae-3d5e-4abe-9654-df9afd83c9a0-nova-cell1-compute-config-0\") pod \"c83fc0ae-3d5e-4abe-9654-df9afd83c9a0\" (UID: \"c83fc0ae-3d5e-4abe-9654-df9afd83c9a0\") " Nov 23 16:59:49 crc kubenswrapper[5050]: I1123 16:59:49.075624 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c83fc0ae-3d5e-4abe-9654-df9afd83c9a0-ceph" (OuterVolumeSpecName: "ceph") pod "c83fc0ae-3d5e-4abe-9654-df9afd83c9a0" (UID: "c83fc0ae-3d5e-4abe-9654-df9afd83c9a0"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:59:49 crc kubenswrapper[5050]: I1123 16:59:49.077232 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c83fc0ae-3d5e-4abe-9654-df9afd83c9a0-nova-cell1-combined-ca-bundle" (OuterVolumeSpecName: "nova-cell1-combined-ca-bundle") pod "c83fc0ae-3d5e-4abe-9654-df9afd83c9a0" (UID: "c83fc0ae-3d5e-4abe-9654-df9afd83c9a0"). InnerVolumeSpecName "nova-cell1-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:59:49 crc kubenswrapper[5050]: I1123 16:59:49.086722 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c83fc0ae-3d5e-4abe-9654-df9afd83c9a0-kube-api-access-x4clf" (OuterVolumeSpecName: "kube-api-access-x4clf") pod "c83fc0ae-3d5e-4abe-9654-df9afd83c9a0" (UID: "c83fc0ae-3d5e-4abe-9654-df9afd83c9a0"). InnerVolumeSpecName "kube-api-access-x4clf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 16:59:49 crc kubenswrapper[5050]: I1123 16:59:49.101388 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c83fc0ae-3d5e-4abe-9654-df9afd83c9a0-inventory" (OuterVolumeSpecName: "inventory") pod "c83fc0ae-3d5e-4abe-9654-df9afd83c9a0" (UID: "c83fc0ae-3d5e-4abe-9654-df9afd83c9a0"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:59:49 crc kubenswrapper[5050]: I1123 16:59:49.111838 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c83fc0ae-3d5e-4abe-9654-df9afd83c9a0-nova-cell1-compute-config-0" (OuterVolumeSpecName: "nova-cell1-compute-config-0") pod "c83fc0ae-3d5e-4abe-9654-df9afd83c9a0" (UID: "c83fc0ae-3d5e-4abe-9654-df9afd83c9a0"). InnerVolumeSpecName "nova-cell1-compute-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:59:49 crc kubenswrapper[5050]: I1123 16:59:49.114141 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c83fc0ae-3d5e-4abe-9654-df9afd83c9a0-nova-migration-ssh-key-1" (OuterVolumeSpecName: "nova-migration-ssh-key-1") pod "c83fc0ae-3d5e-4abe-9654-df9afd83c9a0" (UID: "c83fc0ae-3d5e-4abe-9654-df9afd83c9a0"). InnerVolumeSpecName "nova-migration-ssh-key-1". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:59:49 crc kubenswrapper[5050]: I1123 16:59:49.118603 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c83fc0ae-3d5e-4abe-9654-df9afd83c9a0-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "c83fc0ae-3d5e-4abe-9654-df9afd83c9a0" (UID: "c83fc0ae-3d5e-4abe-9654-df9afd83c9a0"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:59:49 crc kubenswrapper[5050]: I1123 16:59:49.123635 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c83fc0ae-3d5e-4abe-9654-df9afd83c9a0-nova-cells-global-config-1" (OuterVolumeSpecName: "nova-cells-global-config-1") pod "c83fc0ae-3d5e-4abe-9654-df9afd83c9a0" (UID: "c83fc0ae-3d5e-4abe-9654-df9afd83c9a0"). InnerVolumeSpecName "nova-cells-global-config-1". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 16:59:49 crc kubenswrapper[5050]: I1123 16:59:49.144208 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c83fc0ae-3d5e-4abe-9654-df9afd83c9a0-nova-cell1-compute-config-1" (OuterVolumeSpecName: "nova-cell1-compute-config-1") pod "c83fc0ae-3d5e-4abe-9654-df9afd83c9a0" (UID: "c83fc0ae-3d5e-4abe-9654-df9afd83c9a0"). InnerVolumeSpecName "nova-cell1-compute-config-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:59:49 crc kubenswrapper[5050]: I1123 16:59:49.144505 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c83fc0ae-3d5e-4abe-9654-df9afd83c9a0-nova-migration-ssh-key-0" (OuterVolumeSpecName: "nova-migration-ssh-key-0") pod "c83fc0ae-3d5e-4abe-9654-df9afd83c9a0" (UID: "c83fc0ae-3d5e-4abe-9654-df9afd83c9a0"). InnerVolumeSpecName "nova-migration-ssh-key-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 16:59:49 crc kubenswrapper[5050]: I1123 16:59:49.146414 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c83fc0ae-3d5e-4abe-9654-df9afd83c9a0-nova-cells-global-config-0" (OuterVolumeSpecName: "nova-cells-global-config-0") pod "c83fc0ae-3d5e-4abe-9654-df9afd83c9a0" (UID: "c83fc0ae-3d5e-4abe-9654-df9afd83c9a0"). InnerVolumeSpecName "nova-cells-global-config-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 16:59:49 crc kubenswrapper[5050]: I1123 16:59:49.171774 5050 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/c83fc0ae-3d5e-4abe-9654-df9afd83c9a0-nova-migration-ssh-key-1\") on node \"crc\" DevicePath \"\"" Nov 23 16:59:49 crc kubenswrapper[5050]: I1123 16:59:49.171813 5050 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/c83fc0ae-3d5e-4abe-9654-df9afd83c9a0-ceph\") on node \"crc\" DevicePath \"\"" Nov 23 16:59:49 crc kubenswrapper[5050]: I1123 16:59:49.171826 5050 reconciler_common.go:293] "Volume detached for volume \"nova-cells-global-config-0\" (UniqueName: \"kubernetes.io/configmap/c83fc0ae-3d5e-4abe-9654-df9afd83c9a0-nova-cells-global-config-0\") on node \"crc\" DevicePath \"\"" Nov 23 16:59:49 crc kubenswrapper[5050]: I1123 16:59:49.171842 5050 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c83fc0ae-3d5e-4abe-9654-df9afd83c9a0-inventory\") on node \"crc\" DevicePath \"\"" Nov 23 16:59:49 crc kubenswrapper[5050]: I1123 16:59:49.171854 5050 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/c83fc0ae-3d5e-4abe-9654-df9afd83c9a0-nova-migration-ssh-key-0\") on node \"crc\" DevicePath \"\"" Nov 23 16:59:49 crc kubenswrapper[5050]: I1123 16:59:49.171865 5050 reconciler_common.go:293] "Volume detached for volume \"nova-cells-global-config-1\" (UniqueName: \"kubernetes.io/configmap/c83fc0ae-3d5e-4abe-9654-df9afd83c9a0-nova-cells-global-config-1\") on node \"crc\" DevicePath \"\"" Nov 23 16:59:49 crc kubenswrapper[5050]: I1123 16:59:49.171877 5050 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c83fc0ae-3d5e-4abe-9654-df9afd83c9a0-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 23 16:59:49 crc kubenswrapper[5050]: I1123 16:59:49.171889 5050 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c83fc0ae-3d5e-4abe-9654-df9afd83c9a0-nova-cell1-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 16:59:49 crc kubenswrapper[5050]: I1123 16:59:49.171901 5050 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/c83fc0ae-3d5e-4abe-9654-df9afd83c9a0-nova-cell1-compute-config-1\") on node \"crc\" DevicePath \"\"" Nov 23 16:59:49 crc kubenswrapper[5050]: I1123 16:59:49.171915 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x4clf\" (UniqueName: \"kubernetes.io/projected/c83fc0ae-3d5e-4abe-9654-df9afd83c9a0-kube-api-access-x4clf\") on node \"crc\" DevicePath \"\"" Nov 23 16:59:49 crc kubenswrapper[5050]: I1123 16:59:49.171926 5050 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/c83fc0ae-3d5e-4abe-9654-df9afd83c9a0-nova-cell1-compute-config-0\") on node \"crc\" DevicePath \"\"" Nov 23 16:59:49 crc kubenswrapper[5050]: I1123 16:59:49.380562 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-openstack-openstack-cell1-28qz2" event={"ID":"c83fc0ae-3d5e-4abe-9654-df9afd83c9a0","Type":"ContainerDied","Data":"f4be6308efa0060d0660c918fe44bbe20e6b1a56d7ba35f8391ad6109d23c84a"} Nov 23 16:59:49 crc kubenswrapper[5050]: I1123 16:59:49.380624 5050 pod_container_deletor.go:80] "Container 
not found in pod's containers" containerID="f4be6308efa0060d0660c918fe44bbe20e6b1a56d7ba35f8391ad6109d23c84a" Nov 23 16:59:49 crc kubenswrapper[5050]: I1123 16:59:49.380666 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-openstack-openstack-cell1-28qz2" Nov 23 16:59:49 crc kubenswrapper[5050]: I1123 16:59:49.531034 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/telemetry-openstack-openstack-cell1-7rf78"] Nov 23 16:59:49 crc kubenswrapper[5050]: E1123 16:59:49.532333 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fa504388-e223-4301-8c56-f33ba4d23654" containerName="registry-server" Nov 23 16:59:49 crc kubenswrapper[5050]: I1123 16:59:49.532385 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="fa504388-e223-4301-8c56-f33ba4d23654" containerName="registry-server" Nov 23 16:59:49 crc kubenswrapper[5050]: E1123 16:59:49.532436 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c83fc0ae-3d5e-4abe-9654-df9afd83c9a0" containerName="nova-cell1-openstack-openstack-cell1" Nov 23 16:59:49 crc kubenswrapper[5050]: I1123 16:59:49.532488 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="c83fc0ae-3d5e-4abe-9654-df9afd83c9a0" containerName="nova-cell1-openstack-openstack-cell1" Nov 23 16:59:49 crc kubenswrapper[5050]: E1123 16:59:49.532517 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fa504388-e223-4301-8c56-f33ba4d23654" containerName="extract-content" Nov 23 16:59:49 crc kubenswrapper[5050]: I1123 16:59:49.532534 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="fa504388-e223-4301-8c56-f33ba4d23654" containerName="extract-content" Nov 23 16:59:49 crc kubenswrapper[5050]: E1123 16:59:49.532584 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fa504388-e223-4301-8c56-f33ba4d23654" containerName="extract-utilities" Nov 23 16:59:49 crc kubenswrapper[5050]: I1123 16:59:49.532603 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="fa504388-e223-4301-8c56-f33ba4d23654" containerName="extract-utilities" Nov 23 16:59:49 crc kubenswrapper[5050]: I1123 16:59:49.533171 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="c83fc0ae-3d5e-4abe-9654-df9afd83c9a0" containerName="nova-cell1-openstack-openstack-cell1" Nov 23 16:59:49 crc kubenswrapper[5050]: I1123 16:59:49.533223 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="fa504388-e223-4301-8c56-f33ba4d23654" containerName="registry-server" Nov 23 16:59:49 crc kubenswrapper[5050]: I1123 16:59:49.535371 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/telemetry-openstack-openstack-cell1-7rf78" Nov 23 16:59:49 crc kubenswrapper[5050]: I1123 16:59:49.540166 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Nov 23 16:59:49 crc kubenswrapper[5050]: I1123 16:59:49.541390 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-ckrpf" Nov 23 16:59:49 crc kubenswrapper[5050]: I1123 16:59:49.541949 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 23 16:59:49 crc kubenswrapper[5050]: I1123 16:59:49.542420 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 23 16:59:49 crc kubenswrapper[5050]: I1123 16:59:49.543527 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-openstack-openstack-cell1-7rf78"] Nov 23 16:59:49 crc kubenswrapper[5050]: I1123 16:59:49.543745 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-compute-config-data" Nov 23 16:59:49 crc kubenswrapper[5050]: I1123 16:59:49.687031 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7c29377d-9bd8-4eb5-8777-3ae34021588e-telemetry-combined-ca-bundle\") pod \"telemetry-openstack-openstack-cell1-7rf78\" (UID: \"7c29377d-9bd8-4eb5-8777-3ae34021588e\") " pod="openstack/telemetry-openstack-openstack-cell1-7rf78" Nov 23 16:59:49 crc kubenswrapper[5050]: I1123 16:59:49.687148 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7c29377d-9bd8-4eb5-8777-3ae34021588e-ssh-key\") pod \"telemetry-openstack-openstack-cell1-7rf78\" (UID: \"7c29377d-9bd8-4eb5-8777-3ae34021588e\") " pod="openstack/telemetry-openstack-openstack-cell1-7rf78" Nov 23 16:59:49 crc kubenswrapper[5050]: I1123 16:59:49.687288 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r5nj2\" (UniqueName: \"kubernetes.io/projected/7c29377d-9bd8-4eb5-8777-3ae34021588e-kube-api-access-r5nj2\") pod \"telemetry-openstack-openstack-cell1-7rf78\" (UID: \"7c29377d-9bd8-4eb5-8777-3ae34021588e\") " pod="openstack/telemetry-openstack-openstack-cell1-7rf78" Nov 23 16:59:49 crc kubenswrapper[5050]: I1123 16:59:49.687405 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/7c29377d-9bd8-4eb5-8777-3ae34021588e-ceilometer-compute-config-data-1\") pod \"telemetry-openstack-openstack-cell1-7rf78\" (UID: \"7c29377d-9bd8-4eb5-8777-3ae34021588e\") " pod="openstack/telemetry-openstack-openstack-cell1-7rf78" Nov 23 16:59:49 crc kubenswrapper[5050]: I1123 16:59:49.687558 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/7c29377d-9bd8-4eb5-8777-3ae34021588e-ceilometer-compute-config-data-2\") pod \"telemetry-openstack-openstack-cell1-7rf78\" (UID: \"7c29377d-9bd8-4eb5-8777-3ae34021588e\") " pod="openstack/telemetry-openstack-openstack-cell1-7rf78" Nov 23 16:59:49 crc kubenswrapper[5050]: I1123 16:59:49.687619 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"ceph\" (UniqueName: \"kubernetes.io/secret/7c29377d-9bd8-4eb5-8777-3ae34021588e-ceph\") pod \"telemetry-openstack-openstack-cell1-7rf78\" (UID: \"7c29377d-9bd8-4eb5-8777-3ae34021588e\") " pod="openstack/telemetry-openstack-openstack-cell1-7rf78" Nov 23 16:59:49 crc kubenswrapper[5050]: I1123 16:59:49.687649 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7c29377d-9bd8-4eb5-8777-3ae34021588e-inventory\") pod \"telemetry-openstack-openstack-cell1-7rf78\" (UID: \"7c29377d-9bd8-4eb5-8777-3ae34021588e\") " pod="openstack/telemetry-openstack-openstack-cell1-7rf78" Nov 23 16:59:49 crc kubenswrapper[5050]: I1123 16:59:49.687702 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/7c29377d-9bd8-4eb5-8777-3ae34021588e-ceilometer-compute-config-data-0\") pod \"telemetry-openstack-openstack-cell1-7rf78\" (UID: \"7c29377d-9bd8-4eb5-8777-3ae34021588e\") " pod="openstack/telemetry-openstack-openstack-cell1-7rf78" Nov 23 16:59:49 crc kubenswrapper[5050]: I1123 16:59:49.790473 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/7c29377d-9bd8-4eb5-8777-3ae34021588e-ceilometer-compute-config-data-1\") pod \"telemetry-openstack-openstack-cell1-7rf78\" (UID: \"7c29377d-9bd8-4eb5-8777-3ae34021588e\") " pod="openstack/telemetry-openstack-openstack-cell1-7rf78" Nov 23 16:59:49 crc kubenswrapper[5050]: I1123 16:59:49.790607 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/7c29377d-9bd8-4eb5-8777-3ae34021588e-ceilometer-compute-config-data-2\") pod \"telemetry-openstack-openstack-cell1-7rf78\" (UID: \"7c29377d-9bd8-4eb5-8777-3ae34021588e\") " pod="openstack/telemetry-openstack-openstack-cell1-7rf78" Nov 23 16:59:49 crc kubenswrapper[5050]: I1123 16:59:49.790682 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/7c29377d-9bd8-4eb5-8777-3ae34021588e-ceph\") pod \"telemetry-openstack-openstack-cell1-7rf78\" (UID: \"7c29377d-9bd8-4eb5-8777-3ae34021588e\") " pod="openstack/telemetry-openstack-openstack-cell1-7rf78" Nov 23 16:59:49 crc kubenswrapper[5050]: I1123 16:59:49.790743 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7c29377d-9bd8-4eb5-8777-3ae34021588e-inventory\") pod \"telemetry-openstack-openstack-cell1-7rf78\" (UID: \"7c29377d-9bd8-4eb5-8777-3ae34021588e\") " pod="openstack/telemetry-openstack-openstack-cell1-7rf78" Nov 23 16:59:49 crc kubenswrapper[5050]: I1123 16:59:49.790807 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/7c29377d-9bd8-4eb5-8777-3ae34021588e-ceilometer-compute-config-data-0\") pod \"telemetry-openstack-openstack-cell1-7rf78\" (UID: \"7c29377d-9bd8-4eb5-8777-3ae34021588e\") " pod="openstack/telemetry-openstack-openstack-cell1-7rf78" Nov 23 16:59:49 crc kubenswrapper[5050]: I1123 16:59:49.790877 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/7c29377d-9bd8-4eb5-8777-3ae34021588e-telemetry-combined-ca-bundle\") pod \"telemetry-openstack-openstack-cell1-7rf78\" (UID: \"7c29377d-9bd8-4eb5-8777-3ae34021588e\") " pod="openstack/telemetry-openstack-openstack-cell1-7rf78" Nov 23 16:59:49 crc kubenswrapper[5050]: I1123 16:59:49.790988 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7c29377d-9bd8-4eb5-8777-3ae34021588e-ssh-key\") pod \"telemetry-openstack-openstack-cell1-7rf78\" (UID: \"7c29377d-9bd8-4eb5-8777-3ae34021588e\") " pod="openstack/telemetry-openstack-openstack-cell1-7rf78" Nov 23 16:59:49 crc kubenswrapper[5050]: I1123 16:59:49.791148 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r5nj2\" (UniqueName: \"kubernetes.io/projected/7c29377d-9bd8-4eb5-8777-3ae34021588e-kube-api-access-r5nj2\") pod \"telemetry-openstack-openstack-cell1-7rf78\" (UID: \"7c29377d-9bd8-4eb5-8777-3ae34021588e\") " pod="openstack/telemetry-openstack-openstack-cell1-7rf78" Nov 23 16:59:49 crc kubenswrapper[5050]: I1123 16:59:49.799791 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/7c29377d-9bd8-4eb5-8777-3ae34021588e-ceilometer-compute-config-data-0\") pod \"telemetry-openstack-openstack-cell1-7rf78\" (UID: \"7c29377d-9bd8-4eb5-8777-3ae34021588e\") " pod="openstack/telemetry-openstack-openstack-cell1-7rf78" Nov 23 16:59:49 crc kubenswrapper[5050]: I1123 16:59:49.800181 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/7c29377d-9bd8-4eb5-8777-3ae34021588e-ceph\") pod \"telemetry-openstack-openstack-cell1-7rf78\" (UID: \"7c29377d-9bd8-4eb5-8777-3ae34021588e\") " pod="openstack/telemetry-openstack-openstack-cell1-7rf78" Nov 23 16:59:49 crc kubenswrapper[5050]: I1123 16:59:49.801267 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/7c29377d-9bd8-4eb5-8777-3ae34021588e-ceilometer-compute-config-data-1\") pod \"telemetry-openstack-openstack-cell1-7rf78\" (UID: \"7c29377d-9bd8-4eb5-8777-3ae34021588e\") " pod="openstack/telemetry-openstack-openstack-cell1-7rf78" Nov 23 16:59:49 crc kubenswrapper[5050]: I1123 16:59:49.801328 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7c29377d-9bd8-4eb5-8777-3ae34021588e-telemetry-combined-ca-bundle\") pod \"telemetry-openstack-openstack-cell1-7rf78\" (UID: \"7c29377d-9bd8-4eb5-8777-3ae34021588e\") " pod="openstack/telemetry-openstack-openstack-cell1-7rf78" Nov 23 16:59:49 crc kubenswrapper[5050]: I1123 16:59:49.805235 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7c29377d-9bd8-4eb5-8777-3ae34021588e-inventory\") pod \"telemetry-openstack-openstack-cell1-7rf78\" (UID: \"7c29377d-9bd8-4eb5-8777-3ae34021588e\") " pod="openstack/telemetry-openstack-openstack-cell1-7rf78" Nov 23 16:59:49 crc kubenswrapper[5050]: I1123 16:59:49.805996 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7c29377d-9bd8-4eb5-8777-3ae34021588e-ssh-key\") pod \"telemetry-openstack-openstack-cell1-7rf78\" (UID: \"7c29377d-9bd8-4eb5-8777-3ae34021588e\") " 
pod="openstack/telemetry-openstack-openstack-cell1-7rf78" Nov 23 16:59:49 crc kubenswrapper[5050]: I1123 16:59:49.806341 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/7c29377d-9bd8-4eb5-8777-3ae34021588e-ceilometer-compute-config-data-2\") pod \"telemetry-openstack-openstack-cell1-7rf78\" (UID: \"7c29377d-9bd8-4eb5-8777-3ae34021588e\") " pod="openstack/telemetry-openstack-openstack-cell1-7rf78" Nov 23 16:59:49 crc kubenswrapper[5050]: I1123 16:59:49.830220 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r5nj2\" (UniqueName: \"kubernetes.io/projected/7c29377d-9bd8-4eb5-8777-3ae34021588e-kube-api-access-r5nj2\") pod \"telemetry-openstack-openstack-cell1-7rf78\" (UID: \"7c29377d-9bd8-4eb5-8777-3ae34021588e\") " pod="openstack/telemetry-openstack-openstack-cell1-7rf78" Nov 23 16:59:49 crc kubenswrapper[5050]: I1123 16:59:49.899773 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-openstack-openstack-cell1-7rf78" Nov 23 16:59:50 crc kubenswrapper[5050]: I1123 16:59:50.567440 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-openstack-openstack-cell1-7rf78"] Nov 23 16:59:51 crc kubenswrapper[5050]: I1123 16:59:51.424103 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-openstack-openstack-cell1-7rf78" event={"ID":"7c29377d-9bd8-4eb5-8777-3ae34021588e","Type":"ContainerStarted","Data":"872d96ea9a86d181e807b00a45fb8cb7c75ff8aaf69bd07284edfec27b62f03d"} Nov 23 16:59:52 crc kubenswrapper[5050]: I1123 16:59:52.439252 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-openstack-openstack-cell1-7rf78" event={"ID":"7c29377d-9bd8-4eb5-8777-3ae34021588e","Type":"ContainerStarted","Data":"09a4db6e69319f303e1c02c7b2a0ebe94aba2b70f24219bcab02f5b7a32f5939"} Nov 23 16:59:52 crc kubenswrapper[5050]: I1123 16:59:52.486954 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/telemetry-openstack-openstack-cell1-7rf78" podStartSLOduration=2.9329832319999998 podStartE2EDuration="3.486925441s" podCreationTimestamp="2025-11-23 16:59:49 +0000 UTC" firstStartedPulling="2025-11-23 16:59:50.563754104 +0000 UTC m=+8285.730750589" lastFinishedPulling="2025-11-23 16:59:51.117696303 +0000 UTC m=+8286.284692798" observedRunningTime="2025-11-23 16:59:52.469425257 +0000 UTC m=+8287.636421752" watchObservedRunningTime="2025-11-23 16:59:52.486925441 +0000 UTC m=+8287.653921936" Nov 23 16:59:59 crc kubenswrapper[5050]: I1123 16:59:59.224684 5050 patch_prober.go:28] interesting pod/machine-config-daemon-hlrlq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 23 16:59:59 crc kubenswrapper[5050]: I1123 16:59:59.225639 5050 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 23 17:00:00 crc kubenswrapper[5050]: I1123 17:00:00.146143 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29398620-s5jqt"] Nov 23 17:00:00 crc 
kubenswrapper[5050]: I1123 17:00:00.148037 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29398620-s5jqt" Nov 23 17:00:00 crc kubenswrapper[5050]: I1123 17:00:00.151913 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 23 17:00:00 crc kubenswrapper[5050]: I1123 17:00:00.158039 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 23 17:00:00 crc kubenswrapper[5050]: I1123 17:00:00.173720 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29398620-s5jqt"] Nov 23 17:00:00 crc kubenswrapper[5050]: I1123 17:00:00.311044 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bzncp\" (UniqueName: \"kubernetes.io/projected/0e417ee4-1474-40d0-9b92-9f51896b6761-kube-api-access-bzncp\") pod \"collect-profiles-29398620-s5jqt\" (UID: \"0e417ee4-1474-40d0-9b92-9f51896b6761\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29398620-s5jqt" Nov 23 17:00:00 crc kubenswrapper[5050]: I1123 17:00:00.311107 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0e417ee4-1474-40d0-9b92-9f51896b6761-secret-volume\") pod \"collect-profiles-29398620-s5jqt\" (UID: \"0e417ee4-1474-40d0-9b92-9f51896b6761\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29398620-s5jqt" Nov 23 17:00:00 crc kubenswrapper[5050]: I1123 17:00:00.311176 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0e417ee4-1474-40d0-9b92-9f51896b6761-config-volume\") pod \"collect-profiles-29398620-s5jqt\" (UID: \"0e417ee4-1474-40d0-9b92-9f51896b6761\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29398620-s5jqt" Nov 23 17:00:00 crc kubenswrapper[5050]: I1123 17:00:00.413415 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bzncp\" (UniqueName: \"kubernetes.io/projected/0e417ee4-1474-40d0-9b92-9f51896b6761-kube-api-access-bzncp\") pod \"collect-profiles-29398620-s5jqt\" (UID: \"0e417ee4-1474-40d0-9b92-9f51896b6761\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29398620-s5jqt" Nov 23 17:00:00 crc kubenswrapper[5050]: I1123 17:00:00.413916 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0e417ee4-1474-40d0-9b92-9f51896b6761-secret-volume\") pod \"collect-profiles-29398620-s5jqt\" (UID: \"0e417ee4-1474-40d0-9b92-9f51896b6761\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29398620-s5jqt" Nov 23 17:00:00 crc kubenswrapper[5050]: I1123 17:00:00.414109 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0e417ee4-1474-40d0-9b92-9f51896b6761-config-volume\") pod \"collect-profiles-29398620-s5jqt\" (UID: \"0e417ee4-1474-40d0-9b92-9f51896b6761\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29398620-s5jqt" Nov 23 17:00:00 crc kubenswrapper[5050]: I1123 17:00:00.415304 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" 
(UniqueName: \"kubernetes.io/configmap/0e417ee4-1474-40d0-9b92-9f51896b6761-config-volume\") pod \"collect-profiles-29398620-s5jqt\" (UID: \"0e417ee4-1474-40d0-9b92-9f51896b6761\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29398620-s5jqt" Nov 23 17:00:00 crc kubenswrapper[5050]: I1123 17:00:00.425612 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0e417ee4-1474-40d0-9b92-9f51896b6761-secret-volume\") pod \"collect-profiles-29398620-s5jqt\" (UID: \"0e417ee4-1474-40d0-9b92-9f51896b6761\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29398620-s5jqt" Nov 23 17:00:00 crc kubenswrapper[5050]: I1123 17:00:00.434580 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bzncp\" (UniqueName: \"kubernetes.io/projected/0e417ee4-1474-40d0-9b92-9f51896b6761-kube-api-access-bzncp\") pod \"collect-profiles-29398620-s5jqt\" (UID: \"0e417ee4-1474-40d0-9b92-9f51896b6761\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29398620-s5jqt" Nov 23 17:00:00 crc kubenswrapper[5050]: I1123 17:00:00.476206 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29398620-s5jqt" Nov 23 17:00:01 crc kubenswrapper[5050]: W1123 17:00:01.014227 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0e417ee4_1474_40d0_9b92_9f51896b6761.slice/crio-297360ca7036b0552b747d079496ec5f32cc20047618c1623feb8fdce6666de7 WatchSource:0}: Error finding container 297360ca7036b0552b747d079496ec5f32cc20047618c1623feb8fdce6666de7: Status 404 returned error can't find the container with id 297360ca7036b0552b747d079496ec5f32cc20047618c1623feb8fdce6666de7 Nov 23 17:00:01 crc kubenswrapper[5050]: I1123 17:00:01.022984 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29398620-s5jqt"] Nov 23 17:00:01 crc kubenswrapper[5050]: I1123 17:00:01.559326 5050 generic.go:334] "Generic (PLEG): container finished" podID="0e417ee4-1474-40d0-9b92-9f51896b6761" containerID="5f263a6f9ffbdbf9ff55d311c2cdb48072446e5e529d6e1d083d3dc09560015d" exitCode=0 Nov 23 17:00:01 crc kubenswrapper[5050]: I1123 17:00:01.563612 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29398620-s5jqt" event={"ID":"0e417ee4-1474-40d0-9b92-9f51896b6761","Type":"ContainerDied","Data":"5f263a6f9ffbdbf9ff55d311c2cdb48072446e5e529d6e1d083d3dc09560015d"} Nov 23 17:00:01 crc kubenswrapper[5050]: I1123 17:00:01.563701 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29398620-s5jqt" event={"ID":"0e417ee4-1474-40d0-9b92-9f51896b6761","Type":"ContainerStarted","Data":"297360ca7036b0552b747d079496ec5f32cc20047618c1623feb8fdce6666de7"} Nov 23 17:00:03 crc kubenswrapper[5050]: I1123 17:00:03.008055 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29398620-s5jqt" Nov 23 17:00:03 crc kubenswrapper[5050]: I1123 17:00:03.193232 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0e417ee4-1474-40d0-9b92-9f51896b6761-config-volume\") pod \"0e417ee4-1474-40d0-9b92-9f51896b6761\" (UID: \"0e417ee4-1474-40d0-9b92-9f51896b6761\") " Nov 23 17:00:03 crc kubenswrapper[5050]: I1123 17:00:03.193336 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0e417ee4-1474-40d0-9b92-9f51896b6761-secret-volume\") pod \"0e417ee4-1474-40d0-9b92-9f51896b6761\" (UID: \"0e417ee4-1474-40d0-9b92-9f51896b6761\") " Nov 23 17:00:03 crc kubenswrapper[5050]: I1123 17:00:03.193655 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bzncp\" (UniqueName: \"kubernetes.io/projected/0e417ee4-1474-40d0-9b92-9f51896b6761-kube-api-access-bzncp\") pod \"0e417ee4-1474-40d0-9b92-9f51896b6761\" (UID: \"0e417ee4-1474-40d0-9b92-9f51896b6761\") " Nov 23 17:00:03 crc kubenswrapper[5050]: I1123 17:00:03.194656 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0e417ee4-1474-40d0-9b92-9f51896b6761-config-volume" (OuterVolumeSpecName: "config-volume") pod "0e417ee4-1474-40d0-9b92-9f51896b6761" (UID: "0e417ee4-1474-40d0-9b92-9f51896b6761"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 17:00:03 crc kubenswrapper[5050]: I1123 17:00:03.201219 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0e417ee4-1474-40d0-9b92-9f51896b6761-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "0e417ee4-1474-40d0-9b92-9f51896b6761" (UID: "0e417ee4-1474-40d0-9b92-9f51896b6761"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 17:00:03 crc kubenswrapper[5050]: I1123 17:00:03.201525 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0e417ee4-1474-40d0-9b92-9f51896b6761-kube-api-access-bzncp" (OuterVolumeSpecName: "kube-api-access-bzncp") pod "0e417ee4-1474-40d0-9b92-9f51896b6761" (UID: "0e417ee4-1474-40d0-9b92-9f51896b6761"). InnerVolumeSpecName "kube-api-access-bzncp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 17:00:03 crc kubenswrapper[5050]: I1123 17:00:03.296393 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bzncp\" (UniqueName: \"kubernetes.io/projected/0e417ee4-1474-40d0-9b92-9f51896b6761-kube-api-access-bzncp\") on node \"crc\" DevicePath \"\"" Nov 23 17:00:03 crc kubenswrapper[5050]: I1123 17:00:03.296476 5050 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0e417ee4-1474-40d0-9b92-9f51896b6761-config-volume\") on node \"crc\" DevicePath \"\"" Nov 23 17:00:03 crc kubenswrapper[5050]: I1123 17:00:03.296491 5050 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0e417ee4-1474-40d0-9b92-9f51896b6761-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 23 17:00:03 crc kubenswrapper[5050]: I1123 17:00:03.588863 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29398620-s5jqt" event={"ID":"0e417ee4-1474-40d0-9b92-9f51896b6761","Type":"ContainerDied","Data":"297360ca7036b0552b747d079496ec5f32cc20047618c1623feb8fdce6666de7"} Nov 23 17:00:03 crc kubenswrapper[5050]: I1123 17:00:03.588945 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="297360ca7036b0552b747d079496ec5f32cc20047618c1623feb8fdce6666de7" Nov 23 17:00:03 crc kubenswrapper[5050]: I1123 17:00:03.588949 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29398620-s5jqt" Nov 23 17:00:04 crc kubenswrapper[5050]: I1123 17:00:04.113406 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29398575-jcbhs"] Nov 23 17:00:04 crc kubenswrapper[5050]: I1123 17:00:04.124612 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29398575-jcbhs"] Nov 23 17:00:05 crc kubenswrapper[5050]: I1123 17:00:05.578957 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6f91221e-c49b-4426-8db5-94a008eb456d" path="/var/lib/kubelet/pods/6f91221e-c49b-4426-8db5-94a008eb456d/volumes" Nov 23 17:00:20 crc kubenswrapper[5050]: I1123 17:00:20.962209 5050 scope.go:117] "RemoveContainer" containerID="82debed6bd07622fc11a45346590cf2ab8a1142f6542b5ef408f9b70aa6d77bc" Nov 23 17:00:29 crc kubenswrapper[5050]: I1123 17:00:29.224166 5050 patch_prober.go:28] interesting pod/machine-config-daemon-hlrlq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 23 17:00:29 crc kubenswrapper[5050]: I1123 17:00:29.225376 5050 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 23 17:00:29 crc kubenswrapper[5050]: I1123 17:00:29.225503 5050 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" Nov 23 17:00:29 crc kubenswrapper[5050]: I1123 17:00:29.227202 5050 kuberuntime_manager.go:1027] "Message for Container of pod" 
containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"d4e53edfc30e031f9873726e3c9f60ebf48715860feb40686c4c363c778e0689"} pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 23 17:00:29 crc kubenswrapper[5050]: I1123 17:00:29.227350 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" containerID="cri-o://d4e53edfc30e031f9873726e3c9f60ebf48715860feb40686c4c363c778e0689" gracePeriod=600 Nov 23 17:00:29 crc kubenswrapper[5050]: E1123 17:00:29.355084 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 17:00:29 crc kubenswrapper[5050]: E1123 17:00:29.434360 5050 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1d998909_9470_47ef_87e8_b34f0473682f.slice/crio-conmon-d4e53edfc30e031f9873726e3c9f60ebf48715860feb40686c4c363c778e0689.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1d998909_9470_47ef_87e8_b34f0473682f.slice/crio-d4e53edfc30e031f9873726e3c9f60ebf48715860feb40686c4c363c778e0689.scope\": RecentStats: unable to find data in memory cache]" Nov 23 17:00:30 crc kubenswrapper[5050]: I1123 17:00:30.011512 5050 generic.go:334] "Generic (PLEG): container finished" podID="1d998909-9470-47ef-87e8-b34f0473682f" containerID="d4e53edfc30e031f9873726e3c9f60ebf48715860feb40686c4c363c778e0689" exitCode=0 Nov 23 17:00:30 crc kubenswrapper[5050]: I1123 17:00:30.011588 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" event={"ID":"1d998909-9470-47ef-87e8-b34f0473682f","Type":"ContainerDied","Data":"d4e53edfc30e031f9873726e3c9f60ebf48715860feb40686c4c363c778e0689"} Nov 23 17:00:30 crc kubenswrapper[5050]: I1123 17:00:30.011965 5050 scope.go:117] "RemoveContainer" containerID="697b9ae232bf75544badaa8e6059a4b2bd0f542e31a482e7418cd61ae786a3fd" Nov 23 17:00:30 crc kubenswrapper[5050]: I1123 17:00:30.021291 5050 scope.go:117] "RemoveContainer" containerID="d4e53edfc30e031f9873726e3c9f60ebf48715860feb40686c4c363c778e0689" Nov 23 17:00:30 crc kubenswrapper[5050]: E1123 17:00:30.022208 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 17:00:40 crc kubenswrapper[5050]: I1123 17:00:40.549164 5050 scope.go:117] "RemoveContainer" containerID="d4e53edfc30e031f9873726e3c9f60ebf48715860feb40686c4c363c778e0689" Nov 23 17:00:40 crc kubenswrapper[5050]: E1123 17:00:40.550557 5050 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 17:00:53 crc kubenswrapper[5050]: I1123 17:00:53.549148 5050 scope.go:117] "RemoveContainer" containerID="d4e53edfc30e031f9873726e3c9f60ebf48715860feb40686c4c363c778e0689" Nov 23 17:00:53 crc kubenswrapper[5050]: E1123 17:00:53.550874 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 17:01:00 crc kubenswrapper[5050]: I1123 17:01:00.111635 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-8svs6"] Nov 23 17:01:00 crc kubenswrapper[5050]: E1123 17:01:00.113287 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0e417ee4-1474-40d0-9b92-9f51896b6761" containerName="collect-profiles" Nov 23 17:01:00 crc kubenswrapper[5050]: I1123 17:01:00.113309 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="0e417ee4-1474-40d0-9b92-9f51896b6761" containerName="collect-profiles" Nov 23 17:01:00 crc kubenswrapper[5050]: I1123 17:01:00.113783 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="0e417ee4-1474-40d0-9b92-9f51896b6761" containerName="collect-profiles" Nov 23 17:01:00 crc kubenswrapper[5050]: I1123 17:01:00.117036 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8svs6" Nov 23 17:01:00 crc kubenswrapper[5050]: I1123 17:01:00.125312 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-8svs6"] Nov 23 17:01:00 crc kubenswrapper[5050]: I1123 17:01:00.226100 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-cron-29398621-bdwd7"] Nov 23 17:01:00 crc kubenswrapper[5050]: I1123 17:01:00.228784 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29398621-bdwd7" Nov 23 17:01:00 crc kubenswrapper[5050]: I1123 17:01:00.236759 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dtwwf\" (UniqueName: \"kubernetes.io/projected/29c74501-18ab-4bab-8811-9a766efc6b17-kube-api-access-dtwwf\") pod \"community-operators-8svs6\" (UID: \"29c74501-18ab-4bab-8811-9a766efc6b17\") " pod="openshift-marketplace/community-operators-8svs6" Nov 23 17:01:00 crc kubenswrapper[5050]: I1123 17:01:00.236894 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/29c74501-18ab-4bab-8811-9a766efc6b17-catalog-content\") pod \"community-operators-8svs6\" (UID: \"29c74501-18ab-4bab-8811-9a766efc6b17\") " pod="openshift-marketplace/community-operators-8svs6" Nov 23 17:01:00 crc kubenswrapper[5050]: I1123 17:01:00.237069 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/29c74501-18ab-4bab-8811-9a766efc6b17-utilities\") pod \"community-operators-8svs6\" (UID: \"29c74501-18ab-4bab-8811-9a766efc6b17\") " pod="openshift-marketplace/community-operators-8svs6" Nov 23 17:01:00 crc kubenswrapper[5050]: I1123 17:01:00.270615 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29398621-bdwd7"] Nov 23 17:01:00 crc kubenswrapper[5050]: I1123 17:01:00.338821 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/054236c5-b5b5-4794-bb9a-f52f18150a9e-config-data\") pod \"keystone-cron-29398621-bdwd7\" (UID: \"054236c5-b5b5-4794-bb9a-f52f18150a9e\") " pod="openstack/keystone-cron-29398621-bdwd7" Nov 23 17:01:00 crc kubenswrapper[5050]: I1123 17:01:00.338900 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6tzsp\" (UniqueName: \"kubernetes.io/projected/054236c5-b5b5-4794-bb9a-f52f18150a9e-kube-api-access-6tzsp\") pod \"keystone-cron-29398621-bdwd7\" (UID: \"054236c5-b5b5-4794-bb9a-f52f18150a9e\") " pod="openstack/keystone-cron-29398621-bdwd7" Nov 23 17:01:00 crc kubenswrapper[5050]: I1123 17:01:00.338979 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/29c74501-18ab-4bab-8811-9a766efc6b17-utilities\") pod \"community-operators-8svs6\" (UID: \"29c74501-18ab-4bab-8811-9a766efc6b17\") " pod="openshift-marketplace/community-operators-8svs6" Nov 23 17:01:00 crc kubenswrapper[5050]: I1123 17:01:00.339027 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dtwwf\" (UniqueName: \"kubernetes.io/projected/29c74501-18ab-4bab-8811-9a766efc6b17-kube-api-access-dtwwf\") pod \"community-operators-8svs6\" (UID: \"29c74501-18ab-4bab-8811-9a766efc6b17\") " pod="openshift-marketplace/community-operators-8svs6" Nov 23 17:01:00 crc kubenswrapper[5050]: I1123 17:01:00.339099 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/29c74501-18ab-4bab-8811-9a766efc6b17-catalog-content\") pod \"community-operators-8svs6\" (UID: \"29c74501-18ab-4bab-8811-9a766efc6b17\") " pod="openshift-marketplace/community-operators-8svs6" Nov 23 17:01:00 crc kubenswrapper[5050]: I1123 
17:01:00.339138 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/054236c5-b5b5-4794-bb9a-f52f18150a9e-fernet-keys\") pod \"keystone-cron-29398621-bdwd7\" (UID: \"054236c5-b5b5-4794-bb9a-f52f18150a9e\") " pod="openstack/keystone-cron-29398621-bdwd7" Nov 23 17:01:00 crc kubenswrapper[5050]: I1123 17:01:00.339159 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/054236c5-b5b5-4794-bb9a-f52f18150a9e-combined-ca-bundle\") pod \"keystone-cron-29398621-bdwd7\" (UID: \"054236c5-b5b5-4794-bb9a-f52f18150a9e\") " pod="openstack/keystone-cron-29398621-bdwd7" Nov 23 17:01:00 crc kubenswrapper[5050]: I1123 17:01:00.339678 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/29c74501-18ab-4bab-8811-9a766efc6b17-utilities\") pod \"community-operators-8svs6\" (UID: \"29c74501-18ab-4bab-8811-9a766efc6b17\") " pod="openshift-marketplace/community-operators-8svs6" Nov 23 17:01:00 crc kubenswrapper[5050]: I1123 17:01:00.340188 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/29c74501-18ab-4bab-8811-9a766efc6b17-catalog-content\") pod \"community-operators-8svs6\" (UID: \"29c74501-18ab-4bab-8811-9a766efc6b17\") " pod="openshift-marketplace/community-operators-8svs6" Nov 23 17:01:00 crc kubenswrapper[5050]: I1123 17:01:00.365396 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dtwwf\" (UniqueName: \"kubernetes.io/projected/29c74501-18ab-4bab-8811-9a766efc6b17-kube-api-access-dtwwf\") pod \"community-operators-8svs6\" (UID: \"29c74501-18ab-4bab-8811-9a766efc6b17\") " pod="openshift-marketplace/community-operators-8svs6" Nov 23 17:01:00 crc kubenswrapper[5050]: I1123 17:01:00.441196 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/054236c5-b5b5-4794-bb9a-f52f18150a9e-fernet-keys\") pod \"keystone-cron-29398621-bdwd7\" (UID: \"054236c5-b5b5-4794-bb9a-f52f18150a9e\") " pod="openstack/keystone-cron-29398621-bdwd7" Nov 23 17:01:00 crc kubenswrapper[5050]: I1123 17:01:00.441258 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/054236c5-b5b5-4794-bb9a-f52f18150a9e-combined-ca-bundle\") pod \"keystone-cron-29398621-bdwd7\" (UID: \"054236c5-b5b5-4794-bb9a-f52f18150a9e\") " pod="openstack/keystone-cron-29398621-bdwd7" Nov 23 17:01:00 crc kubenswrapper[5050]: I1123 17:01:00.441314 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/054236c5-b5b5-4794-bb9a-f52f18150a9e-config-data\") pod \"keystone-cron-29398621-bdwd7\" (UID: \"054236c5-b5b5-4794-bb9a-f52f18150a9e\") " pod="openstack/keystone-cron-29398621-bdwd7" Nov 23 17:01:00 crc kubenswrapper[5050]: I1123 17:01:00.441366 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6tzsp\" (UniqueName: \"kubernetes.io/projected/054236c5-b5b5-4794-bb9a-f52f18150a9e-kube-api-access-6tzsp\") pod \"keystone-cron-29398621-bdwd7\" (UID: \"054236c5-b5b5-4794-bb9a-f52f18150a9e\") " pod="openstack/keystone-cron-29398621-bdwd7" Nov 23 17:01:00 crc kubenswrapper[5050]: I1123 17:01:00.445510 
5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/054236c5-b5b5-4794-bb9a-f52f18150a9e-fernet-keys\") pod \"keystone-cron-29398621-bdwd7\" (UID: \"054236c5-b5b5-4794-bb9a-f52f18150a9e\") " pod="openstack/keystone-cron-29398621-bdwd7" Nov 23 17:01:00 crc kubenswrapper[5050]: I1123 17:01:00.445523 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/054236c5-b5b5-4794-bb9a-f52f18150a9e-combined-ca-bundle\") pod \"keystone-cron-29398621-bdwd7\" (UID: \"054236c5-b5b5-4794-bb9a-f52f18150a9e\") " pod="openstack/keystone-cron-29398621-bdwd7" Nov 23 17:01:00 crc kubenswrapper[5050]: I1123 17:01:00.447491 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/054236c5-b5b5-4794-bb9a-f52f18150a9e-config-data\") pod \"keystone-cron-29398621-bdwd7\" (UID: \"054236c5-b5b5-4794-bb9a-f52f18150a9e\") " pod="openstack/keystone-cron-29398621-bdwd7" Nov 23 17:01:00 crc kubenswrapper[5050]: I1123 17:01:00.464717 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6tzsp\" (UniqueName: \"kubernetes.io/projected/054236c5-b5b5-4794-bb9a-f52f18150a9e-kube-api-access-6tzsp\") pod \"keystone-cron-29398621-bdwd7\" (UID: \"054236c5-b5b5-4794-bb9a-f52f18150a9e\") " pod="openstack/keystone-cron-29398621-bdwd7" Nov 23 17:01:00 crc kubenswrapper[5050]: I1123 17:01:00.474498 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8svs6" Nov 23 17:01:00 crc kubenswrapper[5050]: I1123 17:01:00.558537 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29398621-bdwd7" Nov 23 17:01:01 crc kubenswrapper[5050]: I1123 17:01:01.152436 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-8svs6"] Nov 23 17:01:01 crc kubenswrapper[5050]: I1123 17:01:01.302289 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29398621-bdwd7"] Nov 23 17:01:01 crc kubenswrapper[5050]: I1123 17:01:01.460566 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29398621-bdwd7" event={"ID":"054236c5-b5b5-4794-bb9a-f52f18150a9e","Type":"ContainerStarted","Data":"1a577230a2a9734bfd9fc4da062190f0155b94bd0a1424db2e25a4dc3f6fc16e"} Nov 23 17:01:01 crc kubenswrapper[5050]: I1123 17:01:01.462568 5050 generic.go:334] "Generic (PLEG): container finished" podID="29c74501-18ab-4bab-8811-9a766efc6b17" containerID="081a41b54f66bb9ac738e3f68bd09f604355e2b3720db10d1d18f4e654966dbe" exitCode=0 Nov 23 17:01:01 crc kubenswrapper[5050]: I1123 17:01:01.462602 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8svs6" event={"ID":"29c74501-18ab-4bab-8811-9a766efc6b17","Type":"ContainerDied","Data":"081a41b54f66bb9ac738e3f68bd09f604355e2b3720db10d1d18f4e654966dbe"} Nov 23 17:01:01 crc kubenswrapper[5050]: I1123 17:01:01.462618 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8svs6" event={"ID":"29c74501-18ab-4bab-8811-9a766efc6b17","Type":"ContainerStarted","Data":"26a957cfe427dfb364ed3d1d7babc3d36895e3cc47bdafe7a76ad8b22da842b4"} Nov 23 17:01:02 crc kubenswrapper[5050]: I1123 17:01:02.518884 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29398621-bdwd7" 
event={"ID":"054236c5-b5b5-4794-bb9a-f52f18150a9e","Type":"ContainerStarted","Data":"61dcae38269b1b5f488fda24b301e6080a8dca792926b19d681c0160cfd37352"} Nov 23 17:01:02 crc kubenswrapper[5050]: I1123 17:01:02.539034 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-cron-29398621-bdwd7" podStartSLOduration=2.539011569 podStartE2EDuration="2.539011569s" podCreationTimestamp="2025-11-23 17:01:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 17:01:02.537827586 +0000 UTC m=+8357.704824071" watchObservedRunningTime="2025-11-23 17:01:02.539011569 +0000 UTC m=+8357.706008054" Nov 23 17:01:03 crc kubenswrapper[5050]: I1123 17:01:03.532234 5050 generic.go:334] "Generic (PLEG): container finished" podID="29c74501-18ab-4bab-8811-9a766efc6b17" containerID="9b623c36f9b00a2a152f9633dd338db03e05c13778d4ba05b82d69e46ec2d8c2" exitCode=0 Nov 23 17:01:03 crc kubenswrapper[5050]: I1123 17:01:03.532307 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8svs6" event={"ID":"29c74501-18ab-4bab-8811-9a766efc6b17","Type":"ContainerDied","Data":"9b623c36f9b00a2a152f9633dd338db03e05c13778d4ba05b82d69e46ec2d8c2"} Nov 23 17:01:04 crc kubenswrapper[5050]: I1123 17:01:04.551518 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8svs6" event={"ID":"29c74501-18ab-4bab-8811-9a766efc6b17","Type":"ContainerStarted","Data":"89b71b6df46906962dab5860e1d7e8de61e82e4e12f914600a0dcb1e3683a181"} Nov 23 17:01:04 crc kubenswrapper[5050]: I1123 17:01:04.588609 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-8svs6" podStartSLOduration=2.139449268 podStartE2EDuration="4.588586014s" podCreationTimestamp="2025-11-23 17:01:00 +0000 UTC" firstStartedPulling="2025-11-23 17:01:01.465211263 +0000 UTC m=+8356.632207748" lastFinishedPulling="2025-11-23 17:01:03.914347999 +0000 UTC m=+8359.081344494" observedRunningTime="2025-11-23 17:01:04.578819039 +0000 UTC m=+8359.745815544" watchObservedRunningTime="2025-11-23 17:01:04.588586014 +0000 UTC m=+8359.755582499" Nov 23 17:01:05 crc kubenswrapper[5050]: I1123 17:01:05.567679 5050 generic.go:334] "Generic (PLEG): container finished" podID="054236c5-b5b5-4794-bb9a-f52f18150a9e" containerID="61dcae38269b1b5f488fda24b301e6080a8dca792926b19d681c0160cfd37352" exitCode=0 Nov 23 17:01:05 crc kubenswrapper[5050]: I1123 17:01:05.567777 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29398621-bdwd7" event={"ID":"054236c5-b5b5-4794-bb9a-f52f18150a9e","Type":"ContainerDied","Data":"61dcae38269b1b5f488fda24b301e6080a8dca792926b19d681c0160cfd37352"} Nov 23 17:01:07 crc kubenswrapper[5050]: I1123 17:01:07.027650 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29398621-bdwd7" Nov 23 17:01:07 crc kubenswrapper[5050]: I1123 17:01:07.134338 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/054236c5-b5b5-4794-bb9a-f52f18150a9e-config-data\") pod \"054236c5-b5b5-4794-bb9a-f52f18150a9e\" (UID: \"054236c5-b5b5-4794-bb9a-f52f18150a9e\") " Nov 23 17:01:07 crc kubenswrapper[5050]: I1123 17:01:07.134529 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/054236c5-b5b5-4794-bb9a-f52f18150a9e-combined-ca-bundle\") pod \"054236c5-b5b5-4794-bb9a-f52f18150a9e\" (UID: \"054236c5-b5b5-4794-bb9a-f52f18150a9e\") " Nov 23 17:01:07 crc kubenswrapper[5050]: I1123 17:01:07.134633 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6tzsp\" (UniqueName: \"kubernetes.io/projected/054236c5-b5b5-4794-bb9a-f52f18150a9e-kube-api-access-6tzsp\") pod \"054236c5-b5b5-4794-bb9a-f52f18150a9e\" (UID: \"054236c5-b5b5-4794-bb9a-f52f18150a9e\") " Nov 23 17:01:07 crc kubenswrapper[5050]: I1123 17:01:07.134814 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/054236c5-b5b5-4794-bb9a-f52f18150a9e-fernet-keys\") pod \"054236c5-b5b5-4794-bb9a-f52f18150a9e\" (UID: \"054236c5-b5b5-4794-bb9a-f52f18150a9e\") " Nov 23 17:01:07 crc kubenswrapper[5050]: I1123 17:01:07.141337 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/054236c5-b5b5-4794-bb9a-f52f18150a9e-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "054236c5-b5b5-4794-bb9a-f52f18150a9e" (UID: "054236c5-b5b5-4794-bb9a-f52f18150a9e"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 17:01:07 crc kubenswrapper[5050]: I1123 17:01:07.157008 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/054236c5-b5b5-4794-bb9a-f52f18150a9e-kube-api-access-6tzsp" (OuterVolumeSpecName: "kube-api-access-6tzsp") pod "054236c5-b5b5-4794-bb9a-f52f18150a9e" (UID: "054236c5-b5b5-4794-bb9a-f52f18150a9e"). InnerVolumeSpecName "kube-api-access-6tzsp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 17:01:07 crc kubenswrapper[5050]: I1123 17:01:07.175637 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/054236c5-b5b5-4794-bb9a-f52f18150a9e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "054236c5-b5b5-4794-bb9a-f52f18150a9e" (UID: "054236c5-b5b5-4794-bb9a-f52f18150a9e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 17:01:07 crc kubenswrapper[5050]: I1123 17:01:07.207494 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/054236c5-b5b5-4794-bb9a-f52f18150a9e-config-data" (OuterVolumeSpecName: "config-data") pod "054236c5-b5b5-4794-bb9a-f52f18150a9e" (UID: "054236c5-b5b5-4794-bb9a-f52f18150a9e"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 17:01:07 crc kubenswrapper[5050]: I1123 17:01:07.238743 5050 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/054236c5-b5b5-4794-bb9a-f52f18150a9e-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 23 17:01:07 crc kubenswrapper[5050]: I1123 17:01:07.238799 5050 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/054236c5-b5b5-4794-bb9a-f52f18150a9e-config-data\") on node \"crc\" DevicePath \"\"" Nov 23 17:01:07 crc kubenswrapper[5050]: I1123 17:01:07.238811 5050 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/054236c5-b5b5-4794-bb9a-f52f18150a9e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 17:01:07 crc kubenswrapper[5050]: I1123 17:01:07.238834 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6tzsp\" (UniqueName: \"kubernetes.io/projected/054236c5-b5b5-4794-bb9a-f52f18150a9e-kube-api-access-6tzsp\") on node \"crc\" DevicePath \"\"" Nov 23 17:01:07 crc kubenswrapper[5050]: I1123 17:01:07.594950 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29398621-bdwd7" event={"ID":"054236c5-b5b5-4794-bb9a-f52f18150a9e","Type":"ContainerDied","Data":"1a577230a2a9734bfd9fc4da062190f0155b94bd0a1424db2e25a4dc3f6fc16e"} Nov 23 17:01:07 crc kubenswrapper[5050]: I1123 17:01:07.595006 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1a577230a2a9734bfd9fc4da062190f0155b94bd0a1424db2e25a4dc3f6fc16e" Nov 23 17:01:07 crc kubenswrapper[5050]: I1123 17:01:07.595265 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29398621-bdwd7" Nov 23 17:01:08 crc kubenswrapper[5050]: I1123 17:01:08.548754 5050 scope.go:117] "RemoveContainer" containerID="d4e53edfc30e031f9873726e3c9f60ebf48715860feb40686c4c363c778e0689" Nov 23 17:01:08 crc kubenswrapper[5050]: E1123 17:01:08.549537 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 17:01:10 crc kubenswrapper[5050]: I1123 17:01:10.474967 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-8svs6" Nov 23 17:01:10 crc kubenswrapper[5050]: I1123 17:01:10.475728 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-8svs6" Nov 23 17:01:10 crc kubenswrapper[5050]: I1123 17:01:10.556922 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-8svs6" Nov 23 17:01:10 crc kubenswrapper[5050]: I1123 17:01:10.719099 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-8svs6" Nov 23 17:01:10 crc kubenswrapper[5050]: I1123 17:01:10.814158 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-8svs6"] Nov 23 17:01:12 crc kubenswrapper[5050]: I1123 17:01:12.686112 5050 kuberuntime_container.go:808] 
"Killing container with a grace period" pod="openshift-marketplace/community-operators-8svs6" podUID="29c74501-18ab-4bab-8811-9a766efc6b17" containerName="registry-server" containerID="cri-o://89b71b6df46906962dab5860e1d7e8de61e82e4e12f914600a0dcb1e3683a181" gracePeriod=2 Nov 23 17:01:13 crc kubenswrapper[5050]: I1123 17:01:13.297984 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8svs6" Nov 23 17:01:13 crc kubenswrapper[5050]: I1123 17:01:13.402575 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dtwwf\" (UniqueName: \"kubernetes.io/projected/29c74501-18ab-4bab-8811-9a766efc6b17-kube-api-access-dtwwf\") pod \"29c74501-18ab-4bab-8811-9a766efc6b17\" (UID: \"29c74501-18ab-4bab-8811-9a766efc6b17\") " Nov 23 17:01:13 crc kubenswrapper[5050]: I1123 17:01:13.402791 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/29c74501-18ab-4bab-8811-9a766efc6b17-catalog-content\") pod \"29c74501-18ab-4bab-8811-9a766efc6b17\" (UID: \"29c74501-18ab-4bab-8811-9a766efc6b17\") " Nov 23 17:01:13 crc kubenswrapper[5050]: I1123 17:01:13.403551 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/29c74501-18ab-4bab-8811-9a766efc6b17-utilities\") pod \"29c74501-18ab-4bab-8811-9a766efc6b17\" (UID: \"29c74501-18ab-4bab-8811-9a766efc6b17\") " Nov 23 17:01:13 crc kubenswrapper[5050]: I1123 17:01:13.404526 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/29c74501-18ab-4bab-8811-9a766efc6b17-utilities" (OuterVolumeSpecName: "utilities") pod "29c74501-18ab-4bab-8811-9a766efc6b17" (UID: "29c74501-18ab-4bab-8811-9a766efc6b17"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 17:01:13 crc kubenswrapper[5050]: I1123 17:01:13.410092 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/29c74501-18ab-4bab-8811-9a766efc6b17-kube-api-access-dtwwf" (OuterVolumeSpecName: "kube-api-access-dtwwf") pod "29c74501-18ab-4bab-8811-9a766efc6b17" (UID: "29c74501-18ab-4bab-8811-9a766efc6b17"). InnerVolumeSpecName "kube-api-access-dtwwf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 17:01:13 crc kubenswrapper[5050]: I1123 17:01:13.459586 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/29c74501-18ab-4bab-8811-9a766efc6b17-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "29c74501-18ab-4bab-8811-9a766efc6b17" (UID: "29c74501-18ab-4bab-8811-9a766efc6b17"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 17:01:13 crc kubenswrapper[5050]: I1123 17:01:13.507092 5050 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/29c74501-18ab-4bab-8811-9a766efc6b17-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 23 17:01:13 crc kubenswrapper[5050]: I1123 17:01:13.507150 5050 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/29c74501-18ab-4bab-8811-9a766efc6b17-utilities\") on node \"crc\" DevicePath \"\"" Nov 23 17:01:13 crc kubenswrapper[5050]: I1123 17:01:13.507162 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dtwwf\" (UniqueName: \"kubernetes.io/projected/29c74501-18ab-4bab-8811-9a766efc6b17-kube-api-access-dtwwf\") on node \"crc\" DevicePath \"\"" Nov 23 17:01:13 crc kubenswrapper[5050]: I1123 17:01:13.700524 5050 generic.go:334] "Generic (PLEG): container finished" podID="29c74501-18ab-4bab-8811-9a766efc6b17" containerID="89b71b6df46906962dab5860e1d7e8de61e82e4e12f914600a0dcb1e3683a181" exitCode=0 Nov 23 17:01:13 crc kubenswrapper[5050]: I1123 17:01:13.700580 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8svs6" event={"ID":"29c74501-18ab-4bab-8811-9a766efc6b17","Type":"ContainerDied","Data":"89b71b6df46906962dab5860e1d7e8de61e82e4e12f914600a0dcb1e3683a181"} Nov 23 17:01:13 crc kubenswrapper[5050]: I1123 17:01:13.700614 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8svs6" event={"ID":"29c74501-18ab-4bab-8811-9a766efc6b17","Type":"ContainerDied","Data":"26a957cfe427dfb364ed3d1d7babc3d36895e3cc47bdafe7a76ad8b22da842b4"} Nov 23 17:01:13 crc kubenswrapper[5050]: I1123 17:01:13.700637 5050 scope.go:117] "RemoveContainer" containerID="89b71b6df46906962dab5860e1d7e8de61e82e4e12f914600a0dcb1e3683a181" Nov 23 17:01:13 crc kubenswrapper[5050]: I1123 17:01:13.700715 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-8svs6" Nov 23 17:01:13 crc kubenswrapper[5050]: I1123 17:01:13.733535 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-8svs6"] Nov 23 17:01:13 crc kubenswrapper[5050]: I1123 17:01:13.743972 5050 scope.go:117] "RemoveContainer" containerID="9b623c36f9b00a2a152f9633dd338db03e05c13778d4ba05b82d69e46ec2d8c2" Nov 23 17:01:13 crc kubenswrapper[5050]: I1123 17:01:13.747151 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-8svs6"] Nov 23 17:01:13 crc kubenswrapper[5050]: I1123 17:01:13.774308 5050 scope.go:117] "RemoveContainer" containerID="081a41b54f66bb9ac738e3f68bd09f604355e2b3720db10d1d18f4e654966dbe" Nov 23 17:01:13 crc kubenswrapper[5050]: I1123 17:01:13.842614 5050 scope.go:117] "RemoveContainer" containerID="89b71b6df46906962dab5860e1d7e8de61e82e4e12f914600a0dcb1e3683a181" Nov 23 17:01:13 crc kubenswrapper[5050]: E1123 17:01:13.843485 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"89b71b6df46906962dab5860e1d7e8de61e82e4e12f914600a0dcb1e3683a181\": container with ID starting with 89b71b6df46906962dab5860e1d7e8de61e82e4e12f914600a0dcb1e3683a181 not found: ID does not exist" containerID="89b71b6df46906962dab5860e1d7e8de61e82e4e12f914600a0dcb1e3683a181" Nov 23 17:01:13 crc kubenswrapper[5050]: I1123 17:01:13.843574 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"89b71b6df46906962dab5860e1d7e8de61e82e4e12f914600a0dcb1e3683a181"} err="failed to get container status \"89b71b6df46906962dab5860e1d7e8de61e82e4e12f914600a0dcb1e3683a181\": rpc error: code = NotFound desc = could not find container \"89b71b6df46906962dab5860e1d7e8de61e82e4e12f914600a0dcb1e3683a181\": container with ID starting with 89b71b6df46906962dab5860e1d7e8de61e82e4e12f914600a0dcb1e3683a181 not found: ID does not exist" Nov 23 17:01:13 crc kubenswrapper[5050]: I1123 17:01:13.843638 5050 scope.go:117] "RemoveContainer" containerID="9b623c36f9b00a2a152f9633dd338db03e05c13778d4ba05b82d69e46ec2d8c2" Nov 23 17:01:13 crc kubenswrapper[5050]: E1123 17:01:13.844123 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9b623c36f9b00a2a152f9633dd338db03e05c13778d4ba05b82d69e46ec2d8c2\": container with ID starting with 9b623c36f9b00a2a152f9633dd338db03e05c13778d4ba05b82d69e46ec2d8c2 not found: ID does not exist" containerID="9b623c36f9b00a2a152f9633dd338db03e05c13778d4ba05b82d69e46ec2d8c2" Nov 23 17:01:13 crc kubenswrapper[5050]: I1123 17:01:13.844176 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9b623c36f9b00a2a152f9633dd338db03e05c13778d4ba05b82d69e46ec2d8c2"} err="failed to get container status \"9b623c36f9b00a2a152f9633dd338db03e05c13778d4ba05b82d69e46ec2d8c2\": rpc error: code = NotFound desc = could not find container \"9b623c36f9b00a2a152f9633dd338db03e05c13778d4ba05b82d69e46ec2d8c2\": container with ID starting with 9b623c36f9b00a2a152f9633dd338db03e05c13778d4ba05b82d69e46ec2d8c2 not found: ID does not exist" Nov 23 17:01:13 crc kubenswrapper[5050]: I1123 17:01:13.844208 5050 scope.go:117] "RemoveContainer" containerID="081a41b54f66bb9ac738e3f68bd09f604355e2b3720db10d1d18f4e654966dbe" Nov 23 17:01:13 crc kubenswrapper[5050]: E1123 17:01:13.844570 5050 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"081a41b54f66bb9ac738e3f68bd09f604355e2b3720db10d1d18f4e654966dbe\": container with ID starting with 081a41b54f66bb9ac738e3f68bd09f604355e2b3720db10d1d18f4e654966dbe not found: ID does not exist" containerID="081a41b54f66bb9ac738e3f68bd09f604355e2b3720db10d1d18f4e654966dbe" Nov 23 17:01:13 crc kubenswrapper[5050]: I1123 17:01:13.844597 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"081a41b54f66bb9ac738e3f68bd09f604355e2b3720db10d1d18f4e654966dbe"} err="failed to get container status \"081a41b54f66bb9ac738e3f68bd09f604355e2b3720db10d1d18f4e654966dbe\": rpc error: code = NotFound desc = could not find container \"081a41b54f66bb9ac738e3f68bd09f604355e2b3720db10d1d18f4e654966dbe\": container with ID starting with 081a41b54f66bb9ac738e3f68bd09f604355e2b3720db10d1d18f4e654966dbe not found: ID does not exist" Nov 23 17:01:15 crc kubenswrapper[5050]: I1123 17:01:15.564112 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="29c74501-18ab-4bab-8811-9a766efc6b17" path="/var/lib/kubelet/pods/29c74501-18ab-4bab-8811-9a766efc6b17/volumes" Nov 23 17:01:19 crc kubenswrapper[5050]: I1123 17:01:19.549775 5050 scope.go:117] "RemoveContainer" containerID="d4e53edfc30e031f9873726e3c9f60ebf48715860feb40686c4c363c778e0689" Nov 23 17:01:19 crc kubenswrapper[5050]: E1123 17:01:19.552460 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 17:01:32 crc kubenswrapper[5050]: I1123 17:01:32.549648 5050 scope.go:117] "RemoveContainer" containerID="d4e53edfc30e031f9873726e3c9f60ebf48715860feb40686c4c363c778e0689" Nov 23 17:01:32 crc kubenswrapper[5050]: E1123 17:01:32.553541 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 17:01:44 crc kubenswrapper[5050]: I1123 17:01:44.549562 5050 scope.go:117] "RemoveContainer" containerID="d4e53edfc30e031f9873726e3c9f60ebf48715860feb40686c4c363c778e0689" Nov 23 17:01:44 crc kubenswrapper[5050]: E1123 17:01:44.551515 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 17:01:56 crc kubenswrapper[5050]: I1123 17:01:56.549569 5050 scope.go:117] "RemoveContainer" containerID="d4e53edfc30e031f9873726e3c9f60ebf48715860feb40686c4c363c778e0689" Nov 23 17:01:56 crc kubenswrapper[5050]: E1123 17:01:56.550861 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 17:02:09 crc kubenswrapper[5050]: I1123 17:02:09.551124 5050 scope.go:117] "RemoveContainer" containerID="d4e53edfc30e031f9873726e3c9f60ebf48715860feb40686c4c363c778e0689" Nov 23 17:02:09 crc kubenswrapper[5050]: E1123 17:02:09.552352 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 17:02:21 crc kubenswrapper[5050]: I1123 17:02:21.550073 5050 scope.go:117] "RemoveContainer" containerID="d4e53edfc30e031f9873726e3c9f60ebf48715860feb40686c4c363c778e0689" Nov 23 17:02:21 crc kubenswrapper[5050]: E1123 17:02:21.551404 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 17:02:34 crc kubenswrapper[5050]: I1123 17:02:34.549124 5050 scope.go:117] "RemoveContainer" containerID="d4e53edfc30e031f9873726e3c9f60ebf48715860feb40686c4c363c778e0689" Nov 23 17:02:34 crc kubenswrapper[5050]: E1123 17:02:34.550257 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 17:02:45 crc kubenswrapper[5050]: I1123 17:02:45.566497 5050 scope.go:117] "RemoveContainer" containerID="d4e53edfc30e031f9873726e3c9f60ebf48715860feb40686c4c363c778e0689" Nov 23 17:02:45 crc kubenswrapper[5050]: E1123 17:02:45.567536 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 17:02:59 crc kubenswrapper[5050]: I1123 17:02:59.549204 5050 scope.go:117] "RemoveContainer" containerID="d4e53edfc30e031f9873726e3c9f60ebf48715860feb40686c4c363c778e0689" Nov 23 17:02:59 crc kubenswrapper[5050]: E1123 17:02:59.550093 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 17:03:14 crc kubenswrapper[5050]: I1123 17:03:14.591356 5050 scope.go:117] "RemoveContainer" containerID="d4e53edfc30e031f9873726e3c9f60ebf48715860feb40686c4c363c778e0689" Nov 23 17:03:14 crc kubenswrapper[5050]: E1123 17:03:14.600836 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 17:03:29 crc kubenswrapper[5050]: I1123 17:03:29.549529 5050 scope.go:117] "RemoveContainer" containerID="d4e53edfc30e031f9873726e3c9f60ebf48715860feb40686c4c363c778e0689" Nov 23 17:03:29 crc kubenswrapper[5050]: E1123 17:03:29.551541 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 17:03:44 crc kubenswrapper[5050]: I1123 17:03:44.549292 5050 scope.go:117] "RemoveContainer" containerID="d4e53edfc30e031f9873726e3c9f60ebf48715860feb40686c4c363c778e0689" Nov 23 17:03:44 crc kubenswrapper[5050]: E1123 17:03:44.550313 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 17:03:58 crc kubenswrapper[5050]: I1123 17:03:58.551507 5050 scope.go:117] "RemoveContainer" containerID="d4e53edfc30e031f9873726e3c9f60ebf48715860feb40686c4c363c778e0689" Nov 23 17:03:58 crc kubenswrapper[5050]: E1123 17:03:58.552482 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 17:04:12 crc kubenswrapper[5050]: I1123 17:04:12.549437 5050 scope.go:117] "RemoveContainer" containerID="d4e53edfc30e031f9873726e3c9f60ebf48715860feb40686c4c363c778e0689" Nov 23 17:04:12 crc kubenswrapper[5050]: E1123 17:04:12.550541 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" 
podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 17:04:22 crc kubenswrapper[5050]: I1123 17:04:22.628679 5050 generic.go:334] "Generic (PLEG): container finished" podID="7c29377d-9bd8-4eb5-8777-3ae34021588e" containerID="09a4db6e69319f303e1c02c7b2a0ebe94aba2b70f24219bcab02f5b7a32f5939" exitCode=0 Nov 23 17:04:22 crc kubenswrapper[5050]: I1123 17:04:22.629221 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-openstack-openstack-cell1-7rf78" event={"ID":"7c29377d-9bd8-4eb5-8777-3ae34021588e","Type":"ContainerDied","Data":"09a4db6e69319f303e1c02c7b2a0ebe94aba2b70f24219bcab02f5b7a32f5939"} Nov 23 17:04:24 crc kubenswrapper[5050]: I1123 17:04:24.210289 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-openstack-openstack-cell1-7rf78" Nov 23 17:04:24 crc kubenswrapper[5050]: I1123 17:04:24.363032 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/7c29377d-9bd8-4eb5-8777-3ae34021588e-ceilometer-compute-config-data-1\") pod \"7c29377d-9bd8-4eb5-8777-3ae34021588e\" (UID: \"7c29377d-9bd8-4eb5-8777-3ae34021588e\") " Nov 23 17:04:24 crc kubenswrapper[5050]: I1123 17:04:24.363644 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7c29377d-9bd8-4eb5-8777-3ae34021588e-inventory\") pod \"7c29377d-9bd8-4eb5-8777-3ae34021588e\" (UID: \"7c29377d-9bd8-4eb5-8777-3ae34021588e\") " Nov 23 17:04:24 crc kubenswrapper[5050]: I1123 17:04:24.363692 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r5nj2\" (UniqueName: \"kubernetes.io/projected/7c29377d-9bd8-4eb5-8777-3ae34021588e-kube-api-access-r5nj2\") pod \"7c29377d-9bd8-4eb5-8777-3ae34021588e\" (UID: \"7c29377d-9bd8-4eb5-8777-3ae34021588e\") " Nov 23 17:04:24 crc kubenswrapper[5050]: I1123 17:04:24.363995 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/7c29377d-9bd8-4eb5-8777-3ae34021588e-ceph\") pod \"7c29377d-9bd8-4eb5-8777-3ae34021588e\" (UID: \"7c29377d-9bd8-4eb5-8777-3ae34021588e\") " Nov 23 17:04:24 crc kubenswrapper[5050]: I1123 17:04:24.364115 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/7c29377d-9bd8-4eb5-8777-3ae34021588e-ceilometer-compute-config-data-2\") pod \"7c29377d-9bd8-4eb5-8777-3ae34021588e\" (UID: \"7c29377d-9bd8-4eb5-8777-3ae34021588e\") " Nov 23 17:04:24 crc kubenswrapper[5050]: I1123 17:04:24.364218 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7c29377d-9bd8-4eb5-8777-3ae34021588e-ssh-key\") pod \"7c29377d-9bd8-4eb5-8777-3ae34021588e\" (UID: \"7c29377d-9bd8-4eb5-8777-3ae34021588e\") " Nov 23 17:04:24 crc kubenswrapper[5050]: I1123 17:04:24.364296 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7c29377d-9bd8-4eb5-8777-3ae34021588e-telemetry-combined-ca-bundle\") pod \"7c29377d-9bd8-4eb5-8777-3ae34021588e\" (UID: \"7c29377d-9bd8-4eb5-8777-3ae34021588e\") " Nov 23 17:04:24 crc kubenswrapper[5050]: I1123 17:04:24.364433 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-0\" 
(UniqueName: \"kubernetes.io/secret/7c29377d-9bd8-4eb5-8777-3ae34021588e-ceilometer-compute-config-data-0\") pod \"7c29377d-9bd8-4eb5-8777-3ae34021588e\" (UID: \"7c29377d-9bd8-4eb5-8777-3ae34021588e\") " Nov 23 17:04:24 crc kubenswrapper[5050]: I1123 17:04:24.372363 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7c29377d-9bd8-4eb5-8777-3ae34021588e-kube-api-access-r5nj2" (OuterVolumeSpecName: "kube-api-access-r5nj2") pod "7c29377d-9bd8-4eb5-8777-3ae34021588e" (UID: "7c29377d-9bd8-4eb5-8777-3ae34021588e"). InnerVolumeSpecName "kube-api-access-r5nj2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 17:04:24 crc kubenswrapper[5050]: I1123 17:04:24.376248 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7c29377d-9bd8-4eb5-8777-3ae34021588e-ceph" (OuterVolumeSpecName: "ceph") pod "7c29377d-9bd8-4eb5-8777-3ae34021588e" (UID: "7c29377d-9bd8-4eb5-8777-3ae34021588e"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 17:04:24 crc kubenswrapper[5050]: I1123 17:04:24.377802 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7c29377d-9bd8-4eb5-8777-3ae34021588e-telemetry-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-combined-ca-bundle") pod "7c29377d-9bd8-4eb5-8777-3ae34021588e" (UID: "7c29377d-9bd8-4eb5-8777-3ae34021588e"). InnerVolumeSpecName "telemetry-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 17:04:24 crc kubenswrapper[5050]: I1123 17:04:24.408199 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7c29377d-9bd8-4eb5-8777-3ae34021588e-ceilometer-compute-config-data-1" (OuterVolumeSpecName: "ceilometer-compute-config-data-1") pod "7c29377d-9bd8-4eb5-8777-3ae34021588e" (UID: "7c29377d-9bd8-4eb5-8777-3ae34021588e"). InnerVolumeSpecName "ceilometer-compute-config-data-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 17:04:24 crc kubenswrapper[5050]: I1123 17:04:24.409262 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7c29377d-9bd8-4eb5-8777-3ae34021588e-ceilometer-compute-config-data-0" (OuterVolumeSpecName: "ceilometer-compute-config-data-0") pod "7c29377d-9bd8-4eb5-8777-3ae34021588e" (UID: "7c29377d-9bd8-4eb5-8777-3ae34021588e"). InnerVolumeSpecName "ceilometer-compute-config-data-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 17:04:24 crc kubenswrapper[5050]: I1123 17:04:24.411192 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7c29377d-9bd8-4eb5-8777-3ae34021588e-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "7c29377d-9bd8-4eb5-8777-3ae34021588e" (UID: "7c29377d-9bd8-4eb5-8777-3ae34021588e"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 17:04:24 crc kubenswrapper[5050]: I1123 17:04:24.415112 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7c29377d-9bd8-4eb5-8777-3ae34021588e-ceilometer-compute-config-data-2" (OuterVolumeSpecName: "ceilometer-compute-config-data-2") pod "7c29377d-9bd8-4eb5-8777-3ae34021588e" (UID: "7c29377d-9bd8-4eb5-8777-3ae34021588e"). InnerVolumeSpecName "ceilometer-compute-config-data-2". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 17:04:24 crc kubenswrapper[5050]: I1123 17:04:24.417481 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7c29377d-9bd8-4eb5-8777-3ae34021588e-inventory" (OuterVolumeSpecName: "inventory") pod "7c29377d-9bd8-4eb5-8777-3ae34021588e" (UID: "7c29377d-9bd8-4eb5-8777-3ae34021588e"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 17:04:24 crc kubenswrapper[5050]: I1123 17:04:24.468434 5050 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/7c29377d-9bd8-4eb5-8777-3ae34021588e-ceilometer-compute-config-data-2\") on node \"crc\" DevicePath \"\"" Nov 23 17:04:24 crc kubenswrapper[5050]: I1123 17:04:24.468502 5050 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7c29377d-9bd8-4eb5-8777-3ae34021588e-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 23 17:04:24 crc kubenswrapper[5050]: I1123 17:04:24.468520 5050 reconciler_common.go:293] "Volume detached for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7c29377d-9bd8-4eb5-8777-3ae34021588e-telemetry-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 17:04:24 crc kubenswrapper[5050]: I1123 17:04:24.468536 5050 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/7c29377d-9bd8-4eb5-8777-3ae34021588e-ceilometer-compute-config-data-0\") on node \"crc\" DevicePath \"\"" Nov 23 17:04:24 crc kubenswrapper[5050]: I1123 17:04:24.468548 5050 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/7c29377d-9bd8-4eb5-8777-3ae34021588e-ceilometer-compute-config-data-1\") on node \"crc\" DevicePath \"\"" Nov 23 17:04:24 crc kubenswrapper[5050]: I1123 17:04:24.468566 5050 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7c29377d-9bd8-4eb5-8777-3ae34021588e-inventory\") on node \"crc\" DevicePath \"\"" Nov 23 17:04:24 crc kubenswrapper[5050]: I1123 17:04:24.468581 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r5nj2\" (UniqueName: \"kubernetes.io/projected/7c29377d-9bd8-4eb5-8777-3ae34021588e-kube-api-access-r5nj2\") on node \"crc\" DevicePath \"\"" Nov 23 17:04:24 crc kubenswrapper[5050]: I1123 17:04:24.468593 5050 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/7c29377d-9bd8-4eb5-8777-3ae34021588e-ceph\") on node \"crc\" DevicePath \"\"" Nov 23 17:04:24 crc kubenswrapper[5050]: I1123 17:04:24.663337 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-openstack-openstack-cell1-7rf78" event={"ID":"7c29377d-9bd8-4eb5-8777-3ae34021588e","Type":"ContainerDied","Data":"872d96ea9a86d181e807b00a45fb8cb7c75ff8aaf69bd07284edfec27b62f03d"} Nov 23 17:04:24 crc kubenswrapper[5050]: I1123 17:04:24.663425 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="872d96ea9a86d181e807b00a45fb8cb7c75ff8aaf69bd07284edfec27b62f03d" Nov 23 17:04:24 crc kubenswrapper[5050]: I1123 17:04:24.663423 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/telemetry-openstack-openstack-cell1-7rf78" Nov 23 17:04:24 crc kubenswrapper[5050]: I1123 17:04:24.789078 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-sriov-openstack-openstack-cell1-rz6t4"] Nov 23 17:04:24 crc kubenswrapper[5050]: E1123 17:04:24.789923 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="054236c5-b5b5-4794-bb9a-f52f18150a9e" containerName="keystone-cron" Nov 23 17:04:24 crc kubenswrapper[5050]: I1123 17:04:24.789956 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="054236c5-b5b5-4794-bb9a-f52f18150a9e" containerName="keystone-cron" Nov 23 17:04:24 crc kubenswrapper[5050]: E1123 17:04:24.789987 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7c29377d-9bd8-4eb5-8777-3ae34021588e" containerName="telemetry-openstack-openstack-cell1" Nov 23 17:04:24 crc kubenswrapper[5050]: I1123 17:04:24.790001 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="7c29377d-9bd8-4eb5-8777-3ae34021588e" containerName="telemetry-openstack-openstack-cell1" Nov 23 17:04:24 crc kubenswrapper[5050]: E1123 17:04:24.790072 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="29c74501-18ab-4bab-8811-9a766efc6b17" containerName="registry-server" Nov 23 17:04:24 crc kubenswrapper[5050]: I1123 17:04:24.790082 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="29c74501-18ab-4bab-8811-9a766efc6b17" containerName="registry-server" Nov 23 17:04:24 crc kubenswrapper[5050]: E1123 17:04:24.790114 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="29c74501-18ab-4bab-8811-9a766efc6b17" containerName="extract-utilities" Nov 23 17:04:24 crc kubenswrapper[5050]: I1123 17:04:24.790127 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="29c74501-18ab-4bab-8811-9a766efc6b17" containerName="extract-utilities" Nov 23 17:04:24 crc kubenswrapper[5050]: E1123 17:04:24.790141 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="29c74501-18ab-4bab-8811-9a766efc6b17" containerName="extract-content" Nov 23 17:04:24 crc kubenswrapper[5050]: I1123 17:04:24.790151 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="29c74501-18ab-4bab-8811-9a766efc6b17" containerName="extract-content" Nov 23 17:04:24 crc kubenswrapper[5050]: I1123 17:04:24.790569 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="7c29377d-9bd8-4eb5-8777-3ae34021588e" containerName="telemetry-openstack-openstack-cell1" Nov 23 17:04:24 crc kubenswrapper[5050]: I1123 17:04:24.790632 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="054236c5-b5b5-4794-bb9a-f52f18150a9e" containerName="keystone-cron" Nov 23 17:04:24 crc kubenswrapper[5050]: I1123 17:04:24.790661 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="29c74501-18ab-4bab-8811-9a766efc6b17" containerName="registry-server" Nov 23 17:04:24 crc kubenswrapper[5050]: I1123 17:04:24.792346 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-sriov-openstack-openstack-cell1-rz6t4" Nov 23 17:04:24 crc kubenswrapper[5050]: I1123 17:04:24.795633 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-sriov-agent-neutron-config" Nov 23 17:04:24 crc kubenswrapper[5050]: I1123 17:04:24.796192 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-ckrpf" Nov 23 17:04:24 crc kubenswrapper[5050]: I1123 17:04:24.798166 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 23 17:04:24 crc kubenswrapper[5050]: I1123 17:04:24.798395 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 23 17:04:24 crc kubenswrapper[5050]: I1123 17:04:24.799945 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Nov 23 17:04:24 crc kubenswrapper[5050]: I1123 17:04:24.824935 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-sriov-openstack-openstack-cell1-rz6t4"] Nov 23 17:04:24 crc kubenswrapper[5050]: I1123 17:04:24.880902 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-sriov-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/7ea51431-6725-4bd0-8f42-7d79d7bb522a-neutron-sriov-agent-neutron-config-0\") pod \"neutron-sriov-openstack-openstack-cell1-rz6t4\" (UID: \"7ea51431-6725-4bd0-8f42-7d79d7bb522a\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-rz6t4" Nov 23 17:04:24 crc kubenswrapper[5050]: I1123 17:04:24.881064 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7ea51431-6725-4bd0-8f42-7d79d7bb522a-inventory\") pod \"neutron-sriov-openstack-openstack-cell1-rz6t4\" (UID: \"7ea51431-6725-4bd0-8f42-7d79d7bb522a\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-rz6t4" Nov 23 17:04:24 crc kubenswrapper[5050]: I1123 17:04:24.881158 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7ea51431-6725-4bd0-8f42-7d79d7bb522a-ssh-key\") pod \"neutron-sriov-openstack-openstack-cell1-rz6t4\" (UID: \"7ea51431-6725-4bd0-8f42-7d79d7bb522a\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-rz6t4" Nov 23 17:04:24 crc kubenswrapper[5050]: I1123 17:04:24.881408 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-sriov-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7ea51431-6725-4bd0-8f42-7d79d7bb522a-neutron-sriov-combined-ca-bundle\") pod \"neutron-sriov-openstack-openstack-cell1-rz6t4\" (UID: \"7ea51431-6725-4bd0-8f42-7d79d7bb522a\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-rz6t4" Nov 23 17:04:24 crc kubenswrapper[5050]: I1123 17:04:24.881568 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/7ea51431-6725-4bd0-8f42-7d79d7bb522a-ceph\") pod \"neutron-sriov-openstack-openstack-cell1-rz6t4\" (UID: \"7ea51431-6725-4bd0-8f42-7d79d7bb522a\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-rz6t4" Nov 23 17:04:24 crc kubenswrapper[5050]: I1123 17:04:24.881671 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"kube-api-access-l72n7\" (UniqueName: \"kubernetes.io/projected/7ea51431-6725-4bd0-8f42-7d79d7bb522a-kube-api-access-l72n7\") pod \"neutron-sriov-openstack-openstack-cell1-rz6t4\" (UID: \"7ea51431-6725-4bd0-8f42-7d79d7bb522a\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-rz6t4" Nov 23 17:04:24 crc kubenswrapper[5050]: I1123 17:04:24.985111 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l72n7\" (UniqueName: \"kubernetes.io/projected/7ea51431-6725-4bd0-8f42-7d79d7bb522a-kube-api-access-l72n7\") pod \"neutron-sriov-openstack-openstack-cell1-rz6t4\" (UID: \"7ea51431-6725-4bd0-8f42-7d79d7bb522a\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-rz6t4" Nov 23 17:04:24 crc kubenswrapper[5050]: I1123 17:04:24.985295 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-sriov-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/7ea51431-6725-4bd0-8f42-7d79d7bb522a-neutron-sriov-agent-neutron-config-0\") pod \"neutron-sriov-openstack-openstack-cell1-rz6t4\" (UID: \"7ea51431-6725-4bd0-8f42-7d79d7bb522a\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-rz6t4" Nov 23 17:04:24 crc kubenswrapper[5050]: I1123 17:04:24.985401 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7ea51431-6725-4bd0-8f42-7d79d7bb522a-inventory\") pod \"neutron-sriov-openstack-openstack-cell1-rz6t4\" (UID: \"7ea51431-6725-4bd0-8f42-7d79d7bb522a\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-rz6t4" Nov 23 17:04:24 crc kubenswrapper[5050]: I1123 17:04:24.985515 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7ea51431-6725-4bd0-8f42-7d79d7bb522a-ssh-key\") pod \"neutron-sriov-openstack-openstack-cell1-rz6t4\" (UID: \"7ea51431-6725-4bd0-8f42-7d79d7bb522a\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-rz6t4" Nov 23 17:04:24 crc kubenswrapper[5050]: I1123 17:04:24.985575 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-sriov-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7ea51431-6725-4bd0-8f42-7d79d7bb522a-neutron-sriov-combined-ca-bundle\") pod \"neutron-sriov-openstack-openstack-cell1-rz6t4\" (UID: \"7ea51431-6725-4bd0-8f42-7d79d7bb522a\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-rz6t4" Nov 23 17:04:24 crc kubenswrapper[5050]: I1123 17:04:24.985618 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/7ea51431-6725-4bd0-8f42-7d79d7bb522a-ceph\") pod \"neutron-sriov-openstack-openstack-cell1-rz6t4\" (UID: \"7ea51431-6725-4bd0-8f42-7d79d7bb522a\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-rz6t4" Nov 23 17:04:24 crc kubenswrapper[5050]: I1123 17:04:24.991610 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7ea51431-6725-4bd0-8f42-7d79d7bb522a-ssh-key\") pod \"neutron-sriov-openstack-openstack-cell1-rz6t4\" (UID: \"7ea51431-6725-4bd0-8f42-7d79d7bb522a\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-rz6t4" Nov 23 17:04:24 crc kubenswrapper[5050]: I1123 17:04:24.992173 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7ea51431-6725-4bd0-8f42-7d79d7bb522a-inventory\") pod \"neutron-sriov-openstack-openstack-cell1-rz6t4\" 
(UID: \"7ea51431-6725-4bd0-8f42-7d79d7bb522a\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-rz6t4" Nov 23 17:04:24 crc kubenswrapper[5050]: I1123 17:04:24.992661 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/7ea51431-6725-4bd0-8f42-7d79d7bb522a-ceph\") pod \"neutron-sriov-openstack-openstack-cell1-rz6t4\" (UID: \"7ea51431-6725-4bd0-8f42-7d79d7bb522a\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-rz6t4" Nov 23 17:04:24 crc kubenswrapper[5050]: I1123 17:04:24.993026 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-sriov-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/7ea51431-6725-4bd0-8f42-7d79d7bb522a-neutron-sriov-agent-neutron-config-0\") pod \"neutron-sriov-openstack-openstack-cell1-rz6t4\" (UID: \"7ea51431-6725-4bd0-8f42-7d79d7bb522a\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-rz6t4" Nov 23 17:04:25 crc kubenswrapper[5050]: I1123 17:04:24.994810 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-sriov-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7ea51431-6725-4bd0-8f42-7d79d7bb522a-neutron-sriov-combined-ca-bundle\") pod \"neutron-sriov-openstack-openstack-cell1-rz6t4\" (UID: \"7ea51431-6725-4bd0-8f42-7d79d7bb522a\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-rz6t4" Nov 23 17:04:25 crc kubenswrapper[5050]: I1123 17:04:25.007246 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l72n7\" (UniqueName: \"kubernetes.io/projected/7ea51431-6725-4bd0-8f42-7d79d7bb522a-kube-api-access-l72n7\") pod \"neutron-sriov-openstack-openstack-cell1-rz6t4\" (UID: \"7ea51431-6725-4bd0-8f42-7d79d7bb522a\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-rz6t4" Nov 23 17:04:25 crc kubenswrapper[5050]: I1123 17:04:25.113475 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-sriov-openstack-openstack-cell1-rz6t4" Nov 23 17:04:25 crc kubenswrapper[5050]: I1123 17:04:25.566504 5050 scope.go:117] "RemoveContainer" containerID="d4e53edfc30e031f9873726e3c9f60ebf48715860feb40686c4c363c778e0689" Nov 23 17:04:25 crc kubenswrapper[5050]: E1123 17:04:25.567645 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 17:04:25 crc kubenswrapper[5050]: I1123 17:04:25.744658 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-sriov-openstack-openstack-cell1-rz6t4"] Nov 23 17:04:25 crc kubenswrapper[5050]: I1123 17:04:25.755966 5050 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 23 17:04:26 crc kubenswrapper[5050]: I1123 17:04:26.710262 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-sriov-openstack-openstack-cell1-rz6t4" event={"ID":"7ea51431-6725-4bd0-8f42-7d79d7bb522a","Type":"ContainerStarted","Data":"bd30af1a878dd1298aa6266b3a0714e60e6e07985cca47dcc7462888cf1f8455"} Nov 23 17:04:26 crc kubenswrapper[5050]: I1123 17:04:26.710726 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-sriov-openstack-openstack-cell1-rz6t4" event={"ID":"7ea51431-6725-4bd0-8f42-7d79d7bb522a","Type":"ContainerStarted","Data":"06ad41da53aebd95dcf1325efd5d4d8d206e10d98ba483114f43b39cbe3f85a6"} Nov 23 17:04:26 crc kubenswrapper[5050]: I1123 17:04:26.744900 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-sriov-openstack-openstack-cell1-rz6t4" podStartSLOduration=2.256651856 podStartE2EDuration="2.74487728s" podCreationTimestamp="2025-11-23 17:04:24 +0000 UTC" firstStartedPulling="2025-11-23 17:04:25.755568569 +0000 UTC m=+8560.922565054" lastFinishedPulling="2025-11-23 17:04:26.243793993 +0000 UTC m=+8561.410790478" observedRunningTime="2025-11-23 17:04:26.73141974 +0000 UTC m=+8561.898416225" watchObservedRunningTime="2025-11-23 17:04:26.74487728 +0000 UTC m=+8561.911873765" Nov 23 17:04:39 crc kubenswrapper[5050]: I1123 17:04:39.549085 5050 scope.go:117] "RemoveContainer" containerID="d4e53edfc30e031f9873726e3c9f60ebf48715860feb40686c4c363c778e0689" Nov 23 17:04:39 crc kubenswrapper[5050]: E1123 17:04:39.550238 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 17:04:50 crc kubenswrapper[5050]: I1123 17:04:50.549378 5050 scope.go:117] "RemoveContainer" containerID="d4e53edfc30e031f9873726e3c9f60ebf48715860feb40686c4c363c778e0689" Nov 23 17:04:50 crc kubenswrapper[5050]: E1123 17:04:50.550438 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 17:05:01 crc kubenswrapper[5050]: I1123 17:05:01.548651 5050 scope.go:117] "RemoveContainer" containerID="d4e53edfc30e031f9873726e3c9f60ebf48715860feb40686c4c363c778e0689" Nov 23 17:05:01 crc kubenswrapper[5050]: E1123 17:05:01.550096 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 17:05:14 crc kubenswrapper[5050]: I1123 17:05:14.550154 5050 scope.go:117] "RemoveContainer" containerID="d4e53edfc30e031f9873726e3c9f60ebf48715860feb40686c4c363c778e0689" Nov 23 17:05:14 crc kubenswrapper[5050]: E1123 17:05:14.551610 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 17:05:26 crc kubenswrapper[5050]: I1123 17:05:26.548407 5050 scope.go:117] "RemoveContainer" containerID="d4e53edfc30e031f9873726e3c9f60ebf48715860feb40686c4c363c778e0689" Nov 23 17:05:26 crc kubenswrapper[5050]: E1123 17:05:26.549545 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 17:05:38 crc kubenswrapper[5050]: I1123 17:05:38.549655 5050 scope.go:117] "RemoveContainer" containerID="d4e53edfc30e031f9873726e3c9f60ebf48715860feb40686c4c363c778e0689" Nov 23 17:05:39 crc kubenswrapper[5050]: I1123 17:05:39.790374 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" event={"ID":"1d998909-9470-47ef-87e8-b34f0473682f","Type":"ContainerStarted","Data":"b51b89804155fbed045110dd6b91b5fff9a2c8e177a1ec2576ae32319dd4911f"} Nov 23 17:07:59 crc kubenswrapper[5050]: I1123 17:07:59.224283 5050 patch_prober.go:28] interesting pod/machine-config-daemon-hlrlq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 23 17:07:59 crc kubenswrapper[5050]: I1123 17:07:59.225420 5050 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 23 17:08:29 crc kubenswrapper[5050]: I1123 
17:08:29.224334 5050 patch_prober.go:28] interesting pod/machine-config-daemon-hlrlq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 23 17:08:29 crc kubenswrapper[5050]: I1123 17:08:29.225233 5050 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 23 17:08:59 crc kubenswrapper[5050]: I1123 17:08:59.224087 5050 patch_prober.go:28] interesting pod/machine-config-daemon-hlrlq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 23 17:08:59 crc kubenswrapper[5050]: I1123 17:08:59.224982 5050 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 23 17:08:59 crc kubenswrapper[5050]: I1123 17:08:59.225036 5050 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" Nov 23 17:08:59 crc kubenswrapper[5050]: I1123 17:08:59.226398 5050 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"b51b89804155fbed045110dd6b91b5fff9a2c8e177a1ec2576ae32319dd4911f"} pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 23 17:08:59 crc kubenswrapper[5050]: I1123 17:08:59.226512 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" containerID="cri-o://b51b89804155fbed045110dd6b91b5fff9a2c8e177a1ec2576ae32319dd4911f" gracePeriod=600 Nov 23 17:08:59 crc kubenswrapper[5050]: I1123 17:08:59.701221 5050 generic.go:334] "Generic (PLEG): container finished" podID="1d998909-9470-47ef-87e8-b34f0473682f" containerID="b51b89804155fbed045110dd6b91b5fff9a2c8e177a1ec2576ae32319dd4911f" exitCode=0 Nov 23 17:08:59 crc kubenswrapper[5050]: I1123 17:08:59.701288 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" event={"ID":"1d998909-9470-47ef-87e8-b34f0473682f","Type":"ContainerDied","Data":"b51b89804155fbed045110dd6b91b5fff9a2c8e177a1ec2576ae32319dd4911f"} Nov 23 17:08:59 crc kubenswrapper[5050]: I1123 17:08:59.701856 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" event={"ID":"1d998909-9470-47ef-87e8-b34f0473682f","Type":"ContainerStarted","Data":"87924a967acda0996de5f2211b02af6200dd82f1a857843afe7f7b8fe5bfdcda"} Nov 23 17:08:59 crc kubenswrapper[5050]: I1123 17:08:59.701895 5050 scope.go:117] "RemoveContainer" 
containerID="d4e53edfc30e031f9873726e3c9f60ebf48715860feb40686c4c363c778e0689" Nov 23 17:09:08 crc kubenswrapper[5050]: I1123 17:09:08.177364 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-nb9sl"] Nov 23 17:09:08 crc kubenswrapper[5050]: I1123 17:09:08.181415 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-nb9sl" Nov 23 17:09:08 crc kubenswrapper[5050]: I1123 17:09:08.189170 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-nb9sl"] Nov 23 17:09:08 crc kubenswrapper[5050]: I1123 17:09:08.221265 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7e2d257b-bbd5-4ac7-b7fb-b34e35c39b07-utilities\") pod \"redhat-operators-nb9sl\" (UID: \"7e2d257b-bbd5-4ac7-b7fb-b34e35c39b07\") " pod="openshift-marketplace/redhat-operators-nb9sl" Nov 23 17:09:08 crc kubenswrapper[5050]: I1123 17:09:08.221902 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4fmm9\" (UniqueName: \"kubernetes.io/projected/7e2d257b-bbd5-4ac7-b7fb-b34e35c39b07-kube-api-access-4fmm9\") pod \"redhat-operators-nb9sl\" (UID: \"7e2d257b-bbd5-4ac7-b7fb-b34e35c39b07\") " pod="openshift-marketplace/redhat-operators-nb9sl" Nov 23 17:09:08 crc kubenswrapper[5050]: I1123 17:09:08.222012 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7e2d257b-bbd5-4ac7-b7fb-b34e35c39b07-catalog-content\") pod \"redhat-operators-nb9sl\" (UID: \"7e2d257b-bbd5-4ac7-b7fb-b34e35c39b07\") " pod="openshift-marketplace/redhat-operators-nb9sl" Nov 23 17:09:08 crc kubenswrapper[5050]: I1123 17:09:08.325403 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4fmm9\" (UniqueName: \"kubernetes.io/projected/7e2d257b-bbd5-4ac7-b7fb-b34e35c39b07-kube-api-access-4fmm9\") pod \"redhat-operators-nb9sl\" (UID: \"7e2d257b-bbd5-4ac7-b7fb-b34e35c39b07\") " pod="openshift-marketplace/redhat-operators-nb9sl" Nov 23 17:09:08 crc kubenswrapper[5050]: I1123 17:09:08.325540 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7e2d257b-bbd5-4ac7-b7fb-b34e35c39b07-catalog-content\") pod \"redhat-operators-nb9sl\" (UID: \"7e2d257b-bbd5-4ac7-b7fb-b34e35c39b07\") " pod="openshift-marketplace/redhat-operators-nb9sl" Nov 23 17:09:08 crc kubenswrapper[5050]: I1123 17:09:08.325750 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7e2d257b-bbd5-4ac7-b7fb-b34e35c39b07-utilities\") pod \"redhat-operators-nb9sl\" (UID: \"7e2d257b-bbd5-4ac7-b7fb-b34e35c39b07\") " pod="openshift-marketplace/redhat-operators-nb9sl" Nov 23 17:09:08 crc kubenswrapper[5050]: I1123 17:09:08.326365 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7e2d257b-bbd5-4ac7-b7fb-b34e35c39b07-catalog-content\") pod \"redhat-operators-nb9sl\" (UID: \"7e2d257b-bbd5-4ac7-b7fb-b34e35c39b07\") " pod="openshift-marketplace/redhat-operators-nb9sl" Nov 23 17:09:08 crc kubenswrapper[5050]: I1123 17:09:08.327065 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/7e2d257b-bbd5-4ac7-b7fb-b34e35c39b07-utilities\") pod \"redhat-operators-nb9sl\" (UID: \"7e2d257b-bbd5-4ac7-b7fb-b34e35c39b07\") " pod="openshift-marketplace/redhat-operators-nb9sl" Nov 23 17:09:08 crc kubenswrapper[5050]: I1123 17:09:08.352967 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4fmm9\" (UniqueName: \"kubernetes.io/projected/7e2d257b-bbd5-4ac7-b7fb-b34e35c39b07-kube-api-access-4fmm9\") pod \"redhat-operators-nb9sl\" (UID: \"7e2d257b-bbd5-4ac7-b7fb-b34e35c39b07\") " pod="openshift-marketplace/redhat-operators-nb9sl" Nov 23 17:09:08 crc kubenswrapper[5050]: I1123 17:09:08.527397 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-nb9sl" Nov 23 17:09:09 crc kubenswrapper[5050]: I1123 17:09:09.119734 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-nb9sl"] Nov 23 17:09:09 crc kubenswrapper[5050]: I1123 17:09:09.564269 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-lz7vc"] Nov 23 17:09:09 crc kubenswrapper[5050]: I1123 17:09:09.567750 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-lz7vc" Nov 23 17:09:09 crc kubenswrapper[5050]: I1123 17:09:09.571403 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-lz7vc"] Nov 23 17:09:09 crc kubenswrapper[5050]: I1123 17:09:09.656143 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fd799923-afa0-426e-a8b9-ac8bb231f329-catalog-content\") pod \"certified-operators-lz7vc\" (UID: \"fd799923-afa0-426e-a8b9-ac8bb231f329\") " pod="openshift-marketplace/certified-operators-lz7vc" Nov 23 17:09:09 crc kubenswrapper[5050]: I1123 17:09:09.656254 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h8gxf\" (UniqueName: \"kubernetes.io/projected/fd799923-afa0-426e-a8b9-ac8bb231f329-kube-api-access-h8gxf\") pod \"certified-operators-lz7vc\" (UID: \"fd799923-afa0-426e-a8b9-ac8bb231f329\") " pod="openshift-marketplace/certified-operators-lz7vc" Nov 23 17:09:09 crc kubenswrapper[5050]: I1123 17:09:09.656505 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fd799923-afa0-426e-a8b9-ac8bb231f329-utilities\") pod \"certified-operators-lz7vc\" (UID: \"fd799923-afa0-426e-a8b9-ac8bb231f329\") " pod="openshift-marketplace/certified-operators-lz7vc" Nov 23 17:09:09 crc kubenswrapper[5050]: I1123 17:09:09.758196 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fd799923-afa0-426e-a8b9-ac8bb231f329-utilities\") pod \"certified-operators-lz7vc\" (UID: \"fd799923-afa0-426e-a8b9-ac8bb231f329\") " pod="openshift-marketplace/certified-operators-lz7vc" Nov 23 17:09:09 crc kubenswrapper[5050]: I1123 17:09:09.758298 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fd799923-afa0-426e-a8b9-ac8bb231f329-catalog-content\") pod \"certified-operators-lz7vc\" (UID: \"fd799923-afa0-426e-a8b9-ac8bb231f329\") " pod="openshift-marketplace/certified-operators-lz7vc" Nov 23 17:09:09 crc 
kubenswrapper[5050]: I1123 17:09:09.758352 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h8gxf\" (UniqueName: \"kubernetes.io/projected/fd799923-afa0-426e-a8b9-ac8bb231f329-kube-api-access-h8gxf\") pod \"certified-operators-lz7vc\" (UID: \"fd799923-afa0-426e-a8b9-ac8bb231f329\") " pod="openshift-marketplace/certified-operators-lz7vc"
Nov 23 17:09:09 crc kubenswrapper[5050]: I1123 17:09:09.758786 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fd799923-afa0-426e-a8b9-ac8bb231f329-utilities\") pod \"certified-operators-lz7vc\" (UID: \"fd799923-afa0-426e-a8b9-ac8bb231f329\") " pod="openshift-marketplace/certified-operators-lz7vc"
Nov 23 17:09:09 crc kubenswrapper[5050]: I1123 17:09:09.759058 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fd799923-afa0-426e-a8b9-ac8bb231f329-catalog-content\") pod \"certified-operators-lz7vc\" (UID: \"fd799923-afa0-426e-a8b9-ac8bb231f329\") " pod="openshift-marketplace/certified-operators-lz7vc"
Nov 23 17:09:09 crc kubenswrapper[5050]: I1123 17:09:09.781079 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h8gxf\" (UniqueName: \"kubernetes.io/projected/fd799923-afa0-426e-a8b9-ac8bb231f329-kube-api-access-h8gxf\") pod \"certified-operators-lz7vc\" (UID: \"fd799923-afa0-426e-a8b9-ac8bb231f329\") " pod="openshift-marketplace/certified-operators-lz7vc"
Nov 23 17:09:09 crc kubenswrapper[5050]: I1123 17:09:09.827151 5050 generic.go:334] "Generic (PLEG): container finished" podID="7e2d257b-bbd5-4ac7-b7fb-b34e35c39b07" containerID="dfb0627483b6b94791d230741a7f6f16c1fb1cc9c300a15de8efd4674f7c5068" exitCode=0
Nov 23 17:09:09 crc kubenswrapper[5050]: I1123 17:09:09.827213 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nb9sl" event={"ID":"7e2d257b-bbd5-4ac7-b7fb-b34e35c39b07","Type":"ContainerDied","Data":"dfb0627483b6b94791d230741a7f6f16c1fb1cc9c300a15de8efd4674f7c5068"}
Nov 23 17:09:09 crc kubenswrapper[5050]: I1123 17:09:09.827246 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nb9sl" event={"ID":"7e2d257b-bbd5-4ac7-b7fb-b34e35c39b07","Type":"ContainerStarted","Data":"aa603cbef4923e29b81f53599d486b91d0696534cac9bc858c8bd6d4962d0b02"}
Nov 23 17:09:09 crc kubenswrapper[5050]: I1123 17:09:09.888464 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-lz7vc"
Nov 23 17:09:10 crc kubenswrapper[5050]: I1123 17:09:10.517046 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-lz7vc"]
Nov 23 17:09:10 crc kubenswrapper[5050]: W1123 17:09:10.524169 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfd799923_afa0_426e_a8b9_ac8bb231f329.slice/crio-9188b95ab609a929fc9d6fa10b567f297593a6616f7ed33691f3779fea785e70 WatchSource:0}: Error finding container 9188b95ab609a929fc9d6fa10b567f297593a6616f7ed33691f3779fea785e70: Status 404 returned error can't find the container with id 9188b95ab609a929fc9d6fa10b567f297593a6616f7ed33691f3779fea785e70
Nov 23 17:09:10 crc kubenswrapper[5050]: I1123 17:09:10.842268 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lz7vc" event={"ID":"fd799923-afa0-426e-a8b9-ac8bb231f329","Type":"ContainerStarted","Data":"9188b95ab609a929fc9d6fa10b567f297593a6616f7ed33691f3779fea785e70"}
Nov 23 17:09:10 crc kubenswrapper[5050]: I1123 17:09:10.847942 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nb9sl" event={"ID":"7e2d257b-bbd5-4ac7-b7fb-b34e35c39b07","Type":"ContainerStarted","Data":"2bd589973a94a0249871d6c71bf596c7aa69b2f533d3aaf36a7274934882cc0d"}
Nov 23 17:09:11 crc kubenswrapper[5050]: I1123 17:09:11.866005 5050 generic.go:334] "Generic (PLEG): container finished" podID="7e2d257b-bbd5-4ac7-b7fb-b34e35c39b07" containerID="2bd589973a94a0249871d6c71bf596c7aa69b2f533d3aaf36a7274934882cc0d" exitCode=0
Nov 23 17:09:11 crc kubenswrapper[5050]: I1123 17:09:11.866075 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nb9sl" event={"ID":"7e2d257b-bbd5-4ac7-b7fb-b34e35c39b07","Type":"ContainerDied","Data":"2bd589973a94a0249871d6c71bf596c7aa69b2f533d3aaf36a7274934882cc0d"}
Nov 23 17:09:11 crc kubenswrapper[5050]: I1123 17:09:11.872024 5050 generic.go:334] "Generic (PLEG): container finished" podID="fd799923-afa0-426e-a8b9-ac8bb231f329" containerID="d76cad6b6ab824ed593c3699113f4b132a02cae2ed544b054bfb2b24473eef5d" exitCode=0
Nov 23 17:09:11 crc kubenswrapper[5050]: I1123 17:09:11.872135 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lz7vc" event={"ID":"fd799923-afa0-426e-a8b9-ac8bb231f329","Type":"ContainerDied","Data":"d76cad6b6ab824ed593c3699113f4b132a02cae2ed544b054bfb2b24473eef5d"}
Nov 23 17:09:12 crc kubenswrapper[5050]: I1123 17:09:12.885707 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lz7vc" event={"ID":"fd799923-afa0-426e-a8b9-ac8bb231f329","Type":"ContainerStarted","Data":"d07501b14052eed48f00548446a10479da91657fb62577228a62b00d7818e4d7"}
Nov 23 17:09:12 crc kubenswrapper[5050]: I1123 17:09:12.891902 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nb9sl" event={"ID":"7e2d257b-bbd5-4ac7-b7fb-b34e35c39b07","Type":"ContainerStarted","Data":"20989fe492544a29433da3a6fbfe247b4b44b2a19a9a95df292ac9fbe30c94e8"}
Nov 23 17:09:12 crc kubenswrapper[5050]: I1123 17:09:12.963114 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-nb9sl" podStartSLOduration=2.509798283 podStartE2EDuration="4.963093336s" podCreationTimestamp="2025-11-23 17:09:08 +0000 UTC" firstStartedPulling="2025-11-23 17:09:09.830141855 +0000 UTC m=+8844.997138340" lastFinishedPulling="2025-11-23 17:09:12.283436908 +0000 UTC m=+8847.450433393" observedRunningTime="2025-11-23 17:09:12.957942891 +0000 UTC m=+8848.124939406" watchObservedRunningTime="2025-11-23 17:09:12.963093336 +0000 UTC m=+8848.130089821"
Nov 23 17:09:14 crc kubenswrapper[5050]: I1123 17:09:14.565521 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-bwcgt"]
Nov 23 17:09:14 crc kubenswrapper[5050]: I1123 17:09:14.573830 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bwcgt"
Nov 23 17:09:14 crc kubenswrapper[5050]: I1123 17:09:14.583331 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-bwcgt"]
Nov 23 17:09:14 crc kubenswrapper[5050]: I1123 17:09:14.604829 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/58537a23-a8ff-45cd-8163-4c3ef248cb11-catalog-content\") pod \"redhat-marketplace-bwcgt\" (UID: \"58537a23-a8ff-45cd-8163-4c3ef248cb11\") " pod="openshift-marketplace/redhat-marketplace-bwcgt"
Nov 23 17:09:14 crc kubenswrapper[5050]: I1123 17:09:14.605486 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/58537a23-a8ff-45cd-8163-4c3ef248cb11-utilities\") pod \"redhat-marketplace-bwcgt\" (UID: \"58537a23-a8ff-45cd-8163-4c3ef248cb11\") " pod="openshift-marketplace/redhat-marketplace-bwcgt"
Nov 23 17:09:14 crc kubenswrapper[5050]: I1123 17:09:14.605620 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l8j9k\" (UniqueName: \"kubernetes.io/projected/58537a23-a8ff-45cd-8163-4c3ef248cb11-kube-api-access-l8j9k\") pod \"redhat-marketplace-bwcgt\" (UID: \"58537a23-a8ff-45cd-8163-4c3ef248cb11\") " pod="openshift-marketplace/redhat-marketplace-bwcgt"
Nov 23 17:09:14 crc kubenswrapper[5050]: I1123 17:09:14.706801 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/58537a23-a8ff-45cd-8163-4c3ef248cb11-utilities\") pod \"redhat-marketplace-bwcgt\" (UID: \"58537a23-a8ff-45cd-8163-4c3ef248cb11\") " pod="openshift-marketplace/redhat-marketplace-bwcgt"
Nov 23 17:09:14 crc kubenswrapper[5050]: I1123 17:09:14.706849 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l8j9k\" (UniqueName: \"kubernetes.io/projected/58537a23-a8ff-45cd-8163-4c3ef248cb11-kube-api-access-l8j9k\") pod \"redhat-marketplace-bwcgt\" (UID: \"58537a23-a8ff-45cd-8163-4c3ef248cb11\") " pod="openshift-marketplace/redhat-marketplace-bwcgt"
Nov 23 17:09:14 crc kubenswrapper[5050]: I1123 17:09:14.706949 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/58537a23-a8ff-45cd-8163-4c3ef248cb11-catalog-content\") pod \"redhat-marketplace-bwcgt\" (UID: \"58537a23-a8ff-45cd-8163-4c3ef248cb11\") " pod="openshift-marketplace/redhat-marketplace-bwcgt"
Nov 23 17:09:14 crc kubenswrapper[5050]: I1123 17:09:14.707461 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/58537a23-a8ff-45cd-8163-4c3ef248cb11-catalog-content\") pod \"redhat-marketplace-bwcgt\" (UID: \"58537a23-a8ff-45cd-8163-4c3ef248cb11\") " pod="openshift-marketplace/redhat-marketplace-bwcgt"
Nov 23 17:09:14 crc kubenswrapper[5050]: I1123 17:09:14.707570 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/58537a23-a8ff-45cd-8163-4c3ef248cb11-utilities\") pod \"redhat-marketplace-bwcgt\" (UID: \"58537a23-a8ff-45cd-8163-4c3ef248cb11\") " pod="openshift-marketplace/redhat-marketplace-bwcgt"
Nov 23 17:09:14 crc kubenswrapper[5050]: I1123 17:09:14.729533 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l8j9k\" (UniqueName: \"kubernetes.io/projected/58537a23-a8ff-45cd-8163-4c3ef248cb11-kube-api-access-l8j9k\") pod \"redhat-marketplace-bwcgt\" (UID: \"58537a23-a8ff-45cd-8163-4c3ef248cb11\") " pod="openshift-marketplace/redhat-marketplace-bwcgt"
Nov 23 17:09:14 crc kubenswrapper[5050]: I1123 17:09:14.903382 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bwcgt"
Nov 23 17:09:15 crc kubenswrapper[5050]: I1123 17:09:15.474091 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-bwcgt"]
Nov 23 17:09:15 crc kubenswrapper[5050]: W1123 17:09:15.487380 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod58537a23_a8ff_45cd_8163_4c3ef248cb11.slice/crio-d183d12aafa84e541ecae511bc39f969dd596cae150adfeb48d69523391b7b0d WatchSource:0}: Error finding container d183d12aafa84e541ecae511bc39f969dd596cae150adfeb48d69523391b7b0d: Status 404 returned error can't find the container with id d183d12aafa84e541ecae511bc39f969dd596cae150adfeb48d69523391b7b0d
Nov 23 17:09:15 crc kubenswrapper[5050]: I1123 17:09:15.932280 5050 generic.go:334] "Generic (PLEG): container finished" podID="fd799923-afa0-426e-a8b9-ac8bb231f329" containerID="d07501b14052eed48f00548446a10479da91657fb62577228a62b00d7818e4d7" exitCode=0
Nov 23 17:09:15 crc kubenswrapper[5050]: I1123 17:09:15.932710 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lz7vc" event={"ID":"fd799923-afa0-426e-a8b9-ac8bb231f329","Type":"ContainerDied","Data":"d07501b14052eed48f00548446a10479da91657fb62577228a62b00d7818e4d7"}
Nov 23 17:09:15 crc kubenswrapper[5050]: I1123 17:09:15.936092 5050 generic.go:334] "Generic (PLEG): container finished" podID="58537a23-a8ff-45cd-8163-4c3ef248cb11" containerID="5cd7495584c2d5bb6536f94d1b12800138574546b92bd232c4f50c2f9a364a7c" exitCode=0
Nov 23 17:09:15 crc kubenswrapper[5050]: I1123 17:09:15.936158 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bwcgt" event={"ID":"58537a23-a8ff-45cd-8163-4c3ef248cb11","Type":"ContainerDied","Data":"5cd7495584c2d5bb6536f94d1b12800138574546b92bd232c4f50c2f9a364a7c"}
Nov 23 17:09:15 crc kubenswrapper[5050]: I1123 17:09:15.936229 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bwcgt" event={"ID":"58537a23-a8ff-45cd-8163-4c3ef248cb11","Type":"ContainerStarted","Data":"d183d12aafa84e541ecae511bc39f969dd596cae150adfeb48d69523391b7b0d"}
Nov 23 17:09:16 crc kubenswrapper[5050]: I1123 17:09:16.950365 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lz7vc" event={"ID":"fd799923-afa0-426e-a8b9-ac8bb231f329","Type":"ContainerStarted","Data":"179242da22fb62a49df7cf5e92b0ab73e4d20539915405b3bcf1b7a67a3806b6"}
Nov 23 17:09:16 crc kubenswrapper[5050]: I1123 17:09:16.976210 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-lz7vc" podStartSLOduration=3.5233591300000002 podStartE2EDuration="7.976183076s" podCreationTimestamp="2025-11-23 17:09:09 +0000 UTC" firstStartedPulling="2025-11-23 17:09:11.875751107 +0000 UTC m=+8847.042747602" lastFinishedPulling="2025-11-23 17:09:16.328575053 +0000 UTC m=+8851.495571548" observedRunningTime="2025-11-23 17:09:16.967392408 +0000 UTC m=+8852.134388883" watchObservedRunningTime="2025-11-23 17:09:16.976183076 +0000 UTC m=+8852.143179561"
Nov 23 17:09:17 crc kubenswrapper[5050]: I1123 17:09:17.964598 5050 generic.go:334] "Generic (PLEG): container finished" podID="58537a23-a8ff-45cd-8163-4c3ef248cb11" containerID="61924871dc72da97e188c7c061eb7ccb3e063fe26e985537198b8db2cd1d8ff8" exitCode=0
Nov 23 17:09:17 crc kubenswrapper[5050]: I1123 17:09:17.964721 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bwcgt" event={"ID":"58537a23-a8ff-45cd-8163-4c3ef248cb11","Type":"ContainerDied","Data":"61924871dc72da97e188c7c061eb7ccb3e063fe26e985537198b8db2cd1d8ff8"}
Nov 23 17:09:18 crc kubenswrapper[5050]: I1123 17:09:18.527808 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-nb9sl"
Nov 23 17:09:18 crc kubenswrapper[5050]: I1123 17:09:18.528732 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-nb9sl"
Nov 23 17:09:18 crc kubenswrapper[5050]: I1123 17:09:18.981792 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bwcgt" event={"ID":"58537a23-a8ff-45cd-8163-4c3ef248cb11","Type":"ContainerStarted","Data":"7288aba8036408feb0ae022e8ee15ca5ae4067f2466a97eba9492f6226737854"}
Nov 23 17:09:19 crc kubenswrapper[5050]: I1123 17:09:19.004195 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-bwcgt" podStartSLOduration=2.506762672 podStartE2EDuration="5.004170261s" podCreationTimestamp="2025-11-23 17:09:14 +0000 UTC" firstStartedPulling="2025-11-23 17:09:15.9378114 +0000 UTC m=+8851.104807915" lastFinishedPulling="2025-11-23 17:09:18.435219019 +0000 UTC m=+8853.602215504" observedRunningTime="2025-11-23 17:09:19.002094822 +0000 UTC m=+8854.169091317" watchObservedRunningTime="2025-11-23 17:09:19.004170261 +0000 UTC m=+8854.171166746"
Nov 23 17:09:19 crc kubenswrapper[5050]: I1123 17:09:19.598528 5050 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-nb9sl" podUID="7e2d257b-bbd5-4ac7-b7fb-b34e35c39b07" containerName="registry-server" probeResult="failure" output=<
Nov 23 17:09:19 crc kubenswrapper[5050]: timeout: failed to connect service ":50051" within 1s
Nov 23 17:09:19 crc kubenswrapper[5050]: >
Nov 23 17:09:19 crc kubenswrapper[5050]: I1123 17:09:19.888637 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-lz7vc"
Nov 23 17:09:19 crc kubenswrapper[5050]: I1123 17:09:19.888714 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-lz7vc"
Nov 23 17:09:20 crc kubenswrapper[5050]: I1123 17:09:20.950969 5050 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-lz7vc" podUID="fd799923-afa0-426e-a8b9-ac8bb231f329" containerName="registry-server" probeResult="failure" output=<
Nov 23 17:09:20 crc kubenswrapper[5050]: timeout: failed to connect service ":50051" within 1s
Nov 23 17:09:20 crc kubenswrapper[5050]: >
Nov 23 17:09:24 crc kubenswrapper[5050]: I1123 17:09:24.904123 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-bwcgt"
Nov 23 17:09:24 crc kubenswrapper[5050]: I1123 17:09:24.905264 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-bwcgt"
Nov 23 17:09:25 crc kubenswrapper[5050]: I1123 17:09:25.037969 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-bwcgt"
Nov 23 17:09:25 crc kubenswrapper[5050]: I1123 17:09:25.138764 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-bwcgt"
Nov 23 17:09:27 crc kubenswrapper[5050]: I1123 17:09:27.157855 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-bwcgt"]
Nov 23 17:09:27 crc kubenswrapper[5050]: I1123 17:09:27.158606 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-bwcgt" podUID="58537a23-a8ff-45cd-8163-4c3ef248cb11" containerName="registry-server" containerID="cri-o://7288aba8036408feb0ae022e8ee15ca5ae4067f2466a97eba9492f6226737854" gracePeriod=2
Nov 23 17:09:27 crc kubenswrapper[5050]: E1123 17:09:27.298558 5050 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod58537a23_a8ff_45cd_8163_4c3ef248cb11.slice/crio-conmon-7288aba8036408feb0ae022e8ee15ca5ae4067f2466a97eba9492f6226737854.scope\": RecentStats: unable to find data in memory cache]"
Nov 23 17:09:27 crc kubenswrapper[5050]: I1123 17:09:27.760118 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bwcgt"
Nov 23 17:09:27 crc kubenswrapper[5050]: I1123 17:09:27.799522 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/58537a23-a8ff-45cd-8163-4c3ef248cb11-utilities\") pod \"58537a23-a8ff-45cd-8163-4c3ef248cb11\" (UID: \"58537a23-a8ff-45cd-8163-4c3ef248cb11\") "
Nov 23 17:09:27 crc kubenswrapper[5050]: I1123 17:09:27.800198 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/58537a23-a8ff-45cd-8163-4c3ef248cb11-catalog-content\") pod \"58537a23-a8ff-45cd-8163-4c3ef248cb11\" (UID: \"58537a23-a8ff-45cd-8163-4c3ef248cb11\") "
Nov 23 17:09:27 crc kubenswrapper[5050]: I1123 17:09:27.800341 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l8j9k\" (UniqueName: \"kubernetes.io/projected/58537a23-a8ff-45cd-8163-4c3ef248cb11-kube-api-access-l8j9k\") pod \"58537a23-a8ff-45cd-8163-4c3ef248cb11\" (UID: \"58537a23-a8ff-45cd-8163-4c3ef248cb11\") "
Nov 23 17:09:27 crc kubenswrapper[5050]: I1123 17:09:27.801557 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/58537a23-a8ff-45cd-8163-4c3ef248cb11-utilities" (OuterVolumeSpecName: "utilities") pod "58537a23-a8ff-45cd-8163-4c3ef248cb11" (UID: "58537a23-a8ff-45cd-8163-4c3ef248cb11"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 23 17:09:27 crc kubenswrapper[5050]: I1123 17:09:27.810806 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/58537a23-a8ff-45cd-8163-4c3ef248cb11-kube-api-access-l8j9k" (OuterVolumeSpecName: "kube-api-access-l8j9k") pod "58537a23-a8ff-45cd-8163-4c3ef248cb11" (UID: "58537a23-a8ff-45cd-8163-4c3ef248cb11"). InnerVolumeSpecName "kube-api-access-l8j9k". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 23 17:09:27 crc kubenswrapper[5050]: I1123 17:09:27.842882 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/58537a23-a8ff-45cd-8163-4c3ef248cb11-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "58537a23-a8ff-45cd-8163-4c3ef248cb11" (UID: "58537a23-a8ff-45cd-8163-4c3ef248cb11"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 23 17:09:27 crc kubenswrapper[5050]: I1123 17:09:27.903681 5050 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/58537a23-a8ff-45cd-8163-4c3ef248cb11-utilities\") on node \"crc\" DevicePath \"\""
Nov 23 17:09:27 crc kubenswrapper[5050]: I1123 17:09:27.903815 5050 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/58537a23-a8ff-45cd-8163-4c3ef248cb11-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 23 17:09:27 crc kubenswrapper[5050]: I1123 17:09:27.903834 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l8j9k\" (UniqueName: \"kubernetes.io/projected/58537a23-a8ff-45cd-8163-4c3ef248cb11-kube-api-access-l8j9k\") on node \"crc\" DevicePath \"\""
Nov 23 17:09:28 crc kubenswrapper[5050]: I1123 17:09:28.112757 5050 generic.go:334] "Generic (PLEG): container finished" podID="58537a23-a8ff-45cd-8163-4c3ef248cb11" containerID="7288aba8036408feb0ae022e8ee15ca5ae4067f2466a97eba9492f6226737854" exitCode=0
Nov 23 17:09:28 crc kubenswrapper[5050]: I1123 17:09:28.112812 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bwcgt" event={"ID":"58537a23-a8ff-45cd-8163-4c3ef248cb11","Type":"ContainerDied","Data":"7288aba8036408feb0ae022e8ee15ca5ae4067f2466a97eba9492f6226737854"}
Nov 23 17:09:28 crc kubenswrapper[5050]: I1123 17:09:28.112847 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bwcgt" event={"ID":"58537a23-a8ff-45cd-8163-4c3ef248cb11","Type":"ContainerDied","Data":"d183d12aafa84e541ecae511bc39f969dd596cae150adfeb48d69523391b7b0d"}
Nov 23 17:09:28 crc kubenswrapper[5050]: I1123 17:09:28.112871 5050 scope.go:117] "RemoveContainer" containerID="7288aba8036408feb0ae022e8ee15ca5ae4067f2466a97eba9492f6226737854"
Nov 23 17:09:28 crc kubenswrapper[5050]: I1123 17:09:28.113171 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bwcgt"
Nov 23 17:09:28 crc kubenswrapper[5050]: I1123 17:09:28.164807 5050 scope.go:117] "RemoveContainer" containerID="61924871dc72da97e188c7c061eb7ccb3e063fe26e985537198b8db2cd1d8ff8"
Nov 23 17:09:28 crc kubenswrapper[5050]: I1123 17:09:28.165059 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-bwcgt"]
Nov 23 17:09:28 crc kubenswrapper[5050]: I1123 17:09:28.178295 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-bwcgt"]
Nov 23 17:09:28 crc kubenswrapper[5050]: I1123 17:09:28.193601 5050 scope.go:117] "RemoveContainer" containerID="5cd7495584c2d5bb6536f94d1b12800138574546b92bd232c4f50c2f9a364a7c"
Nov 23 17:09:28 crc kubenswrapper[5050]: I1123 17:09:28.260039 5050 scope.go:117] "RemoveContainer" containerID="7288aba8036408feb0ae022e8ee15ca5ae4067f2466a97eba9492f6226737854"
Nov 23 17:09:28 crc kubenswrapper[5050]: E1123 17:09:28.261179 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7288aba8036408feb0ae022e8ee15ca5ae4067f2466a97eba9492f6226737854\": container with ID starting with 7288aba8036408feb0ae022e8ee15ca5ae4067f2466a97eba9492f6226737854 not found: ID does not exist" containerID="7288aba8036408feb0ae022e8ee15ca5ae4067f2466a97eba9492f6226737854"
Nov 23 17:09:28 crc kubenswrapper[5050]: I1123 17:09:28.261231 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7288aba8036408feb0ae022e8ee15ca5ae4067f2466a97eba9492f6226737854"} err="failed to get container status \"7288aba8036408feb0ae022e8ee15ca5ae4067f2466a97eba9492f6226737854\": rpc error: code = NotFound desc = could not find container \"7288aba8036408feb0ae022e8ee15ca5ae4067f2466a97eba9492f6226737854\": container with ID starting with 7288aba8036408feb0ae022e8ee15ca5ae4067f2466a97eba9492f6226737854 not found: ID does not exist"
Nov 23 17:09:28 crc kubenswrapper[5050]: I1123 17:09:28.261270 5050 scope.go:117] "RemoveContainer" containerID="61924871dc72da97e188c7c061eb7ccb3e063fe26e985537198b8db2cd1d8ff8"
Nov 23 17:09:28 crc kubenswrapper[5050]: E1123 17:09:28.262080 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"61924871dc72da97e188c7c061eb7ccb3e063fe26e985537198b8db2cd1d8ff8\": container with ID starting with 61924871dc72da97e188c7c061eb7ccb3e063fe26e985537198b8db2cd1d8ff8 not found: ID does not exist" containerID="61924871dc72da97e188c7c061eb7ccb3e063fe26e985537198b8db2cd1d8ff8"
Nov 23 17:09:28 crc kubenswrapper[5050]: I1123 17:09:28.262118 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"61924871dc72da97e188c7c061eb7ccb3e063fe26e985537198b8db2cd1d8ff8"} err="failed to get container status \"61924871dc72da97e188c7c061eb7ccb3e063fe26e985537198b8db2cd1d8ff8\": rpc error: code = NotFound desc = could not find container \"61924871dc72da97e188c7c061eb7ccb3e063fe26e985537198b8db2cd1d8ff8\": container with ID starting with 61924871dc72da97e188c7c061eb7ccb3e063fe26e985537198b8db2cd1d8ff8 not found: ID does not exist"
Nov 23 17:09:28 crc kubenswrapper[5050]: I1123 17:09:28.262172 5050 scope.go:117] "RemoveContainer" containerID="5cd7495584c2d5bb6536f94d1b12800138574546b92bd232c4f50c2f9a364a7c"
Nov 23 17:09:28 crc kubenswrapper[5050]: E1123 17:09:28.262599 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5cd7495584c2d5bb6536f94d1b12800138574546b92bd232c4f50c2f9a364a7c\": container with ID starting with 5cd7495584c2d5bb6536f94d1b12800138574546b92bd232c4f50c2f9a364a7c not found: ID does not exist" containerID="5cd7495584c2d5bb6536f94d1b12800138574546b92bd232c4f50c2f9a364a7c"
Nov 23 17:09:28 crc kubenswrapper[5050]: I1123 17:09:28.262624 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5cd7495584c2d5bb6536f94d1b12800138574546b92bd232c4f50c2f9a364a7c"} err="failed to get container status \"5cd7495584c2d5bb6536f94d1b12800138574546b92bd232c4f50c2f9a364a7c\": rpc error: code = NotFound desc = could not find container \"5cd7495584c2d5bb6536f94d1b12800138574546b92bd232c4f50c2f9a364a7c\": container with ID starting with 5cd7495584c2d5bb6536f94d1b12800138574546b92bd232c4f50c2f9a364a7c not found: ID does not exist"
Nov 23 17:09:28 crc kubenswrapper[5050]: I1123 17:09:28.591301 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-nb9sl"
Nov 23 17:09:28 crc kubenswrapper[5050]: I1123 17:09:28.642072 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-nb9sl"
Nov 23 17:09:29 crc kubenswrapper[5050]: I1123 17:09:29.572807 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="58537a23-a8ff-45cd-8163-4c3ef248cb11" path="/var/lib/kubelet/pods/58537a23-a8ff-45cd-8163-4c3ef248cb11/volumes"
Nov 23 17:09:29 crc kubenswrapper[5050]: I1123 17:09:29.960619 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-lz7vc"
Nov 23 17:09:30 crc kubenswrapper[5050]: I1123 17:09:30.073337 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-lz7vc"
Nov 23 17:09:30 crc kubenswrapper[5050]: I1123 17:09:30.952721 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-nb9sl"]
Nov 23 17:09:30 crc kubenswrapper[5050]: I1123 17:09:30.953137 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-nb9sl" podUID="7e2d257b-bbd5-4ac7-b7fb-b34e35c39b07" containerName="registry-server" containerID="cri-o://20989fe492544a29433da3a6fbfe247b4b44b2a19a9a95df292ac9fbe30c94e8" gracePeriod=2
Nov 23 17:09:31 crc kubenswrapper[5050]: I1123 17:09:31.172279 5050 generic.go:334] "Generic (PLEG): container finished" podID="7e2d257b-bbd5-4ac7-b7fb-b34e35c39b07" containerID="20989fe492544a29433da3a6fbfe247b4b44b2a19a9a95df292ac9fbe30c94e8" exitCode=0
Nov 23 17:09:31 crc kubenswrapper[5050]: I1123 17:09:31.172420 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nb9sl" event={"ID":"7e2d257b-bbd5-4ac7-b7fb-b34e35c39b07","Type":"ContainerDied","Data":"20989fe492544a29433da3a6fbfe247b4b44b2a19a9a95df292ac9fbe30c94e8"}
Nov 23 17:09:31 crc kubenswrapper[5050]: I1123 17:09:31.506956 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-nb9sl"
Nov 23 17:09:31 crc kubenswrapper[5050]: I1123 17:09:31.706508 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7e2d257b-bbd5-4ac7-b7fb-b34e35c39b07-catalog-content\") pod \"7e2d257b-bbd5-4ac7-b7fb-b34e35c39b07\" (UID: \"7e2d257b-bbd5-4ac7-b7fb-b34e35c39b07\") "
Nov 23 17:09:31 crc kubenswrapper[5050]: I1123 17:09:31.707194 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4fmm9\" (UniqueName: \"kubernetes.io/projected/7e2d257b-bbd5-4ac7-b7fb-b34e35c39b07-kube-api-access-4fmm9\") pod \"7e2d257b-bbd5-4ac7-b7fb-b34e35c39b07\" (UID: \"7e2d257b-bbd5-4ac7-b7fb-b34e35c39b07\") "
Nov 23 17:09:31 crc kubenswrapper[5050]: I1123 17:09:31.707315 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7e2d257b-bbd5-4ac7-b7fb-b34e35c39b07-utilities\") pod \"7e2d257b-bbd5-4ac7-b7fb-b34e35c39b07\" (UID: \"7e2d257b-bbd5-4ac7-b7fb-b34e35c39b07\") "
Nov 23 17:09:31 crc kubenswrapper[5050]: I1123 17:09:31.708438 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7e2d257b-bbd5-4ac7-b7fb-b34e35c39b07-utilities" (OuterVolumeSpecName: "utilities") pod "7e2d257b-bbd5-4ac7-b7fb-b34e35c39b07" (UID: "7e2d257b-bbd5-4ac7-b7fb-b34e35c39b07"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 23 17:09:31 crc kubenswrapper[5050]: I1123 17:09:31.716475 5050 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7e2d257b-bbd5-4ac7-b7fb-b34e35c39b07-utilities\") on node \"crc\" DevicePath \"\""
Nov 23 17:09:31 crc kubenswrapper[5050]: I1123 17:09:31.717877 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7e2d257b-bbd5-4ac7-b7fb-b34e35c39b07-kube-api-access-4fmm9" (OuterVolumeSpecName: "kube-api-access-4fmm9") pod "7e2d257b-bbd5-4ac7-b7fb-b34e35c39b07" (UID: "7e2d257b-bbd5-4ac7-b7fb-b34e35c39b07"). InnerVolumeSpecName "kube-api-access-4fmm9". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 23 17:09:31 crc kubenswrapper[5050]: I1123 17:09:31.820438 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4fmm9\" (UniqueName: \"kubernetes.io/projected/7e2d257b-bbd5-4ac7-b7fb-b34e35c39b07-kube-api-access-4fmm9\") on node \"crc\" DevicePath \"\""
Nov 23 17:09:31 crc kubenswrapper[5050]: I1123 17:09:31.826960 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7e2d257b-bbd5-4ac7-b7fb-b34e35c39b07-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "7e2d257b-bbd5-4ac7-b7fb-b34e35c39b07" (UID: "7e2d257b-bbd5-4ac7-b7fb-b34e35c39b07"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 23 17:09:31 crc kubenswrapper[5050]: I1123 17:09:31.922732 5050 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7e2d257b-bbd5-4ac7-b7fb-b34e35c39b07-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 23 17:09:32 crc kubenswrapper[5050]: I1123 17:09:32.189214 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nb9sl" event={"ID":"7e2d257b-bbd5-4ac7-b7fb-b34e35c39b07","Type":"ContainerDied","Data":"aa603cbef4923e29b81f53599d486b91d0696534cac9bc858c8bd6d4962d0b02"}
Nov 23 17:09:32 crc kubenswrapper[5050]: I1123 17:09:32.189288 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-nb9sl"
Nov 23 17:09:32 crc kubenswrapper[5050]: I1123 17:09:32.189329 5050 scope.go:117] "RemoveContainer" containerID="20989fe492544a29433da3a6fbfe247b4b44b2a19a9a95df292ac9fbe30c94e8"
Nov 23 17:09:32 crc kubenswrapper[5050]: I1123 17:09:32.231532 5050 scope.go:117] "RemoveContainer" containerID="2bd589973a94a0249871d6c71bf596c7aa69b2f533d3aaf36a7274934882cc0d"
Nov 23 17:09:32 crc kubenswrapper[5050]: I1123 17:09:32.240101 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-nb9sl"]
Nov 23 17:09:32 crc kubenswrapper[5050]: I1123 17:09:32.252502 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-nb9sl"]
Nov 23 17:09:32 crc kubenswrapper[5050]: I1123 17:09:32.270560 5050 scope.go:117] "RemoveContainer" containerID="dfb0627483b6b94791d230741a7f6f16c1fb1cc9c300a15de8efd4674f7c5068"
Nov 23 17:09:33 crc kubenswrapper[5050]: I1123 17:09:33.363313 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-lz7vc"]
Nov 23 17:09:33 crc kubenswrapper[5050]: I1123 17:09:33.364806 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-lz7vc" podUID="fd799923-afa0-426e-a8b9-ac8bb231f329" containerName="registry-server" containerID="cri-o://179242da22fb62a49df7cf5e92b0ab73e4d20539915405b3bcf1b7a67a3806b6" gracePeriod=2
Nov 23 17:09:33 crc kubenswrapper[5050]: I1123 17:09:33.585193 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7e2d257b-bbd5-4ac7-b7fb-b34e35c39b07" path="/var/lib/kubelet/pods/7e2d257b-bbd5-4ac7-b7fb-b34e35c39b07/volumes"
Nov 23 17:09:33 crc kubenswrapper[5050]: I1123 17:09:33.930457 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-lz7vc"
Nov 23 17:09:34 crc kubenswrapper[5050]: I1123 17:09:34.003152 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fd799923-afa0-426e-a8b9-ac8bb231f329-utilities\") pod \"fd799923-afa0-426e-a8b9-ac8bb231f329\" (UID: \"fd799923-afa0-426e-a8b9-ac8bb231f329\") "
Nov 23 17:09:34 crc kubenswrapper[5050]: I1123 17:09:34.003310 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h8gxf\" (UniqueName: \"kubernetes.io/projected/fd799923-afa0-426e-a8b9-ac8bb231f329-kube-api-access-h8gxf\") pod \"fd799923-afa0-426e-a8b9-ac8bb231f329\" (UID: \"fd799923-afa0-426e-a8b9-ac8bb231f329\") "
Nov 23 17:09:34 crc kubenswrapper[5050]: I1123 17:09:34.004273 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fd799923-afa0-426e-a8b9-ac8bb231f329-utilities" (OuterVolumeSpecName: "utilities") pod "fd799923-afa0-426e-a8b9-ac8bb231f329" (UID: "fd799923-afa0-426e-a8b9-ac8bb231f329"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 23 17:09:34 crc kubenswrapper[5050]: I1123 17:09:34.004568 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fd799923-afa0-426e-a8b9-ac8bb231f329-catalog-content\") pod \"fd799923-afa0-426e-a8b9-ac8bb231f329\" (UID: \"fd799923-afa0-426e-a8b9-ac8bb231f329\") "
Nov 23 17:09:34 crc kubenswrapper[5050]: I1123 17:09:34.005424 5050 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fd799923-afa0-426e-a8b9-ac8bb231f329-utilities\") on node \"crc\" DevicePath \"\""
Nov 23 17:09:34 crc kubenswrapper[5050]: I1123 17:09:34.011168 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fd799923-afa0-426e-a8b9-ac8bb231f329-kube-api-access-h8gxf" (OuterVolumeSpecName: "kube-api-access-h8gxf") pod "fd799923-afa0-426e-a8b9-ac8bb231f329" (UID: "fd799923-afa0-426e-a8b9-ac8bb231f329"). InnerVolumeSpecName "kube-api-access-h8gxf". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 23 17:09:34 crc kubenswrapper[5050]: I1123 17:09:34.056395 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fd799923-afa0-426e-a8b9-ac8bb231f329-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "fd799923-afa0-426e-a8b9-ac8bb231f329" (UID: "fd799923-afa0-426e-a8b9-ac8bb231f329"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 23 17:09:34 crc kubenswrapper[5050]: I1123 17:09:34.107404 5050 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fd799923-afa0-426e-a8b9-ac8bb231f329-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 23 17:09:34 crc kubenswrapper[5050]: I1123 17:09:34.107560 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h8gxf\" (UniqueName: \"kubernetes.io/projected/fd799923-afa0-426e-a8b9-ac8bb231f329-kube-api-access-h8gxf\") on node \"crc\" DevicePath \"\""
Nov 23 17:09:34 crc kubenswrapper[5050]: I1123 17:09:34.224823 5050 generic.go:334] "Generic (PLEG): container finished" podID="fd799923-afa0-426e-a8b9-ac8bb231f329" containerID="179242da22fb62a49df7cf5e92b0ab73e4d20539915405b3bcf1b7a67a3806b6" exitCode=0
Nov 23 17:09:34 crc kubenswrapper[5050]: I1123 17:09:34.224896 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lz7vc" event={"ID":"fd799923-afa0-426e-a8b9-ac8bb231f329","Type":"ContainerDied","Data":"179242da22fb62a49df7cf5e92b0ab73e4d20539915405b3bcf1b7a67a3806b6"}
Nov 23 17:09:34 crc kubenswrapper[5050]: I1123 17:09:34.225197 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lz7vc" event={"ID":"fd799923-afa0-426e-a8b9-ac8bb231f329","Type":"ContainerDied","Data":"9188b95ab609a929fc9d6fa10b567f297593a6616f7ed33691f3779fea785e70"}
Nov 23 17:09:34 crc kubenswrapper[5050]: I1123 17:09:34.225014 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-lz7vc"
Nov 23 17:09:34 crc kubenswrapper[5050]: I1123 17:09:34.225229 5050 scope.go:117] "RemoveContainer" containerID="179242da22fb62a49df7cf5e92b0ab73e4d20539915405b3bcf1b7a67a3806b6"
Nov 23 17:09:34 crc kubenswrapper[5050]: I1123 17:09:34.285063 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-lz7vc"]
Nov 23 17:09:34 crc kubenswrapper[5050]: I1123 17:09:34.296802 5050 scope.go:117] "RemoveContainer" containerID="d07501b14052eed48f00548446a10479da91657fb62577228a62b00d7818e4d7"
Nov 23 17:09:34 crc kubenswrapper[5050]: I1123 17:09:34.323356 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-lz7vc"]
Nov 23 17:09:34 crc kubenswrapper[5050]: I1123 17:09:34.342298 5050 scope.go:117] "RemoveContainer" containerID="d76cad6b6ab824ed593c3699113f4b132a02cae2ed544b054bfb2b24473eef5d"
Nov 23 17:09:34 crc kubenswrapper[5050]: I1123 17:09:34.384316 5050 scope.go:117] "RemoveContainer" containerID="179242da22fb62a49df7cf5e92b0ab73e4d20539915405b3bcf1b7a67a3806b6"
Nov 23 17:09:34 crc kubenswrapper[5050]: E1123 17:09:34.385046 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"179242da22fb62a49df7cf5e92b0ab73e4d20539915405b3bcf1b7a67a3806b6\": container with ID starting with 179242da22fb62a49df7cf5e92b0ab73e4d20539915405b3bcf1b7a67a3806b6 not found: ID does not exist" containerID="179242da22fb62a49df7cf5e92b0ab73e4d20539915405b3bcf1b7a67a3806b6"
Nov 23 17:09:34 crc kubenswrapper[5050]: I1123 17:09:34.385103 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"179242da22fb62a49df7cf5e92b0ab73e4d20539915405b3bcf1b7a67a3806b6"} err="failed to get container status \"179242da22fb62a49df7cf5e92b0ab73e4d20539915405b3bcf1b7a67a3806b6\": rpc error: code = NotFound desc = could not find container \"179242da22fb62a49df7cf5e92b0ab73e4d20539915405b3bcf1b7a67a3806b6\": container with ID starting with 179242da22fb62a49df7cf5e92b0ab73e4d20539915405b3bcf1b7a67a3806b6 not found: ID does not exist"
Nov 23 17:09:34 crc kubenswrapper[5050]: I1123 17:09:34.385136 5050 scope.go:117] "RemoveContainer" containerID="d07501b14052eed48f00548446a10479da91657fb62577228a62b00d7818e4d7"
Nov 23 17:09:34 crc kubenswrapper[5050]: E1123 17:09:34.385548 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d07501b14052eed48f00548446a10479da91657fb62577228a62b00d7818e4d7\": container with ID starting with d07501b14052eed48f00548446a10479da91657fb62577228a62b00d7818e4d7 not found: ID does not exist" containerID="d07501b14052eed48f00548446a10479da91657fb62577228a62b00d7818e4d7"
Nov 23 17:09:34 crc kubenswrapper[5050]: I1123 17:09:34.385611 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d07501b14052eed48f00548446a10479da91657fb62577228a62b00d7818e4d7"} err="failed to get container status \"d07501b14052eed48f00548446a10479da91657fb62577228a62b00d7818e4d7\": rpc error: code = NotFound desc = could not find container \"d07501b14052eed48f00548446a10479da91657fb62577228a62b00d7818e4d7\": container with ID starting with d07501b14052eed48f00548446a10479da91657fb62577228a62b00d7818e4d7 not found: ID does not exist"
Nov 23 17:09:34 crc kubenswrapper[5050]: I1123 17:09:34.385646 5050 scope.go:117] "RemoveContainer" containerID="d76cad6b6ab824ed593c3699113f4b132a02cae2ed544b054bfb2b24473eef5d"
Nov 23 17:09:34 crc kubenswrapper[5050]: E1123 17:09:34.385986 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d76cad6b6ab824ed593c3699113f4b132a02cae2ed544b054bfb2b24473eef5d\": container with ID starting with d76cad6b6ab824ed593c3699113f4b132a02cae2ed544b054bfb2b24473eef5d not found: ID does not exist" containerID="d76cad6b6ab824ed593c3699113f4b132a02cae2ed544b054bfb2b24473eef5d"
Nov 23 17:09:34 crc kubenswrapper[5050]: I1123 17:09:34.386023 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d76cad6b6ab824ed593c3699113f4b132a02cae2ed544b054bfb2b24473eef5d"} err="failed to get container status \"d76cad6b6ab824ed593c3699113f4b132a02cae2ed544b054bfb2b24473eef5d\": rpc error: code = NotFound desc = could not find container \"d76cad6b6ab824ed593c3699113f4b132a02cae2ed544b054bfb2b24473eef5d\": container with ID starting with d76cad6b6ab824ed593c3699113f4b132a02cae2ed544b054bfb2b24473eef5d not found: ID does not exist"
Nov 23 17:09:35 crc kubenswrapper[5050]: I1123 17:09:35.571138 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fd799923-afa0-426e-a8b9-ac8bb231f329" path="/var/lib/kubelet/pods/fd799923-afa0-426e-a8b9-ac8bb231f329/volumes"
Nov 23 17:10:59 crc kubenswrapper[5050]: I1123 17:10:59.224539 5050 patch_prober.go:28] interesting pod/machine-config-daemon-hlrlq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 23 17:10:59 crc kubenswrapper[5050]: I1123 17:10:59.227615 5050 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 23 17:11:07 crc kubenswrapper[5050]: I1123 17:11:07.603127 5050 generic.go:334] "Generic (PLEG): container finished" podID="7ea51431-6725-4bd0-8f42-7d79d7bb522a" containerID="bd30af1a878dd1298aa6266b3a0714e60e6e07985cca47dcc7462888cf1f8455" exitCode=0
Nov 23 17:11:07 crc kubenswrapper[5050]: I1123 17:11:07.603240 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-sriov-openstack-openstack-cell1-rz6t4" event={"ID":"7ea51431-6725-4bd0-8f42-7d79d7bb522a","Type":"ContainerDied","Data":"bd30af1a878dd1298aa6266b3a0714e60e6e07985cca47dcc7462888cf1f8455"}
Nov 23 17:11:09 crc kubenswrapper[5050]: I1123 17:11:09.219949 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-sriov-openstack-openstack-cell1-rz6t4"
Nov 23 17:11:09 crc kubenswrapper[5050]: I1123 17:11:09.397030 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/7ea51431-6725-4bd0-8f42-7d79d7bb522a-ceph\") pod \"7ea51431-6725-4bd0-8f42-7d79d7bb522a\" (UID: \"7ea51431-6725-4bd0-8f42-7d79d7bb522a\") "
Nov 23 17:11:09 crc kubenswrapper[5050]: I1123 17:11:09.397508 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7ea51431-6725-4bd0-8f42-7d79d7bb522a-inventory\") pod \"7ea51431-6725-4bd0-8f42-7d79d7bb522a\" (UID: \"7ea51431-6725-4bd0-8f42-7d79d7bb522a\") "
Nov 23 17:11:09 crc kubenswrapper[5050]: I1123 17:11:09.397650 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7ea51431-6725-4bd0-8f42-7d79d7bb522a-ssh-key\") pod \"7ea51431-6725-4bd0-8f42-7d79d7bb522a\" (UID: \"7ea51431-6725-4bd0-8f42-7d79d7bb522a\") "
Nov 23 17:11:09 crc kubenswrapper[5050]: I1123 17:11:09.397875 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l72n7\" (UniqueName: \"kubernetes.io/projected/7ea51431-6725-4bd0-8f42-7d79d7bb522a-kube-api-access-l72n7\") pod \"7ea51431-6725-4bd0-8f42-7d79d7bb522a\" (UID: \"7ea51431-6725-4bd0-8f42-7d79d7bb522a\") "
Nov 23 17:11:09 crc kubenswrapper[5050]: I1123 17:11:09.398924 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-sriov-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/7ea51431-6725-4bd0-8f42-7d79d7bb522a-neutron-sriov-agent-neutron-config-0\") pod \"7ea51431-6725-4bd0-8f42-7d79d7bb522a\" (UID: \"7ea51431-6725-4bd0-8f42-7d79d7bb522a\") "
Nov 23 17:11:09 crc kubenswrapper[5050]: I1123 17:11:09.399024 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-sriov-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7ea51431-6725-4bd0-8f42-7d79d7bb522a-neutron-sriov-combined-ca-bundle\") pod \"7ea51431-6725-4bd0-8f42-7d79d7bb522a\" (UID: \"7ea51431-6725-4bd0-8f42-7d79d7bb522a\") "
Nov 23 17:11:09 crc kubenswrapper[5050]: I1123 17:11:09.405931 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7ea51431-6725-4bd0-8f42-7d79d7bb522a-kube-api-access-l72n7" (OuterVolumeSpecName: "kube-api-access-l72n7") pod "7ea51431-6725-4bd0-8f42-7d79d7bb522a" (UID: "7ea51431-6725-4bd0-8f42-7d79d7bb522a"). InnerVolumeSpecName "kube-api-access-l72n7". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 23 17:11:09 crc kubenswrapper[5050]: I1123 17:11:09.410820 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7ea51431-6725-4bd0-8f42-7d79d7bb522a-ceph" (OuterVolumeSpecName: "ceph") pod "7ea51431-6725-4bd0-8f42-7d79d7bb522a" (UID: "7ea51431-6725-4bd0-8f42-7d79d7bb522a"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 23 17:11:09 crc kubenswrapper[5050]: I1123 17:11:09.411188 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7ea51431-6725-4bd0-8f42-7d79d7bb522a-neutron-sriov-combined-ca-bundle" (OuterVolumeSpecName: "neutron-sriov-combined-ca-bundle") pod "7ea51431-6725-4bd0-8f42-7d79d7bb522a" (UID: "7ea51431-6725-4bd0-8f42-7d79d7bb522a"). InnerVolumeSpecName "neutron-sriov-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 23 17:11:09 crc kubenswrapper[5050]: I1123 17:11:09.434165 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7ea51431-6725-4bd0-8f42-7d79d7bb522a-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "7ea51431-6725-4bd0-8f42-7d79d7bb522a" (UID: "7ea51431-6725-4bd0-8f42-7d79d7bb522a"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 23 17:11:09 crc kubenswrapper[5050]: I1123 17:11:09.436383 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7ea51431-6725-4bd0-8f42-7d79d7bb522a-neutron-sriov-agent-neutron-config-0" (OuterVolumeSpecName: "neutron-sriov-agent-neutron-config-0") pod "7ea51431-6725-4bd0-8f42-7d79d7bb522a" (UID: "7ea51431-6725-4bd0-8f42-7d79d7bb522a"). InnerVolumeSpecName "neutron-sriov-agent-neutron-config-0". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 23 17:11:09 crc kubenswrapper[5050]: I1123 17:11:09.463430 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7ea51431-6725-4bd0-8f42-7d79d7bb522a-inventory" (OuterVolumeSpecName: "inventory") pod "7ea51431-6725-4bd0-8f42-7d79d7bb522a" (UID: "7ea51431-6725-4bd0-8f42-7d79d7bb522a"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 23 17:11:09 crc kubenswrapper[5050]: I1123 17:11:09.502472 5050 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7ea51431-6725-4bd0-8f42-7d79d7bb522a-ssh-key\") on node \"crc\" DevicePath \"\""
Nov 23 17:11:09 crc kubenswrapper[5050]: I1123 17:11:09.502522 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l72n7\" (UniqueName: \"kubernetes.io/projected/7ea51431-6725-4bd0-8f42-7d79d7bb522a-kube-api-access-l72n7\") on node \"crc\" DevicePath \"\""
Nov 23 17:11:09 crc kubenswrapper[5050]: I1123 17:11:09.502542 5050 reconciler_common.go:293] "Volume detached for volume \"neutron-sriov-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/7ea51431-6725-4bd0-8f42-7d79d7bb522a-neutron-sriov-agent-neutron-config-0\") on node \"crc\" DevicePath \"\""
Nov 23 17:11:09 crc kubenswrapper[5050]: I1123 17:11:09.502558 5050 reconciler_common.go:293] "Volume detached for volume \"neutron-sriov-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7ea51431-6725-4bd0-8f42-7d79d7bb522a-neutron-sriov-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 23 17:11:09 crc kubenswrapper[5050]: I1123 17:11:09.502573 5050 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/7ea51431-6725-4bd0-8f42-7d79d7bb522a-ceph\") on node \"crc\" DevicePath \"\""
Nov 23 17:11:09 crc kubenswrapper[5050]: I1123 17:11:09.502586 5050 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7ea51431-6725-4bd0-8f42-7d79d7bb522a-inventory\") on node \"crc\" DevicePath \"\""
Nov 23 17:11:09 crc kubenswrapper[5050]: I1123 17:11:09.636392 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-sriov-openstack-openstack-cell1-rz6t4" event={"ID":"7ea51431-6725-4bd0-8f42-7d79d7bb522a","Type":"ContainerDied","Data":"06ad41da53aebd95dcf1325efd5d4d8d206e10d98ba483114f43b39cbe3f85a6"}
Nov 23 17:11:09 crc kubenswrapper[5050]: I1123 17:11:09.636462 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="06ad41da53aebd95dcf1325efd5d4d8d206e10d98ba483114f43b39cbe3f85a6"
Nov 23 17:11:09 crc kubenswrapper[5050]: I1123 17:11:09.636576 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-sriov-openstack-openstack-cell1-rz6t4"
Nov 23 17:11:09 crc kubenswrapper[5050]: I1123 17:11:09.754981 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-dhcp-openstack-openstack-cell1-q2ptj"]
Nov 23 17:11:09 crc kubenswrapper[5050]: E1123 17:11:09.755555 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7e2d257b-bbd5-4ac7-b7fb-b34e35c39b07" containerName="registry-server"
Nov 23 17:11:09 crc kubenswrapper[5050]: I1123 17:11:09.755578 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="7e2d257b-bbd5-4ac7-b7fb-b34e35c39b07" containerName="registry-server"
Nov 23 17:11:09 crc kubenswrapper[5050]: E1123 17:11:09.755590 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="58537a23-a8ff-45cd-8163-4c3ef248cb11" containerName="extract-utilities"
Nov 23 17:11:09 crc kubenswrapper[5050]: I1123 17:11:09.755597 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="58537a23-a8ff-45cd-8163-4c3ef248cb11" containerName="extract-utilities"
Nov 23 17:11:09 crc kubenswrapper[5050]: E1123 17:11:09.755614 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="58537a23-a8ff-45cd-8163-4c3ef248cb11" containerName="extract-content"
Nov 23 17:11:09 crc kubenswrapper[5050]: I1123 17:11:09.755622 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="58537a23-a8ff-45cd-8163-4c3ef248cb11" containerName="extract-content"
Nov 23 17:11:09 crc kubenswrapper[5050]: E1123 17:11:09.755631 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fd799923-afa0-426e-a8b9-ac8bb231f329" containerName="extract-utilities"
Nov 23 17:11:09 crc kubenswrapper[5050]: I1123 17:11:09.755638 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="fd799923-afa0-426e-a8b9-ac8bb231f329" containerName="extract-utilities"
Nov 23 17:11:09 crc kubenswrapper[5050]: E1123 17:11:09.755663 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7e2d257b-bbd5-4ac7-b7fb-b34e35c39b07" containerName="extract-content"
Nov 23 17:11:09 crc kubenswrapper[5050]: I1123 17:11:09.755669 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="7e2d257b-bbd5-4ac7-b7fb-b34e35c39b07" containerName="extract-content"
Nov 23 17:11:09 crc kubenswrapper[5050]: E1123 17:11:09.755685 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fd799923-afa0-426e-a8b9-ac8bb231f329" containerName="registry-server"
Nov 23 17:11:09 crc kubenswrapper[5050]: I1123 17:11:09.755695 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="fd799923-afa0-426e-a8b9-ac8bb231f329" containerName="registry-server"
Nov 23 17:11:09 crc kubenswrapper[5050]: E1123 17:11:09.755705 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7e2d257b-bbd5-4ac7-b7fb-b34e35c39b07" containerName="extract-utilities"
Nov 23 17:11:09 crc kubenswrapper[5050]: I1123 17:11:09.755711 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="7e2d257b-bbd5-4ac7-b7fb-b34e35c39b07" containerName="extract-utilities"
Nov 23 17:11:09 crc kubenswrapper[5050]: E1123 17:11:09.755721 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7ea51431-6725-4bd0-8f42-7d79d7bb522a" containerName="neutron-sriov-openstack-openstack-cell1"
Nov 23 17:11:09 crc kubenswrapper[5050]: I1123 17:11:09.755728 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="7ea51431-6725-4bd0-8f42-7d79d7bb522a" containerName="neutron-sriov-openstack-openstack-cell1"
Nov 23 17:11:09 crc kubenswrapper[5050]: E1123 17:11:09.755747 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fd799923-afa0-426e-a8b9-ac8bb231f329" containerName="extract-content"
Nov 23 17:11:09 crc kubenswrapper[5050]: I1123 17:11:09.755754 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="fd799923-afa0-426e-a8b9-ac8bb231f329" containerName="extract-content"
Nov 23 17:11:09 crc kubenswrapper[5050]: E1123 17:11:09.755762 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="58537a23-a8ff-45cd-8163-4c3ef248cb11" containerName="registry-server"
Nov 23 17:11:09 crc kubenswrapper[5050]: I1123 17:11:09.755769 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="58537a23-a8ff-45cd-8163-4c3ef248cb11" containerName="registry-server"
Nov 23 17:11:09 crc kubenswrapper[5050]: I1123 17:11:09.755973 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="7ea51431-6725-4bd0-8f42-7d79d7bb522a" containerName="neutron-sriov-openstack-openstack-cell1"
Nov 23 17:11:09 crc kubenswrapper[5050]: I1123 17:11:09.756005 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="7e2d257b-bbd5-4ac7-b7fb-b34e35c39b07" containerName="registry-server"
Nov 23 17:11:09 crc kubenswrapper[5050]: I1123 17:11:09.756016 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="58537a23-a8ff-45cd-8163-4c3ef248cb11" containerName="registry-server"
Nov 23 17:11:09 crc kubenswrapper[5050]: I1123 17:11:09.756027 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="fd799923-afa0-426e-a8b9-ac8bb231f329" containerName="registry-server"
Nov 23 17:11:09 crc kubenswrapper[5050]: I1123 17:11:09.756952 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-dhcp-openstack-openstack-cell1-q2ptj"
Nov 23 17:11:09 crc kubenswrapper[5050]: I1123 17:11:09.759953 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-dhcp-agent-neutron-config"
Nov 23 17:11:09 crc kubenswrapper[5050]: I1123 17:11:09.760182 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-ckrpf"
Nov 23 17:11:09 crc kubenswrapper[5050]: I1123 17:11:09.760688 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1"
Nov 23 17:11:09 crc kubenswrapper[5050]: I1123 17:11:09.761622 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret"
Nov 23 17:11:09 crc kubenswrapper[5050]: I1123 17:11:09.762725 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Nov 23 17:11:09 crc kubenswrapper[5050]: I1123 17:11:09.780788 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-dhcp-openstack-openstack-cell1-q2ptj"]
Nov 23 17:11:09 crc kubenswrapper[5050]: I1123 17:11:09.914155 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-dhcp-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9623322c-0a8c-4fbb-8209-51641891664b-neutron-dhcp-combined-ca-bundle\") pod \"neutron-dhcp-openstack-openstack-cell1-q2ptj\" (UID: \"9623322c-0a8c-4fbb-8209-51641891664b\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-q2ptj"
Nov 23 17:11:09 crc kubenswrapper[5050]: I1123 17:11:09.914521 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pjsg8\" (UniqueName: \"kubernetes.io/projected/9623322c-0a8c-4fbb-8209-51641891664b-kube-api-access-pjsg8\") pod \"neutron-dhcp-openstack-openstack-cell1-q2ptj\" (UID: \"9623322c-0a8c-4fbb-8209-51641891664b\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-q2ptj"
Nov 23 17:11:09 crc kubenswrapper[5050]: I1123 17:11:09.914608 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/9623322c-0a8c-4fbb-8209-51641891664b-ceph\") pod \"neutron-dhcp-openstack-openstack-cell1-q2ptj\" (UID: \"9623322c-0a8c-4fbb-8209-51641891664b\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-q2ptj"
Nov 23 17:11:09 crc kubenswrapper[5050]: I1123 17:11:09.914905 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-dhcp-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/9623322c-0a8c-4fbb-8209-51641891664b-neutron-dhcp-agent-neutron-config-0\") pod \"neutron-dhcp-openstack-openstack-cell1-q2ptj\" (UID: \"9623322c-0a8c-4fbb-8209-51641891664b\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-q2ptj"
Nov 23 17:11:09 crc kubenswrapper[5050]: I1123 17:11:09.915057 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9623322c-0a8c-4fbb-8209-51641891664b-inventory\") pod \"neutron-dhcp-openstack-openstack-cell1-q2ptj\" (UID: \"9623322c-0a8c-4fbb-8209-51641891664b\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-q2ptj"
Nov 23 17:11:09 crc kubenswrapper[5050]: I1123 17:11:09.915765 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9623322c-0a8c-4fbb-8209-51641891664b-ssh-key\") pod \"neutron-dhcp-openstack-openstack-cell1-q2ptj\" (UID: \"9623322c-0a8c-4fbb-8209-51641891664b\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-q2ptj"
Nov 23 17:11:10 crc kubenswrapper[5050]: I1123 17:11:10.018281 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9623322c-0a8c-4fbb-8209-51641891664b-ssh-key\") pod \"neutron-dhcp-openstack-openstack-cell1-q2ptj\" (UID: \"9623322c-0a8c-4fbb-8209-51641891664b\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-q2ptj"
Nov 23 17:11:10 crc kubenswrapper[5050]: I1123 17:11:10.018360 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-dhcp-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9623322c-0a8c-4fbb-8209-51641891664b-neutron-dhcp-combined-ca-bundle\") pod \"neutron-dhcp-openstack-openstack-cell1-q2ptj\" (UID: \"9623322c-0a8c-4fbb-8209-51641891664b\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-q2ptj"
Nov 23 17:11:10 crc kubenswrapper[5050]: I1123 17:11:10.018431 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pjsg8\" (UniqueName: \"kubernetes.io/projected/9623322c-0a8c-4fbb-8209-51641891664b-kube-api-access-pjsg8\") pod \"neutron-dhcp-openstack-openstack-cell1-q2ptj\" (UID: \"9623322c-0a8c-4fbb-8209-51641891664b\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-q2ptj"
Nov 23 17:11:10 crc kubenswrapper[5050]: I1123 17:11:10.018479 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/9623322c-0a8c-4fbb-8209-51641891664b-ceph\") pod \"neutron-dhcp-openstack-openstack-cell1-q2ptj\" (UID: \"9623322c-0a8c-4fbb-8209-51641891664b\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-q2ptj"
Nov 23 17:11:10 crc kubenswrapper[5050]: I1123 17:11:10.018540 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-dhcp-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/9623322c-0a8c-4fbb-8209-51641891664b-neutron-dhcp-agent-neutron-config-0\") pod \"neutron-dhcp-openstack-openstack-cell1-q2ptj\" (UID: \"9623322c-0a8c-4fbb-8209-51641891664b\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-q2ptj"
Nov 23 17:11:10 crc kubenswrapper[5050]: I1123 17:11:10.018584 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9623322c-0a8c-4fbb-8209-51641891664b-inventory\") pod \"neutron-dhcp-openstack-openstack-cell1-q2ptj\" (UID: \"9623322c-0a8c-4fbb-8209-51641891664b\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-q2ptj"
Nov 23 17:11:10 crc kubenswrapper[5050]: I1123 17:11:10.022749 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-dhcp-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9623322c-0a8c-4fbb-8209-51641891664b-neutron-dhcp-combined-ca-bundle\") pod \"neutron-dhcp-openstack-openstack-cell1-q2ptj\" (UID: \"9623322c-0a8c-4fbb-8209-51641891664b\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-q2ptj"
Nov 23 17:11:10 crc kubenswrapper[5050]: I1123 17:11:10.023581 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9623322c-0a8c-4fbb-8209-51641891664b-inventory\") pod \"neutron-dhcp-openstack-openstack-cell1-q2ptj\" (UID: \"9623322c-0a8c-4fbb-8209-51641891664b\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-q2ptj"
Nov 23 17:11:10 crc kubenswrapper[5050]: I1123 17:11:10.023851 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/9623322c-0a8c-4fbb-8209-51641891664b-ceph\") pod \"neutron-dhcp-openstack-openstack-cell1-q2ptj\" (UID: \"9623322c-0a8c-4fbb-8209-51641891664b\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-q2ptj"
Nov 23 17:11:10 crc kubenswrapper[5050]: I1123 17:11:10.023969 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9623322c-0a8c-4fbb-8209-51641891664b-ssh-key\") pod \"neutron-dhcp-openstack-openstack-cell1-q2ptj\" (UID: \"9623322c-0a8c-4fbb-8209-51641891664b\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-q2ptj"
Nov 23 17:11:10 crc kubenswrapper[5050]: I1123 17:11:10.025117 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-dhcp-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/9623322c-0a8c-4fbb-8209-51641891664b-neutron-dhcp-agent-neutron-config-0\") pod \"neutron-dhcp-openstack-openstack-cell1-q2ptj\" (UID: \"9623322c-0a8c-4fbb-8209-51641891664b\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-q2ptj"
Nov 23 17:11:10 crc kubenswrapper[5050]: I1123 17:11:10.045294 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pjsg8\" (UniqueName: \"kubernetes.io/projected/9623322c-0a8c-4fbb-8209-51641891664b-kube-api-access-pjsg8\") pod \"neutron-dhcp-openstack-openstack-cell1-q2ptj\" (UID: \"9623322c-0a8c-4fbb-8209-51641891664b\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-q2ptj"
Nov 23 17:11:10 crc kubenswrapper[5050]: I1123 17:11:10.084472 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-dhcp-openstack-openstack-cell1-q2ptj"
Nov 23 17:11:10 crc kubenswrapper[5050]: I1123 17:11:10.709085 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-dhcp-openstack-openstack-cell1-q2ptj"]
Nov 23 17:11:10 crc kubenswrapper[5050]: I1123 17:11:10.720861 5050 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Nov 23 17:11:11 crc kubenswrapper[5050]: I1123 17:11:11.691387 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-dhcp-openstack-openstack-cell1-q2ptj" event={"ID":"9623322c-0a8c-4fbb-8209-51641891664b","Type":"ContainerStarted","Data":"1589579b05523724eaf251f2fffd039b4c7d2f5ba814e1351f31fa8409076f94"}
Nov 23 17:11:11 crc kubenswrapper[5050]: I1123 17:11:11.692505 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-dhcp-openstack-openstack-cell1-q2ptj" event={"ID":"9623322c-0a8c-4fbb-8209-51641891664b","Type":"ContainerStarted","Data":"823364608e2d68ad081106af17bfafb7a9803e8d716dea050106a81989da1a5a"}
Nov 23 17:11:11 crc kubenswrapper[5050]: I1123 17:11:11.713164 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-dhcp-openstack-openstack-cell1-q2ptj" podStartSLOduration=2.160576966 podStartE2EDuration="2.713142372s" podCreationTimestamp="2025-11-23 17:11:09 +0000 UTC" firstStartedPulling="2025-11-23 17:11:10.720572918 +0000 UTC m=+8965.887569413" lastFinishedPulling="2025-11-23 17:11:11.273138294 +0000 UTC m=+8966.440134819" observedRunningTime="2025-11-23 17:11:11.708277195 +0000 UTC m=+8966.875273680" watchObservedRunningTime="2025-11-23 17:11:11.713142372 +0000 UTC m=+8966.880138857"
Nov 23 17:11:17 crc kubenswrapper[5050]: I1123 17:11:17.505051 5050 patch_prober.go:28] interesting pod/route-controller-manager-8579f458bf-spd5n container/route-controller-manager namespace/openshift-route-controller-manager: Liveness probe status=failure output="Get \"https://10.217.0.49:8443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body=
Nov 23 17:11:17 crc kubenswrapper[5050]: I1123 17:11:17.520392 5050 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-route-controller-manager/route-controller-manager-8579f458bf-spd5n" podUID="4e3592b6-bb29-4266-be9d-661160354156" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.49:8443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Nov 23 17:11:29 crc kubenswrapper[5050]: I1123 17:11:29.224211 5050 patch_prober.go:28] interesting pod/machine-config-daemon-hlrlq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 23 17:11:29 crc kubenswrapper[5050]: I1123 17:11:29.225090 5050 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 23 17:11:39 crc kubenswrapper[5050]: I1123 17:11:39.074967 5050 kubelet.go:2421] "SyncLoop ADD" source="api"
pods=["openshift-marketplace/community-operators-5hk57"] Nov 23 17:11:39 crc kubenswrapper[5050]: I1123 17:11:39.080657 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-5hk57" Nov 23 17:11:39 crc kubenswrapper[5050]: I1123 17:11:39.096586 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-5hk57"] Nov 23 17:11:39 crc kubenswrapper[5050]: I1123 17:11:39.162004 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eb23bc48-8872-4304-8d99-df732321a5b5-utilities\") pod \"community-operators-5hk57\" (UID: \"eb23bc48-8872-4304-8d99-df732321a5b5\") " pod="openshift-marketplace/community-operators-5hk57" Nov 23 17:11:39 crc kubenswrapper[5050]: I1123 17:11:39.162116 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qwv9f\" (UniqueName: \"kubernetes.io/projected/eb23bc48-8872-4304-8d99-df732321a5b5-kube-api-access-qwv9f\") pod \"community-operators-5hk57\" (UID: \"eb23bc48-8872-4304-8d99-df732321a5b5\") " pod="openshift-marketplace/community-operators-5hk57" Nov 23 17:11:39 crc kubenswrapper[5050]: I1123 17:11:39.162524 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eb23bc48-8872-4304-8d99-df732321a5b5-catalog-content\") pod \"community-operators-5hk57\" (UID: \"eb23bc48-8872-4304-8d99-df732321a5b5\") " pod="openshift-marketplace/community-operators-5hk57" Nov 23 17:11:39 crc kubenswrapper[5050]: I1123 17:11:39.265108 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eb23bc48-8872-4304-8d99-df732321a5b5-utilities\") pod \"community-operators-5hk57\" (UID: \"eb23bc48-8872-4304-8d99-df732321a5b5\") " pod="openshift-marketplace/community-operators-5hk57" Nov 23 17:11:39 crc kubenswrapper[5050]: I1123 17:11:39.265203 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qwv9f\" (UniqueName: \"kubernetes.io/projected/eb23bc48-8872-4304-8d99-df732321a5b5-kube-api-access-qwv9f\") pod \"community-operators-5hk57\" (UID: \"eb23bc48-8872-4304-8d99-df732321a5b5\") " pod="openshift-marketplace/community-operators-5hk57" Nov 23 17:11:39 crc kubenswrapper[5050]: I1123 17:11:39.265274 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eb23bc48-8872-4304-8d99-df732321a5b5-catalog-content\") pod \"community-operators-5hk57\" (UID: \"eb23bc48-8872-4304-8d99-df732321a5b5\") " pod="openshift-marketplace/community-operators-5hk57" Nov 23 17:11:39 crc kubenswrapper[5050]: I1123 17:11:39.265909 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eb23bc48-8872-4304-8d99-df732321a5b5-catalog-content\") pod \"community-operators-5hk57\" (UID: \"eb23bc48-8872-4304-8d99-df732321a5b5\") " pod="openshift-marketplace/community-operators-5hk57" Nov 23 17:11:39 crc kubenswrapper[5050]: I1123 17:11:39.266210 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eb23bc48-8872-4304-8d99-df732321a5b5-utilities\") pod \"community-operators-5hk57\" (UID: 
\"eb23bc48-8872-4304-8d99-df732321a5b5\") " pod="openshift-marketplace/community-operators-5hk57" Nov 23 17:11:39 crc kubenswrapper[5050]: I1123 17:11:39.290411 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qwv9f\" (UniqueName: \"kubernetes.io/projected/eb23bc48-8872-4304-8d99-df732321a5b5-kube-api-access-qwv9f\") pod \"community-operators-5hk57\" (UID: \"eb23bc48-8872-4304-8d99-df732321a5b5\") " pod="openshift-marketplace/community-operators-5hk57" Nov 23 17:11:39 crc kubenswrapper[5050]: I1123 17:11:39.423807 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-5hk57" Nov 23 17:11:40 crc kubenswrapper[5050]: I1123 17:11:40.036208 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-5hk57"] Nov 23 17:11:40 crc kubenswrapper[5050]: I1123 17:11:40.906265 5050 generic.go:334] "Generic (PLEG): container finished" podID="eb23bc48-8872-4304-8d99-df732321a5b5" containerID="b3fc0925158a09d444130de926e1a77e0c34dd907e641751adf8f0d733bc2eff" exitCode=0 Nov 23 17:11:40 crc kubenswrapper[5050]: I1123 17:11:40.906415 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5hk57" event={"ID":"eb23bc48-8872-4304-8d99-df732321a5b5","Type":"ContainerDied","Data":"b3fc0925158a09d444130de926e1a77e0c34dd907e641751adf8f0d733bc2eff"} Nov 23 17:11:40 crc kubenswrapper[5050]: I1123 17:11:40.908239 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5hk57" event={"ID":"eb23bc48-8872-4304-8d99-df732321a5b5","Type":"ContainerStarted","Data":"34d306e946aae795e9890533c23d6194b82f41c78074a7d7bd8d70fa16563b1b"} Nov 23 17:11:42 crc kubenswrapper[5050]: I1123 17:11:42.943020 5050 generic.go:334] "Generic (PLEG): container finished" podID="eb23bc48-8872-4304-8d99-df732321a5b5" containerID="541bb4d302e94cc3745fd99cad531c6a5292944e903cb10c15c17f0b8eb6e2b5" exitCode=0 Nov 23 17:11:42 crc kubenswrapper[5050]: I1123 17:11:42.943133 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5hk57" event={"ID":"eb23bc48-8872-4304-8d99-df732321a5b5","Type":"ContainerDied","Data":"541bb4d302e94cc3745fd99cad531c6a5292944e903cb10c15c17f0b8eb6e2b5"} Nov 23 17:11:43 crc kubenswrapper[5050]: I1123 17:11:43.981906 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5hk57" event={"ID":"eb23bc48-8872-4304-8d99-df732321a5b5","Type":"ContainerStarted","Data":"a411a186186ddbd48a3c675e7fd3ec66f827590d05d9008972b831d4793bc416"} Nov 23 17:11:44 crc kubenswrapper[5050]: I1123 17:11:44.022930 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-5hk57" podStartSLOduration=2.580698038 podStartE2EDuration="5.022891595s" podCreationTimestamp="2025-11-23 17:11:39 +0000 UTC" firstStartedPulling="2025-11-23 17:11:40.911068058 +0000 UTC m=+8996.078064553" lastFinishedPulling="2025-11-23 17:11:43.353261585 +0000 UTC m=+8998.520258110" observedRunningTime="2025-11-23 17:11:44.007747657 +0000 UTC m=+8999.174744152" watchObservedRunningTime="2025-11-23 17:11:44.022891595 +0000 UTC m=+8999.189888080" Nov 23 17:11:49 crc kubenswrapper[5050]: I1123 17:11:49.423959 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-5hk57" Nov 23 17:11:49 crc kubenswrapper[5050]: I1123 17:11:49.424752 
5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-5hk57" Nov 23 17:11:49 crc kubenswrapper[5050]: I1123 17:11:49.501907 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-5hk57" Nov 23 17:11:50 crc kubenswrapper[5050]: I1123 17:11:50.132055 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-5hk57" Nov 23 17:11:50 crc kubenswrapper[5050]: I1123 17:11:50.193748 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-5hk57"] Nov 23 17:11:52 crc kubenswrapper[5050]: I1123 17:11:52.101594 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-5hk57" podUID="eb23bc48-8872-4304-8d99-df732321a5b5" containerName="registry-server" containerID="cri-o://a411a186186ddbd48a3c675e7fd3ec66f827590d05d9008972b831d4793bc416" gracePeriod=2 Nov 23 17:11:52 crc kubenswrapper[5050]: I1123 17:11:52.656544 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-5hk57" Nov 23 17:11:52 crc kubenswrapper[5050]: I1123 17:11:52.752083 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eb23bc48-8872-4304-8d99-df732321a5b5-utilities\") pod \"eb23bc48-8872-4304-8d99-df732321a5b5\" (UID: \"eb23bc48-8872-4304-8d99-df732321a5b5\") " Nov 23 17:11:52 crc kubenswrapper[5050]: I1123 17:11:52.752261 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eb23bc48-8872-4304-8d99-df732321a5b5-catalog-content\") pod \"eb23bc48-8872-4304-8d99-df732321a5b5\" (UID: \"eb23bc48-8872-4304-8d99-df732321a5b5\") " Nov 23 17:11:52 crc kubenswrapper[5050]: I1123 17:11:52.752377 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qwv9f\" (UniqueName: \"kubernetes.io/projected/eb23bc48-8872-4304-8d99-df732321a5b5-kube-api-access-qwv9f\") pod \"eb23bc48-8872-4304-8d99-df732321a5b5\" (UID: \"eb23bc48-8872-4304-8d99-df732321a5b5\") " Nov 23 17:11:52 crc kubenswrapper[5050]: I1123 17:11:52.754631 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/eb23bc48-8872-4304-8d99-df732321a5b5-utilities" (OuterVolumeSpecName: "utilities") pod "eb23bc48-8872-4304-8d99-df732321a5b5" (UID: "eb23bc48-8872-4304-8d99-df732321a5b5"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 17:11:52 crc kubenswrapper[5050]: I1123 17:11:52.781541 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/eb23bc48-8872-4304-8d99-df732321a5b5-kube-api-access-qwv9f" (OuterVolumeSpecName: "kube-api-access-qwv9f") pod "eb23bc48-8872-4304-8d99-df732321a5b5" (UID: "eb23bc48-8872-4304-8d99-df732321a5b5"). InnerVolumeSpecName "kube-api-access-qwv9f". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 17:11:52 crc kubenswrapper[5050]: I1123 17:11:52.856408 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qwv9f\" (UniqueName: \"kubernetes.io/projected/eb23bc48-8872-4304-8d99-df732321a5b5-kube-api-access-qwv9f\") on node \"crc\" DevicePath \"\"" Nov 23 17:11:52 crc kubenswrapper[5050]: I1123 17:11:52.856452 5050 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eb23bc48-8872-4304-8d99-df732321a5b5-utilities\") on node \"crc\" DevicePath \"\"" Nov 23 17:11:52 crc kubenswrapper[5050]: I1123 17:11:52.933259 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/eb23bc48-8872-4304-8d99-df732321a5b5-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "eb23bc48-8872-4304-8d99-df732321a5b5" (UID: "eb23bc48-8872-4304-8d99-df732321a5b5"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 17:11:52 crc kubenswrapper[5050]: I1123 17:11:52.959822 5050 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eb23bc48-8872-4304-8d99-df732321a5b5-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 23 17:11:53 crc kubenswrapper[5050]: I1123 17:11:53.125471 5050 generic.go:334] "Generic (PLEG): container finished" podID="eb23bc48-8872-4304-8d99-df732321a5b5" containerID="a411a186186ddbd48a3c675e7fd3ec66f827590d05d9008972b831d4793bc416" exitCode=0 Nov 23 17:11:53 crc kubenswrapper[5050]: I1123 17:11:53.126196 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5hk57" event={"ID":"eb23bc48-8872-4304-8d99-df732321a5b5","Type":"ContainerDied","Data":"a411a186186ddbd48a3c675e7fd3ec66f827590d05d9008972b831d4793bc416"} Nov 23 17:11:53 crc kubenswrapper[5050]: I1123 17:11:53.126322 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-5hk57" Nov 23 17:11:53 crc kubenswrapper[5050]: I1123 17:11:53.126566 5050 scope.go:117] "RemoveContainer" containerID="a411a186186ddbd48a3c675e7fd3ec66f827590d05d9008972b831d4793bc416" Nov 23 17:11:53 crc kubenswrapper[5050]: I1123 17:11:53.126530 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5hk57" event={"ID":"eb23bc48-8872-4304-8d99-df732321a5b5","Type":"ContainerDied","Data":"34d306e946aae795e9890533c23d6194b82f41c78074a7d7bd8d70fa16563b1b"} Nov 23 17:11:53 crc kubenswrapper[5050]: I1123 17:11:53.190654 5050 scope.go:117] "RemoveContainer" containerID="541bb4d302e94cc3745fd99cad531c6a5292944e903cb10c15c17f0b8eb6e2b5" Nov 23 17:11:53 crc kubenswrapper[5050]: I1123 17:11:53.201348 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-5hk57"] Nov 23 17:11:53 crc kubenswrapper[5050]: I1123 17:11:53.214722 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-5hk57"] Nov 23 17:11:53 crc kubenswrapper[5050]: I1123 17:11:53.233107 5050 scope.go:117] "RemoveContainer" containerID="b3fc0925158a09d444130de926e1a77e0c34dd907e641751adf8f0d733bc2eff" Nov 23 17:11:53 crc kubenswrapper[5050]: I1123 17:11:53.301770 5050 scope.go:117] "RemoveContainer" containerID="a411a186186ddbd48a3c675e7fd3ec66f827590d05d9008972b831d4793bc416" Nov 23 17:11:53 crc kubenswrapper[5050]: E1123 17:11:53.302681 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a411a186186ddbd48a3c675e7fd3ec66f827590d05d9008972b831d4793bc416\": container with ID starting with a411a186186ddbd48a3c675e7fd3ec66f827590d05d9008972b831d4793bc416 not found: ID does not exist" containerID="a411a186186ddbd48a3c675e7fd3ec66f827590d05d9008972b831d4793bc416" Nov 23 17:11:53 crc kubenswrapper[5050]: I1123 17:11:53.302818 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a411a186186ddbd48a3c675e7fd3ec66f827590d05d9008972b831d4793bc416"} err="failed to get container status \"a411a186186ddbd48a3c675e7fd3ec66f827590d05d9008972b831d4793bc416\": rpc error: code = NotFound desc = could not find container \"a411a186186ddbd48a3c675e7fd3ec66f827590d05d9008972b831d4793bc416\": container with ID starting with a411a186186ddbd48a3c675e7fd3ec66f827590d05d9008972b831d4793bc416 not found: ID does not exist" Nov 23 17:11:53 crc kubenswrapper[5050]: I1123 17:11:53.302865 5050 scope.go:117] "RemoveContainer" containerID="541bb4d302e94cc3745fd99cad531c6a5292944e903cb10c15c17f0b8eb6e2b5" Nov 23 17:11:53 crc kubenswrapper[5050]: E1123 17:11:53.303747 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"541bb4d302e94cc3745fd99cad531c6a5292944e903cb10c15c17f0b8eb6e2b5\": container with ID starting with 541bb4d302e94cc3745fd99cad531c6a5292944e903cb10c15c17f0b8eb6e2b5 not found: ID does not exist" containerID="541bb4d302e94cc3745fd99cad531c6a5292944e903cb10c15c17f0b8eb6e2b5" Nov 23 17:11:53 crc kubenswrapper[5050]: I1123 17:11:53.303813 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"541bb4d302e94cc3745fd99cad531c6a5292944e903cb10c15c17f0b8eb6e2b5"} err="failed to get container status \"541bb4d302e94cc3745fd99cad531c6a5292944e903cb10c15c17f0b8eb6e2b5\": rpc error: code = NotFound desc = could not find 
container \"541bb4d302e94cc3745fd99cad531c6a5292944e903cb10c15c17f0b8eb6e2b5\": container with ID starting with 541bb4d302e94cc3745fd99cad531c6a5292944e903cb10c15c17f0b8eb6e2b5 not found: ID does not exist" Nov 23 17:11:53 crc kubenswrapper[5050]: I1123 17:11:53.303855 5050 scope.go:117] "RemoveContainer" containerID="b3fc0925158a09d444130de926e1a77e0c34dd907e641751adf8f0d733bc2eff" Nov 23 17:11:53 crc kubenswrapper[5050]: E1123 17:11:53.305033 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b3fc0925158a09d444130de926e1a77e0c34dd907e641751adf8f0d733bc2eff\": container with ID starting with b3fc0925158a09d444130de926e1a77e0c34dd907e641751adf8f0d733bc2eff not found: ID does not exist" containerID="b3fc0925158a09d444130de926e1a77e0c34dd907e641751adf8f0d733bc2eff" Nov 23 17:11:53 crc kubenswrapper[5050]: I1123 17:11:53.305126 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b3fc0925158a09d444130de926e1a77e0c34dd907e641751adf8f0d733bc2eff"} err="failed to get container status \"b3fc0925158a09d444130de926e1a77e0c34dd907e641751adf8f0d733bc2eff\": rpc error: code = NotFound desc = could not find container \"b3fc0925158a09d444130de926e1a77e0c34dd907e641751adf8f0d733bc2eff\": container with ID starting with b3fc0925158a09d444130de926e1a77e0c34dd907e641751adf8f0d733bc2eff not found: ID does not exist" Nov 23 17:11:53 crc kubenswrapper[5050]: I1123 17:11:53.564248 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="eb23bc48-8872-4304-8d99-df732321a5b5" path="/var/lib/kubelet/pods/eb23bc48-8872-4304-8d99-df732321a5b5/volumes" Nov 23 17:11:59 crc kubenswrapper[5050]: I1123 17:11:59.224994 5050 patch_prober.go:28] interesting pod/machine-config-daemon-hlrlq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 23 17:11:59 crc kubenswrapper[5050]: I1123 17:11:59.227696 5050 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 23 17:11:59 crc kubenswrapper[5050]: I1123 17:11:59.227802 5050 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" Nov 23 17:11:59 crc kubenswrapper[5050]: I1123 17:11:59.228837 5050 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"87924a967acda0996de5f2211b02af6200dd82f1a857843afe7f7b8fe5bfdcda"} pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 23 17:11:59 crc kubenswrapper[5050]: I1123 17:11:59.229013 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" containerID="cri-o://87924a967acda0996de5f2211b02af6200dd82f1a857843afe7f7b8fe5bfdcda" gracePeriod=600 Nov 23 17:11:59 crc kubenswrapper[5050]: E1123 17:11:59.360687 5050 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 17:12:00 crc kubenswrapper[5050]: I1123 17:12:00.233690 5050 generic.go:334] "Generic (PLEG): container finished" podID="1d998909-9470-47ef-87e8-b34f0473682f" containerID="87924a967acda0996de5f2211b02af6200dd82f1a857843afe7f7b8fe5bfdcda" exitCode=0 Nov 23 17:12:00 crc kubenswrapper[5050]: I1123 17:12:00.233897 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" event={"ID":"1d998909-9470-47ef-87e8-b34f0473682f","Type":"ContainerDied","Data":"87924a967acda0996de5f2211b02af6200dd82f1a857843afe7f7b8fe5bfdcda"} Nov 23 17:12:00 crc kubenswrapper[5050]: I1123 17:12:00.234360 5050 scope.go:117] "RemoveContainer" containerID="b51b89804155fbed045110dd6b91b5fff9a2c8e177a1ec2576ae32319dd4911f" Nov 23 17:12:00 crc kubenswrapper[5050]: I1123 17:12:00.235546 5050 scope.go:117] "RemoveContainer" containerID="87924a967acda0996de5f2211b02af6200dd82f1a857843afe7f7b8fe5bfdcda" Nov 23 17:12:00 crc kubenswrapper[5050]: E1123 17:12:00.236096 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 17:12:14 crc kubenswrapper[5050]: I1123 17:12:14.548436 5050 scope.go:117] "RemoveContainer" containerID="87924a967acda0996de5f2211b02af6200dd82f1a857843afe7f7b8fe5bfdcda" Nov 23 17:12:14 crc kubenswrapper[5050]: E1123 17:12:14.549620 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 17:12:26 crc kubenswrapper[5050]: I1123 17:12:26.549293 5050 scope.go:117] "RemoveContainer" containerID="87924a967acda0996de5f2211b02af6200dd82f1a857843afe7f7b8fe5bfdcda" Nov 23 17:12:26 crc kubenswrapper[5050]: E1123 17:12:26.550290 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 17:12:38 crc kubenswrapper[5050]: I1123 17:12:38.550127 5050 scope.go:117] "RemoveContainer" containerID="87924a967acda0996de5f2211b02af6200dd82f1a857843afe7f7b8fe5bfdcda" Nov 23 17:12:38 crc kubenswrapper[5050]: E1123 17:12:38.551564 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 17:12:51 crc kubenswrapper[5050]: I1123 17:12:51.550347 5050 scope.go:117] "RemoveContainer" containerID="87924a967acda0996de5f2211b02af6200dd82f1a857843afe7f7b8fe5bfdcda" Nov 23 17:12:51 crc kubenswrapper[5050]: E1123 17:12:51.551939 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 17:13:06 crc kubenswrapper[5050]: I1123 17:13:06.549570 5050 scope.go:117] "RemoveContainer" containerID="87924a967acda0996de5f2211b02af6200dd82f1a857843afe7f7b8fe5bfdcda" Nov 23 17:13:06 crc kubenswrapper[5050]: E1123 17:13:06.550898 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 17:13:20 crc kubenswrapper[5050]: I1123 17:13:20.549400 5050 scope.go:117] "RemoveContainer" containerID="87924a967acda0996de5f2211b02af6200dd82f1a857843afe7f7b8fe5bfdcda" Nov 23 17:13:20 crc kubenswrapper[5050]: E1123 17:13:20.550580 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 17:13:33 crc kubenswrapper[5050]: I1123 17:13:33.548854 5050 scope.go:117] "RemoveContainer" containerID="87924a967acda0996de5f2211b02af6200dd82f1a857843afe7f7b8fe5bfdcda" Nov 23 17:13:33 crc kubenswrapper[5050]: E1123 17:13:33.549876 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 17:13:44 crc kubenswrapper[5050]: I1123 17:13:44.549199 5050 scope.go:117] "RemoveContainer" containerID="87924a967acda0996de5f2211b02af6200dd82f1a857843afe7f7b8fe5bfdcda" Nov 23 17:13:44 crc kubenswrapper[5050]: E1123 17:13:44.550420 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 17:13:59 crc kubenswrapper[5050]: I1123 17:13:59.549000 5050 scope.go:117] "RemoveContainer" containerID="87924a967acda0996de5f2211b02af6200dd82f1a857843afe7f7b8fe5bfdcda" Nov 23 17:13:59 crc kubenswrapper[5050]: E1123 17:13:59.550297 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 17:14:12 crc kubenswrapper[5050]: I1123 17:14:12.549689 5050 scope.go:117] "RemoveContainer" containerID="87924a967acda0996de5f2211b02af6200dd82f1a857843afe7f7b8fe5bfdcda" Nov 23 17:14:12 crc kubenswrapper[5050]: E1123 17:14:12.550694 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 17:14:25 crc kubenswrapper[5050]: I1123 17:14:25.573855 5050 scope.go:117] "RemoveContainer" containerID="87924a967acda0996de5f2211b02af6200dd82f1a857843afe7f7b8fe5bfdcda" Nov 23 17:14:25 crc kubenswrapper[5050]: E1123 17:14:25.575107 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 17:14:37 crc kubenswrapper[5050]: I1123 17:14:37.549019 5050 scope.go:117] "RemoveContainer" containerID="87924a967acda0996de5f2211b02af6200dd82f1a857843afe7f7b8fe5bfdcda" Nov 23 17:14:37 crc kubenswrapper[5050]: E1123 17:14:37.550156 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 17:14:51 crc kubenswrapper[5050]: I1123 17:14:51.549129 5050 scope.go:117] "RemoveContainer" containerID="87924a967acda0996de5f2211b02af6200dd82f1a857843afe7f7b8fe5bfdcda" Nov 23 17:14:51 crc kubenswrapper[5050]: E1123 17:14:51.550399 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" 
podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 17:15:00 crc kubenswrapper[5050]: I1123 17:15:00.168933 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29398635-4pxmr"] Nov 23 17:15:00 crc kubenswrapper[5050]: E1123 17:15:00.170218 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eb23bc48-8872-4304-8d99-df732321a5b5" containerName="registry-server" Nov 23 17:15:00 crc kubenswrapper[5050]: I1123 17:15:00.170238 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="eb23bc48-8872-4304-8d99-df732321a5b5" containerName="registry-server" Nov 23 17:15:00 crc kubenswrapper[5050]: E1123 17:15:00.170257 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eb23bc48-8872-4304-8d99-df732321a5b5" containerName="extract-content" Nov 23 17:15:00 crc kubenswrapper[5050]: I1123 17:15:00.170266 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="eb23bc48-8872-4304-8d99-df732321a5b5" containerName="extract-content" Nov 23 17:15:00 crc kubenswrapper[5050]: E1123 17:15:00.170309 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eb23bc48-8872-4304-8d99-df732321a5b5" containerName="extract-utilities" Nov 23 17:15:00 crc kubenswrapper[5050]: I1123 17:15:00.170318 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="eb23bc48-8872-4304-8d99-df732321a5b5" containerName="extract-utilities" Nov 23 17:15:00 crc kubenswrapper[5050]: I1123 17:15:00.170676 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="eb23bc48-8872-4304-8d99-df732321a5b5" containerName="registry-server" Nov 23 17:15:00 crc kubenswrapper[5050]: I1123 17:15:00.171772 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29398635-4pxmr" Nov 23 17:15:00 crc kubenswrapper[5050]: I1123 17:15:00.175306 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 23 17:15:00 crc kubenswrapper[5050]: I1123 17:15:00.176682 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 23 17:15:00 crc kubenswrapper[5050]: I1123 17:15:00.183504 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29398635-4pxmr"] Nov 23 17:15:00 crc kubenswrapper[5050]: I1123 17:15:00.266518 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/5bd6eada-dd5d-4d4a-b5d3-68be848d6fbc-secret-volume\") pod \"collect-profiles-29398635-4pxmr\" (UID: \"5bd6eada-dd5d-4d4a-b5d3-68be848d6fbc\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29398635-4pxmr" Nov 23 17:15:00 crc kubenswrapper[5050]: I1123 17:15:00.267020 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kl2h2\" (UniqueName: \"kubernetes.io/projected/5bd6eada-dd5d-4d4a-b5d3-68be848d6fbc-kube-api-access-kl2h2\") pod \"collect-profiles-29398635-4pxmr\" (UID: \"5bd6eada-dd5d-4d4a-b5d3-68be848d6fbc\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29398635-4pxmr" Nov 23 17:15:00 crc kubenswrapper[5050]: I1123 17:15:00.267195 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: 
\"kubernetes.io/configmap/5bd6eada-dd5d-4d4a-b5d3-68be848d6fbc-config-volume\") pod \"collect-profiles-29398635-4pxmr\" (UID: \"5bd6eada-dd5d-4d4a-b5d3-68be848d6fbc\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29398635-4pxmr" Nov 23 17:15:00 crc kubenswrapper[5050]: I1123 17:15:00.370009 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/5bd6eada-dd5d-4d4a-b5d3-68be848d6fbc-secret-volume\") pod \"collect-profiles-29398635-4pxmr\" (UID: \"5bd6eada-dd5d-4d4a-b5d3-68be848d6fbc\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29398635-4pxmr" Nov 23 17:15:00 crc kubenswrapper[5050]: I1123 17:15:00.370166 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kl2h2\" (UniqueName: \"kubernetes.io/projected/5bd6eada-dd5d-4d4a-b5d3-68be848d6fbc-kube-api-access-kl2h2\") pod \"collect-profiles-29398635-4pxmr\" (UID: \"5bd6eada-dd5d-4d4a-b5d3-68be848d6fbc\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29398635-4pxmr" Nov 23 17:15:00 crc kubenswrapper[5050]: I1123 17:15:00.370251 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5bd6eada-dd5d-4d4a-b5d3-68be848d6fbc-config-volume\") pod \"collect-profiles-29398635-4pxmr\" (UID: \"5bd6eada-dd5d-4d4a-b5d3-68be848d6fbc\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29398635-4pxmr" Nov 23 17:15:00 crc kubenswrapper[5050]: I1123 17:15:00.371888 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5bd6eada-dd5d-4d4a-b5d3-68be848d6fbc-config-volume\") pod \"collect-profiles-29398635-4pxmr\" (UID: \"5bd6eada-dd5d-4d4a-b5d3-68be848d6fbc\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29398635-4pxmr" Nov 23 17:15:00 crc kubenswrapper[5050]: I1123 17:15:00.377472 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/5bd6eada-dd5d-4d4a-b5d3-68be848d6fbc-secret-volume\") pod \"collect-profiles-29398635-4pxmr\" (UID: \"5bd6eada-dd5d-4d4a-b5d3-68be848d6fbc\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29398635-4pxmr" Nov 23 17:15:00 crc kubenswrapper[5050]: I1123 17:15:00.389523 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kl2h2\" (UniqueName: \"kubernetes.io/projected/5bd6eada-dd5d-4d4a-b5d3-68be848d6fbc-kube-api-access-kl2h2\") pod \"collect-profiles-29398635-4pxmr\" (UID: \"5bd6eada-dd5d-4d4a-b5d3-68be848d6fbc\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29398635-4pxmr" Nov 23 17:15:00 crc kubenswrapper[5050]: I1123 17:15:00.514281 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29398635-4pxmr" Nov 23 17:15:01 crc kubenswrapper[5050]: I1123 17:15:01.054782 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29398635-4pxmr"] Nov 23 17:15:01 crc kubenswrapper[5050]: I1123 17:15:01.860402 5050 generic.go:334] "Generic (PLEG): container finished" podID="5bd6eada-dd5d-4d4a-b5d3-68be848d6fbc" containerID="b600d91222cd7dbf7787a906b0018920efa3169452f7bfe6b5e6e30e3ecc8d83" exitCode=0 Nov 23 17:15:01 crc kubenswrapper[5050]: I1123 17:15:01.860498 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29398635-4pxmr" event={"ID":"5bd6eada-dd5d-4d4a-b5d3-68be848d6fbc","Type":"ContainerDied","Data":"b600d91222cd7dbf7787a906b0018920efa3169452f7bfe6b5e6e30e3ecc8d83"} Nov 23 17:15:01 crc kubenswrapper[5050]: I1123 17:15:01.860541 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29398635-4pxmr" event={"ID":"5bd6eada-dd5d-4d4a-b5d3-68be848d6fbc","Type":"ContainerStarted","Data":"495f6c459ec2a864e60657c457178fd364d836da941d74938b9625f80d477b31"} Nov 23 17:15:03 crc kubenswrapper[5050]: I1123 17:15:03.297894 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29398635-4pxmr" Nov 23 17:15:03 crc kubenswrapper[5050]: I1123 17:15:03.479109 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/5bd6eada-dd5d-4d4a-b5d3-68be848d6fbc-secret-volume\") pod \"5bd6eada-dd5d-4d4a-b5d3-68be848d6fbc\" (UID: \"5bd6eada-dd5d-4d4a-b5d3-68be848d6fbc\") " Nov 23 17:15:03 crc kubenswrapper[5050]: I1123 17:15:03.479226 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5bd6eada-dd5d-4d4a-b5d3-68be848d6fbc-config-volume\") pod \"5bd6eada-dd5d-4d4a-b5d3-68be848d6fbc\" (UID: \"5bd6eada-dd5d-4d4a-b5d3-68be848d6fbc\") " Nov 23 17:15:03 crc kubenswrapper[5050]: I1123 17:15:03.479363 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kl2h2\" (UniqueName: \"kubernetes.io/projected/5bd6eada-dd5d-4d4a-b5d3-68be848d6fbc-kube-api-access-kl2h2\") pod \"5bd6eada-dd5d-4d4a-b5d3-68be848d6fbc\" (UID: \"5bd6eada-dd5d-4d4a-b5d3-68be848d6fbc\") " Nov 23 17:15:03 crc kubenswrapper[5050]: I1123 17:15:03.482540 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5bd6eada-dd5d-4d4a-b5d3-68be848d6fbc-config-volume" (OuterVolumeSpecName: "config-volume") pod "5bd6eada-dd5d-4d4a-b5d3-68be848d6fbc" (UID: "5bd6eada-dd5d-4d4a-b5d3-68be848d6fbc"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 17:15:03 crc kubenswrapper[5050]: I1123 17:15:03.494607 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5bd6eada-dd5d-4d4a-b5d3-68be848d6fbc-kube-api-access-kl2h2" (OuterVolumeSpecName: "kube-api-access-kl2h2") pod "5bd6eada-dd5d-4d4a-b5d3-68be848d6fbc" (UID: "5bd6eada-dd5d-4d4a-b5d3-68be848d6fbc"). InnerVolumeSpecName "kube-api-access-kl2h2". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 17:15:03 crc kubenswrapper[5050]: I1123 17:15:03.496049 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5bd6eada-dd5d-4d4a-b5d3-68be848d6fbc-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "5bd6eada-dd5d-4d4a-b5d3-68be848d6fbc" (UID: "5bd6eada-dd5d-4d4a-b5d3-68be848d6fbc"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 17:15:03 crc kubenswrapper[5050]: I1123 17:15:03.583074 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kl2h2\" (UniqueName: \"kubernetes.io/projected/5bd6eada-dd5d-4d4a-b5d3-68be848d6fbc-kube-api-access-kl2h2\") on node \"crc\" DevicePath \"\"" Nov 23 17:15:03 crc kubenswrapper[5050]: I1123 17:15:03.583129 5050 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/5bd6eada-dd5d-4d4a-b5d3-68be848d6fbc-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 23 17:15:03 crc kubenswrapper[5050]: I1123 17:15:03.583149 5050 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5bd6eada-dd5d-4d4a-b5d3-68be848d6fbc-config-volume\") on node \"crc\" DevicePath \"\"" Nov 23 17:15:03 crc kubenswrapper[5050]: I1123 17:15:03.889657 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29398635-4pxmr" event={"ID":"5bd6eada-dd5d-4d4a-b5d3-68be848d6fbc","Type":"ContainerDied","Data":"495f6c459ec2a864e60657c457178fd364d836da941d74938b9625f80d477b31"} Nov 23 17:15:03 crc kubenswrapper[5050]: I1123 17:15:03.889709 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29398635-4pxmr" Nov 23 17:15:03 crc kubenswrapper[5050]: I1123 17:15:03.889714 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="495f6c459ec2a864e60657c457178fd364d836da941d74938b9625f80d477b31" Nov 23 17:15:04 crc kubenswrapper[5050]: I1123 17:15:04.418358 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29398590-s2wsf"] Nov 23 17:15:04 crc kubenswrapper[5050]: I1123 17:15:04.441008 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29398590-s2wsf"] Nov 23 17:15:04 crc kubenswrapper[5050]: I1123 17:15:04.550045 5050 scope.go:117] "RemoveContainer" containerID="87924a967acda0996de5f2211b02af6200dd82f1a857843afe7f7b8fe5bfdcda" Nov 23 17:15:04 crc kubenswrapper[5050]: E1123 17:15:04.551029 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 17:15:05 crc kubenswrapper[5050]: I1123 17:15:05.568925 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="23c62781-f73e-4fcd-b3e1-c94afe1cb76a" path="/var/lib/kubelet/pods/23c62781-f73e-4fcd-b3e1-c94afe1cb76a/volumes" Nov 23 17:15:16 crc kubenswrapper[5050]: I1123 17:15:16.548926 5050 scope.go:117] "RemoveContainer" 
containerID="87924a967acda0996de5f2211b02af6200dd82f1a857843afe7f7b8fe5bfdcda" Nov 23 17:15:16 crc kubenswrapper[5050]: E1123 17:15:16.549690 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 17:15:21 crc kubenswrapper[5050]: I1123 17:15:21.556491 5050 scope.go:117] "RemoveContainer" containerID="52fc70b7d66863eac9bf8bbb06a91701d213447513d351984bdb03131ff7101b" Nov 23 17:15:31 crc kubenswrapper[5050]: I1123 17:15:31.549276 5050 scope.go:117] "RemoveContainer" containerID="87924a967acda0996de5f2211b02af6200dd82f1a857843afe7f7b8fe5bfdcda" Nov 23 17:15:31 crc kubenswrapper[5050]: E1123 17:15:31.551594 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 17:15:44 crc kubenswrapper[5050]: I1123 17:15:44.549613 5050 scope.go:117] "RemoveContainer" containerID="87924a967acda0996de5f2211b02af6200dd82f1a857843afe7f7b8fe5bfdcda" Nov 23 17:15:44 crc kubenswrapper[5050]: E1123 17:15:44.550876 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 17:15:51 crc kubenswrapper[5050]: I1123 17:15:51.593669 5050 generic.go:334] "Generic (PLEG): container finished" podID="9623322c-0a8c-4fbb-8209-51641891664b" containerID="1589579b05523724eaf251f2fffd039b4c7d2f5ba814e1351f31fa8409076f94" exitCode=0 Nov 23 17:15:51 crc kubenswrapper[5050]: I1123 17:15:51.593715 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-dhcp-openstack-openstack-cell1-q2ptj" event={"ID":"9623322c-0a8c-4fbb-8209-51641891664b","Type":"ContainerDied","Data":"1589579b05523724eaf251f2fffd039b4c7d2f5ba814e1351f31fa8409076f94"} Nov 23 17:15:53 crc kubenswrapper[5050]: I1123 17:15:53.180259 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-dhcp-openstack-openstack-cell1-q2ptj" Nov 23 17:15:53 crc kubenswrapper[5050]: I1123 17:15:53.370995 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-dhcp-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/9623322c-0a8c-4fbb-8209-51641891664b-neutron-dhcp-agent-neutron-config-0\") pod \"9623322c-0a8c-4fbb-8209-51641891664b\" (UID: \"9623322c-0a8c-4fbb-8209-51641891664b\") " Nov 23 17:15:53 crc kubenswrapper[5050]: I1123 17:15:53.371094 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pjsg8\" (UniqueName: \"kubernetes.io/projected/9623322c-0a8c-4fbb-8209-51641891664b-kube-api-access-pjsg8\") pod \"9623322c-0a8c-4fbb-8209-51641891664b\" (UID: \"9623322c-0a8c-4fbb-8209-51641891664b\") " Nov 23 17:15:53 crc kubenswrapper[5050]: I1123 17:15:53.371202 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9623322c-0a8c-4fbb-8209-51641891664b-inventory\") pod \"9623322c-0a8c-4fbb-8209-51641891664b\" (UID: \"9623322c-0a8c-4fbb-8209-51641891664b\") " Nov 23 17:15:53 crc kubenswrapper[5050]: I1123 17:15:53.371235 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/9623322c-0a8c-4fbb-8209-51641891664b-ceph\") pod \"9623322c-0a8c-4fbb-8209-51641891664b\" (UID: \"9623322c-0a8c-4fbb-8209-51641891664b\") " Nov 23 17:15:53 crc kubenswrapper[5050]: I1123 17:15:53.371460 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9623322c-0a8c-4fbb-8209-51641891664b-ssh-key\") pod \"9623322c-0a8c-4fbb-8209-51641891664b\" (UID: \"9623322c-0a8c-4fbb-8209-51641891664b\") " Nov 23 17:15:53 crc kubenswrapper[5050]: I1123 17:15:53.371520 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-dhcp-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9623322c-0a8c-4fbb-8209-51641891664b-neutron-dhcp-combined-ca-bundle\") pod \"9623322c-0a8c-4fbb-8209-51641891664b\" (UID: \"9623322c-0a8c-4fbb-8209-51641891664b\") " Nov 23 17:15:53 crc kubenswrapper[5050]: I1123 17:15:53.380257 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9623322c-0a8c-4fbb-8209-51641891664b-neutron-dhcp-combined-ca-bundle" (OuterVolumeSpecName: "neutron-dhcp-combined-ca-bundle") pod "9623322c-0a8c-4fbb-8209-51641891664b" (UID: "9623322c-0a8c-4fbb-8209-51641891664b"). InnerVolumeSpecName "neutron-dhcp-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 17:15:53 crc kubenswrapper[5050]: I1123 17:15:53.380263 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9623322c-0a8c-4fbb-8209-51641891664b-kube-api-access-pjsg8" (OuterVolumeSpecName: "kube-api-access-pjsg8") pod "9623322c-0a8c-4fbb-8209-51641891664b" (UID: "9623322c-0a8c-4fbb-8209-51641891664b"). InnerVolumeSpecName "kube-api-access-pjsg8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 17:15:53 crc kubenswrapper[5050]: I1123 17:15:53.381265 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9623322c-0a8c-4fbb-8209-51641891664b-ceph" (OuterVolumeSpecName: "ceph") pod "9623322c-0a8c-4fbb-8209-51641891664b" (UID: "9623322c-0a8c-4fbb-8209-51641891664b"). InnerVolumeSpecName "ceph". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 17:15:53 crc kubenswrapper[5050]: I1123 17:15:53.410135 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9623322c-0a8c-4fbb-8209-51641891664b-neutron-dhcp-agent-neutron-config-0" (OuterVolumeSpecName: "neutron-dhcp-agent-neutron-config-0") pod "9623322c-0a8c-4fbb-8209-51641891664b" (UID: "9623322c-0a8c-4fbb-8209-51641891664b"). InnerVolumeSpecName "neutron-dhcp-agent-neutron-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 17:15:53 crc kubenswrapper[5050]: I1123 17:15:53.414326 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9623322c-0a8c-4fbb-8209-51641891664b-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "9623322c-0a8c-4fbb-8209-51641891664b" (UID: "9623322c-0a8c-4fbb-8209-51641891664b"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 17:15:53 crc kubenswrapper[5050]: I1123 17:15:53.417421 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9623322c-0a8c-4fbb-8209-51641891664b-inventory" (OuterVolumeSpecName: "inventory") pod "9623322c-0a8c-4fbb-8209-51641891664b" (UID: "9623322c-0a8c-4fbb-8209-51641891664b"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 17:15:53 crc kubenswrapper[5050]: I1123 17:15:53.475397 5050 reconciler_common.go:293] "Volume detached for volume \"neutron-dhcp-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/9623322c-0a8c-4fbb-8209-51641891664b-neutron-dhcp-agent-neutron-config-0\") on node \"crc\" DevicePath \"\"" Nov 23 17:15:53 crc kubenswrapper[5050]: I1123 17:15:53.475734 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pjsg8\" (UniqueName: \"kubernetes.io/projected/9623322c-0a8c-4fbb-8209-51641891664b-kube-api-access-pjsg8\") on node \"crc\" DevicePath \"\"" Nov 23 17:15:53 crc kubenswrapper[5050]: I1123 17:15:53.475814 5050 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9623322c-0a8c-4fbb-8209-51641891664b-inventory\") on node \"crc\" DevicePath \"\"" Nov 23 17:15:53 crc kubenswrapper[5050]: I1123 17:15:53.475893 5050 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/9623322c-0a8c-4fbb-8209-51641891664b-ceph\") on node \"crc\" DevicePath \"\"" Nov 23 17:15:53 crc kubenswrapper[5050]: I1123 17:15:53.475959 5050 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9623322c-0a8c-4fbb-8209-51641891664b-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 23 17:15:53 crc kubenswrapper[5050]: I1123 17:15:53.476030 5050 reconciler_common.go:293] "Volume detached for volume \"neutron-dhcp-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9623322c-0a8c-4fbb-8209-51641891664b-neutron-dhcp-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 17:15:53 crc kubenswrapper[5050]: I1123 17:15:53.628327 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-dhcp-openstack-openstack-cell1-q2ptj" event={"ID":"9623322c-0a8c-4fbb-8209-51641891664b","Type":"ContainerDied","Data":"823364608e2d68ad081106af17bfafb7a9803e8d716dea050106a81989da1a5a"} Nov 23 17:15:53 crc kubenswrapper[5050]: I1123 17:15:53.628407 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-dhcp-openstack-openstack-cell1-q2ptj" Nov 23 17:15:53 crc kubenswrapper[5050]: I1123 17:15:53.631139 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="823364608e2d68ad081106af17bfafb7a9803e8d716dea050106a81989da1a5a" Nov 23 17:15:55 crc kubenswrapper[5050]: I1123 17:15:55.565755 5050 scope.go:117] "RemoveContainer" containerID="87924a967acda0996de5f2211b02af6200dd82f1a857843afe7f7b8fe5bfdcda" Nov 23 17:15:55 crc kubenswrapper[5050]: E1123 17:15:55.567156 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 17:16:09 crc kubenswrapper[5050]: I1123 17:16:09.550255 5050 scope.go:117] "RemoveContainer" containerID="87924a967acda0996de5f2211b02af6200dd82f1a857843afe7f7b8fe5bfdcda" Nov 23 17:16:09 crc kubenswrapper[5050]: E1123 17:16:09.551306 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 17:16:21 crc kubenswrapper[5050]: I1123 17:16:21.139164 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 23 17:16:21 crc kubenswrapper[5050]: I1123 17:16:21.140329 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell0-conductor-0" podUID="8a3ba510-87a6-443f-b74c-ac6361726b1b" containerName="nova-cell0-conductor-conductor" containerID="cri-o://1935d93f340a395c5187c9ed793c8e3f3a89c31b0d8f0424a4bc98b1b7156e82" gracePeriod=30 Nov 23 17:16:21 crc kubenswrapper[5050]: I1123 17:16:21.173227 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 23 17:16:21 crc kubenswrapper[5050]: I1123 17:16:21.173538 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-conductor-0" podUID="183ebe7a-5aa3-4a3f-8392-bbd7e3433b15" containerName="nova-cell1-conductor-conductor" containerID="cri-o://7ebf5479e4cd1274bcd94267b382aefadd914349b5cce33cceeaa72468c30166" gracePeriod=30 Nov 23 17:16:22 crc kubenswrapper[5050]: I1123 17:16:22.237336 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 23 17:16:22 crc kubenswrapper[5050]: I1123 17:16:22.238839 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="8098c44a-b797-4fba-8d0d-2138ca9b367a" containerName="nova-api-log" containerID="cri-o://13f460433dc3293b2854c9d73e0084d0f750ebd30de0e229dfc7cfd9257c9b61" gracePeriod=30 Nov 23 17:16:22 crc kubenswrapper[5050]: I1123 17:16:22.239013 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="8098c44a-b797-4fba-8d0d-2138ca9b367a" containerName="nova-api-api" containerID="cri-o://6b4b736b2f6317c29343fa48432ccdef3758c170dc6360ed6cb25caec383b0eb" gracePeriod=30 Nov 23 17:16:22 
crc kubenswrapper[5050]: I1123 17:16:22.309528 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 23 17:16:22 crc kubenswrapper[5050]: I1123 17:16:22.310313 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="ddc00b7c-3525-45d6-863f-20e9e26ff02f" containerName="nova-scheduler-scheduler" containerID="cri-o://4ede08fe48e6b2bbb659c27fe3fc3562174453c3e85d985533d4a88c6f997545" gracePeriod=30 Nov 23 17:16:22 crc kubenswrapper[5050]: I1123 17:16:22.318897 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 23 17:16:22 crc kubenswrapper[5050]: I1123 17:16:22.319348 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="48851bd1-4829-4f8d-ae93-a6cb3266ce68" containerName="nova-metadata-metadata" containerID="cri-o://212e5898b8ac9c7400429c63b5c0b7573e62e87f8d170f83efe286d8cbd392fe" gracePeriod=30 Nov 23 17:16:22 crc kubenswrapper[5050]: I1123 17:16:22.319892 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="48851bd1-4829-4f8d-ae93-a6cb3266ce68" containerName="nova-metadata-log" containerID="cri-o://dcc58da0d08a430d214f6e7965bf67d82bfee9eb6a89de2efe49a48485c28c40" gracePeriod=30 Nov 23 17:16:22 crc kubenswrapper[5050]: E1123 17:16:22.931004 5050 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="7ebf5479e4cd1274bcd94267b382aefadd914349b5cce33cceeaa72468c30166" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Nov 23 17:16:22 crc kubenswrapper[5050]: E1123 17:16:22.932576 5050 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="7ebf5479e4cd1274bcd94267b382aefadd914349b5cce33cceeaa72468c30166" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Nov 23 17:16:22 crc kubenswrapper[5050]: E1123 17:16:22.933897 5050 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="7ebf5479e4cd1274bcd94267b382aefadd914349b5cce33cceeaa72468c30166" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Nov 23 17:16:22 crc kubenswrapper[5050]: E1123 17:16:22.933984 5050 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-cell1-conductor-0" podUID="183ebe7a-5aa3-4a3f-8392-bbd7e3433b15" containerName="nova-cell1-conductor-conductor" Nov 23 17:16:23 crc kubenswrapper[5050]: I1123 17:16:23.110106 5050 generic.go:334] "Generic (PLEG): container finished" podID="8098c44a-b797-4fba-8d0d-2138ca9b367a" containerID="13f460433dc3293b2854c9d73e0084d0f750ebd30de0e229dfc7cfd9257c9b61" exitCode=143 Nov 23 17:16:23 crc kubenswrapper[5050]: I1123 17:16:23.110347 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"8098c44a-b797-4fba-8d0d-2138ca9b367a","Type":"ContainerDied","Data":"13f460433dc3293b2854c9d73e0084d0f750ebd30de0e229dfc7cfd9257c9b61"} Nov 23 17:16:23 crc kubenswrapper[5050]: I1123 17:16:23.119470 5050 
generic.go:334] "Generic (PLEG): container finished" podID="48851bd1-4829-4f8d-ae93-a6cb3266ce68" containerID="dcc58da0d08a430d214f6e7965bf67d82bfee9eb6a89de2efe49a48485c28c40" exitCode=143 Nov 23 17:16:23 crc kubenswrapper[5050]: I1123 17:16:23.119623 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"48851bd1-4829-4f8d-ae93-a6cb3266ce68","Type":"ContainerDied","Data":"dcc58da0d08a430d214f6e7965bf67d82bfee9eb6a89de2efe49a48485c28c40"} Nov 23 17:16:23 crc kubenswrapper[5050]: I1123 17:16:23.550693 5050 scope.go:117] "RemoveContainer" containerID="87924a967acda0996de5f2211b02af6200dd82f1a857843afe7f7b8fe5bfdcda" Nov 23 17:16:23 crc kubenswrapper[5050]: E1123 17:16:23.551415 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 17:16:23 crc kubenswrapper[5050]: I1123 17:16:23.642251 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 23 17:16:23 crc kubenswrapper[5050]: I1123 17:16:23.759216 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8a3ba510-87a6-443f-b74c-ac6361726b1b-combined-ca-bundle\") pod \"8a3ba510-87a6-443f-b74c-ac6361726b1b\" (UID: \"8a3ba510-87a6-443f-b74c-ac6361726b1b\") " Nov 23 17:16:23 crc kubenswrapper[5050]: I1123 17:16:23.759616 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hlw4m\" (UniqueName: \"kubernetes.io/projected/8a3ba510-87a6-443f-b74c-ac6361726b1b-kube-api-access-hlw4m\") pod \"8a3ba510-87a6-443f-b74c-ac6361726b1b\" (UID: \"8a3ba510-87a6-443f-b74c-ac6361726b1b\") " Nov 23 17:16:23 crc kubenswrapper[5050]: I1123 17:16:23.759683 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8a3ba510-87a6-443f-b74c-ac6361726b1b-config-data\") pod \"8a3ba510-87a6-443f-b74c-ac6361726b1b\" (UID: \"8a3ba510-87a6-443f-b74c-ac6361726b1b\") " Nov 23 17:16:23 crc kubenswrapper[5050]: I1123 17:16:23.785824 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8a3ba510-87a6-443f-b74c-ac6361726b1b-kube-api-access-hlw4m" (OuterVolumeSpecName: "kube-api-access-hlw4m") pod "8a3ba510-87a6-443f-b74c-ac6361726b1b" (UID: "8a3ba510-87a6-443f-b74c-ac6361726b1b"). InnerVolumeSpecName "kube-api-access-hlw4m". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 17:16:23 crc kubenswrapper[5050]: I1123 17:16:23.810400 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8a3ba510-87a6-443f-b74c-ac6361726b1b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8a3ba510-87a6-443f-b74c-ac6361726b1b" (UID: "8a3ba510-87a6-443f-b74c-ac6361726b1b"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 17:16:23 crc kubenswrapper[5050]: I1123 17:16:23.814483 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8a3ba510-87a6-443f-b74c-ac6361726b1b-config-data" (OuterVolumeSpecName: "config-data") pod "8a3ba510-87a6-443f-b74c-ac6361726b1b" (UID: "8a3ba510-87a6-443f-b74c-ac6361726b1b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 17:16:23 crc kubenswrapper[5050]: I1123 17:16:23.862366 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hlw4m\" (UniqueName: \"kubernetes.io/projected/8a3ba510-87a6-443f-b74c-ac6361726b1b-kube-api-access-hlw4m\") on node \"crc\" DevicePath \"\"" Nov 23 17:16:23 crc kubenswrapper[5050]: I1123 17:16:23.862403 5050 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8a3ba510-87a6-443f-b74c-ac6361726b1b-config-data\") on node \"crc\" DevicePath \"\"" Nov 23 17:16:23 crc kubenswrapper[5050]: I1123 17:16:23.862414 5050 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8a3ba510-87a6-443f-b74c-ac6361726b1b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 17:16:24 crc kubenswrapper[5050]: I1123 17:16:24.133038 5050 generic.go:334] "Generic (PLEG): container finished" podID="8a3ba510-87a6-443f-b74c-ac6361726b1b" containerID="1935d93f340a395c5187c9ed793c8e3f3a89c31b0d8f0424a4bc98b1b7156e82" exitCode=0 Nov 23 17:16:24 crc kubenswrapper[5050]: I1123 17:16:24.133134 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 23 17:16:24 crc kubenswrapper[5050]: I1123 17:16:24.133131 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"8a3ba510-87a6-443f-b74c-ac6361726b1b","Type":"ContainerDied","Data":"1935d93f340a395c5187c9ed793c8e3f3a89c31b0d8f0424a4bc98b1b7156e82"} Nov 23 17:16:24 crc kubenswrapper[5050]: I1123 17:16:24.133317 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"8a3ba510-87a6-443f-b74c-ac6361726b1b","Type":"ContainerDied","Data":"a70583a4269e073dd033e8b75dc0adacec3dcab3c1ea87e532196798f633095c"} Nov 23 17:16:24 crc kubenswrapper[5050]: I1123 17:16:24.133361 5050 scope.go:117] "RemoveContainer" containerID="1935d93f340a395c5187c9ed793c8e3f3a89c31b0d8f0424a4bc98b1b7156e82" Nov 23 17:16:24 crc kubenswrapper[5050]: I1123 17:16:24.189354 5050 scope.go:117] "RemoveContainer" containerID="1935d93f340a395c5187c9ed793c8e3f3a89c31b0d8f0424a4bc98b1b7156e82" Nov 23 17:16:24 crc kubenswrapper[5050]: E1123 17:16:24.190016 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1935d93f340a395c5187c9ed793c8e3f3a89c31b0d8f0424a4bc98b1b7156e82\": container with ID starting with 1935d93f340a395c5187c9ed793c8e3f3a89c31b0d8f0424a4bc98b1b7156e82 not found: ID does not exist" containerID="1935d93f340a395c5187c9ed793c8e3f3a89c31b0d8f0424a4bc98b1b7156e82" Nov 23 17:16:24 crc kubenswrapper[5050]: I1123 17:16:24.190073 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1935d93f340a395c5187c9ed793c8e3f3a89c31b0d8f0424a4bc98b1b7156e82"} err="failed to get container status \"1935d93f340a395c5187c9ed793c8e3f3a89c31b0d8f0424a4bc98b1b7156e82\": rpc error: code = NotFound desc = could not find 
container \"1935d93f340a395c5187c9ed793c8e3f3a89c31b0d8f0424a4bc98b1b7156e82\": container with ID starting with 1935d93f340a395c5187c9ed793c8e3f3a89c31b0d8f0424a4bc98b1b7156e82 not found: ID does not exist" Nov 23 17:16:24 crc kubenswrapper[5050]: I1123 17:16:24.192474 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 23 17:16:24 crc kubenswrapper[5050]: I1123 17:16:24.201199 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 23 17:16:24 crc kubenswrapper[5050]: I1123 17:16:24.249978 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 23 17:16:24 crc kubenswrapper[5050]: E1123 17:16:24.251026 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9623322c-0a8c-4fbb-8209-51641891664b" containerName="neutron-dhcp-openstack-openstack-cell1" Nov 23 17:16:24 crc kubenswrapper[5050]: I1123 17:16:24.251066 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="9623322c-0a8c-4fbb-8209-51641891664b" containerName="neutron-dhcp-openstack-openstack-cell1" Nov 23 17:16:24 crc kubenswrapper[5050]: E1123 17:16:24.251099 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8a3ba510-87a6-443f-b74c-ac6361726b1b" containerName="nova-cell0-conductor-conductor" Nov 23 17:16:24 crc kubenswrapper[5050]: I1123 17:16:24.251132 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="8a3ba510-87a6-443f-b74c-ac6361726b1b" containerName="nova-cell0-conductor-conductor" Nov 23 17:16:24 crc kubenswrapper[5050]: E1123 17:16:24.251193 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5bd6eada-dd5d-4d4a-b5d3-68be848d6fbc" containerName="collect-profiles" Nov 23 17:16:24 crc kubenswrapper[5050]: I1123 17:16:24.251207 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="5bd6eada-dd5d-4d4a-b5d3-68be848d6fbc" containerName="collect-profiles" Nov 23 17:16:24 crc kubenswrapper[5050]: I1123 17:16:24.251622 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="9623322c-0a8c-4fbb-8209-51641891664b" containerName="neutron-dhcp-openstack-openstack-cell1" Nov 23 17:16:24 crc kubenswrapper[5050]: I1123 17:16:24.251666 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="5bd6eada-dd5d-4d4a-b5d3-68be848d6fbc" containerName="collect-profiles" Nov 23 17:16:24 crc kubenswrapper[5050]: I1123 17:16:24.251698 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="8a3ba510-87a6-443f-b74c-ac6361726b1b" containerName="nova-cell0-conductor-conductor" Nov 23 17:16:24 crc kubenswrapper[5050]: I1123 17:16:24.253345 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 23 17:16:24 crc kubenswrapper[5050]: I1123 17:16:24.263533 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Nov 23 17:16:24 crc kubenswrapper[5050]: I1123 17:16:24.266685 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 23 17:16:24 crc kubenswrapper[5050]: I1123 17:16:24.384384 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8f807c69-cd14-40a1-9dd1-c50271693387-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"8f807c69-cd14-40a1-9dd1-c50271693387\") " pod="openstack/nova-cell0-conductor-0" Nov 23 17:16:24 crc kubenswrapper[5050]: I1123 17:16:24.384549 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8f807c69-cd14-40a1-9dd1-c50271693387-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"8f807c69-cd14-40a1-9dd1-c50271693387\") " pod="openstack/nova-cell0-conductor-0" Nov 23 17:16:24 crc kubenswrapper[5050]: I1123 17:16:24.384717 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k4wvf\" (UniqueName: \"kubernetes.io/projected/8f807c69-cd14-40a1-9dd1-c50271693387-kube-api-access-k4wvf\") pod \"nova-cell0-conductor-0\" (UID: \"8f807c69-cd14-40a1-9dd1-c50271693387\") " pod="openstack/nova-cell0-conductor-0" Nov 23 17:16:24 crc kubenswrapper[5050]: I1123 17:16:24.487252 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8f807c69-cd14-40a1-9dd1-c50271693387-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"8f807c69-cd14-40a1-9dd1-c50271693387\") " pod="openstack/nova-cell0-conductor-0" Nov 23 17:16:24 crc kubenswrapper[5050]: I1123 17:16:24.487384 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8f807c69-cd14-40a1-9dd1-c50271693387-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"8f807c69-cd14-40a1-9dd1-c50271693387\") " pod="openstack/nova-cell0-conductor-0" Nov 23 17:16:24 crc kubenswrapper[5050]: I1123 17:16:24.487550 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k4wvf\" (UniqueName: \"kubernetes.io/projected/8f807c69-cd14-40a1-9dd1-c50271693387-kube-api-access-k4wvf\") pod \"nova-cell0-conductor-0\" (UID: \"8f807c69-cd14-40a1-9dd1-c50271693387\") " pod="openstack/nova-cell0-conductor-0" Nov 23 17:16:24 crc kubenswrapper[5050]: I1123 17:16:24.501780 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8f807c69-cd14-40a1-9dd1-c50271693387-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"8f807c69-cd14-40a1-9dd1-c50271693387\") " pod="openstack/nova-cell0-conductor-0" Nov 23 17:16:24 crc kubenswrapper[5050]: I1123 17:16:24.510978 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8f807c69-cd14-40a1-9dd1-c50271693387-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"8f807c69-cd14-40a1-9dd1-c50271693387\") " pod="openstack/nova-cell0-conductor-0" Nov 23 17:16:24 crc kubenswrapper[5050]: I1123 17:16:24.517534 5050 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k4wvf\" (UniqueName: \"kubernetes.io/projected/8f807c69-cd14-40a1-9dd1-c50271693387-kube-api-access-k4wvf\") pod \"nova-cell0-conductor-0\" (UID: \"8f807c69-cd14-40a1-9dd1-c50271693387\") " pod="openstack/nova-cell0-conductor-0" Nov 23 17:16:24 crc kubenswrapper[5050]: I1123 17:16:24.584644 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 23 17:16:25 crc kubenswrapper[5050]: I1123 17:16:25.157472 5050 generic.go:334] "Generic (PLEG): container finished" podID="183ebe7a-5aa3-4a3f-8392-bbd7e3433b15" containerID="7ebf5479e4cd1274bcd94267b382aefadd914349b5cce33cceeaa72468c30166" exitCode=0 Nov 23 17:16:25 crc kubenswrapper[5050]: I1123 17:16:25.158429 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"183ebe7a-5aa3-4a3f-8392-bbd7e3433b15","Type":"ContainerDied","Data":"7ebf5479e4cd1274bcd94267b382aefadd914349b5cce33cceeaa72468c30166"} Nov 23 17:16:25 crc kubenswrapper[5050]: I1123 17:16:25.163669 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 23 17:16:25 crc kubenswrapper[5050]: I1123 17:16:25.371105 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 23 17:16:25 crc kubenswrapper[5050]: I1123 17:16:25.505375 5050 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="48851bd1-4829-4f8d-ae93-a6cb3266ce68" containerName="nova-metadata-log" probeResult="failure" output="Get \"http://10.217.1.82:8775/\": read tcp 10.217.0.2:46300->10.217.1.82:8775: read: connection reset by peer" Nov 23 17:16:25 crc kubenswrapper[5050]: I1123 17:16:25.505423 5050 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="48851bd1-4829-4f8d-ae93-a6cb3266ce68" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"http://10.217.1.82:8775/\": read tcp 10.217.0.2:46296->10.217.1.82:8775: read: connection reset by peer" Nov 23 17:16:25 crc kubenswrapper[5050]: I1123 17:16:25.517660 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hxqx6\" (UniqueName: \"kubernetes.io/projected/183ebe7a-5aa3-4a3f-8392-bbd7e3433b15-kube-api-access-hxqx6\") pod \"183ebe7a-5aa3-4a3f-8392-bbd7e3433b15\" (UID: \"183ebe7a-5aa3-4a3f-8392-bbd7e3433b15\") " Nov 23 17:16:25 crc kubenswrapper[5050]: I1123 17:16:25.517721 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/183ebe7a-5aa3-4a3f-8392-bbd7e3433b15-combined-ca-bundle\") pod \"183ebe7a-5aa3-4a3f-8392-bbd7e3433b15\" (UID: \"183ebe7a-5aa3-4a3f-8392-bbd7e3433b15\") " Nov 23 17:16:25 crc kubenswrapper[5050]: I1123 17:16:25.519040 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/183ebe7a-5aa3-4a3f-8392-bbd7e3433b15-config-data\") pod \"183ebe7a-5aa3-4a3f-8392-bbd7e3433b15\" (UID: \"183ebe7a-5aa3-4a3f-8392-bbd7e3433b15\") " Nov 23 17:16:25 crc kubenswrapper[5050]: I1123 17:16:25.538525 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/183ebe7a-5aa3-4a3f-8392-bbd7e3433b15-kube-api-access-hxqx6" (OuterVolumeSpecName: "kube-api-access-hxqx6") pod "183ebe7a-5aa3-4a3f-8392-bbd7e3433b15" (UID: 
"183ebe7a-5aa3-4a3f-8392-bbd7e3433b15"). InnerVolumeSpecName "kube-api-access-hxqx6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 17:16:25 crc kubenswrapper[5050]: I1123 17:16:25.566799 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/183ebe7a-5aa3-4a3f-8392-bbd7e3433b15-config-data" (OuterVolumeSpecName: "config-data") pod "183ebe7a-5aa3-4a3f-8392-bbd7e3433b15" (UID: "183ebe7a-5aa3-4a3f-8392-bbd7e3433b15"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 17:16:25 crc kubenswrapper[5050]: I1123 17:16:25.568283 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/183ebe7a-5aa3-4a3f-8392-bbd7e3433b15-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "183ebe7a-5aa3-4a3f-8392-bbd7e3433b15" (UID: "183ebe7a-5aa3-4a3f-8392-bbd7e3433b15"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 17:16:25 crc kubenswrapper[5050]: I1123 17:16:25.575937 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8a3ba510-87a6-443f-b74c-ac6361726b1b" path="/var/lib/kubelet/pods/8a3ba510-87a6-443f-b74c-ac6361726b1b/volumes" Nov 23 17:16:25 crc kubenswrapper[5050]: I1123 17:16:25.623032 5050 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/183ebe7a-5aa3-4a3f-8392-bbd7e3433b15-config-data\") on node \"crc\" DevicePath \"\"" Nov 23 17:16:25 crc kubenswrapper[5050]: I1123 17:16:25.623082 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hxqx6\" (UniqueName: \"kubernetes.io/projected/183ebe7a-5aa3-4a3f-8392-bbd7e3433b15-kube-api-access-hxqx6\") on node \"crc\" DevicePath \"\"" Nov 23 17:16:25 crc kubenswrapper[5050]: I1123 17:16:25.623095 5050 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/183ebe7a-5aa3-4a3f-8392-bbd7e3433b15-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 17:16:26 crc kubenswrapper[5050]: I1123 17:16:26.173798 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 23 17:16:26 crc kubenswrapper[5050]: I1123 17:16:26.184596 5050 generic.go:334] "Generic (PLEG): container finished" podID="8098c44a-b797-4fba-8d0d-2138ca9b367a" containerID="6b4b736b2f6317c29343fa48432ccdef3758c170dc6360ed6cb25caec383b0eb" exitCode=0 Nov 23 17:16:26 crc kubenswrapper[5050]: I1123 17:16:26.184661 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"8098c44a-b797-4fba-8d0d-2138ca9b367a","Type":"ContainerDied","Data":"6b4b736b2f6317c29343fa48432ccdef3758c170dc6360ed6cb25caec383b0eb"} Nov 23 17:16:26 crc kubenswrapper[5050]: I1123 17:16:26.184752 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"8098c44a-b797-4fba-8d0d-2138ca9b367a","Type":"ContainerDied","Data":"45fa5f45e24923a6c1ca202a72ce406f17a5be3147314c86610275116cd2dbeb"} Nov 23 17:16:26 crc kubenswrapper[5050]: I1123 17:16:26.184781 5050 scope.go:117] "RemoveContainer" containerID="6b4b736b2f6317c29343fa48432ccdef3758c170dc6360ed6cb25caec383b0eb" Nov 23 17:16:26 crc kubenswrapper[5050]: I1123 17:16:26.186415 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 23 17:16:26 crc kubenswrapper[5050]: I1123 17:16:26.189344 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"183ebe7a-5aa3-4a3f-8392-bbd7e3433b15","Type":"ContainerDied","Data":"baf8437cd84cdd6c08362052a56b76dd8d2fcb882ddc022c5ae4282f47a42d5a"} Nov 23 17:16:26 crc kubenswrapper[5050]: I1123 17:16:26.189427 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 23 17:16:26 crc kubenswrapper[5050]: I1123 17:16:26.194085 5050 generic.go:334] "Generic (PLEG): container finished" podID="48851bd1-4829-4f8d-ae93-a6cb3266ce68" containerID="212e5898b8ac9c7400429c63b5c0b7573e62e87f8d170f83efe286d8cbd392fe" exitCode=0 Nov 23 17:16:26 crc kubenswrapper[5050]: I1123 17:16:26.194168 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"48851bd1-4829-4f8d-ae93-a6cb3266ce68","Type":"ContainerDied","Data":"212e5898b8ac9c7400429c63b5c0b7573e62e87f8d170f83efe286d8cbd392fe"} Nov 23 17:16:26 crc kubenswrapper[5050]: I1123 17:16:26.194232 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"48851bd1-4829-4f8d-ae93-a6cb3266ce68","Type":"ContainerDied","Data":"c3e7f6296d2a3d30c488de62908ccfe5a8c475977f5675533dfa4038c34f1449"} Nov 23 17:16:26 crc kubenswrapper[5050]: I1123 17:16:26.194314 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 23 17:16:26 crc kubenswrapper[5050]: I1123 17:16:26.237062 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"8f807c69-cd14-40a1-9dd1-c50271693387","Type":"ContainerStarted","Data":"074c6b709d03677a400593bace5f17d14aa9fd0f569a7a896bad918371ea42dc"} Nov 23 17:16:26 crc kubenswrapper[5050]: I1123 17:16:26.237124 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"8f807c69-cd14-40a1-9dd1-c50271693387","Type":"ContainerStarted","Data":"aa40d012e9b6dd7c5f5a59674070d97c3ee19f50d75b5bc546770a38fa80daba"} Nov 23 17:16:26 crc kubenswrapper[5050]: I1123 17:16:26.237228 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell0-conductor-0" Nov 23 17:16:26 crc kubenswrapper[5050]: I1123 17:16:26.247152 5050 generic.go:334] "Generic (PLEG): container finished" podID="ddc00b7c-3525-45d6-863f-20e9e26ff02f" containerID="4ede08fe48e6b2bbb659c27fe3fc3562174453c3e85d985533d4a88c6f997545" exitCode=0 Nov 23 17:16:26 crc kubenswrapper[5050]: I1123 17:16:26.247212 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"ddc00b7c-3525-45d6-863f-20e9e26ff02f","Type":"ContainerDied","Data":"4ede08fe48e6b2bbb659c27fe3fc3562174453c3e85d985533d4a88c6f997545"} Nov 23 17:16:26 crc kubenswrapper[5050]: I1123 17:16:26.271520 5050 scope.go:117] "RemoveContainer" containerID="13f460433dc3293b2854c9d73e0084d0f750ebd30de0e229dfc7cfd9257c9b61" Nov 23 17:16:26 crc kubenswrapper[5050]: I1123 17:16:26.304982 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 23 17:16:26 crc kubenswrapper[5050]: I1123 17:16:26.320958 5050 scope.go:117] "RemoveContainer" containerID="6b4b736b2f6317c29343fa48432ccdef3758c170dc6360ed6cb25caec383b0eb" Nov 23 17:16:26 crc kubenswrapper[5050]: E1123 17:16:26.321723 5050 log.go:32] "ContainerStatus from runtime 
service failed" err="rpc error: code = NotFound desc = could not find container \"6b4b736b2f6317c29343fa48432ccdef3758c170dc6360ed6cb25caec383b0eb\": container with ID starting with 6b4b736b2f6317c29343fa48432ccdef3758c170dc6360ed6cb25caec383b0eb not found: ID does not exist" containerID="6b4b736b2f6317c29343fa48432ccdef3758c170dc6360ed6cb25caec383b0eb" Nov 23 17:16:26 crc kubenswrapper[5050]: I1123 17:16:26.321757 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6b4b736b2f6317c29343fa48432ccdef3758c170dc6360ed6cb25caec383b0eb"} err="failed to get container status \"6b4b736b2f6317c29343fa48432ccdef3758c170dc6360ed6cb25caec383b0eb\": rpc error: code = NotFound desc = could not find container \"6b4b736b2f6317c29343fa48432ccdef3758c170dc6360ed6cb25caec383b0eb\": container with ID starting with 6b4b736b2f6317c29343fa48432ccdef3758c170dc6360ed6cb25caec383b0eb not found: ID does not exist" Nov 23 17:16:26 crc kubenswrapper[5050]: I1123 17:16:26.321786 5050 scope.go:117] "RemoveContainer" containerID="13f460433dc3293b2854c9d73e0084d0f750ebd30de0e229dfc7cfd9257c9b61" Nov 23 17:16:26 crc kubenswrapper[5050]: E1123 17:16:26.322291 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"13f460433dc3293b2854c9d73e0084d0f750ebd30de0e229dfc7cfd9257c9b61\": container with ID starting with 13f460433dc3293b2854c9d73e0084d0f750ebd30de0e229dfc7cfd9257c9b61 not found: ID does not exist" containerID="13f460433dc3293b2854c9d73e0084d0f750ebd30de0e229dfc7cfd9257c9b61" Nov 23 17:16:26 crc kubenswrapper[5050]: I1123 17:16:26.322356 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"13f460433dc3293b2854c9d73e0084d0f750ebd30de0e229dfc7cfd9257c9b61"} err="failed to get container status \"13f460433dc3293b2854c9d73e0084d0f750ebd30de0e229dfc7cfd9257c9b61\": rpc error: code = NotFound desc = could not find container \"13f460433dc3293b2854c9d73e0084d0f750ebd30de0e229dfc7cfd9257c9b61\": container with ID starting with 13f460433dc3293b2854c9d73e0084d0f750ebd30de0e229dfc7cfd9257c9b61 not found: ID does not exist" Nov 23 17:16:26 crc kubenswrapper[5050]: I1123 17:16:26.322394 5050 scope.go:117] "RemoveContainer" containerID="7ebf5479e4cd1274bcd94267b382aefadd914349b5cce33cceeaa72468c30166" Nov 23 17:16:26 crc kubenswrapper[5050]: I1123 17:16:26.332019 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 23 17:16:26 crc kubenswrapper[5050]: I1123 17:16:26.343370 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hpjkr\" (UniqueName: \"kubernetes.io/projected/8098c44a-b797-4fba-8d0d-2138ca9b367a-kube-api-access-hpjkr\") pod \"8098c44a-b797-4fba-8d0d-2138ca9b367a\" (UID: \"8098c44a-b797-4fba-8d0d-2138ca9b367a\") " Nov 23 17:16:26 crc kubenswrapper[5050]: I1123 17:16:26.343488 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8098c44a-b797-4fba-8d0d-2138ca9b367a-combined-ca-bundle\") pod \"8098c44a-b797-4fba-8d0d-2138ca9b367a\" (UID: \"8098c44a-b797-4fba-8d0d-2138ca9b367a\") " Nov 23 17:16:26 crc kubenswrapper[5050]: I1123 17:16:26.343550 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8098c44a-b797-4fba-8d0d-2138ca9b367a-logs\") pod \"8098c44a-b797-4fba-8d0d-2138ca9b367a\" (UID: 
\"8098c44a-b797-4fba-8d0d-2138ca9b367a\") " Nov 23 17:16:26 crc kubenswrapper[5050]: I1123 17:16:26.343603 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/48851bd1-4829-4f8d-ae93-a6cb3266ce68-combined-ca-bundle\") pod \"48851bd1-4829-4f8d-ae93-a6cb3266ce68\" (UID: \"48851bd1-4829-4f8d-ae93-a6cb3266ce68\") " Nov 23 17:16:26 crc kubenswrapper[5050]: I1123 17:16:26.343741 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8098c44a-b797-4fba-8d0d-2138ca9b367a-config-data\") pod \"8098c44a-b797-4fba-8d0d-2138ca9b367a\" (UID: \"8098c44a-b797-4fba-8d0d-2138ca9b367a\") " Nov 23 17:16:26 crc kubenswrapper[5050]: I1123 17:16:26.343776 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q9mkv\" (UniqueName: \"kubernetes.io/projected/48851bd1-4829-4f8d-ae93-a6cb3266ce68-kube-api-access-q9mkv\") pod \"48851bd1-4829-4f8d-ae93-a6cb3266ce68\" (UID: \"48851bd1-4829-4f8d-ae93-a6cb3266ce68\") " Nov 23 17:16:26 crc kubenswrapper[5050]: I1123 17:16:26.344000 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/48851bd1-4829-4f8d-ae93-a6cb3266ce68-logs\") pod \"48851bd1-4829-4f8d-ae93-a6cb3266ce68\" (UID: \"48851bd1-4829-4f8d-ae93-a6cb3266ce68\") " Nov 23 17:16:26 crc kubenswrapper[5050]: I1123 17:16:26.344057 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/48851bd1-4829-4f8d-ae93-a6cb3266ce68-config-data\") pod \"48851bd1-4829-4f8d-ae93-a6cb3266ce68\" (UID: \"48851bd1-4829-4f8d-ae93-a6cb3266ce68\") " Nov 23 17:16:26 crc kubenswrapper[5050]: I1123 17:16:26.348226 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8098c44a-b797-4fba-8d0d-2138ca9b367a-logs" (OuterVolumeSpecName: "logs") pod "8098c44a-b797-4fba-8d0d-2138ca9b367a" (UID: "8098c44a-b797-4fba-8d0d-2138ca9b367a"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 17:16:26 crc kubenswrapper[5050]: I1123 17:16:26.351774 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/48851bd1-4829-4f8d-ae93-a6cb3266ce68-logs" (OuterVolumeSpecName: "logs") pod "48851bd1-4829-4f8d-ae93-a6cb3266ce68" (UID: "48851bd1-4829-4f8d-ae93-a6cb3266ce68"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 17:16:26 crc kubenswrapper[5050]: I1123 17:16:26.352073 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-0" podStartSLOduration=2.352051385 podStartE2EDuration="2.352051385s" podCreationTimestamp="2025-11-23 17:16:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 17:16:26.282224304 +0000 UTC m=+9281.449220789" watchObservedRunningTime="2025-11-23 17:16:26.352051385 +0000 UTC m=+9281.519047870" Nov 23 17:16:26 crc kubenswrapper[5050]: I1123 17:16:26.378319 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/48851bd1-4829-4f8d-ae93-a6cb3266ce68-kube-api-access-q9mkv" (OuterVolumeSpecName: "kube-api-access-q9mkv") pod "48851bd1-4829-4f8d-ae93-a6cb3266ce68" (UID: "48851bd1-4829-4f8d-ae93-a6cb3266ce68"). 
InnerVolumeSpecName "kube-api-access-q9mkv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 17:16:26 crc kubenswrapper[5050]: I1123 17:16:26.379978 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8098c44a-b797-4fba-8d0d-2138ca9b367a-kube-api-access-hpjkr" (OuterVolumeSpecName: "kube-api-access-hpjkr") pod "8098c44a-b797-4fba-8d0d-2138ca9b367a" (UID: "8098c44a-b797-4fba-8d0d-2138ca9b367a"). InnerVolumeSpecName "kube-api-access-hpjkr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 17:16:26 crc kubenswrapper[5050]: I1123 17:16:26.387580 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8098c44a-b797-4fba-8d0d-2138ca9b367a-config-data" (OuterVolumeSpecName: "config-data") pod "8098c44a-b797-4fba-8d0d-2138ca9b367a" (UID: "8098c44a-b797-4fba-8d0d-2138ca9b367a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 17:16:26 crc kubenswrapper[5050]: I1123 17:16:26.391291 5050 scope.go:117] "RemoveContainer" containerID="212e5898b8ac9c7400429c63b5c0b7573e62e87f8d170f83efe286d8cbd392fe" Nov 23 17:16:26 crc kubenswrapper[5050]: I1123 17:16:26.398844 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/48851bd1-4829-4f8d-ae93-a6cb3266ce68-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "48851bd1-4829-4f8d-ae93-a6cb3266ce68" (UID: "48851bd1-4829-4f8d-ae93-a6cb3266ce68"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 17:16:26 crc kubenswrapper[5050]: I1123 17:16:26.439341 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 23 17:16:26 crc kubenswrapper[5050]: I1123 17:16:26.447578 5050 scope.go:117] "RemoveContainer" containerID="dcc58da0d08a430d214f6e7965bf67d82bfee9eb6a89de2efe49a48485c28c40" Nov 23 17:16:26 crc kubenswrapper[5050]: I1123 17:16:26.451922 5050 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8098c44a-b797-4fba-8d0d-2138ca9b367a-logs\") on node \"crc\" DevicePath \"\"" Nov 23 17:16:26 crc kubenswrapper[5050]: I1123 17:16:26.452030 5050 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/48851bd1-4829-4f8d-ae93-a6cb3266ce68-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 17:16:26 crc kubenswrapper[5050]: I1123 17:16:26.452047 5050 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8098c44a-b797-4fba-8d0d-2138ca9b367a-config-data\") on node \"crc\" DevicePath \"\"" Nov 23 17:16:26 crc kubenswrapper[5050]: I1123 17:16:26.452058 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q9mkv\" (UniqueName: \"kubernetes.io/projected/48851bd1-4829-4f8d-ae93-a6cb3266ce68-kube-api-access-q9mkv\") on node \"crc\" DevicePath \"\"" Nov 23 17:16:26 crc kubenswrapper[5050]: I1123 17:16:26.452070 5050 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/48851bd1-4829-4f8d-ae93-a6cb3266ce68-logs\") on node \"crc\" DevicePath \"\"" Nov 23 17:16:26 crc kubenswrapper[5050]: I1123 17:16:26.452085 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hpjkr\" (UniqueName: \"kubernetes.io/projected/8098c44a-b797-4fba-8d0d-2138ca9b367a-kube-api-access-hpjkr\") on node \"crc\" 
DevicePath \"\"" Nov 23 17:16:26 crc kubenswrapper[5050]: I1123 17:16:26.454176 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/48851bd1-4829-4f8d-ae93-a6cb3266ce68-config-data" (OuterVolumeSpecName: "config-data") pod "48851bd1-4829-4f8d-ae93-a6cb3266ce68" (UID: "48851bd1-4829-4f8d-ae93-a6cb3266ce68"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 17:16:26 crc kubenswrapper[5050]: I1123 17:16:26.455891 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 23 17:16:26 crc kubenswrapper[5050]: E1123 17:16:26.459470 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8098c44a-b797-4fba-8d0d-2138ca9b367a" containerName="nova-api-api" Nov 23 17:16:26 crc kubenswrapper[5050]: I1123 17:16:26.459505 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="8098c44a-b797-4fba-8d0d-2138ca9b367a" containerName="nova-api-api" Nov 23 17:16:26 crc kubenswrapper[5050]: E1123 17:16:26.459559 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ddc00b7c-3525-45d6-863f-20e9e26ff02f" containerName="nova-scheduler-scheduler" Nov 23 17:16:26 crc kubenswrapper[5050]: I1123 17:16:26.459882 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="ddc00b7c-3525-45d6-863f-20e9e26ff02f" containerName="nova-scheduler-scheduler" Nov 23 17:16:26 crc kubenswrapper[5050]: E1123 17:16:26.459908 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="48851bd1-4829-4f8d-ae93-a6cb3266ce68" containerName="nova-metadata-log" Nov 23 17:16:26 crc kubenswrapper[5050]: I1123 17:16:26.459915 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="48851bd1-4829-4f8d-ae93-a6cb3266ce68" containerName="nova-metadata-log" Nov 23 17:16:26 crc kubenswrapper[5050]: E1123 17:16:26.459959 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="48851bd1-4829-4f8d-ae93-a6cb3266ce68" containerName="nova-metadata-metadata" Nov 23 17:16:26 crc kubenswrapper[5050]: I1123 17:16:26.459965 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="48851bd1-4829-4f8d-ae93-a6cb3266ce68" containerName="nova-metadata-metadata" Nov 23 17:16:26 crc kubenswrapper[5050]: E1123 17:16:26.459997 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="183ebe7a-5aa3-4a3f-8392-bbd7e3433b15" containerName="nova-cell1-conductor-conductor" Nov 23 17:16:26 crc kubenswrapper[5050]: I1123 17:16:26.460004 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="183ebe7a-5aa3-4a3f-8392-bbd7e3433b15" containerName="nova-cell1-conductor-conductor" Nov 23 17:16:26 crc kubenswrapper[5050]: E1123 17:16:26.460025 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8098c44a-b797-4fba-8d0d-2138ca9b367a" containerName="nova-api-log" Nov 23 17:16:26 crc kubenswrapper[5050]: I1123 17:16:26.460037 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="8098c44a-b797-4fba-8d0d-2138ca9b367a" containerName="nova-api-log" Nov 23 17:16:26 crc kubenswrapper[5050]: I1123 17:16:26.459650 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8098c44a-b797-4fba-8d0d-2138ca9b367a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8098c44a-b797-4fba-8d0d-2138ca9b367a" (UID: "8098c44a-b797-4fba-8d0d-2138ca9b367a"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 17:16:26 crc kubenswrapper[5050]: I1123 17:16:26.461613 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="183ebe7a-5aa3-4a3f-8392-bbd7e3433b15" containerName="nova-cell1-conductor-conductor" Nov 23 17:16:26 crc kubenswrapper[5050]: I1123 17:16:26.461648 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="48851bd1-4829-4f8d-ae93-a6cb3266ce68" containerName="nova-metadata-log" Nov 23 17:16:26 crc kubenswrapper[5050]: I1123 17:16:26.461672 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="8098c44a-b797-4fba-8d0d-2138ca9b367a" containerName="nova-api-log" Nov 23 17:16:26 crc kubenswrapper[5050]: I1123 17:16:26.461722 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="48851bd1-4829-4f8d-ae93-a6cb3266ce68" containerName="nova-metadata-metadata" Nov 23 17:16:26 crc kubenswrapper[5050]: I1123 17:16:26.461742 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="8098c44a-b797-4fba-8d0d-2138ca9b367a" containerName="nova-api-api" Nov 23 17:16:26 crc kubenswrapper[5050]: I1123 17:16:26.461762 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="ddc00b7c-3525-45d6-863f-20e9e26ff02f" containerName="nova-scheduler-scheduler" Nov 23 17:16:26 crc kubenswrapper[5050]: I1123 17:16:26.472245 5050 scope.go:117] "RemoveContainer" containerID="212e5898b8ac9c7400429c63b5c0b7573e62e87f8d170f83efe286d8cbd392fe" Nov 23 17:16:26 crc kubenswrapper[5050]: E1123 17:16:26.472902 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"212e5898b8ac9c7400429c63b5c0b7573e62e87f8d170f83efe286d8cbd392fe\": container with ID starting with 212e5898b8ac9c7400429c63b5c0b7573e62e87f8d170f83efe286d8cbd392fe not found: ID does not exist" containerID="212e5898b8ac9c7400429c63b5c0b7573e62e87f8d170f83efe286d8cbd392fe" Nov 23 17:16:26 crc kubenswrapper[5050]: I1123 17:16:26.472946 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"212e5898b8ac9c7400429c63b5c0b7573e62e87f8d170f83efe286d8cbd392fe"} err="failed to get container status \"212e5898b8ac9c7400429c63b5c0b7573e62e87f8d170f83efe286d8cbd392fe\": rpc error: code = NotFound desc = could not find container \"212e5898b8ac9c7400429c63b5c0b7573e62e87f8d170f83efe286d8cbd392fe\": container with ID starting with 212e5898b8ac9c7400429c63b5c0b7573e62e87f8d170f83efe286d8cbd392fe not found: ID does not exist" Nov 23 17:16:26 crc kubenswrapper[5050]: I1123 17:16:26.472977 5050 scope.go:117] "RemoveContainer" containerID="dcc58da0d08a430d214f6e7965bf67d82bfee9eb6a89de2efe49a48485c28c40" Nov 23 17:16:26 crc kubenswrapper[5050]: E1123 17:16:26.476557 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dcc58da0d08a430d214f6e7965bf67d82bfee9eb6a89de2efe49a48485c28c40\": container with ID starting with dcc58da0d08a430d214f6e7965bf67d82bfee9eb6a89de2efe49a48485c28c40 not found: ID does not exist" containerID="dcc58da0d08a430d214f6e7965bf67d82bfee9eb6a89de2efe49a48485c28c40" Nov 23 17:16:26 crc kubenswrapper[5050]: I1123 17:16:26.476592 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dcc58da0d08a430d214f6e7965bf67d82bfee9eb6a89de2efe49a48485c28c40"} err="failed to get container status \"dcc58da0d08a430d214f6e7965bf67d82bfee9eb6a89de2efe49a48485c28c40\": rpc error: code = NotFound desc = 
could not find container \"dcc58da0d08a430d214f6e7965bf67d82bfee9eb6a89de2efe49a48485c28c40\": container with ID starting with dcc58da0d08a430d214f6e7965bf67d82bfee9eb6a89de2efe49a48485c28c40 not found: ID does not exist" Nov 23 17:16:26 crc kubenswrapper[5050]: I1123 17:16:26.527342 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 23 17:16:26 crc kubenswrapper[5050]: I1123 17:16:26.527498 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 23 17:16:26 crc kubenswrapper[5050]: I1123 17:16:26.537850 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Nov 23 17:16:26 crc kubenswrapper[5050]: I1123 17:16:26.565615 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ddc00b7c-3525-45d6-863f-20e9e26ff02f-config-data\") pod \"ddc00b7c-3525-45d6-863f-20e9e26ff02f\" (UID: \"ddc00b7c-3525-45d6-863f-20e9e26ff02f\") " Nov 23 17:16:26 crc kubenswrapper[5050]: I1123 17:16:26.566159 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ddc00b7c-3525-45d6-863f-20e9e26ff02f-combined-ca-bundle\") pod \"ddc00b7c-3525-45d6-863f-20e9e26ff02f\" (UID: \"ddc00b7c-3525-45d6-863f-20e9e26ff02f\") " Nov 23 17:16:26 crc kubenswrapper[5050]: I1123 17:16:26.566361 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4n5vd\" (UniqueName: \"kubernetes.io/projected/ddc00b7c-3525-45d6-863f-20e9e26ff02f-kube-api-access-4n5vd\") pod \"ddc00b7c-3525-45d6-863f-20e9e26ff02f\" (UID: \"ddc00b7c-3525-45d6-863f-20e9e26ff02f\") " Nov 23 17:16:26 crc kubenswrapper[5050]: I1123 17:16:26.567056 5050 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/48851bd1-4829-4f8d-ae93-a6cb3266ce68-config-data\") on node \"crc\" DevicePath \"\"" Nov 23 17:16:26 crc kubenswrapper[5050]: I1123 17:16:26.567081 5050 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8098c44a-b797-4fba-8d0d-2138ca9b367a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 17:16:26 crc kubenswrapper[5050]: I1123 17:16:26.573981 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ddc00b7c-3525-45d6-863f-20e9e26ff02f-kube-api-access-4n5vd" (OuterVolumeSpecName: "kube-api-access-4n5vd") pod "ddc00b7c-3525-45d6-863f-20e9e26ff02f" (UID: "ddc00b7c-3525-45d6-863f-20e9e26ff02f"). InnerVolumeSpecName "kube-api-access-4n5vd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 17:16:26 crc kubenswrapper[5050]: I1123 17:16:26.592670 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 23 17:16:26 crc kubenswrapper[5050]: I1123 17:16:26.609857 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 23 17:16:26 crc kubenswrapper[5050]: I1123 17:16:26.638333 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ddc00b7c-3525-45d6-863f-20e9e26ff02f-config-data" (OuterVolumeSpecName: "config-data") pod "ddc00b7c-3525-45d6-863f-20e9e26ff02f" (UID: "ddc00b7c-3525-45d6-863f-20e9e26ff02f"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 17:16:26 crc kubenswrapper[5050]: I1123 17:16:26.647736 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ddc00b7c-3525-45d6-863f-20e9e26ff02f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ddc00b7c-3525-45d6-863f-20e9e26ff02f" (UID: "ddc00b7c-3525-45d6-863f-20e9e26ff02f"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 17:16:26 crc kubenswrapper[5050]: I1123 17:16:26.653937 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 23 17:16:26 crc kubenswrapper[5050]: I1123 17:16:26.656120 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 23 17:16:26 crc kubenswrapper[5050]: I1123 17:16:26.668112 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 23 17:16:26 crc kubenswrapper[5050]: I1123 17:16:26.669659 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 23 17:16:26 crc kubenswrapper[5050]: I1123 17:16:26.677699 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lbjq5\" (UniqueName: \"kubernetes.io/projected/6563762b-f9aa-4af7-a549-2f0d767f3b46-kube-api-access-lbjq5\") pod \"nova-cell1-conductor-0\" (UID: \"6563762b-f9aa-4af7-a549-2f0d767f3b46\") " pod="openstack/nova-cell1-conductor-0" Nov 23 17:16:26 crc kubenswrapper[5050]: I1123 17:16:26.678498 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6563762b-f9aa-4af7-a549-2f0d767f3b46-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"6563762b-f9aa-4af7-a549-2f0d767f3b46\") " pod="openstack/nova-cell1-conductor-0" Nov 23 17:16:26 crc kubenswrapper[5050]: I1123 17:16:26.678673 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6563762b-f9aa-4af7-a549-2f0d767f3b46-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"6563762b-f9aa-4af7-a549-2f0d767f3b46\") " pod="openstack/nova-cell1-conductor-0" Nov 23 17:16:26 crc kubenswrapper[5050]: I1123 17:16:26.678976 5050 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ddc00b7c-3525-45d6-863f-20e9e26ff02f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 17:16:26 crc kubenswrapper[5050]: I1123 17:16:26.678992 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4n5vd\" (UniqueName: \"kubernetes.io/projected/ddc00b7c-3525-45d6-863f-20e9e26ff02f-kube-api-access-4n5vd\") on node \"crc\" DevicePath \"\"" Nov 23 17:16:26 crc kubenswrapper[5050]: I1123 17:16:26.679006 5050 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ddc00b7c-3525-45d6-863f-20e9e26ff02f-config-data\") on node \"crc\" DevicePath \"\"" Nov 23 17:16:26 crc kubenswrapper[5050]: I1123 17:16:26.784612 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lbjq5\" (UniqueName: \"kubernetes.io/projected/6563762b-f9aa-4af7-a549-2f0d767f3b46-kube-api-access-lbjq5\") pod \"nova-cell1-conductor-0\" (UID: \"6563762b-f9aa-4af7-a549-2f0d767f3b46\") " pod="openstack/nova-cell1-conductor-0" Nov 23 
17:16:26 crc kubenswrapper[5050]: I1123 17:16:26.784720 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mdmhz\" (UniqueName: \"kubernetes.io/projected/fd0c3ccb-28f2-47e9-8c4b-bace00fc0b91-kube-api-access-mdmhz\") pod \"nova-metadata-0\" (UID: \"fd0c3ccb-28f2-47e9-8c4b-bace00fc0b91\") " pod="openstack/nova-metadata-0" Nov 23 17:16:26 crc kubenswrapper[5050]: I1123 17:16:26.784802 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fd0c3ccb-28f2-47e9-8c4b-bace00fc0b91-config-data\") pod \"nova-metadata-0\" (UID: \"fd0c3ccb-28f2-47e9-8c4b-bace00fc0b91\") " pod="openstack/nova-metadata-0" Nov 23 17:16:26 crc kubenswrapper[5050]: I1123 17:16:26.787389 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6563762b-f9aa-4af7-a549-2f0d767f3b46-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"6563762b-f9aa-4af7-a549-2f0d767f3b46\") " pod="openstack/nova-cell1-conductor-0" Nov 23 17:16:26 crc kubenswrapper[5050]: I1123 17:16:26.787736 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fd0c3ccb-28f2-47e9-8c4b-bace00fc0b91-logs\") pod \"nova-metadata-0\" (UID: \"fd0c3ccb-28f2-47e9-8c4b-bace00fc0b91\") " pod="openstack/nova-metadata-0" Nov 23 17:16:26 crc kubenswrapper[5050]: I1123 17:16:26.787764 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd0c3ccb-28f2-47e9-8c4b-bace00fc0b91-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"fd0c3ccb-28f2-47e9-8c4b-bace00fc0b91\") " pod="openstack/nova-metadata-0" Nov 23 17:16:26 crc kubenswrapper[5050]: I1123 17:16:26.787908 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6563762b-f9aa-4af7-a549-2f0d767f3b46-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"6563762b-f9aa-4af7-a549-2f0d767f3b46\") " pod="openstack/nova-cell1-conductor-0" Nov 23 17:16:26 crc kubenswrapper[5050]: I1123 17:16:26.793613 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6563762b-f9aa-4af7-a549-2f0d767f3b46-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"6563762b-f9aa-4af7-a549-2f0d767f3b46\") " pod="openstack/nova-cell1-conductor-0" Nov 23 17:16:26 crc kubenswrapper[5050]: I1123 17:16:26.794794 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6563762b-f9aa-4af7-a549-2f0d767f3b46-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"6563762b-f9aa-4af7-a549-2f0d767f3b46\") " pod="openstack/nova-cell1-conductor-0" Nov 23 17:16:26 crc kubenswrapper[5050]: I1123 17:16:26.806058 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lbjq5\" (UniqueName: \"kubernetes.io/projected/6563762b-f9aa-4af7-a549-2f0d767f3b46-kube-api-access-lbjq5\") pod \"nova-cell1-conductor-0\" (UID: \"6563762b-f9aa-4af7-a549-2f0d767f3b46\") " pod="openstack/nova-cell1-conductor-0" Nov 23 17:16:26 crc kubenswrapper[5050]: I1123 17:16:26.867636 5050 util.go:30] "No sandbox for pod can be found. 
Nov 23 17:16:26 crc kubenswrapper[5050]: I1123 17:16:26.893199 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fd0c3ccb-28f2-47e9-8c4b-bace00fc0b91-logs\") pod \"nova-metadata-0\" (UID: \"fd0c3ccb-28f2-47e9-8c4b-bace00fc0b91\") " pod="openstack/nova-metadata-0"
Nov 23 17:16:26 crc kubenswrapper[5050]: I1123 17:16:26.893259 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd0c3ccb-28f2-47e9-8c4b-bace00fc0b91-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"fd0c3ccb-28f2-47e9-8c4b-bace00fc0b91\") " pod="openstack/nova-metadata-0"
Nov 23 17:16:26 crc kubenswrapper[5050]: I1123 17:16:26.893387 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mdmhz\" (UniqueName: \"kubernetes.io/projected/fd0c3ccb-28f2-47e9-8c4b-bace00fc0b91-kube-api-access-mdmhz\") pod \"nova-metadata-0\" (UID: \"fd0c3ccb-28f2-47e9-8c4b-bace00fc0b91\") " pod="openstack/nova-metadata-0"
Nov 23 17:16:26 crc kubenswrapper[5050]: I1123 17:16:26.893454 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fd0c3ccb-28f2-47e9-8c4b-bace00fc0b91-config-data\") pod \"nova-metadata-0\" (UID: \"fd0c3ccb-28f2-47e9-8c4b-bace00fc0b91\") " pod="openstack/nova-metadata-0"
Nov 23 17:16:26 crc kubenswrapper[5050]: I1123 17:16:26.893778 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fd0c3ccb-28f2-47e9-8c4b-bace00fc0b91-logs\") pod \"nova-metadata-0\" (UID: \"fd0c3ccb-28f2-47e9-8c4b-bace00fc0b91\") " pod="openstack/nova-metadata-0"
Nov 23 17:16:26 crc kubenswrapper[5050]: I1123 17:16:26.898191 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fd0c3ccb-28f2-47e9-8c4b-bace00fc0b91-config-data\") pod \"nova-metadata-0\" (UID: \"fd0c3ccb-28f2-47e9-8c4b-bace00fc0b91\") " pod="openstack/nova-metadata-0"
Nov 23 17:16:26 crc kubenswrapper[5050]: I1123 17:16:26.899266 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd0c3ccb-28f2-47e9-8c4b-bace00fc0b91-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"fd0c3ccb-28f2-47e9-8c4b-bace00fc0b91\") " pod="openstack/nova-metadata-0"
Nov 23 17:16:26 crc kubenswrapper[5050]: I1123 17:16:26.915381 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mdmhz\" (UniqueName: \"kubernetes.io/projected/fd0c3ccb-28f2-47e9-8c4b-bace00fc0b91-kube-api-access-mdmhz\") pod \"nova-metadata-0\" (UID: \"fd0c3ccb-28f2-47e9-8c4b-bace00fc0b91\") " pod="openstack/nova-metadata-0"
Nov 23 17:16:27 crc kubenswrapper[5050]: I1123 17:16:27.051973 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Nov 23 17:16:27 crc kubenswrapper[5050]: I1123 17:16:27.270350 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"ddc00b7c-3525-45d6-863f-20e9e26ff02f","Type":"ContainerDied","Data":"20e00326165ce74029db632578475beab87e75703a79c2b8956f30427ba044b9"}
Nov 23 17:16:27 crc kubenswrapper[5050]: I1123 17:16:27.270405 5050 scope.go:117] "RemoveContainer" containerID="4ede08fe48e6b2bbb659c27fe3fc3562174453c3e85d985533d4a88c6f997545"
Nov 23 17:16:27 crc kubenswrapper[5050]: I1123 17:16:27.270589 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Nov 23 17:16:27 crc kubenswrapper[5050]: I1123 17:16:27.280098 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Nov 23 17:16:27 crc kubenswrapper[5050]: I1123 17:16:27.341992 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"]
Nov 23 17:16:27 crc kubenswrapper[5050]: I1123 17:16:27.365797 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"]
Nov 23 17:16:27 crc kubenswrapper[5050]: I1123 17:16:27.392467 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"]
Nov 23 17:16:27 crc kubenswrapper[5050]: I1123 17:16:27.418882 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"]
Nov 23 17:16:27 crc kubenswrapper[5050]: I1123 17:16:27.430887 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"]
Nov 23 17:16:27 crc kubenswrapper[5050]: I1123 17:16:27.432754 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Nov 23 17:16:27 crc kubenswrapper[5050]: I1123 17:16:27.435190 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data"
Nov 23 17:16:27 crc kubenswrapper[5050]: I1123 17:16:27.444028 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"]
Nov 23 17:16:27 crc kubenswrapper[5050]: I1123 17:16:27.450632 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Nov 23 17:16:27 crc kubenswrapper[5050]: I1123 17:16:27.452853 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data"
Nov 23 17:16:27 crc kubenswrapper[5050]: I1123 17:16:27.461230 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"]
Nov 23 17:16:27 crc kubenswrapper[5050]: I1123 17:16:27.475307 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"]
Nov 23 17:16:27 crc kubenswrapper[5050]: I1123 17:16:27.489007 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"]
Nov 23 17:16:27 crc kubenswrapper[5050]: W1123 17:16:27.553631 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfd0c3ccb_28f2_47e9_8c4b_bace00fc0b91.slice/crio-a03b1a84f7fa79f74265e3d9d8209d4854307510e743ab163442b0eca2c91741 WatchSource:0}: Error finding container a03b1a84f7fa79f74265e3d9d8209d4854307510e743ab163442b0eca2c91741: Status 404 returned error can't find the container with id a03b1a84f7fa79f74265e3d9d8209d4854307510e743ab163442b0eca2c91741
Nov 23 17:16:27 crc kubenswrapper[5050]: I1123 17:16:27.560951 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="183ebe7a-5aa3-4a3f-8392-bbd7e3433b15" path="/var/lib/kubelet/pods/183ebe7a-5aa3-4a3f-8392-bbd7e3433b15/volumes"
Nov 23 17:16:27 crc kubenswrapper[5050]: I1123 17:16:27.561650 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="48851bd1-4829-4f8d-ae93-a6cb3266ce68" path="/var/lib/kubelet/pods/48851bd1-4829-4f8d-ae93-a6cb3266ce68/volumes"
Nov 23 17:16:27 crc kubenswrapper[5050]: I1123 17:16:27.563065 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8098c44a-b797-4fba-8d0d-2138ca9b367a" path="/var/lib/kubelet/pods/8098c44a-b797-4fba-8d0d-2138ca9b367a/volumes"
Nov 23 17:16:27 crc kubenswrapper[5050]: I1123 17:16:27.564579 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ddc00b7c-3525-45d6-863f-20e9e26ff02f" path="/var/lib/kubelet/pods/ddc00b7c-3525-45d6-863f-20e9e26ff02f/volumes"
Nov 23 17:16:27 crc kubenswrapper[5050]: I1123 17:16:27.565371 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"]
Nov 23 17:16:27 crc kubenswrapper[5050]: I1123 17:16:27.611744 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cxtpb\" (UniqueName: \"kubernetes.io/projected/20e24247-59dd-42ad-ab7e-7347775043ad-kube-api-access-cxtpb\") pod \"nova-scheduler-0\" (UID: \"20e24247-59dd-42ad-ab7e-7347775043ad\") " pod="openstack/nova-scheduler-0"
Nov 23 17:16:27 crc kubenswrapper[5050]: I1123 17:16:27.611851 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/794b257f-ba4a-49d5-ab39-1137fbea011f-config-data\") pod \"nova-api-0\" (UID: \"794b257f-ba4a-49d5-ab39-1137fbea011f\") " pod="openstack/nova-api-0"
Nov 23 17:16:27 crc kubenswrapper[5050]: I1123 17:16:27.611891 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/20e24247-59dd-42ad-ab7e-7347775043ad-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"20e24247-59dd-42ad-ab7e-7347775043ad\") " pod="openstack/nova-scheduler-0"
Nov 23 17:16:27 crc kubenswrapper[5050]: I1123 17:16:27.611950 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lx6k2\" (UniqueName: \"kubernetes.io/projected/794b257f-ba4a-49d5-ab39-1137fbea011f-kube-api-access-lx6k2\") pod \"nova-api-0\" (UID: \"794b257f-ba4a-49d5-ab39-1137fbea011f\") " pod="openstack/nova-api-0"
Nov 23 17:16:27 crc kubenswrapper[5050]: I1123 17:16:27.612040 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/20e24247-59dd-42ad-ab7e-7347775043ad-config-data\") pod \"nova-scheduler-0\" (UID: \"20e24247-59dd-42ad-ab7e-7347775043ad\") " pod="openstack/nova-scheduler-0"
Nov 23 17:16:27 crc kubenswrapper[5050]: I1123 17:16:27.612097 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/794b257f-ba4a-49d5-ab39-1137fbea011f-logs\") pod \"nova-api-0\" (UID: \"794b257f-ba4a-49d5-ab39-1137fbea011f\") " pod="openstack/nova-api-0"
Nov 23 17:16:27 crc kubenswrapper[5050]: I1123 17:16:27.612132 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/794b257f-ba4a-49d5-ab39-1137fbea011f-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"794b257f-ba4a-49d5-ab39-1137fbea011f\") " pod="openstack/nova-api-0"
Nov 23 17:16:27 crc kubenswrapper[5050]: I1123 17:16:27.714178 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/794b257f-ba4a-49d5-ab39-1137fbea011f-logs\") pod \"nova-api-0\" (UID: \"794b257f-ba4a-49d5-ab39-1137fbea011f\") " pod="openstack/nova-api-0"
Nov 23 17:16:27 crc kubenswrapper[5050]: I1123 17:16:27.714249 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/794b257f-ba4a-49d5-ab39-1137fbea011f-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"794b257f-ba4a-49d5-ab39-1137fbea011f\") " pod="openstack/nova-api-0"
Nov 23 17:16:27 crc kubenswrapper[5050]: I1123 17:16:27.714302 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cxtpb\" (UniqueName: \"kubernetes.io/projected/20e24247-59dd-42ad-ab7e-7347775043ad-kube-api-access-cxtpb\") pod \"nova-scheduler-0\" (UID: \"20e24247-59dd-42ad-ab7e-7347775043ad\") " pod="openstack/nova-scheduler-0"
Nov 23 17:16:27 crc kubenswrapper[5050]: I1123 17:16:27.714371 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/794b257f-ba4a-49d5-ab39-1137fbea011f-config-data\") pod \"nova-api-0\" (UID: \"794b257f-ba4a-49d5-ab39-1137fbea011f\") " pod="openstack/nova-api-0"
Nov 23 17:16:27 crc kubenswrapper[5050]: I1123 17:16:27.714397 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/20e24247-59dd-42ad-ab7e-7347775043ad-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"20e24247-59dd-42ad-ab7e-7347775043ad\") " pod="openstack/nova-scheduler-0"
Nov 23 17:16:27 crc kubenswrapper[5050]: I1123 17:16:27.714462 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lx6k2\" (UniqueName: \"kubernetes.io/projected/794b257f-ba4a-49d5-ab39-1137fbea011f-kube-api-access-lx6k2\") pod \"nova-api-0\" (UID: \"794b257f-ba4a-49d5-ab39-1137fbea011f\") " pod="openstack/nova-api-0"
\"794b257f-ba4a-49d5-ab39-1137fbea011f\") " pod="openstack/nova-api-0" Nov 23 17:16:27 crc kubenswrapper[5050]: I1123 17:16:27.714497 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/20e24247-59dd-42ad-ab7e-7347775043ad-config-data\") pod \"nova-scheduler-0\" (UID: \"20e24247-59dd-42ad-ab7e-7347775043ad\") " pod="openstack/nova-scheduler-0" Nov 23 17:16:27 crc kubenswrapper[5050]: I1123 17:16:27.715431 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/794b257f-ba4a-49d5-ab39-1137fbea011f-logs\") pod \"nova-api-0\" (UID: \"794b257f-ba4a-49d5-ab39-1137fbea011f\") " pod="openstack/nova-api-0" Nov 23 17:16:27 crc kubenswrapper[5050]: I1123 17:16:27.719915 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/794b257f-ba4a-49d5-ab39-1137fbea011f-config-data\") pod \"nova-api-0\" (UID: \"794b257f-ba4a-49d5-ab39-1137fbea011f\") " pod="openstack/nova-api-0" Nov 23 17:16:27 crc kubenswrapper[5050]: I1123 17:16:27.721155 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/20e24247-59dd-42ad-ab7e-7347775043ad-config-data\") pod \"nova-scheduler-0\" (UID: \"20e24247-59dd-42ad-ab7e-7347775043ad\") " pod="openstack/nova-scheduler-0" Nov 23 17:16:27 crc kubenswrapper[5050]: I1123 17:16:27.721411 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/20e24247-59dd-42ad-ab7e-7347775043ad-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"20e24247-59dd-42ad-ab7e-7347775043ad\") " pod="openstack/nova-scheduler-0" Nov 23 17:16:27 crc kubenswrapper[5050]: I1123 17:16:27.722159 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/794b257f-ba4a-49d5-ab39-1137fbea011f-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"794b257f-ba4a-49d5-ab39-1137fbea011f\") " pod="openstack/nova-api-0" Nov 23 17:16:27 crc kubenswrapper[5050]: I1123 17:16:27.736679 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lx6k2\" (UniqueName: \"kubernetes.io/projected/794b257f-ba4a-49d5-ab39-1137fbea011f-kube-api-access-lx6k2\") pod \"nova-api-0\" (UID: \"794b257f-ba4a-49d5-ab39-1137fbea011f\") " pod="openstack/nova-api-0" Nov 23 17:16:27 crc kubenswrapper[5050]: I1123 17:16:27.740267 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cxtpb\" (UniqueName: \"kubernetes.io/projected/20e24247-59dd-42ad-ab7e-7347775043ad-kube-api-access-cxtpb\") pod \"nova-scheduler-0\" (UID: \"20e24247-59dd-42ad-ab7e-7347775043ad\") " pod="openstack/nova-scheduler-0" Nov 23 17:16:27 crc kubenswrapper[5050]: I1123 17:16:27.770671 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 23 17:16:27 crc kubenswrapper[5050]: I1123 17:16:27.782846 5050 util.go:30] "No sandbox for pod can be found. 
Nov 23 17:16:28 crc kubenswrapper[5050]: I1123 17:16:28.313703 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"fd0c3ccb-28f2-47e9-8c4b-bace00fc0b91","Type":"ContainerStarted","Data":"7566277b71226da8940d485abcfaeaf4ddfd1177cc62899309401cc5d0bf6621"}
Nov 23 17:16:28 crc kubenswrapper[5050]: I1123 17:16:28.313977 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"fd0c3ccb-28f2-47e9-8c4b-bace00fc0b91","Type":"ContainerStarted","Data":"89d02948de68d80e027dd1087e2b8dcf4e8ce317ffffaab7d33719bcc5c3c86c"}
Nov 23 17:16:28 crc kubenswrapper[5050]: I1123 17:16:28.314000 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"fd0c3ccb-28f2-47e9-8c4b-bace00fc0b91","Type":"ContainerStarted","Data":"a03b1a84f7fa79f74265e3d9d8209d4854307510e743ab163442b0eca2c91741"}
Nov 23 17:16:28 crc kubenswrapper[5050]: I1123 17:16:28.318483 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"6563762b-f9aa-4af7-a549-2f0d767f3b46","Type":"ContainerStarted","Data":"49bbed90a1cfce7f53e4cd952db3cec66714bc4f46c456fbff1bf47e7a38182c"}
Nov 23 17:16:28 crc kubenswrapper[5050]: I1123 17:16:28.318560 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"6563762b-f9aa-4af7-a549-2f0d767f3b46","Type":"ContainerStarted","Data":"bd1ba16ab46082e8d1773c8107b32e4cdb2372868b86de893d68c324d533272a"}
Nov 23 17:16:28 crc kubenswrapper[5050]: I1123 17:16:28.318701 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-conductor-0"
Nov 23 17:16:28 crc kubenswrapper[5050]: I1123 17:16:28.318778 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"]
Nov 23 17:16:28 crc kubenswrapper[5050]: W1123 17:16:28.334564 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod794b257f_ba4a_49d5_ab39_1137fbea011f.slice/crio-2fa342fbbf5f8bc8bf615dcba2a2320c8c85ba76dea84e9d796a1874f65be8c0 WatchSource:0}: Error finding container 2fa342fbbf5f8bc8bf615dcba2a2320c8c85ba76dea84e9d796a1874f65be8c0: Status 404 returned error can't find the container with id 2fa342fbbf5f8bc8bf615dcba2a2320c8c85ba76dea84e9d796a1874f65be8c0
Nov 23 17:16:28 crc kubenswrapper[5050]: I1123 17:16:28.336711 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.336693878 podStartE2EDuration="2.336693878s" podCreationTimestamp="2025-11-23 17:16:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 17:16:28.335595247 +0000 UTC m=+9283.502591742" watchObservedRunningTime="2025-11-23 17:16:28.336693878 +0000 UTC m=+9283.503690363"
Nov 23 17:16:28 crc kubenswrapper[5050]: I1123 17:16:28.361722 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-0" podStartSLOduration=2.3616590029999998 podStartE2EDuration="2.361659003s" podCreationTimestamp="2025-11-23 17:16:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 17:16:28.352431163 +0000 UTC m=+9283.519427638" watchObservedRunningTime="2025-11-23 17:16:28.361659003 +0000 UTC m=+9283.528655498"
m=+9283.528655498" Nov 23 17:16:28 crc kubenswrapper[5050]: I1123 17:16:28.436284 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 23 17:16:29 crc kubenswrapper[5050]: I1123 17:16:29.337200 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"20e24247-59dd-42ad-ab7e-7347775043ad","Type":"ContainerStarted","Data":"0b093f1f1d48295c9db84236fc007608469ce43db0b9be8e359ee7a886c2f1d4"} Nov 23 17:16:29 crc kubenswrapper[5050]: I1123 17:16:29.337614 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"20e24247-59dd-42ad-ab7e-7347775043ad","Type":"ContainerStarted","Data":"0b883b9342f9a05426e4aa7175982e71d40501d72876792d5e23ec9485644407"} Nov 23 17:16:29 crc kubenswrapper[5050]: I1123 17:16:29.343139 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"794b257f-ba4a-49d5-ab39-1137fbea011f","Type":"ContainerStarted","Data":"07e8bd483d11387092980a6d37b95f41c8bbd6e52ea42bca8cd61f591584675b"} Nov 23 17:16:29 crc kubenswrapper[5050]: I1123 17:16:29.343246 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"794b257f-ba4a-49d5-ab39-1137fbea011f","Type":"ContainerStarted","Data":"7e8f2b684104289dcef23be569d0e2236ec6c066a7b985a883205e7a90067457"} Nov 23 17:16:29 crc kubenswrapper[5050]: I1123 17:16:29.343281 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"794b257f-ba4a-49d5-ab39-1137fbea011f","Type":"ContainerStarted","Data":"2fa342fbbf5f8bc8bf615dcba2a2320c8c85ba76dea84e9d796a1874f65be8c0"} Nov 23 17:16:29 crc kubenswrapper[5050]: I1123 17:16:29.375588 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.375523408 podStartE2EDuration="2.375523408s" podCreationTimestamp="2025-11-23 17:16:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 17:16:29.366481253 +0000 UTC m=+9284.533477768" watchObservedRunningTime="2025-11-23 17:16:29.375523408 +0000 UTC m=+9284.542519923" Nov 23 17:16:29 crc kubenswrapper[5050]: I1123 17:16:29.395030 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.394997078 podStartE2EDuration="2.394997078s" podCreationTimestamp="2025-11-23 17:16:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 17:16:29.38832963 +0000 UTC m=+9284.555326155" watchObservedRunningTime="2025-11-23 17:16:29.394997078 +0000 UTC m=+9284.561993593" Nov 23 17:16:32 crc kubenswrapper[5050]: I1123 17:16:32.052632 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 23 17:16:32 crc kubenswrapper[5050]: I1123 17:16:32.053432 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 23 17:16:32 crc kubenswrapper[5050]: I1123 17:16:32.772329 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Nov 23 17:16:34 crc kubenswrapper[5050]: I1123 17:16:34.643697 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell0-conductor-0" Nov 23 17:16:35 crc kubenswrapper[5050]: I1123 17:16:35.560015 5050 scope.go:117] "RemoveContainer" 
containerID="87924a967acda0996de5f2211b02af6200dd82f1a857843afe7f7b8fe5bfdcda" Nov 23 17:16:35 crc kubenswrapper[5050]: E1123 17:16:35.560794 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 17:16:36 crc kubenswrapper[5050]: I1123 17:16:36.901896 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-conductor-0" Nov 23 17:16:37 crc kubenswrapper[5050]: I1123 17:16:37.052145 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 23 17:16:37 crc kubenswrapper[5050]: I1123 17:16:37.052214 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 23 17:16:37 crc kubenswrapper[5050]: I1123 17:16:37.772329 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Nov 23 17:16:37 crc kubenswrapper[5050]: I1123 17:16:37.784688 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 23 17:16:37 crc kubenswrapper[5050]: I1123 17:16:37.784798 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 23 17:16:37 crc kubenswrapper[5050]: I1123 17:16:37.811164 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Nov 23 17:16:38 crc kubenswrapper[5050]: I1123 17:16:38.093894 5050 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="fd0c3ccb-28f2-47e9-8c4b-bace00fc0b91" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"http://10.217.1.197:8775/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 23 17:16:38 crc kubenswrapper[5050]: I1123 17:16:38.094001 5050 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="fd0c3ccb-28f2-47e9-8c4b-bace00fc0b91" containerName="nova-metadata-log" probeResult="failure" output="Get \"http://10.217.1.197:8775/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 23 17:16:38 crc kubenswrapper[5050]: I1123 17:16:38.521363 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Nov 23 17:16:38 crc kubenswrapper[5050]: I1123 17:16:38.867911 5050 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="794b257f-ba4a-49d5-ab39-1137fbea011f" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.1.199:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 23 17:16:38 crc kubenswrapper[5050]: I1123 17:16:38.867927 5050 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="794b257f-ba4a-49d5-ab39-1137fbea011f" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.1.199:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 23 17:16:47 crc kubenswrapper[5050]: I1123 17:16:47.055813 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" 
pod="openstack/nova-metadata-0" Nov 23 17:16:47 crc kubenswrapper[5050]: I1123 17:16:47.056820 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 23 17:16:47 crc kubenswrapper[5050]: I1123 17:16:47.058948 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 23 17:16:47 crc kubenswrapper[5050]: I1123 17:16:47.060369 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 23 17:16:47 crc kubenswrapper[5050]: I1123 17:16:47.549160 5050 scope.go:117] "RemoveContainer" containerID="87924a967acda0996de5f2211b02af6200dd82f1a857843afe7f7b8fe5bfdcda" Nov 23 17:16:47 crc kubenswrapper[5050]: E1123 17:16:47.549951 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 17:16:47 crc kubenswrapper[5050]: I1123 17:16:47.789765 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 23 17:16:47 crc kubenswrapper[5050]: I1123 17:16:47.790633 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 23 17:16:47 crc kubenswrapper[5050]: I1123 17:16:47.791912 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 23 17:16:47 crc kubenswrapper[5050]: I1123 17:16:47.797049 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 23 17:16:48 crc kubenswrapper[5050]: I1123 17:16:48.636057 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 23 17:16:48 crc kubenswrapper[5050]: I1123 17:16:48.641228 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 23 17:16:49 crc kubenswrapper[5050]: I1123 17:16:49.797568 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellfnhdj"] Nov 23 17:16:49 crc kubenswrapper[5050]: I1123 17:16:49.800070 5050 util.go:30] "No sandbox for pod can be found. 
Nov 23 17:16:49 crc kubenswrapper[5050]: I1123 17:16:49.802526 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-migration-ssh-key"
Nov 23 17:16:49 crc kubenswrapper[5050]: I1123 17:16:49.803077 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"nova-cells-global-config"
Nov 23 17:16:49 crc kubenswrapper[5050]: I1123 17:16:49.803388 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret"
Nov 23 17:16:49 crc kubenswrapper[5050]: I1123 17:16:49.803789 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-compute-config"
Nov 23 17:16:49 crc kubenswrapper[5050]: I1123 17:16:49.804083 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1"
Nov 23 17:16:49 crc kubenswrapper[5050]: I1123 17:16:49.804191 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Nov 23 17:16:49 crc kubenswrapper[5050]: I1123 17:16:49.804393 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-ckrpf"
Nov 23 17:16:49 crc kubenswrapper[5050]: I1123 17:16:49.809489 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellfnhdj"]
Nov 23 17:16:49 crc kubenswrapper[5050]: I1123 17:16:49.882147 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cells-global-config-1\" (UniqueName: \"kubernetes.io/configmap/ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd-nova-cells-global-config-1\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellfnhdj\" (UID: \"ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellfnhdj"
Nov 23 17:16:49 crc kubenswrapper[5050]: I1123 17:16:49.882219 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd-nova-migration-ssh-key-1\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellfnhdj\" (UID: \"ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellfnhdj"
Nov 23 17:16:49 crc kubenswrapper[5050]: I1123 17:16:49.882279 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd-nova-migration-ssh-key-0\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellfnhdj\" (UID: \"ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellfnhdj"
Nov 23 17:16:49 crc kubenswrapper[5050]: I1123 17:16:49.882312 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd-inventory\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellfnhdj\" (UID: \"ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellfnhdj"
Nov 23 17:16:49 crc kubenswrapper[5050]: I1123 17:16:49.882523 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cells-global-config-0\" (UniqueName: \"kubernetes.io/configmap/ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd-nova-cells-global-config-0\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellfnhdj\" (UID: \"ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellfnhdj"
Nov 23 17:16:49 crc kubenswrapper[5050]: I1123 17:16:49.882633 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd-nova-cell1-compute-config-1\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellfnhdj\" (UID: \"ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellfnhdj"
Nov 23 17:16:49 crc kubenswrapper[5050]: I1123 17:16:49.882807 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd-nova-cell1-compute-config-0\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellfnhdj\" (UID: \"ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellfnhdj"
Nov 23 17:16:49 crc kubenswrapper[5050]: I1123 17:16:49.882900 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd-nova-cell1-combined-ca-bundle\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellfnhdj\" (UID: \"ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellfnhdj"
Nov 23 17:16:49 crc kubenswrapper[5050]: I1123 17:16:49.883086 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd-ssh-key\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellfnhdj\" (UID: \"ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellfnhdj"
Nov 23 17:16:49 crc kubenswrapper[5050]: I1123 17:16:49.883229 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd-ceph\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellfnhdj\" (UID: \"ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellfnhdj"
Nov 23 17:16:49 crc kubenswrapper[5050]: I1123 17:16:49.883252 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dnmjm\" (UniqueName: \"kubernetes.io/projected/ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd-kube-api-access-dnmjm\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellfnhdj\" (UID: \"ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellfnhdj"
Nov 23 17:16:49 crc kubenswrapper[5050]: I1123 17:16:49.984210 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd-nova-cell1-compute-config-0\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellfnhdj\" (UID: \"ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellfnhdj"
\"kubernetes.io/secret/ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd-nova-cell1-compute-config-0\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellfnhdj\" (UID: \"ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellfnhdj" Nov 23 17:16:49 crc kubenswrapper[5050]: I1123 17:16:49.984286 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd-nova-cell1-combined-ca-bundle\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellfnhdj\" (UID: \"ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellfnhdj" Nov 23 17:16:49 crc kubenswrapper[5050]: I1123 17:16:49.984358 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd-ssh-key\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellfnhdj\" (UID: \"ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellfnhdj" Nov 23 17:16:49 crc kubenswrapper[5050]: I1123 17:16:49.984407 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd-ceph\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellfnhdj\" (UID: \"ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellfnhdj" Nov 23 17:16:49 crc kubenswrapper[5050]: I1123 17:16:49.984424 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dnmjm\" (UniqueName: \"kubernetes.io/projected/ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd-kube-api-access-dnmjm\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellfnhdj\" (UID: \"ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellfnhdj" Nov 23 17:16:49 crc kubenswrapper[5050]: I1123 17:16:49.984480 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cells-global-config-1\" (UniqueName: \"kubernetes.io/configmap/ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd-nova-cells-global-config-1\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellfnhdj\" (UID: \"ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellfnhdj" Nov 23 17:16:49 crc kubenswrapper[5050]: I1123 17:16:49.984509 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd-nova-migration-ssh-key-1\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellfnhdj\" (UID: \"ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellfnhdj" Nov 23 17:16:49 crc kubenswrapper[5050]: I1123 17:16:49.984551 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd-nova-migration-ssh-key-0\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellfnhdj\" (UID: \"ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd\") " 
pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellfnhdj" Nov 23 17:16:49 crc kubenswrapper[5050]: I1123 17:16:49.984582 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd-inventory\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellfnhdj\" (UID: \"ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellfnhdj" Nov 23 17:16:49 crc kubenswrapper[5050]: I1123 17:16:49.984626 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cells-global-config-0\" (UniqueName: \"kubernetes.io/configmap/ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd-nova-cells-global-config-0\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellfnhdj\" (UID: \"ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellfnhdj" Nov 23 17:16:49 crc kubenswrapper[5050]: I1123 17:16:49.984660 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd-nova-cell1-compute-config-1\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellfnhdj\" (UID: \"ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellfnhdj" Nov 23 17:16:49 crc kubenswrapper[5050]: I1123 17:16:49.986779 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cells-global-config-1\" (UniqueName: \"kubernetes.io/configmap/ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd-nova-cells-global-config-1\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellfnhdj\" (UID: \"ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellfnhdj" Nov 23 17:16:49 crc kubenswrapper[5050]: I1123 17:16:49.987124 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cells-global-config-0\" (UniqueName: \"kubernetes.io/configmap/ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd-nova-cells-global-config-0\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellfnhdj\" (UID: \"ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellfnhdj" Nov 23 17:16:49 crc kubenswrapper[5050]: I1123 17:16:49.992869 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd-nova-cell1-combined-ca-bundle\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellfnhdj\" (UID: \"ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellfnhdj" Nov 23 17:16:49 crc kubenswrapper[5050]: I1123 17:16:49.993137 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd-ceph\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellfnhdj\" (UID: \"ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellfnhdj" Nov 23 17:16:49 crc kubenswrapper[5050]: I1123 17:16:49.993620 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-0\" (UniqueName: 
\"kubernetes.io/secret/ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd-nova-cell1-compute-config-0\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellfnhdj\" (UID: \"ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellfnhdj" Nov 23 17:16:49 crc kubenswrapper[5050]: I1123 17:16:49.994024 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd-ssh-key\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellfnhdj\" (UID: \"ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellfnhdj" Nov 23 17:16:49 crc kubenswrapper[5050]: I1123 17:16:49.994236 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd-nova-migration-ssh-key-0\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellfnhdj\" (UID: \"ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellfnhdj" Nov 23 17:16:50 crc kubenswrapper[5050]: I1123 17:16:49.995381 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd-nova-migration-ssh-key-1\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellfnhdj\" (UID: \"ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellfnhdj" Nov 23 17:16:50 crc kubenswrapper[5050]: I1123 17:16:49.998406 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd-nova-cell1-compute-config-1\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellfnhdj\" (UID: \"ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellfnhdj" Nov 23 17:16:50 crc kubenswrapper[5050]: I1123 17:16:50.002717 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd-inventory\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellfnhdj\" (UID: \"ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellfnhdj" Nov 23 17:16:50 crc kubenswrapper[5050]: I1123 17:16:50.007797 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dnmjm\" (UniqueName: \"kubernetes.io/projected/ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd-kube-api-access-dnmjm\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellfnhdj\" (UID: \"ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellfnhdj" Nov 23 17:16:50 crc kubenswrapper[5050]: I1123 17:16:50.121679 5050 util.go:30] "No sandbox for pod can be found. 
Nov 23 17:16:50 crc kubenswrapper[5050]: I1123 17:16:50.775426 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellfnhdj"]
Nov 23 17:16:50 crc kubenswrapper[5050]: W1123 17:16:50.786253 5050 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podef9265cb_a8fa_4d93_9cb5_a7a94f62fcdd.slice/crio-466e5e5d8deac463f87527bd75f3bf8f52b4120e10086b03fce0746f689a3c4d WatchSource:0}: Error finding container 466e5e5d8deac463f87527bd75f3bf8f52b4120e10086b03fce0746f689a3c4d: Status 404 returned error can't find the container with id 466e5e5d8deac463f87527bd75f3bf8f52b4120e10086b03fce0746f689a3c4d
Nov 23 17:16:50 crc kubenswrapper[5050]: I1123 17:16:50.798329 5050 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Nov 23 17:16:51 crc kubenswrapper[5050]: I1123 17:16:51.680428 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellfnhdj" event={"ID":"ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd","Type":"ContainerStarted","Data":"466e5e5d8deac463f87527bd75f3bf8f52b4120e10086b03fce0746f689a3c4d"}
Nov 23 17:16:52 crc kubenswrapper[5050]: I1123 17:16:52.697198 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellfnhdj" event={"ID":"ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd","Type":"ContainerStarted","Data":"c7be81ef0fcae3ddf0cde77f5bfc68578cbc26552745b96d9c66f73c15f9152e"}
Nov 23 17:16:52 crc kubenswrapper[5050]: I1123 17:16:52.731264 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellfnhdj" podStartSLOduration=3.126880018 podStartE2EDuration="3.731234665s" podCreationTimestamp="2025-11-23 17:16:49 +0000 UTC" firstStartedPulling="2025-11-23 17:16:50.797980561 +0000 UTC m=+9305.964977056" lastFinishedPulling="2025-11-23 17:16:51.402335198 +0000 UTC m=+9306.569331703" observedRunningTime="2025-11-23 17:16:52.723041434 +0000 UTC m=+9307.890037929" watchObservedRunningTime="2025-11-23 17:16:52.731234665 +0000 UTC m=+9307.898231160"
Nov 23 17:16:58 crc kubenswrapper[5050]: I1123 17:16:58.549304 5050 scope.go:117] "RemoveContainer" containerID="87924a967acda0996de5f2211b02af6200dd82f1a857843afe7f7b8fe5bfdcda"
Nov 23 17:16:58 crc kubenswrapper[5050]: E1123 17:16:58.550140 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f"
Nov 23 17:17:12 crc kubenswrapper[5050]: I1123 17:17:12.549286 5050 scope.go:117] "RemoveContainer" containerID="87924a967acda0996de5f2211b02af6200dd82f1a857843afe7f7b8fe5bfdcda"
Nov 23 17:17:12 crc kubenswrapper[5050]: I1123 17:17:12.986697 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" event={"ID":"1d998909-9470-47ef-87e8-b34f0473682f","Type":"ContainerStarted","Data":"9236198824a90101986e5e90e6d261e3cd9c806e7f49444e40d6e87a2229720f"}
event={"ID":"1d998909-9470-47ef-87e8-b34f0473682f","Type":"ContainerStarted","Data":"9236198824a90101986e5e90e6d261e3cd9c806e7f49444e40d6e87a2229720f"} Nov 23 17:19:29 crc kubenswrapper[5050]: I1123 17:19:29.224679 5050 patch_prober.go:28] interesting pod/machine-config-daemon-hlrlq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 23 17:19:29 crc kubenswrapper[5050]: I1123 17:19:29.227070 5050 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 23 17:19:46 crc kubenswrapper[5050]: I1123 17:19:46.290326 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-qbgn9"] Nov 23 17:19:46 crc kubenswrapper[5050]: I1123 17:19:46.294311 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-qbgn9" Nov 23 17:19:46 crc kubenswrapper[5050]: I1123 17:19:46.339041 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-qbgn9"] Nov 23 17:19:46 crc kubenswrapper[5050]: I1123 17:19:46.394243 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/38440237-e101-41de-bc9f-d3a5b7985824-catalog-content\") pod \"redhat-operators-qbgn9\" (UID: \"38440237-e101-41de-bc9f-d3a5b7985824\") " pod="openshift-marketplace/redhat-operators-qbgn9" Nov 23 17:19:46 crc kubenswrapper[5050]: I1123 17:19:46.394371 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/38440237-e101-41de-bc9f-d3a5b7985824-utilities\") pod \"redhat-operators-qbgn9\" (UID: \"38440237-e101-41de-bc9f-d3a5b7985824\") " pod="openshift-marketplace/redhat-operators-qbgn9" Nov 23 17:19:46 crc kubenswrapper[5050]: I1123 17:19:46.394542 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lr2bt\" (UniqueName: \"kubernetes.io/projected/38440237-e101-41de-bc9f-d3a5b7985824-kube-api-access-lr2bt\") pod \"redhat-operators-qbgn9\" (UID: \"38440237-e101-41de-bc9f-d3a5b7985824\") " pod="openshift-marketplace/redhat-operators-qbgn9" Nov 23 17:19:46 crc kubenswrapper[5050]: I1123 17:19:46.496799 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lr2bt\" (UniqueName: \"kubernetes.io/projected/38440237-e101-41de-bc9f-d3a5b7985824-kube-api-access-lr2bt\") pod \"redhat-operators-qbgn9\" (UID: \"38440237-e101-41de-bc9f-d3a5b7985824\") " pod="openshift-marketplace/redhat-operators-qbgn9" Nov 23 17:19:46 crc kubenswrapper[5050]: I1123 17:19:46.496916 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/38440237-e101-41de-bc9f-d3a5b7985824-catalog-content\") pod \"redhat-operators-qbgn9\" (UID: \"38440237-e101-41de-bc9f-d3a5b7985824\") " pod="openshift-marketplace/redhat-operators-qbgn9" Nov 23 17:19:46 crc kubenswrapper[5050]: I1123 17:19:46.496980 5050 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/38440237-e101-41de-bc9f-d3a5b7985824-utilities\") pod \"redhat-operators-qbgn9\" (UID: \"38440237-e101-41de-bc9f-d3a5b7985824\") " pod="openshift-marketplace/redhat-operators-qbgn9" Nov 23 17:19:46 crc kubenswrapper[5050]: I1123 17:19:46.497568 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/38440237-e101-41de-bc9f-d3a5b7985824-utilities\") pod \"redhat-operators-qbgn9\" (UID: \"38440237-e101-41de-bc9f-d3a5b7985824\") " pod="openshift-marketplace/redhat-operators-qbgn9" Nov 23 17:19:46 crc kubenswrapper[5050]: I1123 17:19:46.498214 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/38440237-e101-41de-bc9f-d3a5b7985824-catalog-content\") pod \"redhat-operators-qbgn9\" (UID: \"38440237-e101-41de-bc9f-d3a5b7985824\") " pod="openshift-marketplace/redhat-operators-qbgn9" Nov 23 17:19:46 crc kubenswrapper[5050]: I1123 17:19:46.526376 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lr2bt\" (UniqueName: \"kubernetes.io/projected/38440237-e101-41de-bc9f-d3a5b7985824-kube-api-access-lr2bt\") pod \"redhat-operators-qbgn9\" (UID: \"38440237-e101-41de-bc9f-d3a5b7985824\") " pod="openshift-marketplace/redhat-operators-qbgn9" Nov 23 17:19:46 crc kubenswrapper[5050]: I1123 17:19:46.656092 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-qbgn9" Nov 23 17:19:47 crc kubenswrapper[5050]: I1123 17:19:47.222276 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-qbgn9"] Nov 23 17:19:47 crc kubenswrapper[5050]: I1123 17:19:47.270387 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qbgn9" event={"ID":"38440237-e101-41de-bc9f-d3a5b7985824","Type":"ContainerStarted","Data":"0a23d26a79672ce7c349242c9ca170650b3f1e565ab2f17992f67d392c650a45"} Nov 23 17:19:48 crc kubenswrapper[5050]: I1123 17:19:48.289024 5050 generic.go:334] "Generic (PLEG): container finished" podID="38440237-e101-41de-bc9f-d3a5b7985824" containerID="2d35920776ddbac29ff663caa3a7cb7926d957681528c6da310893a3b00f9bce" exitCode=0 Nov 23 17:19:48 crc kubenswrapper[5050]: I1123 17:19:48.289638 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qbgn9" event={"ID":"38440237-e101-41de-bc9f-d3a5b7985824","Type":"ContainerDied","Data":"2d35920776ddbac29ff663caa3a7cb7926d957681528c6da310893a3b00f9bce"} Nov 23 17:19:49 crc kubenswrapper[5050]: I1123 17:19:49.304793 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qbgn9" event={"ID":"38440237-e101-41de-bc9f-d3a5b7985824","Type":"ContainerStarted","Data":"3d0e06971fa111745a4f320a4a517f0176e06121481023ed14f814cf85689f88"} Nov 23 17:19:54 crc kubenswrapper[5050]: I1123 17:19:54.388255 5050 generic.go:334] "Generic (PLEG): container finished" podID="38440237-e101-41de-bc9f-d3a5b7985824" containerID="3d0e06971fa111745a4f320a4a517f0176e06121481023ed14f814cf85689f88" exitCode=0 Nov 23 17:19:54 crc kubenswrapper[5050]: I1123 17:19:54.388359 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qbgn9" 
event={"ID":"38440237-e101-41de-bc9f-d3a5b7985824","Type":"ContainerDied","Data":"3d0e06971fa111745a4f320a4a517f0176e06121481023ed14f814cf85689f88"} Nov 23 17:19:55 crc kubenswrapper[5050]: I1123 17:19:55.407277 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qbgn9" event={"ID":"38440237-e101-41de-bc9f-d3a5b7985824","Type":"ContainerStarted","Data":"ee93efd589268a027874018f28afbd4dc6a27677fcf935c55e4640cb6382fd4f"} Nov 23 17:19:55 crc kubenswrapper[5050]: I1123 17:19:55.437175 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-qbgn9" podStartSLOduration=2.775108441 podStartE2EDuration="9.437150629s" podCreationTimestamp="2025-11-23 17:19:46 +0000 UTC" firstStartedPulling="2025-11-23 17:19:48.296977017 +0000 UTC m=+9483.463973502" lastFinishedPulling="2025-11-23 17:19:54.959019165 +0000 UTC m=+9490.126015690" observedRunningTime="2025-11-23 17:19:55.429872134 +0000 UTC m=+9490.596868649" watchObservedRunningTime="2025-11-23 17:19:55.437150629 +0000 UTC m=+9490.604147114" Nov 23 17:19:56 crc kubenswrapper[5050]: I1123 17:19:56.656204 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-qbgn9" Nov 23 17:19:56 crc kubenswrapper[5050]: I1123 17:19:56.657290 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-qbgn9" Nov 23 17:19:57 crc kubenswrapper[5050]: I1123 17:19:57.729525 5050 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-qbgn9" podUID="38440237-e101-41de-bc9f-d3a5b7985824" containerName="registry-server" probeResult="failure" output=< Nov 23 17:19:57 crc kubenswrapper[5050]: timeout: failed to connect service ":50051" within 1s Nov 23 17:19:57 crc kubenswrapper[5050]: > Nov 23 17:19:59 crc kubenswrapper[5050]: I1123 17:19:59.223978 5050 patch_prober.go:28] interesting pod/machine-config-daemon-hlrlq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 23 17:19:59 crc kubenswrapper[5050]: I1123 17:19:59.224492 5050 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 23 17:20:06 crc kubenswrapper[5050]: I1123 17:20:06.728035 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-qbgn9" Nov 23 17:20:06 crc kubenswrapper[5050]: I1123 17:20:06.805604 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-qbgn9" Nov 23 17:20:06 crc kubenswrapper[5050]: I1123 17:20:06.978053 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-qbgn9"] Nov 23 17:20:08 crc kubenswrapper[5050]: I1123 17:20:08.628076 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-qbgn9" podUID="38440237-e101-41de-bc9f-d3a5b7985824" containerName="registry-server" containerID="cri-o://ee93efd589268a027874018f28afbd4dc6a27677fcf935c55e4640cb6382fd4f" gracePeriod=2 
Nov 23 17:20:09 crc kubenswrapper[5050]: I1123 17:20:09.188191 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-qbgn9" Nov 23 17:20:09 crc kubenswrapper[5050]: I1123 17:20:09.286883 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lr2bt\" (UniqueName: \"kubernetes.io/projected/38440237-e101-41de-bc9f-d3a5b7985824-kube-api-access-lr2bt\") pod \"38440237-e101-41de-bc9f-d3a5b7985824\" (UID: \"38440237-e101-41de-bc9f-d3a5b7985824\") " Nov 23 17:20:09 crc kubenswrapper[5050]: I1123 17:20:09.287282 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/38440237-e101-41de-bc9f-d3a5b7985824-utilities\") pod \"38440237-e101-41de-bc9f-d3a5b7985824\" (UID: \"38440237-e101-41de-bc9f-d3a5b7985824\") " Nov 23 17:20:09 crc kubenswrapper[5050]: I1123 17:20:09.287321 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/38440237-e101-41de-bc9f-d3a5b7985824-catalog-content\") pod \"38440237-e101-41de-bc9f-d3a5b7985824\" (UID: \"38440237-e101-41de-bc9f-d3a5b7985824\") " Nov 23 17:20:09 crc kubenswrapper[5050]: I1123 17:20:09.288267 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/38440237-e101-41de-bc9f-d3a5b7985824-utilities" (OuterVolumeSpecName: "utilities") pod "38440237-e101-41de-bc9f-d3a5b7985824" (UID: "38440237-e101-41de-bc9f-d3a5b7985824"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 17:20:09 crc kubenswrapper[5050]: I1123 17:20:09.296927 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/38440237-e101-41de-bc9f-d3a5b7985824-kube-api-access-lr2bt" (OuterVolumeSpecName: "kube-api-access-lr2bt") pod "38440237-e101-41de-bc9f-d3a5b7985824" (UID: "38440237-e101-41de-bc9f-d3a5b7985824"). InnerVolumeSpecName "kube-api-access-lr2bt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 17:20:09 crc kubenswrapper[5050]: I1123 17:20:09.390225 5050 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/38440237-e101-41de-bc9f-d3a5b7985824-utilities\") on node \"crc\" DevicePath \"\"" Nov 23 17:20:09 crc kubenswrapper[5050]: I1123 17:20:09.390262 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lr2bt\" (UniqueName: \"kubernetes.io/projected/38440237-e101-41de-bc9f-d3a5b7985824-kube-api-access-lr2bt\") on node \"crc\" DevicePath \"\"" Nov 23 17:20:09 crc kubenswrapper[5050]: I1123 17:20:09.395410 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/38440237-e101-41de-bc9f-d3a5b7985824-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "38440237-e101-41de-bc9f-d3a5b7985824" (UID: "38440237-e101-41de-bc9f-d3a5b7985824"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 17:20:09 crc kubenswrapper[5050]: I1123 17:20:09.492362 5050 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/38440237-e101-41de-bc9f-d3a5b7985824-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 23 17:20:09 crc kubenswrapper[5050]: I1123 17:20:09.637786 5050 generic.go:334] "Generic (PLEG): container finished" podID="38440237-e101-41de-bc9f-d3a5b7985824" containerID="ee93efd589268a027874018f28afbd4dc6a27677fcf935c55e4640cb6382fd4f" exitCode=0 Nov 23 17:20:09 crc kubenswrapper[5050]: I1123 17:20:09.637966 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qbgn9" event={"ID":"38440237-e101-41de-bc9f-d3a5b7985824","Type":"ContainerDied","Data":"ee93efd589268a027874018f28afbd4dc6a27677fcf935c55e4640cb6382fd4f"} Nov 23 17:20:09 crc kubenswrapper[5050]: I1123 17:20:09.638482 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qbgn9" event={"ID":"38440237-e101-41de-bc9f-d3a5b7985824","Type":"ContainerDied","Data":"0a23d26a79672ce7c349242c9ca170650b3f1e565ab2f17992f67d392c650a45"} Nov 23 17:20:09 crc kubenswrapper[5050]: I1123 17:20:09.638529 5050 scope.go:117] "RemoveContainer" containerID="ee93efd589268a027874018f28afbd4dc6a27677fcf935c55e4640cb6382fd4f" Nov 23 17:20:09 crc kubenswrapper[5050]: I1123 17:20:09.638150 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-qbgn9" Nov 23 17:20:09 crc kubenswrapper[5050]: I1123 17:20:09.684696 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-qbgn9"] Nov 23 17:20:09 crc kubenswrapper[5050]: I1123 17:20:09.697727 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-qbgn9"] Nov 23 17:20:09 crc kubenswrapper[5050]: I1123 17:20:09.703528 5050 scope.go:117] "RemoveContainer" containerID="3d0e06971fa111745a4f320a4a517f0176e06121481023ed14f814cf85689f88" Nov 23 17:20:09 crc kubenswrapper[5050]: I1123 17:20:09.741539 5050 scope.go:117] "RemoveContainer" containerID="2d35920776ddbac29ff663caa3a7cb7926d957681528c6da310893a3b00f9bce" Nov 23 17:20:09 crc kubenswrapper[5050]: I1123 17:20:09.801902 5050 scope.go:117] "RemoveContainer" containerID="ee93efd589268a027874018f28afbd4dc6a27677fcf935c55e4640cb6382fd4f" Nov 23 17:20:09 crc kubenswrapper[5050]: E1123 17:20:09.802557 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ee93efd589268a027874018f28afbd4dc6a27677fcf935c55e4640cb6382fd4f\": container with ID starting with ee93efd589268a027874018f28afbd4dc6a27677fcf935c55e4640cb6382fd4f not found: ID does not exist" containerID="ee93efd589268a027874018f28afbd4dc6a27677fcf935c55e4640cb6382fd4f" Nov 23 17:20:09 crc kubenswrapper[5050]: I1123 17:20:09.802683 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ee93efd589268a027874018f28afbd4dc6a27677fcf935c55e4640cb6382fd4f"} err="failed to get container status \"ee93efd589268a027874018f28afbd4dc6a27677fcf935c55e4640cb6382fd4f\": rpc error: code = NotFound desc = could not find container \"ee93efd589268a027874018f28afbd4dc6a27677fcf935c55e4640cb6382fd4f\": container with ID starting with ee93efd589268a027874018f28afbd4dc6a27677fcf935c55e4640cb6382fd4f not found: ID does not exist" Nov 23 17:20:09 crc 
kubenswrapper[5050]: I1123 17:20:09.802787 5050 scope.go:117] "RemoveContainer" containerID="3d0e06971fa111745a4f320a4a517f0176e06121481023ed14f814cf85689f88" Nov 23 17:20:09 crc kubenswrapper[5050]: E1123 17:20:09.803231 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3d0e06971fa111745a4f320a4a517f0176e06121481023ed14f814cf85689f88\": container with ID starting with 3d0e06971fa111745a4f320a4a517f0176e06121481023ed14f814cf85689f88 not found: ID does not exist" containerID="3d0e06971fa111745a4f320a4a517f0176e06121481023ed14f814cf85689f88" Nov 23 17:20:09 crc kubenswrapper[5050]: I1123 17:20:09.803308 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3d0e06971fa111745a4f320a4a517f0176e06121481023ed14f814cf85689f88"} err="failed to get container status \"3d0e06971fa111745a4f320a4a517f0176e06121481023ed14f814cf85689f88\": rpc error: code = NotFound desc = could not find container \"3d0e06971fa111745a4f320a4a517f0176e06121481023ed14f814cf85689f88\": container with ID starting with 3d0e06971fa111745a4f320a4a517f0176e06121481023ed14f814cf85689f88 not found: ID does not exist" Nov 23 17:20:09 crc kubenswrapper[5050]: I1123 17:20:09.803335 5050 scope.go:117] "RemoveContainer" containerID="2d35920776ddbac29ff663caa3a7cb7926d957681528c6da310893a3b00f9bce" Nov 23 17:20:09 crc kubenswrapper[5050]: E1123 17:20:09.803706 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2d35920776ddbac29ff663caa3a7cb7926d957681528c6da310893a3b00f9bce\": container with ID starting with 2d35920776ddbac29ff663caa3a7cb7926d957681528c6da310893a3b00f9bce not found: ID does not exist" containerID="2d35920776ddbac29ff663caa3a7cb7926d957681528c6da310893a3b00f9bce" Nov 23 17:20:09 crc kubenswrapper[5050]: I1123 17:20:09.803799 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2d35920776ddbac29ff663caa3a7cb7926d957681528c6da310893a3b00f9bce"} err="failed to get container status \"2d35920776ddbac29ff663caa3a7cb7926d957681528c6da310893a3b00f9bce\": rpc error: code = NotFound desc = could not find container \"2d35920776ddbac29ff663caa3a7cb7926d957681528c6da310893a3b00f9bce\": container with ID starting with 2d35920776ddbac29ff663caa3a7cb7926d957681528c6da310893a3b00f9bce not found: ID does not exist" Nov 23 17:20:11 crc kubenswrapper[5050]: I1123 17:20:11.586303 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="38440237-e101-41de-bc9f-d3a5b7985824" path="/var/lib/kubelet/pods/38440237-e101-41de-bc9f-d3a5b7985824/volumes" Nov 23 17:20:29 crc kubenswrapper[5050]: I1123 17:20:29.224016 5050 patch_prober.go:28] interesting pod/machine-config-daemon-hlrlq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 23 17:20:29 crc kubenswrapper[5050]: I1123 17:20:29.224996 5050 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 23 17:20:29 crc kubenswrapper[5050]: I1123 17:20:29.225055 5050 kubelet.go:2542] "SyncLoop (probe)" 
probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" Nov 23 17:20:29 crc kubenswrapper[5050]: I1123 17:20:29.226300 5050 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"9236198824a90101986e5e90e6d261e3cd9c806e7f49444e40d6e87a2229720f"} pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 23 17:20:29 crc kubenswrapper[5050]: I1123 17:20:29.226373 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" containerID="cri-o://9236198824a90101986e5e90e6d261e3cd9c806e7f49444e40d6e87a2229720f" gracePeriod=600 Nov 23 17:20:29 crc kubenswrapper[5050]: E1123 17:20:29.443323 5050 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1d998909_9470_47ef_87e8_b34f0473682f.slice/crio-9236198824a90101986e5e90e6d261e3cd9c806e7f49444e40d6e87a2229720f.scope\": RecentStats: unable to find data in memory cache]" Nov 23 17:20:29 crc kubenswrapper[5050]: I1123 17:20:29.947409 5050 generic.go:334] "Generic (PLEG): container finished" podID="1d998909-9470-47ef-87e8-b34f0473682f" containerID="9236198824a90101986e5e90e6d261e3cd9c806e7f49444e40d6e87a2229720f" exitCode=0 Nov 23 17:20:29 crc kubenswrapper[5050]: I1123 17:20:29.947574 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" event={"ID":"1d998909-9470-47ef-87e8-b34f0473682f","Type":"ContainerDied","Data":"9236198824a90101986e5e90e6d261e3cd9c806e7f49444e40d6e87a2229720f"} Nov 23 17:20:29 crc kubenswrapper[5050]: I1123 17:20:29.947633 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" event={"ID":"1d998909-9470-47ef-87e8-b34f0473682f","Type":"ContainerStarted","Data":"2bfde5e08e12dfda92d97543906fc7b88ff567a0a0bd2d168349eaebb48d00e3"} Nov 23 17:20:29 crc kubenswrapper[5050]: I1123 17:20:29.947670 5050 scope.go:117] "RemoveContainer" containerID="87924a967acda0996de5f2211b02af6200dd82f1a857843afe7f7b8fe5bfdcda" Nov 23 17:21:55 crc kubenswrapper[5050]: I1123 17:21:55.592343 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-qb45w"] Nov 23 17:21:55 crc kubenswrapper[5050]: E1123 17:21:55.593607 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="38440237-e101-41de-bc9f-d3a5b7985824" containerName="extract-utilities" Nov 23 17:21:55 crc kubenswrapper[5050]: I1123 17:21:55.593622 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="38440237-e101-41de-bc9f-d3a5b7985824" containerName="extract-utilities" Nov 23 17:21:55 crc kubenswrapper[5050]: E1123 17:21:55.593664 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="38440237-e101-41de-bc9f-d3a5b7985824" containerName="registry-server" Nov 23 17:21:55 crc kubenswrapper[5050]: I1123 17:21:55.593670 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="38440237-e101-41de-bc9f-d3a5b7985824" containerName="registry-server" Nov 23 17:21:55 crc kubenswrapper[5050]: E1123 17:21:55.593692 5050 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="38440237-e101-41de-bc9f-d3a5b7985824" containerName="extract-content" Nov 23 17:21:55 crc kubenswrapper[5050]: I1123 17:21:55.593698 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="38440237-e101-41de-bc9f-d3a5b7985824" containerName="extract-content" Nov 23 17:21:55 crc kubenswrapper[5050]: I1123 17:21:55.594039 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="38440237-e101-41de-bc9f-d3a5b7985824" containerName="registry-server" Nov 23 17:21:55 crc kubenswrapper[5050]: I1123 17:21:55.596273 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-qb45w" Nov 23 17:21:55 crc kubenswrapper[5050]: I1123 17:21:55.612399 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-qb45w"] Nov 23 17:21:55 crc kubenswrapper[5050]: I1123 17:21:55.766025 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8ed7ec33-3227-4a3c-962e-2c471b16d3fe-utilities\") pod \"community-operators-qb45w\" (UID: \"8ed7ec33-3227-4a3c-962e-2c471b16d3fe\") " pod="openshift-marketplace/community-operators-qb45w" Nov 23 17:21:55 crc kubenswrapper[5050]: I1123 17:21:55.766106 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8ed7ec33-3227-4a3c-962e-2c471b16d3fe-catalog-content\") pod \"community-operators-qb45w\" (UID: \"8ed7ec33-3227-4a3c-962e-2c471b16d3fe\") " pod="openshift-marketplace/community-operators-qb45w" Nov 23 17:21:55 crc kubenswrapper[5050]: I1123 17:21:55.766161 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jrk2c\" (UniqueName: \"kubernetes.io/projected/8ed7ec33-3227-4a3c-962e-2c471b16d3fe-kube-api-access-jrk2c\") pod \"community-operators-qb45w\" (UID: \"8ed7ec33-3227-4a3c-962e-2c471b16d3fe\") " pod="openshift-marketplace/community-operators-qb45w" Nov 23 17:21:55 crc kubenswrapper[5050]: I1123 17:21:55.868327 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8ed7ec33-3227-4a3c-962e-2c471b16d3fe-utilities\") pod \"community-operators-qb45w\" (UID: \"8ed7ec33-3227-4a3c-962e-2c471b16d3fe\") " pod="openshift-marketplace/community-operators-qb45w" Nov 23 17:21:55 crc kubenswrapper[5050]: I1123 17:21:55.868813 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8ed7ec33-3227-4a3c-962e-2c471b16d3fe-catalog-content\") pod \"community-operators-qb45w\" (UID: \"8ed7ec33-3227-4a3c-962e-2c471b16d3fe\") " pod="openshift-marketplace/community-operators-qb45w" Nov 23 17:21:55 crc kubenswrapper[5050]: I1123 17:21:55.868993 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jrk2c\" (UniqueName: \"kubernetes.io/projected/8ed7ec33-3227-4a3c-962e-2c471b16d3fe-kube-api-access-jrk2c\") pod \"community-operators-qb45w\" (UID: \"8ed7ec33-3227-4a3c-962e-2c471b16d3fe\") " pod="openshift-marketplace/community-operators-qb45w" Nov 23 17:21:55 crc kubenswrapper[5050]: I1123 17:21:55.869030 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8ed7ec33-3227-4a3c-962e-2c471b16d3fe-utilities\") pod \"community-operators-qb45w\" 
(UID: \"8ed7ec33-3227-4a3c-962e-2c471b16d3fe\") " pod="openshift-marketplace/community-operators-qb45w" Nov 23 17:21:55 crc kubenswrapper[5050]: I1123 17:21:55.869419 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8ed7ec33-3227-4a3c-962e-2c471b16d3fe-catalog-content\") pod \"community-operators-qb45w\" (UID: \"8ed7ec33-3227-4a3c-962e-2c471b16d3fe\") " pod="openshift-marketplace/community-operators-qb45w" Nov 23 17:21:55 crc kubenswrapper[5050]: I1123 17:21:55.889335 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jrk2c\" (UniqueName: \"kubernetes.io/projected/8ed7ec33-3227-4a3c-962e-2c471b16d3fe-kube-api-access-jrk2c\") pod \"community-operators-qb45w\" (UID: \"8ed7ec33-3227-4a3c-962e-2c471b16d3fe\") " pod="openshift-marketplace/community-operators-qb45w" Nov 23 17:21:55 crc kubenswrapper[5050]: I1123 17:21:55.928809 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-qb45w" Nov 23 17:21:56 crc kubenswrapper[5050]: I1123 17:21:56.529243 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-qb45w"] Nov 23 17:21:57 crc kubenswrapper[5050]: I1123 17:21:57.316659 5050 generic.go:334] "Generic (PLEG): container finished" podID="8ed7ec33-3227-4a3c-962e-2c471b16d3fe" containerID="971d6096158f7f9f168f02fe49dd641a04f990ffd10d179c161dd8d8d99c4f3a" exitCode=0 Nov 23 17:21:57 crc kubenswrapper[5050]: I1123 17:21:57.316727 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qb45w" event={"ID":"8ed7ec33-3227-4a3c-962e-2c471b16d3fe","Type":"ContainerDied","Data":"971d6096158f7f9f168f02fe49dd641a04f990ffd10d179c161dd8d8d99c4f3a"} Nov 23 17:21:57 crc kubenswrapper[5050]: I1123 17:21:57.317165 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qb45w" event={"ID":"8ed7ec33-3227-4a3c-962e-2c471b16d3fe","Type":"ContainerStarted","Data":"f0ff43f19c01b8bc452cd39d11780d94ece0dd6c5780c8724e30e4b09032c31b"} Nov 23 17:21:57 crc kubenswrapper[5050]: I1123 17:21:57.319718 5050 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 23 17:21:58 crc kubenswrapper[5050]: I1123 17:21:58.332064 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qb45w" event={"ID":"8ed7ec33-3227-4a3c-962e-2c471b16d3fe","Type":"ContainerStarted","Data":"3d7f241b3fe768672f5e6c4cd0ce5fa7f944189d166d0752db204e0afecfe512"} Nov 23 17:21:59 crc kubenswrapper[5050]: I1123 17:21:59.345169 5050 generic.go:334] "Generic (PLEG): container finished" podID="8ed7ec33-3227-4a3c-962e-2c471b16d3fe" containerID="3d7f241b3fe768672f5e6c4cd0ce5fa7f944189d166d0752db204e0afecfe512" exitCode=0 Nov 23 17:21:59 crc kubenswrapper[5050]: I1123 17:21:59.345240 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qb45w" event={"ID":"8ed7ec33-3227-4a3c-962e-2c471b16d3fe","Type":"ContainerDied","Data":"3d7f241b3fe768672f5e6c4cd0ce5fa7f944189d166d0752db204e0afecfe512"} Nov 23 17:22:00 crc kubenswrapper[5050]: I1123 17:22:00.359814 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qb45w" event={"ID":"8ed7ec33-3227-4a3c-962e-2c471b16d3fe","Type":"ContainerStarted","Data":"7b12d57184ffd604183b73dd892f1483c91ce6cd6acbf29f82c5e51ef3ea3329"} Nov 
23 17:22:00 crc kubenswrapper[5050]: I1123 17:22:00.380917 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-qb45w" podStartSLOduration=2.937890157 podStartE2EDuration="5.380897288s" podCreationTimestamp="2025-11-23 17:21:55 +0000 UTC" firstStartedPulling="2025-11-23 17:21:57.319273557 +0000 UTC m=+9612.486270082" lastFinishedPulling="2025-11-23 17:21:59.762280718 +0000 UTC m=+9614.929277213" observedRunningTime="2025-11-23 17:22:00.37600702 +0000 UTC m=+9615.543003515" watchObservedRunningTime="2025-11-23 17:22:00.380897288 +0000 UTC m=+9615.547893773" Nov 23 17:22:05 crc kubenswrapper[5050]: I1123 17:22:05.929556 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-qb45w" Nov 23 17:22:05 crc kubenswrapper[5050]: I1123 17:22:05.930273 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-qb45w" Nov 23 17:22:05 crc kubenswrapper[5050]: I1123 17:22:05.985495 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-qb45w" Nov 23 17:22:06 crc kubenswrapper[5050]: I1123 17:22:06.534896 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-qb45w" Nov 23 17:22:06 crc kubenswrapper[5050]: I1123 17:22:06.609880 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-qb45w"] Nov 23 17:22:08 crc kubenswrapper[5050]: I1123 17:22:08.463897 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-qb45w" podUID="8ed7ec33-3227-4a3c-962e-2c471b16d3fe" containerName="registry-server" containerID="cri-o://7b12d57184ffd604183b73dd892f1483c91ce6cd6acbf29f82c5e51ef3ea3329" gracePeriod=2 Nov 23 17:22:09 crc kubenswrapper[5050]: I1123 17:22:09.018120 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-qb45w" Nov 23 17:22:09 crc kubenswrapper[5050]: I1123 17:22:09.152336 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8ed7ec33-3227-4a3c-962e-2c471b16d3fe-catalog-content\") pod \"8ed7ec33-3227-4a3c-962e-2c471b16d3fe\" (UID: \"8ed7ec33-3227-4a3c-962e-2c471b16d3fe\") " Nov 23 17:22:09 crc kubenswrapper[5050]: I1123 17:22:09.156203 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8ed7ec33-3227-4a3c-962e-2c471b16d3fe-utilities\") pod \"8ed7ec33-3227-4a3c-962e-2c471b16d3fe\" (UID: \"8ed7ec33-3227-4a3c-962e-2c471b16d3fe\") " Nov 23 17:22:09 crc kubenswrapper[5050]: I1123 17:22:09.156316 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jrk2c\" (UniqueName: \"kubernetes.io/projected/8ed7ec33-3227-4a3c-962e-2c471b16d3fe-kube-api-access-jrk2c\") pod \"8ed7ec33-3227-4a3c-962e-2c471b16d3fe\" (UID: \"8ed7ec33-3227-4a3c-962e-2c471b16d3fe\") " Nov 23 17:22:09 crc kubenswrapper[5050]: I1123 17:22:09.159810 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8ed7ec33-3227-4a3c-962e-2c471b16d3fe-utilities" (OuterVolumeSpecName: "utilities") pod "8ed7ec33-3227-4a3c-962e-2c471b16d3fe" (UID: "8ed7ec33-3227-4a3c-962e-2c471b16d3fe"). 
InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 17:22:09 crc kubenswrapper[5050]: I1123 17:22:09.172171 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8ed7ec33-3227-4a3c-962e-2c471b16d3fe-kube-api-access-jrk2c" (OuterVolumeSpecName: "kube-api-access-jrk2c") pod "8ed7ec33-3227-4a3c-962e-2c471b16d3fe" (UID: "8ed7ec33-3227-4a3c-962e-2c471b16d3fe"). InnerVolumeSpecName "kube-api-access-jrk2c". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 17:22:09 crc kubenswrapper[5050]: I1123 17:22:09.261872 5050 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8ed7ec33-3227-4a3c-962e-2c471b16d3fe-utilities\") on node \"crc\" DevicePath \"\"" Nov 23 17:22:09 crc kubenswrapper[5050]: I1123 17:22:09.261930 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jrk2c\" (UniqueName: \"kubernetes.io/projected/8ed7ec33-3227-4a3c-962e-2c471b16d3fe-kube-api-access-jrk2c\") on node \"crc\" DevicePath \"\"" Nov 23 17:22:09 crc kubenswrapper[5050]: I1123 17:22:09.290642 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8ed7ec33-3227-4a3c-962e-2c471b16d3fe-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "8ed7ec33-3227-4a3c-962e-2c471b16d3fe" (UID: "8ed7ec33-3227-4a3c-962e-2c471b16d3fe"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 17:22:09 crc kubenswrapper[5050]: I1123 17:22:09.364933 5050 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8ed7ec33-3227-4a3c-962e-2c471b16d3fe-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 23 17:22:09 crc kubenswrapper[5050]: I1123 17:22:09.479019 5050 generic.go:334] "Generic (PLEG): container finished" podID="8ed7ec33-3227-4a3c-962e-2c471b16d3fe" containerID="7b12d57184ffd604183b73dd892f1483c91ce6cd6acbf29f82c5e51ef3ea3329" exitCode=0 Nov 23 17:22:09 crc kubenswrapper[5050]: I1123 17:22:09.479098 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qb45w" event={"ID":"8ed7ec33-3227-4a3c-962e-2c471b16d3fe","Type":"ContainerDied","Data":"7b12d57184ffd604183b73dd892f1483c91ce6cd6acbf29f82c5e51ef3ea3329"} Nov 23 17:22:09 crc kubenswrapper[5050]: I1123 17:22:09.479157 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qb45w" event={"ID":"8ed7ec33-3227-4a3c-962e-2c471b16d3fe","Type":"ContainerDied","Data":"f0ff43f19c01b8bc452cd39d11780d94ece0dd6c5780c8724e30e4b09032c31b"} Nov 23 17:22:09 crc kubenswrapper[5050]: I1123 17:22:09.479167 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-qb45w" Nov 23 17:22:09 crc kubenswrapper[5050]: I1123 17:22:09.479223 5050 scope.go:117] "RemoveContainer" containerID="7b12d57184ffd604183b73dd892f1483c91ce6cd6acbf29f82c5e51ef3ea3329" Nov 23 17:22:09 crc kubenswrapper[5050]: I1123 17:22:09.528618 5050 scope.go:117] "RemoveContainer" containerID="3d7f241b3fe768672f5e6c4cd0ce5fa7f944189d166d0752db204e0afecfe512" Nov 23 17:22:09 crc kubenswrapper[5050]: I1123 17:22:09.577569 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-qb45w"] Nov 23 17:22:09 crc kubenswrapper[5050]: I1123 17:22:09.583173 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-qb45w"] Nov 23 17:22:09 crc kubenswrapper[5050]: I1123 17:22:09.591286 5050 scope.go:117] "RemoveContainer" containerID="971d6096158f7f9f168f02fe49dd641a04f990ffd10d179c161dd8d8d99c4f3a" Nov 23 17:22:09 crc kubenswrapper[5050]: I1123 17:22:09.660676 5050 scope.go:117] "RemoveContainer" containerID="7b12d57184ffd604183b73dd892f1483c91ce6cd6acbf29f82c5e51ef3ea3329" Nov 23 17:22:09 crc kubenswrapper[5050]: E1123 17:22:09.661550 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7b12d57184ffd604183b73dd892f1483c91ce6cd6acbf29f82c5e51ef3ea3329\": container with ID starting with 7b12d57184ffd604183b73dd892f1483c91ce6cd6acbf29f82c5e51ef3ea3329 not found: ID does not exist" containerID="7b12d57184ffd604183b73dd892f1483c91ce6cd6acbf29f82c5e51ef3ea3329" Nov 23 17:22:09 crc kubenswrapper[5050]: I1123 17:22:09.661633 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7b12d57184ffd604183b73dd892f1483c91ce6cd6acbf29f82c5e51ef3ea3329"} err="failed to get container status \"7b12d57184ffd604183b73dd892f1483c91ce6cd6acbf29f82c5e51ef3ea3329\": rpc error: code = NotFound desc = could not find container \"7b12d57184ffd604183b73dd892f1483c91ce6cd6acbf29f82c5e51ef3ea3329\": container with ID starting with 7b12d57184ffd604183b73dd892f1483c91ce6cd6acbf29f82c5e51ef3ea3329 not found: ID does not exist" Nov 23 17:22:09 crc kubenswrapper[5050]: I1123 17:22:09.661676 5050 scope.go:117] "RemoveContainer" containerID="3d7f241b3fe768672f5e6c4cd0ce5fa7f944189d166d0752db204e0afecfe512" Nov 23 17:22:09 crc kubenswrapper[5050]: E1123 17:22:09.662250 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3d7f241b3fe768672f5e6c4cd0ce5fa7f944189d166d0752db204e0afecfe512\": container with ID starting with 3d7f241b3fe768672f5e6c4cd0ce5fa7f944189d166d0752db204e0afecfe512 not found: ID does not exist" containerID="3d7f241b3fe768672f5e6c4cd0ce5fa7f944189d166d0752db204e0afecfe512" Nov 23 17:22:09 crc kubenswrapper[5050]: I1123 17:22:09.662336 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3d7f241b3fe768672f5e6c4cd0ce5fa7f944189d166d0752db204e0afecfe512"} err="failed to get container status \"3d7f241b3fe768672f5e6c4cd0ce5fa7f944189d166d0752db204e0afecfe512\": rpc error: code = NotFound desc = could not find container \"3d7f241b3fe768672f5e6c4cd0ce5fa7f944189d166d0752db204e0afecfe512\": container with ID starting with 3d7f241b3fe768672f5e6c4cd0ce5fa7f944189d166d0752db204e0afecfe512 not found: ID does not exist" Nov 23 17:22:09 crc kubenswrapper[5050]: I1123 17:22:09.662478 5050 scope.go:117] "RemoveContainer" 
containerID="971d6096158f7f9f168f02fe49dd641a04f990ffd10d179c161dd8d8d99c4f3a" Nov 23 17:22:09 crc kubenswrapper[5050]: E1123 17:22:09.665117 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"971d6096158f7f9f168f02fe49dd641a04f990ffd10d179c161dd8d8d99c4f3a\": container with ID starting with 971d6096158f7f9f168f02fe49dd641a04f990ffd10d179c161dd8d8d99c4f3a not found: ID does not exist" containerID="971d6096158f7f9f168f02fe49dd641a04f990ffd10d179c161dd8d8d99c4f3a" Nov 23 17:22:09 crc kubenswrapper[5050]: I1123 17:22:09.665193 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"971d6096158f7f9f168f02fe49dd641a04f990ffd10d179c161dd8d8d99c4f3a"} err="failed to get container status \"971d6096158f7f9f168f02fe49dd641a04f990ffd10d179c161dd8d8d99c4f3a\": rpc error: code = NotFound desc = could not find container \"971d6096158f7f9f168f02fe49dd641a04f990ffd10d179c161dd8d8d99c4f3a\": container with ID starting with 971d6096158f7f9f168f02fe49dd641a04f990ffd10d179c161dd8d8d99c4f3a not found: ID does not exist" Nov 23 17:22:11 crc kubenswrapper[5050]: I1123 17:22:11.564676 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8ed7ec33-3227-4a3c-962e-2c471b16d3fe" path="/var/lib/kubelet/pods/8ed7ec33-3227-4a3c-962e-2c471b16d3fe/volumes" Nov 23 17:22:29 crc kubenswrapper[5050]: I1123 17:22:29.225044 5050 patch_prober.go:28] interesting pod/machine-config-daemon-hlrlq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 23 17:22:29 crc kubenswrapper[5050]: I1123 17:22:29.225978 5050 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 23 17:22:59 crc kubenswrapper[5050]: I1123 17:22:59.225191 5050 patch_prober.go:28] interesting pod/machine-config-daemon-hlrlq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 23 17:22:59 crc kubenswrapper[5050]: I1123 17:22:59.226083 5050 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 23 17:23:29 crc kubenswrapper[5050]: I1123 17:23:29.224939 5050 patch_prober.go:28] interesting pod/machine-config-daemon-hlrlq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 23 17:23:29 crc kubenswrapper[5050]: I1123 17:23:29.225892 5050 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" probeResult="failure" output="Get 
\"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 23 17:23:29 crc kubenswrapper[5050]: I1123 17:23:29.225987 5050 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" Nov 23 17:23:29 crc kubenswrapper[5050]: I1123 17:23:29.227619 5050 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"2bfde5e08e12dfda92d97543906fc7b88ff567a0a0bd2d168349eaebb48d00e3"} pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 23 17:23:29 crc kubenswrapper[5050]: I1123 17:23:29.227968 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" containerID="cri-o://2bfde5e08e12dfda92d97543906fc7b88ff567a0a0bd2d168349eaebb48d00e3" gracePeriod=600 Nov 23 17:23:29 crc kubenswrapper[5050]: E1123 17:23:29.368056 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 17:23:29 crc kubenswrapper[5050]: I1123 17:23:29.702768 5050 generic.go:334] "Generic (PLEG): container finished" podID="1d998909-9470-47ef-87e8-b34f0473682f" containerID="2bfde5e08e12dfda92d97543906fc7b88ff567a0a0bd2d168349eaebb48d00e3" exitCode=0 Nov 23 17:23:29 crc kubenswrapper[5050]: I1123 17:23:29.702839 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" event={"ID":"1d998909-9470-47ef-87e8-b34f0473682f","Type":"ContainerDied","Data":"2bfde5e08e12dfda92d97543906fc7b88ff567a0a0bd2d168349eaebb48d00e3"} Nov 23 17:23:29 crc kubenswrapper[5050]: I1123 17:23:29.702898 5050 scope.go:117] "RemoveContainer" containerID="9236198824a90101986e5e90e6d261e3cd9c806e7f49444e40d6e87a2229720f" Nov 23 17:23:29 crc kubenswrapper[5050]: I1123 17:23:29.704223 5050 scope.go:117] "RemoveContainer" containerID="2bfde5e08e12dfda92d97543906fc7b88ff567a0a0bd2d168349eaebb48d00e3" Nov 23 17:23:29 crc kubenswrapper[5050]: E1123 17:23:29.704808 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 17:23:40 crc kubenswrapper[5050]: I1123 17:23:40.765940 5050 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/openstack-galera-0" podUID="4ec299fe-6b4a-4fe5-a401-ebc1d8dc8511" containerName="galera" probeResult="failure" output="command timed out" Nov 23 17:23:40 crc kubenswrapper[5050]: I1123 17:23:40.769229 5050 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/openstack-galera-0" podUID="4ec299fe-6b4a-4fe5-a401-ebc1d8dc8511" containerName="galera" 
probeResult="failure" output="command timed out" Nov 23 17:23:43 crc kubenswrapper[5050]: I1123 17:23:43.551691 5050 scope.go:117] "RemoveContainer" containerID="2bfde5e08e12dfda92d97543906fc7b88ff567a0a0bd2d168349eaebb48d00e3" Nov 23 17:23:43 crc kubenswrapper[5050]: E1123 17:23:43.552958 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 17:23:57 crc kubenswrapper[5050]: I1123 17:23:57.549733 5050 scope.go:117] "RemoveContainer" containerID="2bfde5e08e12dfda92d97543906fc7b88ff567a0a0bd2d168349eaebb48d00e3" Nov 23 17:23:57 crc kubenswrapper[5050]: E1123 17:23:57.551248 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 17:24:11 crc kubenswrapper[5050]: I1123 17:24:11.549401 5050 scope.go:117] "RemoveContainer" containerID="2bfde5e08e12dfda92d97543906fc7b88ff567a0a0bd2d168349eaebb48d00e3" Nov 23 17:24:11 crc kubenswrapper[5050]: E1123 17:24:11.550510 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 17:24:23 crc kubenswrapper[5050]: I1123 17:24:23.549866 5050 scope.go:117] "RemoveContainer" containerID="2bfde5e08e12dfda92d97543906fc7b88ff567a0a0bd2d168349eaebb48d00e3" Nov 23 17:24:23 crc kubenswrapper[5050]: E1123 17:24:23.554647 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 17:24:24 crc kubenswrapper[5050]: I1123 17:24:24.486978 5050 generic.go:334] "Generic (PLEG): container finished" podID="ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd" containerID="c7be81ef0fcae3ddf0cde77f5bfc68578cbc26552745b96d9c66f73c15f9152e" exitCode=0 Nov 23 17:24:24 crc kubenswrapper[5050]: I1123 17:24:24.487029 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellfnhdj" event={"ID":"ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd","Type":"ContainerDied","Data":"c7be81ef0fcae3ddf0cde77f5bfc68578cbc26552745b96d9c66f73c15f9152e"} Nov 23 17:24:27 crc kubenswrapper[5050]: I1123 17:24:27.208552 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellfnhdj" Nov 23 17:24:27 crc kubenswrapper[5050]: I1123 17:24:27.292496 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd-nova-migration-ssh-key-1\") pod \"ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd\" (UID: \"ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd\") " Nov 23 17:24:27 crc kubenswrapper[5050]: I1123 17:24:27.292590 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd-nova-cell1-combined-ca-bundle\") pod \"ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd\" (UID: \"ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd\") " Nov 23 17:24:27 crc kubenswrapper[5050]: I1123 17:24:27.292749 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd-inventory\") pod \"ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd\" (UID: \"ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd\") " Nov 23 17:24:27 crc kubenswrapper[5050]: I1123 17:24:27.292810 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd-ssh-key\") pod \"ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd\" (UID: \"ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd\") " Nov 23 17:24:27 crc kubenswrapper[5050]: I1123 17:24:27.292835 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd-nova-migration-ssh-key-0\") pod \"ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd\" (UID: \"ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd\") " Nov 23 17:24:27 crc kubenswrapper[5050]: I1123 17:24:27.293023 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cells-global-config-1\" (UniqueName: \"kubernetes.io/configmap/ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd-nova-cells-global-config-1\") pod \"ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd\" (UID: \"ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd\") " Nov 23 17:24:27 crc kubenswrapper[5050]: I1123 17:24:27.293146 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cells-global-config-0\" (UniqueName: \"kubernetes.io/configmap/ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd-nova-cells-global-config-0\") pod \"ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd\" (UID: \"ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd\") " Nov 23 17:24:27 crc kubenswrapper[5050]: I1123 17:24:27.293210 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd-ceph\") pod \"ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd\" (UID: \"ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd\") " Nov 23 17:24:27 crc kubenswrapper[5050]: I1123 17:24:27.293257 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd-nova-cell1-compute-config-0\") pod \"ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd\" (UID: \"ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd\") " Nov 23 17:24:27 crc kubenswrapper[5050]: I1123 17:24:27.293345 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dnmjm\" (UniqueName: 
\"kubernetes.io/projected/ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd-kube-api-access-dnmjm\") pod \"ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd\" (UID: \"ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd\") " Nov 23 17:24:27 crc kubenswrapper[5050]: I1123 17:24:27.293416 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd-nova-cell1-compute-config-1\") pod \"ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd\" (UID: \"ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd\") " Nov 23 17:24:27 crc kubenswrapper[5050]: I1123 17:24:27.306810 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd-ceph" (OuterVolumeSpecName: "ceph") pod "ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd" (UID: "ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 17:24:27 crc kubenswrapper[5050]: I1123 17:24:27.306899 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd-kube-api-access-dnmjm" (OuterVolumeSpecName: "kube-api-access-dnmjm") pod "ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd" (UID: "ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd"). InnerVolumeSpecName "kube-api-access-dnmjm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 17:24:27 crc kubenswrapper[5050]: I1123 17:24:27.314881 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd-nova-cell1-combined-ca-bundle" (OuterVolumeSpecName: "nova-cell1-combined-ca-bundle") pod "ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd" (UID: "ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd"). InnerVolumeSpecName "nova-cell1-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 17:24:27 crc kubenswrapper[5050]: I1123 17:24:27.342653 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd-nova-cell1-compute-config-1" (OuterVolumeSpecName: "nova-cell1-compute-config-1") pod "ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd" (UID: "ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd"). InnerVolumeSpecName "nova-cell1-compute-config-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 17:24:27 crc kubenswrapper[5050]: I1123 17:24:27.343613 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd" (UID: "ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 17:24:27 crc kubenswrapper[5050]: I1123 17:24:27.359512 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd-nova-migration-ssh-key-1" (OuterVolumeSpecName: "nova-migration-ssh-key-1") pod "ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd" (UID: "ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd"). InnerVolumeSpecName "nova-migration-ssh-key-1". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 17:24:27 crc kubenswrapper[5050]: I1123 17:24:27.362761 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd-nova-cell1-compute-config-0" (OuterVolumeSpecName: "nova-cell1-compute-config-0") pod "ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd" (UID: "ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd"). InnerVolumeSpecName "nova-cell1-compute-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 17:24:27 crc kubenswrapper[5050]: I1123 17:24:27.368166 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd-inventory" (OuterVolumeSpecName: "inventory") pod "ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd" (UID: "ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 17:24:27 crc kubenswrapper[5050]: I1123 17:24:27.368965 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd-nova-cells-global-config-0" (OuterVolumeSpecName: "nova-cells-global-config-0") pod "ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd" (UID: "ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd"). InnerVolumeSpecName "nova-cells-global-config-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 17:24:27 crc kubenswrapper[5050]: I1123 17:24:27.373424 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd-nova-cells-global-config-1" (OuterVolumeSpecName: "nova-cells-global-config-1") pod "ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd" (UID: "ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd"). InnerVolumeSpecName "nova-cells-global-config-1". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 17:24:27 crc kubenswrapper[5050]: I1123 17:24:27.389768 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd-nova-migration-ssh-key-0" (OuterVolumeSpecName: "nova-migration-ssh-key-0") pod "ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd" (UID: "ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd"). InnerVolumeSpecName "nova-migration-ssh-key-0". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 17:24:27 crc kubenswrapper[5050]: I1123 17:24:27.401334 5050 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd-nova-cell1-compute-config-0\") on node \"crc\" DevicePath \"\"" Nov 23 17:24:27 crc kubenswrapper[5050]: I1123 17:24:27.401374 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dnmjm\" (UniqueName: \"kubernetes.io/projected/ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd-kube-api-access-dnmjm\") on node \"crc\" DevicePath \"\"" Nov 23 17:24:27 crc kubenswrapper[5050]: I1123 17:24:27.401385 5050 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd-nova-cell1-compute-config-1\") on node \"crc\" DevicePath \"\"" Nov 23 17:24:27 crc kubenswrapper[5050]: I1123 17:24:27.401394 5050 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd-nova-migration-ssh-key-1\") on node \"crc\" DevicePath \"\"" Nov 23 17:24:27 crc kubenswrapper[5050]: I1123 17:24:27.401405 5050 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd-nova-cell1-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 23 17:24:27 crc kubenswrapper[5050]: I1123 17:24:27.401418 5050 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd-inventory\") on node \"crc\" DevicePath \"\"" Nov 23 17:24:27 crc kubenswrapper[5050]: I1123 17:24:27.401428 5050 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 23 17:24:27 crc kubenswrapper[5050]: I1123 17:24:27.401437 5050 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd-nova-migration-ssh-key-0\") on node \"crc\" DevicePath \"\"" Nov 23 17:24:27 crc kubenswrapper[5050]: I1123 17:24:27.401460 5050 reconciler_common.go:293] "Volume detached for volume \"nova-cells-global-config-1\" (UniqueName: \"kubernetes.io/configmap/ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd-nova-cells-global-config-1\") on node \"crc\" DevicePath \"\"" Nov 23 17:24:27 crc kubenswrapper[5050]: I1123 17:24:27.401472 5050 reconciler_common.go:293] "Volume detached for volume \"nova-cells-global-config-0\" (UniqueName: \"kubernetes.io/configmap/ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd-nova-cells-global-config-0\") on node \"crc\" DevicePath \"\"" Nov 23 17:24:27 crc kubenswrapper[5050]: I1123 17:24:27.401485 5050 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd-ceph\") on node \"crc\" DevicePath \"\"" Nov 23 17:24:27 crc kubenswrapper[5050]: I1123 17:24:27.525981 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellfnhdj" event={"ID":"ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd","Type":"ContainerDied","Data":"466e5e5d8deac463f87527bd75f3bf8f52b4120e10086b03fce0746f689a3c4d"} Nov 23 17:24:27 crc kubenswrapper[5050]: I1123 17:24:27.526041 5050 
pod_container_deletor.go:80] "Container not found in pod's containers" containerID="466e5e5d8deac463f87527bd75f3bf8f52b4120e10086b03fce0746f689a3c4d" Nov 23 17:24:27 crc kubenswrapper[5050]: I1123 17:24:27.526099 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellfnhdj" Nov 23 17:24:38 crc kubenswrapper[5050]: I1123 17:24:38.550513 5050 scope.go:117] "RemoveContainer" containerID="2bfde5e08e12dfda92d97543906fc7b88ff567a0a0bd2d168349eaebb48d00e3" Nov 23 17:24:38 crc kubenswrapper[5050]: E1123 17:24:38.551818 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 17:24:53 crc kubenswrapper[5050]: I1123 17:24:53.549532 5050 scope.go:117] "RemoveContainer" containerID="2bfde5e08e12dfda92d97543906fc7b88ff567a0a0bd2d168349eaebb48d00e3" Nov 23 17:24:53 crc kubenswrapper[5050]: E1123 17:24:53.550804 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 17:25:08 crc kubenswrapper[5050]: I1123 17:25:08.548678 5050 scope.go:117] "RemoveContainer" containerID="2bfde5e08e12dfda92d97543906fc7b88ff567a0a0bd2d168349eaebb48d00e3" Nov 23 17:25:08 crc kubenswrapper[5050]: E1123 17:25:08.549747 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 17:25:15 crc kubenswrapper[5050]: I1123 17:25:15.480547 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-mk6lg"] Nov 23 17:25:15 crc kubenswrapper[5050]: E1123 17:25:15.483779 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8ed7ec33-3227-4a3c-962e-2c471b16d3fe" containerName="extract-content" Nov 23 17:25:15 crc kubenswrapper[5050]: I1123 17:25:15.483826 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="8ed7ec33-3227-4a3c-962e-2c471b16d3fe" containerName="extract-content" Nov 23 17:25:15 crc kubenswrapper[5050]: E1123 17:25:15.483883 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd" containerName="nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell1" Nov 23 17:25:15 crc kubenswrapper[5050]: I1123 17:25:15.483905 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd" containerName="nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell1" Nov 23 17:25:15 crc kubenswrapper[5050]: E1123 17:25:15.483990 5050 cpu_manager.go:410] "RemoveStaleState: 
removing container" podUID="8ed7ec33-3227-4a3c-962e-2c471b16d3fe" containerName="registry-server" Nov 23 17:25:15 crc kubenswrapper[5050]: I1123 17:25:15.484010 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="8ed7ec33-3227-4a3c-962e-2c471b16d3fe" containerName="registry-server" Nov 23 17:25:15 crc kubenswrapper[5050]: E1123 17:25:15.484043 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8ed7ec33-3227-4a3c-962e-2c471b16d3fe" containerName="extract-utilities" Nov 23 17:25:15 crc kubenswrapper[5050]: I1123 17:25:15.484060 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="8ed7ec33-3227-4a3c-962e-2c471b16d3fe" containerName="extract-utilities" Nov 23 17:25:15 crc kubenswrapper[5050]: I1123 17:25:15.484732 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd" containerName="nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell1" Nov 23 17:25:15 crc kubenswrapper[5050]: I1123 17:25:15.484768 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="8ed7ec33-3227-4a3c-962e-2c471b16d3fe" containerName="registry-server" Nov 23 17:25:15 crc kubenswrapper[5050]: I1123 17:25:15.493025 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-mk6lg" Nov 23 17:25:15 crc kubenswrapper[5050]: I1123 17:25:15.507771 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-mk6lg"] Nov 23 17:25:15 crc kubenswrapper[5050]: I1123 17:25:15.559552 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/285ff366-ad0b-41a8-8f87-64c1c9a71bbe-catalog-content\") pod \"certified-operators-mk6lg\" (UID: \"285ff366-ad0b-41a8-8f87-64c1c9a71bbe\") " pod="openshift-marketplace/certified-operators-mk6lg" Nov 23 17:25:15 crc kubenswrapper[5050]: I1123 17:25:15.560072 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h45tx\" (UniqueName: \"kubernetes.io/projected/285ff366-ad0b-41a8-8f87-64c1c9a71bbe-kube-api-access-h45tx\") pod \"certified-operators-mk6lg\" (UID: \"285ff366-ad0b-41a8-8f87-64c1c9a71bbe\") " pod="openshift-marketplace/certified-operators-mk6lg" Nov 23 17:25:15 crc kubenswrapper[5050]: I1123 17:25:15.560237 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/285ff366-ad0b-41a8-8f87-64c1c9a71bbe-utilities\") pod \"certified-operators-mk6lg\" (UID: \"285ff366-ad0b-41a8-8f87-64c1c9a71bbe\") " pod="openshift-marketplace/certified-operators-mk6lg" Nov 23 17:25:15 crc kubenswrapper[5050]: I1123 17:25:15.663104 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/285ff366-ad0b-41a8-8f87-64c1c9a71bbe-catalog-content\") pod \"certified-operators-mk6lg\" (UID: \"285ff366-ad0b-41a8-8f87-64c1c9a71bbe\") " pod="openshift-marketplace/certified-operators-mk6lg" Nov 23 17:25:15 crc kubenswrapper[5050]: I1123 17:25:15.663241 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h45tx\" (UniqueName: \"kubernetes.io/projected/285ff366-ad0b-41a8-8f87-64c1c9a71bbe-kube-api-access-h45tx\") pod \"certified-operators-mk6lg\" (UID: \"285ff366-ad0b-41a8-8f87-64c1c9a71bbe\") " 
pod="openshift-marketplace/certified-operators-mk6lg" Nov 23 17:25:15 crc kubenswrapper[5050]: I1123 17:25:15.663570 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/285ff366-ad0b-41a8-8f87-64c1c9a71bbe-utilities\") pod \"certified-operators-mk6lg\" (UID: \"285ff366-ad0b-41a8-8f87-64c1c9a71bbe\") " pod="openshift-marketplace/certified-operators-mk6lg" Nov 23 17:25:15 crc kubenswrapper[5050]: I1123 17:25:15.664247 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/285ff366-ad0b-41a8-8f87-64c1c9a71bbe-catalog-content\") pod \"certified-operators-mk6lg\" (UID: \"285ff366-ad0b-41a8-8f87-64c1c9a71bbe\") " pod="openshift-marketplace/certified-operators-mk6lg" Nov 23 17:25:15 crc kubenswrapper[5050]: I1123 17:25:15.664265 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/285ff366-ad0b-41a8-8f87-64c1c9a71bbe-utilities\") pod \"certified-operators-mk6lg\" (UID: \"285ff366-ad0b-41a8-8f87-64c1c9a71bbe\") " pod="openshift-marketplace/certified-operators-mk6lg" Nov 23 17:25:16 crc kubenswrapper[5050]: I1123 17:25:16.562346 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h45tx\" (UniqueName: \"kubernetes.io/projected/285ff366-ad0b-41a8-8f87-64c1c9a71bbe-kube-api-access-h45tx\") pod \"certified-operators-mk6lg\" (UID: \"285ff366-ad0b-41a8-8f87-64c1c9a71bbe\") " pod="openshift-marketplace/certified-operators-mk6lg" Nov 23 17:25:16 crc kubenswrapper[5050]: I1123 17:25:16.747558 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-mk6lg" Nov 23 17:25:17 crc kubenswrapper[5050]: I1123 17:25:17.290874 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-mk6lg"] Nov 23 17:25:18 crc kubenswrapper[5050]: I1123 17:25:18.212882 5050 generic.go:334] "Generic (PLEG): container finished" podID="285ff366-ad0b-41a8-8f87-64c1c9a71bbe" containerID="1fcc19b02ee6f891837704c75f613d736d4aafab219154f5681e693590bafbc4" exitCode=0 Nov 23 17:25:18 crc kubenswrapper[5050]: I1123 17:25:18.213289 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-mk6lg" event={"ID":"285ff366-ad0b-41a8-8f87-64c1c9a71bbe","Type":"ContainerDied","Data":"1fcc19b02ee6f891837704c75f613d736d4aafab219154f5681e693590bafbc4"} Nov 23 17:25:18 crc kubenswrapper[5050]: I1123 17:25:18.213726 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-mk6lg" event={"ID":"285ff366-ad0b-41a8-8f87-64c1c9a71bbe","Type":"ContainerStarted","Data":"6dccc7b86eecd0ad781525720c991a80e5e8e53303f55d81f7514dc96222439e"} Nov 23 17:25:19 crc kubenswrapper[5050]: I1123 17:25:19.227540 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-mk6lg" event={"ID":"285ff366-ad0b-41a8-8f87-64c1c9a71bbe","Type":"ContainerStarted","Data":"b3b8299c3bda8c1a9f50a71456d7697d09bf9abb5f2386962fca941cc8b60008"} Nov 23 17:25:20 crc kubenswrapper[5050]: I1123 17:25:20.549684 5050 scope.go:117] "RemoveContainer" containerID="2bfde5e08e12dfda92d97543906fc7b88ff567a0a0bd2d168349eaebb48d00e3" Nov 23 17:25:20 crc kubenswrapper[5050]: E1123 17:25:20.550690 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 17:25:20 crc kubenswrapper[5050]: I1123 17:25:20.688629 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-f8fxx"] Nov 23 17:25:20 crc kubenswrapper[5050]: I1123 17:25:20.693597 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-f8fxx" Nov 23 17:25:20 crc kubenswrapper[5050]: I1123 17:25:20.723924 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-f8fxx"] Nov 23 17:25:20 crc kubenswrapper[5050]: I1123 17:25:20.837622 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pqbbv\" (UniqueName: \"kubernetes.io/projected/075753a2-2d83-4707-830a-c00d7beb6172-kube-api-access-pqbbv\") pod \"redhat-marketplace-f8fxx\" (UID: \"075753a2-2d83-4707-830a-c00d7beb6172\") " pod="openshift-marketplace/redhat-marketplace-f8fxx" Nov 23 17:25:20 crc kubenswrapper[5050]: I1123 17:25:20.837719 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/075753a2-2d83-4707-830a-c00d7beb6172-catalog-content\") pod \"redhat-marketplace-f8fxx\" (UID: \"075753a2-2d83-4707-830a-c00d7beb6172\") " pod="openshift-marketplace/redhat-marketplace-f8fxx" Nov 23 17:25:20 crc kubenswrapper[5050]: I1123 17:25:20.839239 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/075753a2-2d83-4707-830a-c00d7beb6172-utilities\") pod \"redhat-marketplace-f8fxx\" (UID: \"075753a2-2d83-4707-830a-c00d7beb6172\") " pod="openshift-marketplace/redhat-marketplace-f8fxx" Nov 23 17:25:20 crc kubenswrapper[5050]: I1123 17:25:20.942506 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/075753a2-2d83-4707-830a-c00d7beb6172-utilities\") pod \"redhat-marketplace-f8fxx\" (UID: \"075753a2-2d83-4707-830a-c00d7beb6172\") " pod="openshift-marketplace/redhat-marketplace-f8fxx" Nov 23 17:25:20 crc kubenswrapper[5050]: I1123 17:25:20.942614 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pqbbv\" (UniqueName: \"kubernetes.io/projected/075753a2-2d83-4707-830a-c00d7beb6172-kube-api-access-pqbbv\") pod \"redhat-marketplace-f8fxx\" (UID: \"075753a2-2d83-4707-830a-c00d7beb6172\") " pod="openshift-marketplace/redhat-marketplace-f8fxx" Nov 23 17:25:20 crc kubenswrapper[5050]: I1123 17:25:20.942683 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/075753a2-2d83-4707-830a-c00d7beb6172-catalog-content\") pod \"redhat-marketplace-f8fxx\" (UID: \"075753a2-2d83-4707-830a-c00d7beb6172\") " pod="openshift-marketplace/redhat-marketplace-f8fxx" Nov 23 17:25:20 crc kubenswrapper[5050]: I1123 17:25:20.943404 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/075753a2-2d83-4707-830a-c00d7beb6172-catalog-content\") pod 
\"redhat-marketplace-f8fxx\" (UID: \"075753a2-2d83-4707-830a-c00d7beb6172\") " pod="openshift-marketplace/redhat-marketplace-f8fxx" Nov 23 17:25:20 crc kubenswrapper[5050]: I1123 17:25:20.943562 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/075753a2-2d83-4707-830a-c00d7beb6172-utilities\") pod \"redhat-marketplace-f8fxx\" (UID: \"075753a2-2d83-4707-830a-c00d7beb6172\") " pod="openshift-marketplace/redhat-marketplace-f8fxx" Nov 23 17:25:20 crc kubenswrapper[5050]: I1123 17:25:20.964623 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pqbbv\" (UniqueName: \"kubernetes.io/projected/075753a2-2d83-4707-830a-c00d7beb6172-kube-api-access-pqbbv\") pod \"redhat-marketplace-f8fxx\" (UID: \"075753a2-2d83-4707-830a-c00d7beb6172\") " pod="openshift-marketplace/redhat-marketplace-f8fxx" Nov 23 17:25:21 crc kubenswrapper[5050]: I1123 17:25:21.062206 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-f8fxx" Nov 23 17:25:21 crc kubenswrapper[5050]: I1123 17:25:21.262739 5050 generic.go:334] "Generic (PLEG): container finished" podID="285ff366-ad0b-41a8-8f87-64c1c9a71bbe" containerID="b3b8299c3bda8c1a9f50a71456d7697d09bf9abb5f2386962fca941cc8b60008" exitCode=0 Nov 23 17:25:21 crc kubenswrapper[5050]: I1123 17:25:21.262880 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-mk6lg" event={"ID":"285ff366-ad0b-41a8-8f87-64c1c9a71bbe","Type":"ContainerDied","Data":"b3b8299c3bda8c1a9f50a71456d7697d09bf9abb5f2386962fca941cc8b60008"} Nov 23 17:25:21 crc kubenswrapper[5050]: I1123 17:25:21.670587 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-f8fxx"] Nov 23 17:25:22 crc kubenswrapper[5050]: I1123 17:25:22.275815 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-mk6lg" event={"ID":"285ff366-ad0b-41a8-8f87-64c1c9a71bbe","Type":"ContainerStarted","Data":"7d9da16ba0f4aebff15f7311420fad16b318afe992e5b85e05650b8cdf9afc79"} Nov 23 17:25:22 crc kubenswrapper[5050]: I1123 17:25:22.280836 5050 generic.go:334] "Generic (PLEG): container finished" podID="075753a2-2d83-4707-830a-c00d7beb6172" containerID="482caefefb08d27401025579ed55f8bbbb627cdf2ea667116a0bde1d944ec39e" exitCode=0 Nov 23 17:25:22 crc kubenswrapper[5050]: I1123 17:25:22.280915 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-f8fxx" event={"ID":"075753a2-2d83-4707-830a-c00d7beb6172","Type":"ContainerDied","Data":"482caefefb08d27401025579ed55f8bbbb627cdf2ea667116a0bde1d944ec39e"} Nov 23 17:25:22 crc kubenswrapper[5050]: I1123 17:25:22.280960 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-f8fxx" event={"ID":"075753a2-2d83-4707-830a-c00d7beb6172","Type":"ContainerStarted","Data":"cee82a4475565a4beaa2146f13bafbe4db82be678b354d4b703a83906ebfc142"} Nov 23 17:25:22 crc kubenswrapper[5050]: I1123 17:25:22.304556 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-mk6lg" podStartSLOduration=3.7602076 podStartE2EDuration="7.304519224s" podCreationTimestamp="2025-11-23 17:25:15 +0000 UTC" firstStartedPulling="2025-11-23 17:25:18.219256223 +0000 UTC m=+9813.386252708" lastFinishedPulling="2025-11-23 17:25:21.763567847 +0000 UTC m=+9816.930564332" 
observedRunningTime="2025-11-23 17:25:22.298122884 +0000 UTC m=+9817.465119389" watchObservedRunningTime="2025-11-23 17:25:22.304519224 +0000 UTC m=+9817.471515709" Nov 23 17:25:23 crc kubenswrapper[5050]: I1123 17:25:23.298285 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-f8fxx" event={"ID":"075753a2-2d83-4707-830a-c00d7beb6172","Type":"ContainerStarted","Data":"b8a60a53795c1da427d637153796eae167c072898460ae89c5866d2d112808e5"} Nov 23 17:25:24 crc kubenswrapper[5050]: I1123 17:25:24.313889 5050 generic.go:334] "Generic (PLEG): container finished" podID="075753a2-2d83-4707-830a-c00d7beb6172" containerID="b8a60a53795c1da427d637153796eae167c072898460ae89c5866d2d112808e5" exitCode=0 Nov 23 17:25:24 crc kubenswrapper[5050]: I1123 17:25:24.314032 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-f8fxx" event={"ID":"075753a2-2d83-4707-830a-c00d7beb6172","Type":"ContainerDied","Data":"b8a60a53795c1da427d637153796eae167c072898460ae89c5866d2d112808e5"} Nov 23 17:25:25 crc kubenswrapper[5050]: I1123 17:25:25.331590 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-f8fxx" event={"ID":"075753a2-2d83-4707-830a-c00d7beb6172","Type":"ContainerStarted","Data":"f5acbf4dbe00e83f34cbf9a559282bcc36a339e080e58d5ff0ddf000fa570668"} Nov 23 17:25:25 crc kubenswrapper[5050]: I1123 17:25:25.356290 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-f8fxx" podStartSLOduration=2.84827382 podStartE2EDuration="5.356267706s" podCreationTimestamp="2025-11-23 17:25:20 +0000 UTC" firstStartedPulling="2025-11-23 17:25:22.288138882 +0000 UTC m=+9817.455135367" lastFinishedPulling="2025-11-23 17:25:24.796132738 +0000 UTC m=+9819.963129253" observedRunningTime="2025-11-23 17:25:25.3549909 +0000 UTC m=+9820.521987395" watchObservedRunningTime="2025-11-23 17:25:25.356267706 +0000 UTC m=+9820.523264191" Nov 23 17:25:26 crc kubenswrapper[5050]: I1123 17:25:26.749710 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-mk6lg" Nov 23 17:25:26 crc kubenswrapper[5050]: I1123 17:25:26.750328 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-mk6lg" Nov 23 17:25:26 crc kubenswrapper[5050]: I1123 17:25:26.813982 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-mk6lg" Nov 23 17:25:27 crc kubenswrapper[5050]: I1123 17:25:27.478991 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-mk6lg" Nov 23 17:25:29 crc kubenswrapper[5050]: I1123 17:25:29.467102 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-mk6lg"] Nov 23 17:25:29 crc kubenswrapper[5050]: I1123 17:25:29.468285 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-mk6lg" podUID="285ff366-ad0b-41a8-8f87-64c1c9a71bbe" containerName="registry-server" containerID="cri-o://7d9da16ba0f4aebff15f7311420fad16b318afe992e5b85e05650b8cdf9afc79" gracePeriod=2 Nov 23 17:25:29 crc kubenswrapper[5050]: E1123 17:25:29.548189 5050 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: 
[\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod285ff366_ad0b_41a8_8f87_64c1c9a71bbe.slice/crio-conmon-7d9da16ba0f4aebff15f7311420fad16b318afe992e5b85e05650b8cdf9afc79.scope\": RecentStats: unable to find data in memory cache]" Nov 23 17:25:29 crc kubenswrapper[5050]: I1123 17:25:29.969230 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-mk6lg" Nov 23 17:25:30 crc kubenswrapper[5050]: I1123 17:25:30.146719 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/285ff366-ad0b-41a8-8f87-64c1c9a71bbe-utilities\") pod \"285ff366-ad0b-41a8-8f87-64c1c9a71bbe\" (UID: \"285ff366-ad0b-41a8-8f87-64c1c9a71bbe\") " Nov 23 17:25:30 crc kubenswrapper[5050]: I1123 17:25:30.146787 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/285ff366-ad0b-41a8-8f87-64c1c9a71bbe-catalog-content\") pod \"285ff366-ad0b-41a8-8f87-64c1c9a71bbe\" (UID: \"285ff366-ad0b-41a8-8f87-64c1c9a71bbe\") " Nov 23 17:25:30 crc kubenswrapper[5050]: I1123 17:25:30.146926 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h45tx\" (UniqueName: \"kubernetes.io/projected/285ff366-ad0b-41a8-8f87-64c1c9a71bbe-kube-api-access-h45tx\") pod \"285ff366-ad0b-41a8-8f87-64c1c9a71bbe\" (UID: \"285ff366-ad0b-41a8-8f87-64c1c9a71bbe\") " Nov 23 17:25:30 crc kubenswrapper[5050]: I1123 17:25:30.149246 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/285ff366-ad0b-41a8-8f87-64c1c9a71bbe-utilities" (OuterVolumeSpecName: "utilities") pod "285ff366-ad0b-41a8-8f87-64c1c9a71bbe" (UID: "285ff366-ad0b-41a8-8f87-64c1c9a71bbe"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 17:25:30 crc kubenswrapper[5050]: I1123 17:25:30.156510 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/285ff366-ad0b-41a8-8f87-64c1c9a71bbe-kube-api-access-h45tx" (OuterVolumeSpecName: "kube-api-access-h45tx") pod "285ff366-ad0b-41a8-8f87-64c1c9a71bbe" (UID: "285ff366-ad0b-41a8-8f87-64c1c9a71bbe"). InnerVolumeSpecName "kube-api-access-h45tx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 17:25:30 crc kubenswrapper[5050]: I1123 17:25:30.196496 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/285ff366-ad0b-41a8-8f87-64c1c9a71bbe-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "285ff366-ad0b-41a8-8f87-64c1c9a71bbe" (UID: "285ff366-ad0b-41a8-8f87-64c1c9a71bbe"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 17:25:30 crc kubenswrapper[5050]: I1123 17:25:30.250201 5050 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/285ff366-ad0b-41a8-8f87-64c1c9a71bbe-utilities\") on node \"crc\" DevicePath \"\"" Nov 23 17:25:30 crc kubenswrapper[5050]: I1123 17:25:30.250245 5050 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/285ff366-ad0b-41a8-8f87-64c1c9a71bbe-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 23 17:25:30 crc kubenswrapper[5050]: I1123 17:25:30.250260 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h45tx\" (UniqueName: \"kubernetes.io/projected/285ff366-ad0b-41a8-8f87-64c1c9a71bbe-kube-api-access-h45tx\") on node \"crc\" DevicePath \"\"" Nov 23 17:25:30 crc kubenswrapper[5050]: I1123 17:25:30.407002 5050 generic.go:334] "Generic (PLEG): container finished" podID="285ff366-ad0b-41a8-8f87-64c1c9a71bbe" containerID="7d9da16ba0f4aebff15f7311420fad16b318afe992e5b85e05650b8cdf9afc79" exitCode=0 Nov 23 17:25:30 crc kubenswrapper[5050]: I1123 17:25:30.407077 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-mk6lg" event={"ID":"285ff366-ad0b-41a8-8f87-64c1c9a71bbe","Type":"ContainerDied","Data":"7d9da16ba0f4aebff15f7311420fad16b318afe992e5b85e05650b8cdf9afc79"} Nov 23 17:25:30 crc kubenswrapper[5050]: I1123 17:25:30.407127 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-mk6lg" Nov 23 17:25:30 crc kubenswrapper[5050]: I1123 17:25:30.407177 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-mk6lg" event={"ID":"285ff366-ad0b-41a8-8f87-64c1c9a71bbe","Type":"ContainerDied","Data":"6dccc7b86eecd0ad781525720c991a80e5e8e53303f55d81f7514dc96222439e"} Nov 23 17:25:30 crc kubenswrapper[5050]: I1123 17:25:30.407215 5050 scope.go:117] "RemoveContainer" containerID="7d9da16ba0f4aebff15f7311420fad16b318afe992e5b85e05650b8cdf9afc79" Nov 23 17:25:31 crc kubenswrapper[5050]: I1123 17:25:31.063197 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-f8fxx" Nov 23 17:25:31 crc kubenswrapper[5050]: I1123 17:25:31.068644 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-f8fxx" Nov 23 17:25:31 crc kubenswrapper[5050]: I1123 17:25:31.143355 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-f8fxx" Nov 23 17:25:31 crc kubenswrapper[5050]: I1123 17:25:31.158008 5050 scope.go:117] "RemoveContainer" containerID="b3b8299c3bda8c1a9f50a71456d7697d09bf9abb5f2386962fca941cc8b60008" Nov 23 17:25:31 crc kubenswrapper[5050]: I1123 17:25:31.165544 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-mk6lg"] Nov 23 17:25:31 crc kubenswrapper[5050]: I1123 17:25:31.181093 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-mk6lg"] Nov 23 17:25:31 crc kubenswrapper[5050]: I1123 17:25:31.195599 5050 scope.go:117] "RemoveContainer" containerID="1fcc19b02ee6f891837704c75f613d736d4aafab219154f5681e693590bafbc4" Nov 23 17:25:31 crc kubenswrapper[5050]: I1123 17:25:31.234374 5050 scope.go:117] "RemoveContainer" 
containerID="7d9da16ba0f4aebff15f7311420fad16b318afe992e5b85e05650b8cdf9afc79" Nov 23 17:25:31 crc kubenswrapper[5050]: E1123 17:25:31.235090 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7d9da16ba0f4aebff15f7311420fad16b318afe992e5b85e05650b8cdf9afc79\": container with ID starting with 7d9da16ba0f4aebff15f7311420fad16b318afe992e5b85e05650b8cdf9afc79 not found: ID does not exist" containerID="7d9da16ba0f4aebff15f7311420fad16b318afe992e5b85e05650b8cdf9afc79" Nov 23 17:25:31 crc kubenswrapper[5050]: I1123 17:25:31.235160 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7d9da16ba0f4aebff15f7311420fad16b318afe992e5b85e05650b8cdf9afc79"} err="failed to get container status \"7d9da16ba0f4aebff15f7311420fad16b318afe992e5b85e05650b8cdf9afc79\": rpc error: code = NotFound desc = could not find container \"7d9da16ba0f4aebff15f7311420fad16b318afe992e5b85e05650b8cdf9afc79\": container with ID starting with 7d9da16ba0f4aebff15f7311420fad16b318afe992e5b85e05650b8cdf9afc79 not found: ID does not exist" Nov 23 17:25:31 crc kubenswrapper[5050]: I1123 17:25:31.235205 5050 scope.go:117] "RemoveContainer" containerID="b3b8299c3bda8c1a9f50a71456d7697d09bf9abb5f2386962fca941cc8b60008" Nov 23 17:25:31 crc kubenswrapper[5050]: E1123 17:25:31.235787 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b3b8299c3bda8c1a9f50a71456d7697d09bf9abb5f2386962fca941cc8b60008\": container with ID starting with b3b8299c3bda8c1a9f50a71456d7697d09bf9abb5f2386962fca941cc8b60008 not found: ID does not exist" containerID="b3b8299c3bda8c1a9f50a71456d7697d09bf9abb5f2386962fca941cc8b60008" Nov 23 17:25:31 crc kubenswrapper[5050]: I1123 17:25:31.235841 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b3b8299c3bda8c1a9f50a71456d7697d09bf9abb5f2386962fca941cc8b60008"} err="failed to get container status \"b3b8299c3bda8c1a9f50a71456d7697d09bf9abb5f2386962fca941cc8b60008\": rpc error: code = NotFound desc = could not find container \"b3b8299c3bda8c1a9f50a71456d7697d09bf9abb5f2386962fca941cc8b60008\": container with ID starting with b3b8299c3bda8c1a9f50a71456d7697d09bf9abb5f2386962fca941cc8b60008 not found: ID does not exist" Nov 23 17:25:31 crc kubenswrapper[5050]: I1123 17:25:31.235891 5050 scope.go:117] "RemoveContainer" containerID="1fcc19b02ee6f891837704c75f613d736d4aafab219154f5681e693590bafbc4" Nov 23 17:25:31 crc kubenswrapper[5050]: E1123 17:25:31.236364 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1fcc19b02ee6f891837704c75f613d736d4aafab219154f5681e693590bafbc4\": container with ID starting with 1fcc19b02ee6f891837704c75f613d736d4aafab219154f5681e693590bafbc4 not found: ID does not exist" containerID="1fcc19b02ee6f891837704c75f613d736d4aafab219154f5681e693590bafbc4" Nov 23 17:25:31 crc kubenswrapper[5050]: I1123 17:25:31.236405 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1fcc19b02ee6f891837704c75f613d736d4aafab219154f5681e693590bafbc4"} err="failed to get container status \"1fcc19b02ee6f891837704c75f613d736d4aafab219154f5681e693590bafbc4\": rpc error: code = NotFound desc = could not find container \"1fcc19b02ee6f891837704c75f613d736d4aafab219154f5681e693590bafbc4\": container with ID starting with 
Nov 23 17:25:31 crc kubenswrapper[5050]: I1123 17:25:31.236405 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1fcc19b02ee6f891837704c75f613d736d4aafab219154f5681e693590bafbc4"} err="failed to get container status \"1fcc19b02ee6f891837704c75f613d736d4aafab219154f5681e693590bafbc4\": rpc error: code = NotFound desc = could not find container \"1fcc19b02ee6f891837704c75f613d736d4aafab219154f5681e693590bafbc4\": container with ID starting with 1fcc19b02ee6f891837704c75f613d736d4aafab219154f5681e693590bafbc4 not found: ID does not exist"
Nov 23 17:25:31 crc kubenswrapper[5050]: I1123 17:25:31.497589 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-f8fxx"
Nov 23 17:25:31 crc kubenswrapper[5050]: I1123 17:25:31.587403 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="285ff366-ad0b-41a8-8f87-64c1c9a71bbe" path="/var/lib/kubelet/pods/285ff366-ad0b-41a8-8f87-64c1c9a71bbe/volumes"
Nov 23 17:25:32 crc kubenswrapper[5050]: I1123 17:25:32.265134 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-f8fxx"]
Nov 23 17:25:33 crc kubenswrapper[5050]: I1123 17:25:33.451833 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-f8fxx" podUID="075753a2-2d83-4707-830a-c00d7beb6172" containerName="registry-server" containerID="cri-o://f5acbf4dbe00e83f34cbf9a559282bcc36a339e080e58d5ff0ddf000fa570668" gracePeriod=2
Nov 23 17:25:34 crc kubenswrapper[5050]: I1123 17:25:34.177329 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-f8fxx"
Nov 23 17:25:34 crc kubenswrapper[5050]: I1123 17:25:34.191142 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/075753a2-2d83-4707-830a-c00d7beb6172-catalog-content\") pod \"075753a2-2d83-4707-830a-c00d7beb6172\" (UID: \"075753a2-2d83-4707-830a-c00d7beb6172\") "
Nov 23 17:25:34 crc kubenswrapper[5050]: I1123 17:25:34.191326 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pqbbv\" (UniqueName: \"kubernetes.io/projected/075753a2-2d83-4707-830a-c00d7beb6172-kube-api-access-pqbbv\") pod \"075753a2-2d83-4707-830a-c00d7beb6172\" (UID: \"075753a2-2d83-4707-830a-c00d7beb6172\") "
Nov 23 17:25:34 crc kubenswrapper[5050]: I1123 17:25:34.191396 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/075753a2-2d83-4707-830a-c00d7beb6172-utilities\") pod \"075753a2-2d83-4707-830a-c00d7beb6172\" (UID: \"075753a2-2d83-4707-830a-c00d7beb6172\") "
Nov 23 17:25:34 crc kubenswrapper[5050]: I1123 17:25:34.192257 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/075753a2-2d83-4707-830a-c00d7beb6172-utilities" (OuterVolumeSpecName: "utilities") pod "075753a2-2d83-4707-830a-c00d7beb6172" (UID: "075753a2-2d83-4707-830a-c00d7beb6172"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 23 17:25:34 crc kubenswrapper[5050]: I1123 17:25:34.199809 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/075753a2-2d83-4707-830a-c00d7beb6172-kube-api-access-pqbbv" (OuterVolumeSpecName: "kube-api-access-pqbbv") pod "075753a2-2d83-4707-830a-c00d7beb6172" (UID: "075753a2-2d83-4707-830a-c00d7beb6172"). InnerVolumeSpecName "kube-api-access-pqbbv". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 17:25:34 crc kubenswrapper[5050]: I1123 17:25:34.231637 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/075753a2-2d83-4707-830a-c00d7beb6172-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "075753a2-2d83-4707-830a-c00d7beb6172" (UID: "075753a2-2d83-4707-830a-c00d7beb6172"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 17:25:34 crc kubenswrapper[5050]: I1123 17:25:34.296279 5050 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/075753a2-2d83-4707-830a-c00d7beb6172-utilities\") on node \"crc\" DevicePath \"\"" Nov 23 17:25:34 crc kubenswrapper[5050]: I1123 17:25:34.296344 5050 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/075753a2-2d83-4707-830a-c00d7beb6172-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 23 17:25:34 crc kubenswrapper[5050]: I1123 17:25:34.296363 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pqbbv\" (UniqueName: \"kubernetes.io/projected/075753a2-2d83-4707-830a-c00d7beb6172-kube-api-access-pqbbv\") on node \"crc\" DevicePath \"\"" Nov 23 17:25:34 crc kubenswrapper[5050]: I1123 17:25:34.473576 5050 generic.go:334] "Generic (PLEG): container finished" podID="075753a2-2d83-4707-830a-c00d7beb6172" containerID="f5acbf4dbe00e83f34cbf9a559282bcc36a339e080e58d5ff0ddf000fa570668" exitCode=0 Nov 23 17:25:34 crc kubenswrapper[5050]: I1123 17:25:34.473710 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-f8fxx" Nov 23 17:25:34 crc kubenswrapper[5050]: I1123 17:25:34.473776 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-f8fxx" event={"ID":"075753a2-2d83-4707-830a-c00d7beb6172","Type":"ContainerDied","Data":"f5acbf4dbe00e83f34cbf9a559282bcc36a339e080e58d5ff0ddf000fa570668"} Nov 23 17:25:34 crc kubenswrapper[5050]: I1123 17:25:34.473998 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-f8fxx" event={"ID":"075753a2-2d83-4707-830a-c00d7beb6172","Type":"ContainerDied","Data":"cee82a4475565a4beaa2146f13bafbe4db82be678b354d4b703a83906ebfc142"} Nov 23 17:25:34 crc kubenswrapper[5050]: I1123 17:25:34.474032 5050 scope.go:117] "RemoveContainer" containerID="f5acbf4dbe00e83f34cbf9a559282bcc36a339e080e58d5ff0ddf000fa570668" Nov 23 17:25:34 crc kubenswrapper[5050]: I1123 17:25:34.541273 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-f8fxx"] Nov 23 17:25:34 crc kubenswrapper[5050]: I1123 17:25:34.542258 5050 scope.go:117] "RemoveContainer" containerID="b8a60a53795c1da427d637153796eae167c072898460ae89c5866d2d112808e5" Nov 23 17:25:34 crc kubenswrapper[5050]: I1123 17:25:34.554034 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-f8fxx"] Nov 23 17:25:34 crc kubenswrapper[5050]: I1123 17:25:34.586087 5050 scope.go:117] "RemoveContainer" containerID="482caefefb08d27401025579ed55f8bbbb627cdf2ea667116a0bde1d944ec39e" Nov 23 17:25:34 crc kubenswrapper[5050]: I1123 17:25:34.644655 5050 scope.go:117] "RemoveContainer" containerID="f5acbf4dbe00e83f34cbf9a559282bcc36a339e080e58d5ff0ddf000fa570668" Nov 23 17:25:34 crc kubenswrapper[5050]: E1123 17:25:34.645275 5050 log.go:32] 
"ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f5acbf4dbe00e83f34cbf9a559282bcc36a339e080e58d5ff0ddf000fa570668\": container with ID starting with f5acbf4dbe00e83f34cbf9a559282bcc36a339e080e58d5ff0ddf000fa570668 not found: ID does not exist" containerID="f5acbf4dbe00e83f34cbf9a559282bcc36a339e080e58d5ff0ddf000fa570668" Nov 23 17:25:34 crc kubenswrapper[5050]: I1123 17:25:34.645350 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f5acbf4dbe00e83f34cbf9a559282bcc36a339e080e58d5ff0ddf000fa570668"} err="failed to get container status \"f5acbf4dbe00e83f34cbf9a559282bcc36a339e080e58d5ff0ddf000fa570668\": rpc error: code = NotFound desc = could not find container \"f5acbf4dbe00e83f34cbf9a559282bcc36a339e080e58d5ff0ddf000fa570668\": container with ID starting with f5acbf4dbe00e83f34cbf9a559282bcc36a339e080e58d5ff0ddf000fa570668 not found: ID does not exist" Nov 23 17:25:34 crc kubenswrapper[5050]: I1123 17:25:34.645384 5050 scope.go:117] "RemoveContainer" containerID="b8a60a53795c1da427d637153796eae167c072898460ae89c5866d2d112808e5" Nov 23 17:25:34 crc kubenswrapper[5050]: E1123 17:25:34.645915 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b8a60a53795c1da427d637153796eae167c072898460ae89c5866d2d112808e5\": container with ID starting with b8a60a53795c1da427d637153796eae167c072898460ae89c5866d2d112808e5 not found: ID does not exist" containerID="b8a60a53795c1da427d637153796eae167c072898460ae89c5866d2d112808e5" Nov 23 17:25:34 crc kubenswrapper[5050]: I1123 17:25:34.646105 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b8a60a53795c1da427d637153796eae167c072898460ae89c5866d2d112808e5"} err="failed to get container status \"b8a60a53795c1da427d637153796eae167c072898460ae89c5866d2d112808e5\": rpc error: code = NotFound desc = could not find container \"b8a60a53795c1da427d637153796eae167c072898460ae89c5866d2d112808e5\": container with ID starting with b8a60a53795c1da427d637153796eae167c072898460ae89c5866d2d112808e5 not found: ID does not exist" Nov 23 17:25:34 crc kubenswrapper[5050]: I1123 17:25:34.646249 5050 scope.go:117] "RemoveContainer" containerID="482caefefb08d27401025579ed55f8bbbb627cdf2ea667116a0bde1d944ec39e" Nov 23 17:25:34 crc kubenswrapper[5050]: E1123 17:25:34.646775 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"482caefefb08d27401025579ed55f8bbbb627cdf2ea667116a0bde1d944ec39e\": container with ID starting with 482caefefb08d27401025579ed55f8bbbb627cdf2ea667116a0bde1d944ec39e not found: ID does not exist" containerID="482caefefb08d27401025579ed55f8bbbb627cdf2ea667116a0bde1d944ec39e" Nov 23 17:25:34 crc kubenswrapper[5050]: I1123 17:25:34.646960 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"482caefefb08d27401025579ed55f8bbbb627cdf2ea667116a0bde1d944ec39e"} err="failed to get container status \"482caefefb08d27401025579ed55f8bbbb627cdf2ea667116a0bde1d944ec39e\": rpc error: code = NotFound desc = could not find container \"482caefefb08d27401025579ed55f8bbbb627cdf2ea667116a0bde1d944ec39e\": container with ID starting with 482caefefb08d27401025579ed55f8bbbb627cdf2ea667116a0bde1d944ec39e not found: ID does not exist" Nov 23 17:25:35 crc kubenswrapper[5050]: I1123 17:25:35.558271 5050 scope.go:117] "RemoveContainer" 
containerID="2bfde5e08e12dfda92d97543906fc7b88ff567a0a0bd2d168349eaebb48d00e3" Nov 23 17:25:35 crc kubenswrapper[5050]: E1123 17:25:35.559260 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 17:25:35 crc kubenswrapper[5050]: I1123 17:25:35.573209 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="075753a2-2d83-4707-830a-c00d7beb6172" path="/var/lib/kubelet/pods/075753a2-2d83-4707-830a-c00d7beb6172/volumes" Nov 23 17:25:45 crc kubenswrapper[5050]: E1123 17:25:45.065227 5050 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.32:34240->38.102.83.32:33067: write tcp 38.102.83.32:34240->38.102.83.32:33067: write: connection reset by peer Nov 23 17:25:50 crc kubenswrapper[5050]: I1123 17:25:50.549138 5050 scope.go:117] "RemoveContainer" containerID="2bfde5e08e12dfda92d97543906fc7b88ff567a0a0bd2d168349eaebb48d00e3" Nov 23 17:25:50 crc kubenswrapper[5050]: E1123 17:25:50.550574 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 17:26:01 crc kubenswrapper[5050]: I1123 17:26:01.548563 5050 scope.go:117] "RemoveContainer" containerID="2bfde5e08e12dfda92d97543906fc7b88ff567a0a0bd2d168349eaebb48d00e3" Nov 23 17:26:01 crc kubenswrapper[5050]: E1123 17:26:01.549483 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 17:26:12 crc kubenswrapper[5050]: I1123 17:26:12.549880 5050 scope.go:117] "RemoveContainer" containerID="2bfde5e08e12dfda92d97543906fc7b88ff567a0a0bd2d168349eaebb48d00e3" Nov 23 17:26:12 crc kubenswrapper[5050]: E1123 17:26:12.550915 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 17:26:26 crc kubenswrapper[5050]: I1123 17:26:26.549664 5050 scope.go:117] "RemoveContainer" containerID="2bfde5e08e12dfda92d97543906fc7b88ff567a0a0bd2d168349eaebb48d00e3" Nov 23 17:26:26 crc kubenswrapper[5050]: E1123 17:26:26.550792 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 17:26:40 crc kubenswrapper[5050]: I1123 17:26:40.549236 5050 scope.go:117] "RemoveContainer" containerID="2bfde5e08e12dfda92d97543906fc7b88ff567a0a0bd2d168349eaebb48d00e3" Nov 23 17:26:40 crc kubenswrapper[5050]: E1123 17:26:40.550665 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 17:26:52 crc kubenswrapper[5050]: I1123 17:26:52.548729 5050 scope.go:117] "RemoveContainer" containerID="2bfde5e08e12dfda92d97543906fc7b88ff567a0a0bd2d168349eaebb48d00e3" Nov 23 17:26:52 crc kubenswrapper[5050]: E1123 17:26:52.550122 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 17:27:05 crc kubenswrapper[5050]: I1123 17:27:05.431793 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-497tw/must-gather-n292h"] Nov 23 17:27:05 crc kubenswrapper[5050]: E1123 17:27:05.433143 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="285ff366-ad0b-41a8-8f87-64c1c9a71bbe" containerName="extract-utilities" Nov 23 17:27:05 crc kubenswrapper[5050]: I1123 17:27:05.433162 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="285ff366-ad0b-41a8-8f87-64c1c9a71bbe" containerName="extract-utilities" Nov 23 17:27:05 crc kubenswrapper[5050]: E1123 17:27:05.433173 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="285ff366-ad0b-41a8-8f87-64c1c9a71bbe" containerName="extract-content" Nov 23 17:27:05 crc kubenswrapper[5050]: I1123 17:27:05.433183 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="285ff366-ad0b-41a8-8f87-64c1c9a71bbe" containerName="extract-content" Nov 23 17:27:05 crc kubenswrapper[5050]: E1123 17:27:05.433225 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="075753a2-2d83-4707-830a-c00d7beb6172" containerName="extract-utilities" Nov 23 17:27:05 crc kubenswrapper[5050]: I1123 17:27:05.433236 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="075753a2-2d83-4707-830a-c00d7beb6172" containerName="extract-utilities" Nov 23 17:27:05 crc kubenswrapper[5050]: E1123 17:27:05.433249 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="285ff366-ad0b-41a8-8f87-64c1c9a71bbe" containerName="registry-server" Nov 23 17:27:05 crc kubenswrapper[5050]: I1123 17:27:05.433255 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="285ff366-ad0b-41a8-8f87-64c1c9a71bbe" containerName="registry-server" Nov 23 17:27:05 crc kubenswrapper[5050]: E1123 17:27:05.433267 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="075753a2-2d83-4707-830a-c00d7beb6172" 
containerName="extract-content" Nov 23 17:27:05 crc kubenswrapper[5050]: I1123 17:27:05.433275 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="075753a2-2d83-4707-830a-c00d7beb6172" containerName="extract-content" Nov 23 17:27:05 crc kubenswrapper[5050]: E1123 17:27:05.433287 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="075753a2-2d83-4707-830a-c00d7beb6172" containerName="registry-server" Nov 23 17:27:05 crc kubenswrapper[5050]: I1123 17:27:05.433315 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="075753a2-2d83-4707-830a-c00d7beb6172" containerName="registry-server" Nov 23 17:27:05 crc kubenswrapper[5050]: I1123 17:27:05.433558 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="285ff366-ad0b-41a8-8f87-64c1c9a71bbe" containerName="registry-server" Nov 23 17:27:05 crc kubenswrapper[5050]: I1123 17:27:05.433576 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="075753a2-2d83-4707-830a-c00d7beb6172" containerName="registry-server" Nov 23 17:27:05 crc kubenswrapper[5050]: I1123 17:27:05.435083 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-497tw/must-gather-n292h" Nov 23 17:27:05 crc kubenswrapper[5050]: I1123 17:27:05.437870 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-497tw"/"openshift-service-ca.crt" Nov 23 17:27:05 crc kubenswrapper[5050]: I1123 17:27:05.440011 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-497tw"/"kube-root-ca.crt" Nov 23 17:27:05 crc kubenswrapper[5050]: I1123 17:27:05.440429 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-497tw"/"default-dockercfg-6rzgs" Nov 23 17:27:05 crc kubenswrapper[5050]: I1123 17:27:05.594941 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fbfkl\" (UniqueName: \"kubernetes.io/projected/b8fcd70b-4806-4f9f-9499-61e843f9918f-kube-api-access-fbfkl\") pod \"must-gather-n292h\" (UID: \"b8fcd70b-4806-4f9f-9499-61e843f9918f\") " pod="openshift-must-gather-497tw/must-gather-n292h" Nov 23 17:27:05 crc kubenswrapper[5050]: I1123 17:27:05.595038 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/b8fcd70b-4806-4f9f-9499-61e843f9918f-must-gather-output\") pod \"must-gather-n292h\" (UID: \"b8fcd70b-4806-4f9f-9499-61e843f9918f\") " pod="openshift-must-gather-497tw/must-gather-n292h" Nov 23 17:27:05 crc kubenswrapper[5050]: I1123 17:27:05.625576 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-497tw/must-gather-n292h"] Nov 23 17:27:05 crc kubenswrapper[5050]: I1123 17:27:05.697654 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fbfkl\" (UniqueName: \"kubernetes.io/projected/b8fcd70b-4806-4f9f-9499-61e843f9918f-kube-api-access-fbfkl\") pod \"must-gather-n292h\" (UID: \"b8fcd70b-4806-4f9f-9499-61e843f9918f\") " pod="openshift-must-gather-497tw/must-gather-n292h" Nov 23 17:27:05 crc kubenswrapper[5050]: I1123 17:27:05.697785 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/b8fcd70b-4806-4f9f-9499-61e843f9918f-must-gather-output\") pod \"must-gather-n292h\" (UID: \"b8fcd70b-4806-4f9f-9499-61e843f9918f\") " 
pod="openshift-must-gather-497tw/must-gather-n292h" Nov 23 17:27:05 crc kubenswrapper[5050]: I1123 17:27:05.703148 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/b8fcd70b-4806-4f9f-9499-61e843f9918f-must-gather-output\") pod \"must-gather-n292h\" (UID: \"b8fcd70b-4806-4f9f-9499-61e843f9918f\") " pod="openshift-must-gather-497tw/must-gather-n292h" Nov 23 17:27:05 crc kubenswrapper[5050]: I1123 17:27:05.724907 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fbfkl\" (UniqueName: \"kubernetes.io/projected/b8fcd70b-4806-4f9f-9499-61e843f9918f-kube-api-access-fbfkl\") pod \"must-gather-n292h\" (UID: \"b8fcd70b-4806-4f9f-9499-61e843f9918f\") " pod="openshift-must-gather-497tw/must-gather-n292h" Nov 23 17:27:05 crc kubenswrapper[5050]: I1123 17:27:05.759879 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-497tw/must-gather-n292h" Nov 23 17:27:06 crc kubenswrapper[5050]: I1123 17:27:06.397736 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-497tw/must-gather-n292h"] Nov 23 17:27:06 crc kubenswrapper[5050]: I1123 17:27:06.398272 5050 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 23 17:27:06 crc kubenswrapper[5050]: I1123 17:27:06.549767 5050 scope.go:117] "RemoveContainer" containerID="2bfde5e08e12dfda92d97543906fc7b88ff567a0a0bd2d168349eaebb48d00e3" Nov 23 17:27:06 crc kubenswrapper[5050]: E1123 17:27:06.550101 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 17:27:06 crc kubenswrapper[5050]: I1123 17:27:06.851740 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-497tw/must-gather-n292h" event={"ID":"b8fcd70b-4806-4f9f-9499-61e843f9918f","Type":"ContainerStarted","Data":"01dd35ba74985642c105d5a52d51cf784940b9844e0849464fb1e99791ca59f8"} Nov 23 17:27:15 crc kubenswrapper[5050]: I1123 17:27:15.990334 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-497tw/must-gather-n292h" event={"ID":"b8fcd70b-4806-4f9f-9499-61e843f9918f","Type":"ContainerStarted","Data":"51eb186ddb748786a91b4a38286b2942d8ba90f12328e1998757355713643df5"} Nov 23 17:27:15 crc kubenswrapper[5050]: I1123 17:27:15.991297 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-497tw/must-gather-n292h" event={"ID":"b8fcd70b-4806-4f9f-9499-61e843f9918f","Type":"ContainerStarted","Data":"8386c34f4e8f764a9d7ba00e903042d23f9c5efff6c216e48bbbed78eeecd3ae"} Nov 23 17:27:16 crc kubenswrapper[5050]: I1123 17:27:16.017784 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-497tw/must-gather-n292h" podStartSLOduration=2.487932851 podStartE2EDuration="11.01775645s" podCreationTimestamp="2025-11-23 17:27:05 +0000 UTC" firstStartedPulling="2025-11-23 17:27:06.397941981 +0000 UTC m=+9921.564938466" lastFinishedPulling="2025-11-23 17:27:14.92776558 +0000 UTC m=+9930.094762065" observedRunningTime="2025-11-23 17:27:16.017575624 +0000 UTC m=+9931.184572109" 
watchObservedRunningTime="2025-11-23 17:27:16.01775645 +0000 UTC m=+9931.184752935" Nov 23 17:27:20 crc kubenswrapper[5050]: I1123 17:27:20.549369 5050 scope.go:117] "RemoveContainer" containerID="2bfde5e08e12dfda92d97543906fc7b88ff567a0a0bd2d168349eaebb48d00e3" Nov 23 17:27:20 crc kubenswrapper[5050]: E1123 17:27:20.550881 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 17:27:20 crc kubenswrapper[5050]: I1123 17:27:20.732377 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-497tw/crc-debug-9xfgn"] Nov 23 17:27:20 crc kubenswrapper[5050]: I1123 17:27:20.738630 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-497tw/crc-debug-9xfgn" Nov 23 17:27:20 crc kubenswrapper[5050]: I1123 17:27:20.914239 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sl58g\" (UniqueName: \"kubernetes.io/projected/358ff462-c0bb-48d7-9b63-de089e54dc83-kube-api-access-sl58g\") pod \"crc-debug-9xfgn\" (UID: \"358ff462-c0bb-48d7-9b63-de089e54dc83\") " pod="openshift-must-gather-497tw/crc-debug-9xfgn" Nov 23 17:27:20 crc kubenswrapper[5050]: I1123 17:27:20.914936 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/358ff462-c0bb-48d7-9b63-de089e54dc83-host\") pod \"crc-debug-9xfgn\" (UID: \"358ff462-c0bb-48d7-9b63-de089e54dc83\") " pod="openshift-must-gather-497tw/crc-debug-9xfgn" Nov 23 17:27:21 crc kubenswrapper[5050]: I1123 17:27:21.017746 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sl58g\" (UniqueName: \"kubernetes.io/projected/358ff462-c0bb-48d7-9b63-de089e54dc83-kube-api-access-sl58g\") pod \"crc-debug-9xfgn\" (UID: \"358ff462-c0bb-48d7-9b63-de089e54dc83\") " pod="openshift-must-gather-497tw/crc-debug-9xfgn" Nov 23 17:27:21 crc kubenswrapper[5050]: I1123 17:27:21.017822 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/358ff462-c0bb-48d7-9b63-de089e54dc83-host\") pod \"crc-debug-9xfgn\" (UID: \"358ff462-c0bb-48d7-9b63-de089e54dc83\") " pod="openshift-must-gather-497tw/crc-debug-9xfgn" Nov 23 17:27:21 crc kubenswrapper[5050]: I1123 17:27:21.018007 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/358ff462-c0bb-48d7-9b63-de089e54dc83-host\") pod \"crc-debug-9xfgn\" (UID: \"358ff462-c0bb-48d7-9b63-de089e54dc83\") " pod="openshift-must-gather-497tw/crc-debug-9xfgn" Nov 23 17:27:21 crc kubenswrapper[5050]: I1123 17:27:21.044577 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sl58g\" (UniqueName: \"kubernetes.io/projected/358ff462-c0bb-48d7-9b63-de089e54dc83-kube-api-access-sl58g\") pod \"crc-debug-9xfgn\" (UID: \"358ff462-c0bb-48d7-9b63-de089e54dc83\") " pod="openshift-must-gather-497tw/crc-debug-9xfgn" Nov 23 17:27:21 crc kubenswrapper[5050]: I1123 17:27:21.064341 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-497tw/crc-debug-9xfgn" Nov 23 17:27:21 crc kubenswrapper[5050]: I1123 17:27:21.395173 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-497tw/crc-debug-9xfgn" event={"ID":"358ff462-c0bb-48d7-9b63-de089e54dc83","Type":"ContainerStarted","Data":"820ab2865c85946a073b5588e960ea1b666bbd863c88ddf68cdd073b72a0455b"} Nov 23 17:27:31 crc kubenswrapper[5050]: I1123 17:27:31.555524 5050 scope.go:117] "RemoveContainer" containerID="2bfde5e08e12dfda92d97543906fc7b88ff567a0a0bd2d168349eaebb48d00e3" Nov 23 17:27:31 crc kubenswrapper[5050]: E1123 17:27:31.556405 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 17:27:35 crc kubenswrapper[5050]: I1123 17:27:35.570024 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-497tw/crc-debug-9xfgn" event={"ID":"358ff462-c0bb-48d7-9b63-de089e54dc83","Type":"ContainerStarted","Data":"238d1b8b4f31551a8b68ad0c7828b88c68f1864610db3145ccfd4ef50792f4ca"} Nov 23 17:27:35 crc kubenswrapper[5050]: I1123 17:27:35.602422 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-497tw/crc-debug-9xfgn" podStartSLOduration=1.803859149 podStartE2EDuration="15.602389477s" podCreationTimestamp="2025-11-23 17:27:20 +0000 UTC" firstStartedPulling="2025-11-23 17:27:21.117270971 +0000 UTC m=+9936.284267466" lastFinishedPulling="2025-11-23 17:27:34.915801309 +0000 UTC m=+9950.082797794" observedRunningTime="2025-11-23 17:27:35.589838583 +0000 UTC m=+9950.756835068" watchObservedRunningTime="2025-11-23 17:27:35.602389477 +0000 UTC m=+9950.769385972" Nov 23 17:27:44 crc kubenswrapper[5050]: I1123 17:27:44.549353 5050 scope.go:117] "RemoveContainer" containerID="2bfde5e08e12dfda92d97543906fc7b88ff567a0a0bd2d168349eaebb48d00e3" Nov 23 17:27:44 crc kubenswrapper[5050]: E1123 17:27:44.550685 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 17:27:58 crc kubenswrapper[5050]: I1123 17:27:58.549896 5050 scope.go:117] "RemoveContainer" containerID="2bfde5e08e12dfda92d97543906fc7b88ff567a0a0bd2d168349eaebb48d00e3" Nov 23 17:27:58 crc kubenswrapper[5050]: E1123 17:27:58.550763 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 17:27:58 crc kubenswrapper[5050]: I1123 17:27:58.864185 5050 generic.go:334] "Generic (PLEG): container finished" podID="358ff462-c0bb-48d7-9b63-de089e54dc83" 
containerID="238d1b8b4f31551a8b68ad0c7828b88c68f1864610db3145ccfd4ef50792f4ca" exitCode=0 Nov 23 17:27:58 crc kubenswrapper[5050]: I1123 17:27:58.864236 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-497tw/crc-debug-9xfgn" event={"ID":"358ff462-c0bb-48d7-9b63-de089e54dc83","Type":"ContainerDied","Data":"238d1b8b4f31551a8b68ad0c7828b88c68f1864610db3145ccfd4ef50792f4ca"} Nov 23 17:28:00 crc kubenswrapper[5050]: I1123 17:28:00.040607 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-497tw/crc-debug-9xfgn" Nov 23 17:28:00 crc kubenswrapper[5050]: I1123 17:28:00.085259 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-497tw/crc-debug-9xfgn"] Nov 23 17:28:00 crc kubenswrapper[5050]: I1123 17:28:00.096727 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-497tw/crc-debug-9xfgn"] Nov 23 17:28:00 crc kubenswrapper[5050]: I1123 17:28:00.174514 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sl58g\" (UniqueName: \"kubernetes.io/projected/358ff462-c0bb-48d7-9b63-de089e54dc83-kube-api-access-sl58g\") pod \"358ff462-c0bb-48d7-9b63-de089e54dc83\" (UID: \"358ff462-c0bb-48d7-9b63-de089e54dc83\") " Nov 23 17:28:00 crc kubenswrapper[5050]: I1123 17:28:00.175609 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/358ff462-c0bb-48d7-9b63-de089e54dc83-host" (OuterVolumeSpecName: "host") pod "358ff462-c0bb-48d7-9b63-de089e54dc83" (UID: "358ff462-c0bb-48d7-9b63-de089e54dc83"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 23 17:28:00 crc kubenswrapper[5050]: I1123 17:28:00.175630 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/358ff462-c0bb-48d7-9b63-de089e54dc83-host\") pod \"358ff462-c0bb-48d7-9b63-de089e54dc83\" (UID: \"358ff462-c0bb-48d7-9b63-de089e54dc83\") " Nov 23 17:28:00 crc kubenswrapper[5050]: I1123 17:28:00.176661 5050 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/358ff462-c0bb-48d7-9b63-de089e54dc83-host\") on node \"crc\" DevicePath \"\"" Nov 23 17:28:00 crc kubenswrapper[5050]: I1123 17:28:00.184221 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/358ff462-c0bb-48d7-9b63-de089e54dc83-kube-api-access-sl58g" (OuterVolumeSpecName: "kube-api-access-sl58g") pod "358ff462-c0bb-48d7-9b63-de089e54dc83" (UID: "358ff462-c0bb-48d7-9b63-de089e54dc83"). InnerVolumeSpecName "kube-api-access-sl58g". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 17:28:00 crc kubenswrapper[5050]: I1123 17:28:00.280278 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sl58g\" (UniqueName: \"kubernetes.io/projected/358ff462-c0bb-48d7-9b63-de089e54dc83-kube-api-access-sl58g\") on node \"crc\" DevicePath \"\"" Nov 23 17:28:00 crc kubenswrapper[5050]: I1123 17:28:00.896774 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="820ab2865c85946a073b5588e960ea1b666bbd863c88ddf68cdd073b72a0455b" Nov 23 17:28:00 crc kubenswrapper[5050]: I1123 17:28:00.896858 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-497tw/crc-debug-9xfgn" Nov 23 17:28:01 crc kubenswrapper[5050]: I1123 17:28:01.300764 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-497tw/crc-debug-vhjh2"] Nov 23 17:28:01 crc kubenswrapper[5050]: E1123 17:28:01.302250 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="358ff462-c0bb-48d7-9b63-de089e54dc83" containerName="container-00" Nov 23 17:28:01 crc kubenswrapper[5050]: I1123 17:28:01.302274 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="358ff462-c0bb-48d7-9b63-de089e54dc83" containerName="container-00" Nov 23 17:28:01 crc kubenswrapper[5050]: I1123 17:28:01.302957 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="358ff462-c0bb-48d7-9b63-de089e54dc83" containerName="container-00" Nov 23 17:28:01 crc kubenswrapper[5050]: I1123 17:28:01.304545 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-497tw/crc-debug-vhjh2" Nov 23 17:28:01 crc kubenswrapper[5050]: I1123 17:28:01.413544 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fh4cg\" (UniqueName: \"kubernetes.io/projected/146e7726-c2a1-49ed-90a9-125a646490ba-kube-api-access-fh4cg\") pod \"crc-debug-vhjh2\" (UID: \"146e7726-c2a1-49ed-90a9-125a646490ba\") " pod="openshift-must-gather-497tw/crc-debug-vhjh2" Nov 23 17:28:01 crc kubenswrapper[5050]: I1123 17:28:01.413634 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/146e7726-c2a1-49ed-90a9-125a646490ba-host\") pod \"crc-debug-vhjh2\" (UID: \"146e7726-c2a1-49ed-90a9-125a646490ba\") " pod="openshift-must-gather-497tw/crc-debug-vhjh2" Nov 23 17:28:01 crc kubenswrapper[5050]: I1123 17:28:01.516971 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fh4cg\" (UniqueName: \"kubernetes.io/projected/146e7726-c2a1-49ed-90a9-125a646490ba-kube-api-access-fh4cg\") pod \"crc-debug-vhjh2\" (UID: \"146e7726-c2a1-49ed-90a9-125a646490ba\") " pod="openshift-must-gather-497tw/crc-debug-vhjh2" Nov 23 17:28:01 crc kubenswrapper[5050]: I1123 17:28:01.517054 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/146e7726-c2a1-49ed-90a9-125a646490ba-host\") pod \"crc-debug-vhjh2\" (UID: \"146e7726-c2a1-49ed-90a9-125a646490ba\") " pod="openshift-must-gather-497tw/crc-debug-vhjh2" Nov 23 17:28:01 crc kubenswrapper[5050]: I1123 17:28:01.517232 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/146e7726-c2a1-49ed-90a9-125a646490ba-host\") pod \"crc-debug-vhjh2\" (UID: \"146e7726-c2a1-49ed-90a9-125a646490ba\") " pod="openshift-must-gather-497tw/crc-debug-vhjh2" Nov 23 17:28:01 crc kubenswrapper[5050]: I1123 17:28:01.545489 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fh4cg\" (UniqueName: \"kubernetes.io/projected/146e7726-c2a1-49ed-90a9-125a646490ba-kube-api-access-fh4cg\") pod \"crc-debug-vhjh2\" (UID: \"146e7726-c2a1-49ed-90a9-125a646490ba\") " pod="openshift-must-gather-497tw/crc-debug-vhjh2" Nov 23 17:28:01 crc kubenswrapper[5050]: I1123 17:28:01.560799 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="358ff462-c0bb-48d7-9b63-de089e54dc83" 
path="/var/lib/kubelet/pods/358ff462-c0bb-48d7-9b63-de089e54dc83/volumes" Nov 23 17:28:01 crc kubenswrapper[5050]: I1123 17:28:01.639572 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-497tw/crc-debug-vhjh2" Nov 23 17:28:01 crc kubenswrapper[5050]: I1123 17:28:01.908637 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-497tw/crc-debug-vhjh2" event={"ID":"146e7726-c2a1-49ed-90a9-125a646490ba","Type":"ContainerStarted","Data":"248af1b09673986fa2ab68f1aad1299408953ebf67c679f1f51dbd38c8dabb89"} Nov 23 17:28:02 crc kubenswrapper[5050]: I1123 17:28:02.925714 5050 generic.go:334] "Generic (PLEG): container finished" podID="146e7726-c2a1-49ed-90a9-125a646490ba" containerID="4997ffa4c65ccbe3b178cc39e53f0b6c4c2c4b98ec6f4706ceb271be93ab7417" exitCode=1 Nov 23 17:28:02 crc kubenswrapper[5050]: I1123 17:28:02.926584 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-497tw/crc-debug-vhjh2" event={"ID":"146e7726-c2a1-49ed-90a9-125a646490ba","Type":"ContainerDied","Data":"4997ffa4c65ccbe3b178cc39e53f0b6c4c2c4b98ec6f4706ceb271be93ab7417"} Nov 23 17:28:02 crc kubenswrapper[5050]: I1123 17:28:02.986383 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-497tw/crc-debug-vhjh2"] Nov 23 17:28:02 crc kubenswrapper[5050]: I1123 17:28:02.998617 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-497tw/crc-debug-vhjh2"] Nov 23 17:28:04 crc kubenswrapper[5050]: I1123 17:28:04.055151 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-497tw/crc-debug-vhjh2" Nov 23 17:28:04 crc kubenswrapper[5050]: I1123 17:28:04.187149 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fh4cg\" (UniqueName: \"kubernetes.io/projected/146e7726-c2a1-49ed-90a9-125a646490ba-kube-api-access-fh4cg\") pod \"146e7726-c2a1-49ed-90a9-125a646490ba\" (UID: \"146e7726-c2a1-49ed-90a9-125a646490ba\") " Nov 23 17:28:04 crc kubenswrapper[5050]: I1123 17:28:04.187551 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/146e7726-c2a1-49ed-90a9-125a646490ba-host\") pod \"146e7726-c2a1-49ed-90a9-125a646490ba\" (UID: \"146e7726-c2a1-49ed-90a9-125a646490ba\") " Nov 23 17:28:04 crc kubenswrapper[5050]: I1123 17:28:04.187618 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/146e7726-c2a1-49ed-90a9-125a646490ba-host" (OuterVolumeSpecName: "host") pod "146e7726-c2a1-49ed-90a9-125a646490ba" (UID: "146e7726-c2a1-49ed-90a9-125a646490ba"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 23 17:28:04 crc kubenswrapper[5050]: I1123 17:28:04.188245 5050 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/146e7726-c2a1-49ed-90a9-125a646490ba-host\") on node \"crc\" DevicePath \"\"" Nov 23 17:28:04 crc kubenswrapper[5050]: I1123 17:28:04.194909 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/146e7726-c2a1-49ed-90a9-125a646490ba-kube-api-access-fh4cg" (OuterVolumeSpecName: "kube-api-access-fh4cg") pod "146e7726-c2a1-49ed-90a9-125a646490ba" (UID: "146e7726-c2a1-49ed-90a9-125a646490ba"). InnerVolumeSpecName "kube-api-access-fh4cg". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 17:28:04 crc kubenswrapper[5050]: I1123 17:28:04.290980 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fh4cg\" (UniqueName: \"kubernetes.io/projected/146e7726-c2a1-49ed-90a9-125a646490ba-kube-api-access-fh4cg\") on node \"crc\" DevicePath \"\"" Nov 23 17:28:04 crc kubenswrapper[5050]: I1123 17:28:04.984153 5050 scope.go:117] "RemoveContainer" containerID="4997ffa4c65ccbe3b178cc39e53f0b6c4c2c4b98ec6f4706ceb271be93ab7417" Nov 23 17:28:04 crc kubenswrapper[5050]: I1123 17:28:04.985034 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-497tw/crc-debug-vhjh2" Nov 23 17:28:05 crc kubenswrapper[5050]: I1123 17:28:05.566582 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="146e7726-c2a1-49ed-90a9-125a646490ba" path="/var/lib/kubelet/pods/146e7726-c2a1-49ed-90a9-125a646490ba/volumes" Nov 23 17:28:09 crc kubenswrapper[5050]: I1123 17:28:09.551019 5050 scope.go:117] "RemoveContainer" containerID="2bfde5e08e12dfda92d97543906fc7b88ff567a0a0bd2d168349eaebb48d00e3" Nov 23 17:28:09 crc kubenswrapper[5050]: E1123 17:28:09.552594 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 17:28:22 crc kubenswrapper[5050]: I1123 17:28:22.549769 5050 scope.go:117] "RemoveContainer" containerID="2bfde5e08e12dfda92d97543906fc7b88ff567a0a0bd2d168349eaebb48d00e3" Nov 23 17:28:22 crc kubenswrapper[5050]: E1123 17:28:22.550568 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 17:28:37 crc kubenswrapper[5050]: I1123 17:28:37.550426 5050 scope.go:117] "RemoveContainer" containerID="2bfde5e08e12dfda92d97543906fc7b88ff567a0a0bd2d168349eaebb48d00e3" Nov 23 17:28:38 crc kubenswrapper[5050]: I1123 17:28:38.434850 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" event={"ID":"1d998909-9470-47ef-87e8-b34f0473682f","Type":"ContainerStarted","Data":"170f3900bfb04c6dadaf7839b3a6badf07704122b830e42ebf9f1e980b5ea8f4"} Nov 23 17:30:00 crc kubenswrapper[5050]: I1123 17:30:00.229490 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29398650-4d2vh"] Nov 23 17:30:00 crc kubenswrapper[5050]: E1123 17:30:00.231084 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="146e7726-c2a1-49ed-90a9-125a646490ba" containerName="container-00" Nov 23 17:30:00 crc kubenswrapper[5050]: I1123 17:30:00.231101 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="146e7726-c2a1-49ed-90a9-125a646490ba" containerName="container-00" Nov 23 17:30:00 crc kubenswrapper[5050]: I1123 17:30:00.231396 5050 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="146e7726-c2a1-49ed-90a9-125a646490ba" containerName="container-00" Nov 23 17:30:00 crc kubenswrapper[5050]: I1123 17:30:00.232588 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29398650-4d2vh" Nov 23 17:30:00 crc kubenswrapper[5050]: I1123 17:30:00.235430 5050 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 23 17:30:00 crc kubenswrapper[5050]: I1123 17:30:00.235681 5050 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 23 17:30:00 crc kubenswrapper[5050]: I1123 17:30:00.258959 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29398650-4d2vh"] Nov 23 17:30:00 crc kubenswrapper[5050]: I1123 17:30:00.385666 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9zjkt\" (UniqueName: \"kubernetes.io/projected/4f145c40-55d4-4ef1-b28f-a1e8afe43363-kube-api-access-9zjkt\") pod \"collect-profiles-29398650-4d2vh\" (UID: \"4f145c40-55d4-4ef1-b28f-a1e8afe43363\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29398650-4d2vh" Nov 23 17:30:00 crc kubenswrapper[5050]: I1123 17:30:00.386737 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4f145c40-55d4-4ef1-b28f-a1e8afe43363-config-volume\") pod \"collect-profiles-29398650-4d2vh\" (UID: \"4f145c40-55d4-4ef1-b28f-a1e8afe43363\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29398650-4d2vh" Nov 23 17:30:00 crc kubenswrapper[5050]: I1123 17:30:00.387511 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/4f145c40-55d4-4ef1-b28f-a1e8afe43363-secret-volume\") pod \"collect-profiles-29398650-4d2vh\" (UID: \"4f145c40-55d4-4ef1-b28f-a1e8afe43363\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29398650-4d2vh" Nov 23 17:30:00 crc kubenswrapper[5050]: I1123 17:30:00.490805 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/4f145c40-55d4-4ef1-b28f-a1e8afe43363-secret-volume\") pod \"collect-profiles-29398650-4d2vh\" (UID: \"4f145c40-55d4-4ef1-b28f-a1e8afe43363\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29398650-4d2vh" Nov 23 17:30:00 crc kubenswrapper[5050]: I1123 17:30:00.490925 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9zjkt\" (UniqueName: \"kubernetes.io/projected/4f145c40-55d4-4ef1-b28f-a1e8afe43363-kube-api-access-9zjkt\") pod \"collect-profiles-29398650-4d2vh\" (UID: \"4f145c40-55d4-4ef1-b28f-a1e8afe43363\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29398650-4d2vh" Nov 23 17:30:00 crc kubenswrapper[5050]: I1123 17:30:00.491488 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4f145c40-55d4-4ef1-b28f-a1e8afe43363-config-volume\") pod \"collect-profiles-29398650-4d2vh\" (UID: \"4f145c40-55d4-4ef1-b28f-a1e8afe43363\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29398650-4d2vh" Nov 23 17:30:00 crc kubenswrapper[5050]: I1123 17:30:00.492519 
5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4f145c40-55d4-4ef1-b28f-a1e8afe43363-config-volume\") pod \"collect-profiles-29398650-4d2vh\" (UID: \"4f145c40-55d4-4ef1-b28f-a1e8afe43363\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29398650-4d2vh" Nov 23 17:30:00 crc kubenswrapper[5050]: I1123 17:30:00.500497 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/4f145c40-55d4-4ef1-b28f-a1e8afe43363-secret-volume\") pod \"collect-profiles-29398650-4d2vh\" (UID: \"4f145c40-55d4-4ef1-b28f-a1e8afe43363\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29398650-4d2vh" Nov 23 17:30:00 crc kubenswrapper[5050]: I1123 17:30:00.515565 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9zjkt\" (UniqueName: \"kubernetes.io/projected/4f145c40-55d4-4ef1-b28f-a1e8afe43363-kube-api-access-9zjkt\") pod \"collect-profiles-29398650-4d2vh\" (UID: \"4f145c40-55d4-4ef1-b28f-a1e8afe43363\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29398650-4d2vh" Nov 23 17:30:00 crc kubenswrapper[5050]: I1123 17:30:00.573844 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29398650-4d2vh" Nov 23 17:30:01 crc kubenswrapper[5050]: I1123 17:30:01.219046 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29398650-4d2vh"] Nov 23 17:30:01 crc kubenswrapper[5050]: I1123 17:30:01.564727 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29398650-4d2vh" event={"ID":"4f145c40-55d4-4ef1-b28f-a1e8afe43363","Type":"ContainerStarted","Data":"1e78cfa4fad97399f4fc428a76ea8c3ccdc6e7ba7c3ee9415b0c93dfce77344a"} Nov 23 17:30:01 crc kubenswrapper[5050]: I1123 17:30:01.565230 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29398650-4d2vh" event={"ID":"4f145c40-55d4-4ef1-b28f-a1e8afe43363","Type":"ContainerStarted","Data":"929fe1693a28756edaaa2d324609fce28c2d201bf741d5067514ca98aaf718d0"} Nov 23 17:30:01 crc kubenswrapper[5050]: I1123 17:30:01.586876 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29398650-4d2vh" podStartSLOduration=1.58684518 podStartE2EDuration="1.58684518s" podCreationTimestamp="2025-11-23 17:30:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 17:30:01.582113347 +0000 UTC m=+10096.749109882" watchObservedRunningTime="2025-11-23 17:30:01.58684518 +0000 UTC m=+10096.753841675" Nov 23 17:30:02 crc kubenswrapper[5050]: I1123 17:30:02.579825 5050 generic.go:334] "Generic (PLEG): container finished" podID="4f145c40-55d4-4ef1-b28f-a1e8afe43363" containerID="1e78cfa4fad97399f4fc428a76ea8c3ccdc6e7ba7c3ee9415b0c93dfce77344a" exitCode=0 Nov 23 17:30:02 crc kubenswrapper[5050]: I1123 17:30:02.579984 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29398650-4d2vh" event={"ID":"4f145c40-55d4-4ef1-b28f-a1e8afe43363","Type":"ContainerDied","Data":"1e78cfa4fad97399f4fc428a76ea8c3ccdc6e7ba7c3ee9415b0c93dfce77344a"} Nov 23 17:30:04 crc kubenswrapper[5050]: I1123 17:30:04.147981 5050 
util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29398650-4d2vh" Nov 23 17:30:04 crc kubenswrapper[5050]: I1123 17:30:04.315343 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/4f145c40-55d4-4ef1-b28f-a1e8afe43363-secret-volume\") pod \"4f145c40-55d4-4ef1-b28f-a1e8afe43363\" (UID: \"4f145c40-55d4-4ef1-b28f-a1e8afe43363\") " Nov 23 17:30:04 crc kubenswrapper[5050]: I1123 17:30:04.315570 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9zjkt\" (UniqueName: \"kubernetes.io/projected/4f145c40-55d4-4ef1-b28f-a1e8afe43363-kube-api-access-9zjkt\") pod \"4f145c40-55d4-4ef1-b28f-a1e8afe43363\" (UID: \"4f145c40-55d4-4ef1-b28f-a1e8afe43363\") " Nov 23 17:30:04 crc kubenswrapper[5050]: I1123 17:30:04.315797 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4f145c40-55d4-4ef1-b28f-a1e8afe43363-config-volume\") pod \"4f145c40-55d4-4ef1-b28f-a1e8afe43363\" (UID: \"4f145c40-55d4-4ef1-b28f-a1e8afe43363\") " Nov 23 17:30:04 crc kubenswrapper[5050]: I1123 17:30:04.317383 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4f145c40-55d4-4ef1-b28f-a1e8afe43363-config-volume" (OuterVolumeSpecName: "config-volume") pod "4f145c40-55d4-4ef1-b28f-a1e8afe43363" (UID: "4f145c40-55d4-4ef1-b28f-a1e8afe43363"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 23 17:30:04 crc kubenswrapper[5050]: I1123 17:30:04.324741 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4f145c40-55d4-4ef1-b28f-a1e8afe43363-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "4f145c40-55d4-4ef1-b28f-a1e8afe43363" (UID: "4f145c40-55d4-4ef1-b28f-a1e8afe43363"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 23 17:30:04 crc kubenswrapper[5050]: I1123 17:30:04.331741 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4f145c40-55d4-4ef1-b28f-a1e8afe43363-kube-api-access-9zjkt" (OuterVolumeSpecName: "kube-api-access-9zjkt") pod "4f145c40-55d4-4ef1-b28f-a1e8afe43363" (UID: "4f145c40-55d4-4ef1-b28f-a1e8afe43363"). InnerVolumeSpecName "kube-api-access-9zjkt". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 17:30:04 crc kubenswrapper[5050]: I1123 17:30:04.420028 5050 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/4f145c40-55d4-4ef1-b28f-a1e8afe43363-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 23 17:30:04 crc kubenswrapper[5050]: I1123 17:30:04.420157 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9zjkt\" (UniqueName: \"kubernetes.io/projected/4f145c40-55d4-4ef1-b28f-a1e8afe43363-kube-api-access-9zjkt\") on node \"crc\" DevicePath \"\"" Nov 23 17:30:04 crc kubenswrapper[5050]: I1123 17:30:04.420195 5050 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4f145c40-55d4-4ef1-b28f-a1e8afe43363-config-volume\") on node \"crc\" DevicePath \"\"" Nov 23 17:30:04 crc kubenswrapper[5050]: I1123 17:30:04.616340 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29398650-4d2vh" event={"ID":"4f145c40-55d4-4ef1-b28f-a1e8afe43363","Type":"ContainerDied","Data":"929fe1693a28756edaaa2d324609fce28c2d201bf741d5067514ca98aaf718d0"} Nov 23 17:30:04 crc kubenswrapper[5050]: I1123 17:30:04.617188 5050 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="929fe1693a28756edaaa2d324609fce28c2d201bf741d5067514ca98aaf718d0" Nov 23 17:30:04 crc kubenswrapper[5050]: I1123 17:30:04.616400 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29398650-4d2vh" Nov 23 17:30:04 crc kubenswrapper[5050]: I1123 17:30:04.681953 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29398605-zv8dr"] Nov 23 17:30:04 crc kubenswrapper[5050]: I1123 17:30:04.691383 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29398605-zv8dr"] Nov 23 17:30:05 crc kubenswrapper[5050]: I1123 17:30:05.578005 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="af3c3024-0b36-4ac3-bbcc-82ecd80d7dfc" path="/var/lib/kubelet/pods/af3c3024-0b36-4ac3-bbcc-82ecd80d7dfc/volumes" Nov 23 17:30:22 crc kubenswrapper[5050]: I1123 17:30:22.368646 5050 scope.go:117] "RemoveContainer" containerID="80aaff31eae62146d857f1d4a917bbdd70484c35d40e8c821c0e0cc090a79a8b" Nov 23 17:30:59 crc kubenswrapper[5050]: I1123 17:30:59.224976 5050 patch_prober.go:28] interesting pod/machine-config-daemon-hlrlq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 23 17:30:59 crc kubenswrapper[5050]: I1123 17:30:59.226105 5050 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 23 17:31:06 crc kubenswrapper[5050]: I1123 17:31:06.718976 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_alertmanager-metric-storage-0_7181c88f-0ea1-4e1f-bda2-dd7cf4be825a/init-config-reloader/0.log" Nov 23 17:31:06 crc kubenswrapper[5050]: I1123 17:31:06.994528 5050 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_alertmanager-metric-storage-0_7181c88f-0ea1-4e1f-bda2-dd7cf4be825a/init-config-reloader/0.log" Nov 23 17:31:07 crc kubenswrapper[5050]: I1123 17:31:07.057746 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_alertmanager-metric-storage-0_7181c88f-0ea1-4e1f-bda2-dd7cf4be825a/alertmanager/0.log" Nov 23 17:31:07 crc kubenswrapper[5050]: I1123 17:31:07.067552 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_alertmanager-metric-storage-0_7181c88f-0ea1-4e1f-bda2-dd7cf4be825a/config-reloader/0.log" Nov 23 17:31:07 crc kubenswrapper[5050]: I1123 17:31:07.260439 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_aodh-0_5e79d4b3-0fc0-4521-a8a2-f3416c3f4039/aodh-api/0.log" Nov 23 17:31:07 crc kubenswrapper[5050]: I1123 17:31:07.272464 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_aodh-0_5e79d4b3-0fc0-4521-a8a2-f3416c3f4039/aodh-evaluator/0.log" Nov 23 17:31:07 crc kubenswrapper[5050]: I1123 17:31:07.289870 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_aodh-0_5e79d4b3-0fc0-4521-a8a2-f3416c3f4039/aodh-listener/0.log" Nov 23 17:31:07 crc kubenswrapper[5050]: I1123 17:31:07.550318 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-5b7fc4d9bd-wd2cv_4a008c53-3e3a-46ba-8195-46ffae3aaec5/barbican-api/0.log" Nov 23 17:31:07 crc kubenswrapper[5050]: I1123 17:31:07.566657 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_aodh-0_5e79d4b3-0fc0-4521-a8a2-f3416c3f4039/aodh-notifier/0.log" Nov 23 17:31:07 crc kubenswrapper[5050]: I1123 17:31:07.576241 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-5b7fc4d9bd-wd2cv_4a008c53-3e3a-46ba-8195-46ffae3aaec5/barbican-api-log/0.log" Nov 23 17:31:07 crc kubenswrapper[5050]: I1123 17:31:07.775268 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-6bd5894c98-7q8qw_b0a3cfa8-999b-4cb5-853b-9877d94b5da4/barbican-keystone-listener/0.log" Nov 23 17:31:07 crc kubenswrapper[5050]: I1123 17:31:07.830218 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-6bd5894c98-7q8qw_b0a3cfa8-999b-4cb5-853b-9877d94b5da4/barbican-keystone-listener-log/0.log" Nov 23 17:31:08 crc kubenswrapper[5050]: I1123 17:31:08.031130 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-c95654dc7-5rjvf_bd10d96b-8497-4c0b-afe6-5e2027955ec7/barbican-worker/0.log" Nov 23 17:31:08 crc kubenswrapper[5050]: I1123 17:31:08.067493 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-c95654dc7-5rjvf_bd10d96b-8497-4c0b-afe6-5e2027955ec7/barbican-worker-log/0.log" Nov 23 17:31:08 crc kubenswrapper[5050]: I1123 17:31:08.161977 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_bootstrap-openstack-openstack-cell1-pr9ll_39e4cb1a-f19a-4fd6-aa7d-aae2843166f2/bootstrap-openstack-openstack-cell1/0.log" Nov 23 17:31:08 crc kubenswrapper[5050]: I1123 17:31:08.345361 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_dfb247e9-89fe-4eb2-a573-3797ccad68f8/ceilometer-central-agent/0.log" Nov 23 17:31:08 crc kubenswrapper[5050]: I1123 17:31:08.395699 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_dfb247e9-89fe-4eb2-a573-3797ccad68f8/proxy-httpd/0.log" Nov 23 17:31:08 crc kubenswrapper[5050]: I1123 17:31:08.414287 5050 
log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_dfb247e9-89fe-4eb2-a573-3797ccad68f8/ceilometer-notification-agent/0.log" Nov 23 17:31:08 crc kubenswrapper[5050]: I1123 17:31:08.646625 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_dfb247e9-89fe-4eb2-a573-3797ccad68f8/sg-core/0.log" Nov 23 17:31:08 crc kubenswrapper[5050]: I1123 17:31:08.717122 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceph-client-openstack-openstack-cell1-pshzq_03839446-b063-40d2-b17a-af3b1786f3a5/ceph-client-openstack-openstack-cell1/0.log" Nov 23 17:31:09 crc kubenswrapper[5050]: I1123 17:31:09.152649 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_530062c4-a69d-46a1-9508-dfd72558f046/cinder-api/0.log" Nov 23 17:31:09 crc kubenswrapper[5050]: I1123 17:31:09.230834 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_530062c4-a69d-46a1-9508-dfd72558f046/cinder-api-log/0.log" Nov 23 17:31:09 crc kubenswrapper[5050]: I1123 17:31:09.426899 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-backup-0_ebd99884-0673-4d7b-acf0-077be572599f/cinder-backup/0.log" Nov 23 17:31:09 crc kubenswrapper[5050]: I1123 17:31:09.521939 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-backup-0_ebd99884-0673-4d7b-acf0-077be572599f/probe/0.log" Nov 23 17:31:09 crc kubenswrapper[5050]: I1123 17:31:09.621697 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_832cbdc7-a2fb-41ba-93fa-342961b53fea/cinder-scheduler/0.log" Nov 23 17:31:09 crc kubenswrapper[5050]: I1123 17:31:09.701052 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_832cbdc7-a2fb-41ba-93fa-342961b53fea/probe/0.log" Nov 23 17:31:09 crc kubenswrapper[5050]: I1123 17:31:09.841580 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-volume-volume1-0_05c9da2b-2e38-45c6-924b-641d802e03fd/cinder-volume/0.log" Nov 23 17:31:09 crc kubenswrapper[5050]: I1123 17:31:09.887570 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-volume-volume1-0_05c9da2b-2e38-45c6-924b-641d802e03fd/probe/0.log" Nov 23 17:31:09 crc kubenswrapper[5050]: I1123 17:31:09.969676 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-network-openstack-openstack-cell1-qfcbd_10783393-ff0d-420a-9c89-913da2a4d3e0/configure-network-openstack-openstack-cell1/0.log" Nov 23 17:31:10 crc kubenswrapper[5050]: I1123 17:31:10.099227 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-os-openstack-openstack-cell1-rzgx2_f78c1a10-b5dc-40c8-8a62-f8e3955645cc/configure-os-openstack-openstack-cell1/0.log" Nov 23 17:31:10 crc kubenswrapper[5050]: I1123 17:31:10.237101 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-848d65fcdc-kdfrp_72f188fc-a3d6-4b46-81d0-9f1ed3985d10/init/0.log" Nov 23 17:31:10 crc kubenswrapper[5050]: I1123 17:31:10.447207 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-848d65fcdc-kdfrp_72f188fc-a3d6-4b46-81d0-9f1ed3985d10/init/0.log" Nov 23 17:31:10 crc kubenswrapper[5050]: I1123 17:31:10.535455 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-848d65fcdc-kdfrp_72f188fc-a3d6-4b46-81d0-9f1ed3985d10/dnsmasq-dns/0.log" Nov 23 17:31:10 crc kubenswrapper[5050]: I1123 
17:31:10.568624 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_download-cache-openstack-openstack-cell1-9fl52_a47c80f1-37fb-413d-9c56-0c445f609688/download-cache-openstack-openstack-cell1/0.log" Nov 23 17:31:10 crc kubenswrapper[5050]: I1123 17:31:10.792208 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_6aadf9b9-c5a7-4938-8ba5-85d12ea935f0/glance-httpd/0.log" Nov 23 17:31:10 crc kubenswrapper[5050]: I1123 17:31:10.816917 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_6aadf9b9-c5a7-4938-8ba5-85d12ea935f0/glance-log/0.log" Nov 23 17:31:10 crc kubenswrapper[5050]: I1123 17:31:10.870636 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_679ef9b6-a937-459d-86e7-125151f37744/glance-httpd/0.log" Nov 23 17:31:10 crc kubenswrapper[5050]: I1123 17:31:10.950790 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_679ef9b6-a937-459d-86e7-125151f37744/glance-log/0.log" Nov 23 17:31:11 crc kubenswrapper[5050]: I1123 17:31:11.277664 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_heat-api-65d549d5cc-6rmg6_55e60cb2-77a0-4ee5-bef3-a3760eb098a7/heat-api/0.log" Nov 23 17:31:11 crc kubenswrapper[5050]: I1123 17:31:11.306233 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_heat-engine-5cb68c5c69-fjmhq_b63b2bec-ce8e-4140-97cc-697d06fba98c/heat-engine/0.log" Nov 23 17:31:11 crc kubenswrapper[5050]: I1123 17:31:11.331835 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_heat-cfnapi-7d69779b5c-bjxtz_03fc802d-a360-46cf-8f71-153e1f2d60f6/heat-cfnapi/0.log" Nov 23 17:31:11 crc kubenswrapper[5050]: I1123 17:31:11.586703 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-58696f4c7c-gzq55_18576c60-9ba2-4cb0-9ae3-9e1d98a71811/horizon-log/0.log" Nov 23 17:31:11 crc kubenswrapper[5050]: I1123 17:31:11.672782 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-certs-openstack-openstack-cell1-nxlp7_2baf454f-86dc-4e4b-9a53-db1eeec9a497/install-certs-openstack-openstack-cell1/0.log" Nov 23 17:31:11 crc kubenswrapper[5050]: I1123 17:31:11.679596 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-58696f4c7c-gzq55_18576c60-9ba2-4cb0-9ae3-9e1d98a71811/horizon/0.log" Nov 23 17:31:11 crc kubenswrapper[5050]: I1123 17:31:11.856549 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-os-openstack-openstack-cell1-tcjmm_e6f245e8-504a-41a3-9d7d-2abe26a645ba/install-os-openstack-openstack-cell1/0.log" Nov 23 17:31:12 crc kubenswrapper[5050]: I1123 17:31:12.060727 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-cron-29398621-bdwd7_054236c5-b5b5-4794-bb9a-f52f18150a9e/keystone-cron/0.log" Nov 23 17:31:12 crc kubenswrapper[5050]: I1123 17:31:12.137000 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-79794bd4b-bfchw_6c8bf1f5-950d-4fa2-b42e-d43554319c11/keystone-api/0.log" Nov 23 17:31:12 crc kubenswrapper[5050]: I1123 17:31:12.268178 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_kube-state-metrics-0_c0c46824-94a1-4a97-b6a1-c32179c011de/kube-state-metrics/0.log" Nov 23 17:31:12 crc kubenswrapper[5050]: I1123 17:31:12.352026 5050 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_libvirt-openstack-openstack-cell1-r7z7f_3653bc64-477f-4adc-935a-94f8264ee939/libvirt-openstack-openstack-cell1/0.log" Nov 23 17:31:12 crc kubenswrapper[5050]: I1123 17:31:12.632814 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_manila-api-0_b6068f40-9934-40eb-b77b-c497f58e6667/manila-api-log/0.log" Nov 23 17:31:12 crc kubenswrapper[5050]: I1123 17:31:12.667903 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_manila-api-0_b6068f40-9934-40eb-b77b-c497f58e6667/manila-api/0.log" Nov 23 17:31:12 crc kubenswrapper[5050]: I1123 17:31:12.720478 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_manila-scheduler-0_42ae5f1a-5bf5-480e-b387-f45f211beb62/probe/0.log" Nov 23 17:31:12 crc kubenswrapper[5050]: I1123 17:31:12.732404 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_manila-scheduler-0_42ae5f1a-5bf5-480e-b387-f45f211beb62/manila-scheduler/0.log" Nov 23 17:31:12 crc kubenswrapper[5050]: I1123 17:31:12.967481 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_manila-share-share1-0_0a92636c-670e-44d8-ba06-c0ad4276da6d/probe/0.log" Nov 23 17:31:12 crc kubenswrapper[5050]: I1123 17:31:12.995832 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_manila-share-share1-0_0a92636c-670e-44d8-ba06-c0ad4276da6d/manila-share/0.log" Nov 23 17:31:13 crc kubenswrapper[5050]: I1123 17:31:13.108527 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_mariadb-copy-data_f8acfa10-0e5b-41f9-9ac1-a35d9108f222/adoption/0.log" Nov 23 17:31:13 crc kubenswrapper[5050]: I1123 17:31:13.493026 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-58b6b75955-wvrqr_f8a537cf-2631-4d9c-8cee-4de81f9e53a4/neutron-httpd/0.log" Nov 23 17:31:13 crc kubenswrapper[5050]: I1123 17:31:13.531730 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-58b6b75955-wvrqr_f8a537cf-2631-4d9c-8cee-4de81f9e53a4/neutron-api/0.log" Nov 23 17:31:13 crc kubenswrapper[5050]: I1123 17:31:13.822704 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-dhcp-openstack-openstack-cell1-q2ptj_9623322c-0a8c-4fbb-8209-51641891664b/neutron-dhcp-openstack-openstack-cell1/0.log" Nov 23 17:31:14 crc kubenswrapper[5050]: I1123 17:31:14.032058 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-metadata-openstack-openstack-cell1-zp8dl_1b78d9c7-ae76-471e-a3a5-04939c991c97/neutron-metadata-openstack-openstack-cell1/0.log" Nov 23 17:31:14 crc kubenswrapper[5050]: I1123 17:31:14.268988 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-sriov-openstack-openstack-cell1-rz6t4_7ea51431-6725-4bd0-8f42-7d79d7bb522a/neutron-sriov-openstack-openstack-cell1/0.log" Nov 23 17:31:14 crc kubenswrapper[5050]: I1123 17:31:14.421208 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_794b257f-ba4a-49d5-ab39-1137fbea011f/nova-api-api/0.log" Nov 23 17:31:15 crc kubenswrapper[5050]: I1123 17:31:15.271816 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell0-conductor-0_8f807c69-cd14-40a1-9dd1-c50271693387/nova-cell0-conductor-conductor/0.log" Nov 23 17:31:15 crc kubenswrapper[5050]: I1123 17:31:15.284217 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_794b257f-ba4a-49d5-ab39-1137fbea011f/nova-api-log/0.log" Nov 23 17:31:15 crc kubenswrapper[5050]: I1123 
17:31:15.502503 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-conductor-0_6563762b-f9aa-4af7-a549-2f0d767f3b46/nova-cell1-conductor-conductor/0.log" Nov 23 17:31:15 crc kubenswrapper[5050]: I1123 17:31:15.685975 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-novncproxy-0_af6314fe-5ec9-4fbe-a1fb-dc2c4adfa1fa/nova-cell1-novncproxy-novncproxy/0.log" Nov 23 17:31:15 crc kubenswrapper[5050]: I1123 17:31:15.890987 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellfnhdj_ef9265cb-a8fa-4d93-9cb5-a7a94f62fcdd/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell1/0.log" Nov 23 17:31:15 crc kubenswrapper[5050]: I1123 17:31:15.958570 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-openstack-openstack-cell1-28qz2_c83fc0ae-3d5e-4abe-9654-df9afd83c9a0/nova-cell1-openstack-openstack-cell1/0.log" Nov 23 17:31:16 crc kubenswrapper[5050]: I1123 17:31:16.202575 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_fd0c3ccb-28f2-47e9-8c4b-bace00fc0b91/nova-metadata-log/0.log" Nov 23 17:31:16 crc kubenswrapper[5050]: I1123 17:31:16.232143 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_fd0c3ccb-28f2-47e9-8c4b-bace00fc0b91/nova-metadata-metadata/0.log" Nov 23 17:31:16 crc kubenswrapper[5050]: I1123 17:31:16.387921 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-scheduler-0_20e24247-59dd-42ad-ab7e-7347775043ad/nova-scheduler-scheduler/0.log" Nov 23 17:31:16 crc kubenswrapper[5050]: I1123 17:31:16.506384 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-api-78c74877d6-ndfk4_45c153e5-8ce2-49a3-8476-eb9cb8a406ab/init/0.log" Nov 23 17:31:17 crc kubenswrapper[5050]: I1123 17:31:17.324276 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-api-78c74877d6-ndfk4_45c153e5-8ce2-49a3-8476-eb9cb8a406ab/octavia-api-provider-agent/0.log" Nov 23 17:31:17 crc kubenswrapper[5050]: I1123 17:31:17.459879 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-api-78c74877d6-ndfk4_45c153e5-8ce2-49a3-8476-eb9cb8a406ab/init/0.log" Nov 23 17:31:17 crc kubenswrapper[5050]: I1123 17:31:17.543886 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-healthmanager-z4gjs_4123706e-1979-4f48-96d7-bd5b53ebd04c/init/0.log" Nov 23 17:31:17 crc kubenswrapper[5050]: I1123 17:31:17.620434 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-api-78c74877d6-ndfk4_45c153e5-8ce2-49a3-8476-eb9cb8a406ab/octavia-api/0.log" Nov 23 17:31:17 crc kubenswrapper[5050]: I1123 17:31:17.906228 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-healthmanager-z4gjs_4123706e-1979-4f48-96d7-bd5b53ebd04c/init/0.log" Nov 23 17:31:17 crc kubenswrapper[5050]: I1123 17:31:17.971298 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-healthmanager-z4gjs_4123706e-1979-4f48-96d7-bd5b53ebd04c/octavia-healthmanager/0.log" Nov 23 17:31:18 crc kubenswrapper[5050]: I1123 17:31:18.071898 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-housekeeping-96ckg_31208d48-e815-461a-b520-436b71bc90ce/init/0.log" Nov 23 17:31:18 crc kubenswrapper[5050]: I1123 17:31:18.108204 5050 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_octavia-housekeeping-96ckg_31208d48-e815-461a-b520-436b71bc90ce/init/0.log" Nov 23 17:31:18 crc kubenswrapper[5050]: I1123 17:31:18.267983 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-housekeeping-96ckg_31208d48-e815-461a-b520-436b71bc90ce/octavia-housekeeping/0.log" Nov 23 17:31:18 crc kubenswrapper[5050]: I1123 17:31:18.327379 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-image-upload-59f8cff499-wlqsk_33b0e351-a0e3-4bae-abe7-080ac3620f92/init/0.log" Nov 23 17:31:18 crc kubenswrapper[5050]: I1123 17:31:18.557823 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-image-upload-59f8cff499-wlqsk_33b0e351-a0e3-4bae-abe7-080ac3620f92/init/0.log" Nov 23 17:31:18 crc kubenswrapper[5050]: I1123 17:31:18.583064 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-image-upload-59f8cff499-wlqsk_33b0e351-a0e3-4bae-abe7-080ac3620f92/octavia-amphora-httpd/0.log" Nov 23 17:31:18 crc kubenswrapper[5050]: I1123 17:31:18.693851 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-rsyslog-rl599_3a52da4e-bd4e-4faa-99d2-944528a2f797/init/0.log" Nov 23 17:31:18 crc kubenswrapper[5050]: I1123 17:31:18.948553 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-worker-gtlmz_8cf1b9f8-28c6-4b6e-b268-cfe4dee35d9d/init/0.log" Nov 23 17:31:18 crc kubenswrapper[5050]: I1123 17:31:18.949795 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-rsyslog-rl599_3a52da4e-bd4e-4faa-99d2-944528a2f797/octavia-rsyslog/0.log" Nov 23 17:31:18 crc kubenswrapper[5050]: I1123 17:31:18.968139 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-rsyslog-rl599_3a52da4e-bd4e-4faa-99d2-944528a2f797/init/0.log" Nov 23 17:31:19 crc kubenswrapper[5050]: I1123 17:31:19.223178 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-worker-gtlmz_8cf1b9f8-28c6-4b6e-b268-cfe4dee35d9d/init/0.log" Nov 23 17:31:19 crc kubenswrapper[5050]: I1123 17:31:19.247708 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_fd4aea72-077d-4db3-85ee-3ecf19b1f7a9/mysql-bootstrap/0.log" Nov 23 17:31:19 crc kubenswrapper[5050]: I1123 17:31:19.496350 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-worker-gtlmz_8cf1b9f8-28c6-4b6e-b268-cfe4dee35d9d/octavia-worker/0.log" Nov 23 17:31:19 crc kubenswrapper[5050]: I1123 17:31:19.548317 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_fd4aea72-077d-4db3-85ee-3ecf19b1f7a9/mysql-bootstrap/0.log" Nov 23 17:31:19 crc kubenswrapper[5050]: I1123 17:31:19.549325 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_fd4aea72-077d-4db3-85ee-3ecf19b1f7a9/galera/0.log" Nov 23 17:31:19 crc kubenswrapper[5050]: I1123 17:31:19.783938 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_4ec299fe-6b4a-4fe5-a401-ebc1d8dc8511/mysql-bootstrap/0.log" Nov 23 17:31:19 crc kubenswrapper[5050]: I1123 17:31:19.972087 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_4ec299fe-6b4a-4fe5-a401-ebc1d8dc8511/mysql-bootstrap/0.log" Nov 23 17:31:20 crc kubenswrapper[5050]: I1123 17:31:20.017354 5050 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_openstackclient_dd8bde11-d7b2-4bb6-beb2-89f01031730f/openstackclient/0.log" Nov 23 17:31:20 crc kubenswrapper[5050]: I1123 17:31:20.030027 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_4ec299fe-6b4a-4fe5-a401-ebc1d8dc8511/galera/0.log" Nov 23 17:31:20 crc kubenswrapper[5050]: I1123 17:31:20.275168 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-jzkdv_67fb633c-03ee-4b5f-acd1-930b6112cd19/ovn-controller/0.log" Nov 23 17:31:20 crc kubenswrapper[5050]: I1123 17:31:20.345404 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-j6l2h_df823f17-83c9-444e-8e5e-610dd679a890/openstack-network-exporter/0.log" Nov 23 17:31:20 crc kubenswrapper[5050]: I1123 17:31:20.777147 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-4r95w_9f33288a-10e9-4c41-878f-554f583430a1/ovsdb-server-init/0.log" Nov 23 17:31:20 crc kubenswrapper[5050]: I1123 17:31:20.989957 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-4r95w_9f33288a-10e9-4c41-878f-554f583430a1/ovsdb-server-init/0.log" Nov 23 17:31:20 crc kubenswrapper[5050]: I1123 17:31:20.998408 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-4r95w_9f33288a-10e9-4c41-878f-554f583430a1/ovsdb-server/0.log" Nov 23 17:31:21 crc kubenswrapper[5050]: I1123 17:31:21.075409 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-4r95w_9f33288a-10e9-4c41-878f-554f583430a1/ovs-vswitchd/0.log" Nov 23 17:31:21 crc kubenswrapper[5050]: I1123 17:31:21.264705 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-copy-data_2035442b-ccf1-4fb8-8918-4d459a27d992/adoption/0.log" Nov 23 17:31:21 crc kubenswrapper[5050]: I1123 17:31:21.339003 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_ab512517-dd97-4fc7-a923-4e2fc21a9af8/openstack-network-exporter/0.log" Nov 23 17:31:21 crc kubenswrapper[5050]: I1123 17:31:21.398055 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_ab512517-dd97-4fc7-a923-4e2fc21a9af8/ovn-northd/0.log" Nov 23 17:31:21 crc kubenswrapper[5050]: I1123 17:31:21.681851 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_e42bfb74-79c5-4710-8d55-33da1c765d8c/openstack-network-exporter/0.log" Nov 23 17:31:21 crc kubenswrapper[5050]: I1123 17:31:21.690023 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-openstack-openstack-cell1-f6s64_53184db4-ff0a-431e-bbbc-cc6295558c83/ovn-openstack-openstack-cell1/0.log" Nov 23 17:31:21 crc kubenswrapper[5050]: I1123 17:31:21.828635 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_e42bfb74-79c5-4710-8d55-33da1c765d8c/ovsdbserver-nb/0.log" Nov 23 17:31:21 crc kubenswrapper[5050]: I1123 17:31:21.965551 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-1_97bef3ec-2ce6-4a13-bbc4-d08d97618e29/openstack-network-exporter/0.log" Nov 23 17:31:21 crc kubenswrapper[5050]: I1123 17:31:21.972314 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-1_97bef3ec-2ce6-4a13-bbc4-d08d97618e29/ovsdbserver-nb/0.log" Nov 23 17:31:22 crc kubenswrapper[5050]: I1123 17:31:22.185469 5050 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_ovsdbserver-nb-2_bafc2ad6-91da-4cd1-b4e3-4618db95cc11/openstack-network-exporter/0.log" Nov 23 17:31:22 crc kubenswrapper[5050]: I1123 17:31:22.230509 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-2_bafc2ad6-91da-4cd1-b4e3-4618db95cc11/ovsdbserver-nb/0.log" Nov 23 17:31:22 crc kubenswrapper[5050]: I1123 17:31:22.451858 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_63882070-14a9-44ab-9298-11a518e0ee25/openstack-network-exporter/0.log" Nov 23 17:31:22 crc kubenswrapper[5050]: I1123 17:31:22.455142 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_63882070-14a9-44ab-9298-11a518e0ee25/ovsdbserver-sb/0.log" Nov 23 17:31:22 crc kubenswrapper[5050]: I1123 17:31:22.581760 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-1_24e76718-a290-4dc2-a5eb-5a0c156ddf42/openstack-network-exporter/0.log" Nov 23 17:31:22 crc kubenswrapper[5050]: I1123 17:31:22.682100 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-1_24e76718-a290-4dc2-a5eb-5a0c156ddf42/ovsdbserver-sb/0.log" Nov 23 17:31:22 crc kubenswrapper[5050]: I1123 17:31:22.775170 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-2_5c40394e-5b93-4c3e-876f-d9cbb18b1bf2/openstack-network-exporter/0.log" Nov 23 17:31:22 crc kubenswrapper[5050]: I1123 17:31:22.804987 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-2_5c40394e-5b93-4c3e-876f-d9cbb18b1bf2/ovsdbserver-sb/0.log" Nov 23 17:31:23 crc kubenswrapper[5050]: I1123 17:31:23.066929 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-678f8c5968-b8ddf_70eba0ae-5c52-4839-a886-3d2a8d989ab5/placement-api/0.log" Nov 23 17:31:23 crc kubenswrapper[5050]: I1123 17:31:23.086361 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-678f8c5968-b8ddf_70eba0ae-5c52-4839-a886-3d2a8d989ab5/placement-log/0.log" Nov 23 17:31:23 crc kubenswrapper[5050]: I1123 17:31:23.218241 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_pre-adoption-validation-openstack-pre-adoption-openstack-cf767s_a4749617-bd73-4ca8-9008-124343133ee9/pre-adoption-validation-openstack-pre-adoption-openstack-cell1/0.log" Nov 23 17:31:23 crc kubenswrapper[5050]: I1123 17:31:23.350629 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_98375fca-7e96-48ce-acc5-3abd00f043a1/init-config-reloader/0.log" Nov 23 17:31:23 crc kubenswrapper[5050]: I1123 17:31:23.680970 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_98375fca-7e96-48ce-acc5-3abd00f043a1/init-config-reloader/0.log" Nov 23 17:31:23 crc kubenswrapper[5050]: I1123 17:31:23.682313 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_98375fca-7e96-48ce-acc5-3abd00f043a1/config-reloader/0.log" Nov 23 17:31:23 crc kubenswrapper[5050]: I1123 17:31:23.702899 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_98375fca-7e96-48ce-acc5-3abd00f043a1/prometheus/0.log" Nov 23 17:31:23 crc kubenswrapper[5050]: I1123 17:31:23.779574 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_98375fca-7e96-48ce-acc5-3abd00f043a1/thanos-sidecar/0.log" Nov 23 17:31:23 crc 
kubenswrapper[5050]: I1123 17:31:23.923809 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_f2c92766-b409-46b5-a029-6f55fb430e89/setup-container/0.log" Nov 23 17:31:24 crc kubenswrapper[5050]: I1123 17:31:24.136990 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_f2c92766-b409-46b5-a029-6f55fb430e89/setup-container/0.log" Nov 23 17:31:24 crc kubenswrapper[5050]: I1123 17:31:24.178253 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_f2c92766-b409-46b5-a029-6f55fb430e89/rabbitmq/0.log" Nov 23 17:31:24 crc kubenswrapper[5050]: I1123 17:31:24.229223 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_memcached-0_a1ca158f-7aa6-4339-a321-81831457d2b0/memcached/0.log" Nov 23 17:31:24 crc kubenswrapper[5050]: I1123 17:31:24.274939 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_624f99c9-a404-4437-9a6f-835971760d52/setup-container/0.log" Nov 23 17:31:24 crc kubenswrapper[5050]: I1123 17:31:24.441315 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_624f99c9-a404-4437-9a6f-835971760d52/setup-container/0.log" Nov 23 17:31:24 crc kubenswrapper[5050]: I1123 17:31:24.473572 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_624f99c9-a404-4437-9a6f-835971760d52/rabbitmq/0.log" Nov 23 17:31:24 crc kubenswrapper[5050]: I1123 17:31:24.526280 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_reboot-os-openstack-openstack-cell1-q4pmx_98cb76c1-7d81-491f-bd07-037808d66926/reboot-os-openstack-openstack-cell1/0.log" Nov 23 17:31:24 crc kubenswrapper[5050]: I1123 17:31:24.636159 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_run-os-openstack-openstack-cell1-mx2l6_12cb88f4-57df-468f-b59a-5615d496ee52/run-os-openstack-openstack-cell1/0.log" Nov 23 17:31:24 crc kubenswrapper[5050]: I1123 17:31:24.751314 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ssh-known-hosts-openstack-s6dvt_8beee0e1-ccc1-48f2-83ae-a59422187fac/ssh-known-hosts-openstack/0.log" Nov 23 17:31:24 crc kubenswrapper[5050]: I1123 17:31:24.899603 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_telemetry-openstack-openstack-cell1-7rf78_7c29377d-9bd8-4eb5-8777-3ae34021588e/telemetry-openstack-openstack-cell1/0.log" Nov 23 17:31:25 crc kubenswrapper[5050]: I1123 17:31:25.128793 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_validate-network-openstack-openstack-cell1-v8qw8_57262ba8-76c9-4627-a4f9-ba993e7e8ff8/validate-network-openstack-openstack-cell1/0.log" Nov 23 17:31:25 crc kubenswrapper[5050]: I1123 17:31:25.379702 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_tripleo-cleanup-tripleo-cleanup-openstack-cell1-c9xw7_2c88cdbf-1f63-4305-a672-8cc67f06fa66/tripleo-cleanup-tripleo-cleanup-openstack-cell1/0.log" Nov 23 17:31:29 crc kubenswrapper[5050]: I1123 17:31:29.224794 5050 patch_prober.go:28] interesting pod/machine-config-daemon-hlrlq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 23 17:31:29 crc kubenswrapper[5050]: I1123 17:31:29.225770 5050 prober.go:107] "Probe failed" probeType="Liveness" 
pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 23 17:31:52 crc kubenswrapper[5050]: I1123 17:31:52.661329 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-75fb479bcc-2qdnf_e4ff3d33-3a5f-4ba3-ae25-308b7ad6d389/kube-rbac-proxy/0.log" Nov 23 17:31:52 crc kubenswrapper[5050]: I1123 17:31:52.820402 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-75fb479bcc-2qdnf_e4ff3d33-3a5f-4ba3-ae25-308b7ad6d389/manager/0.log" Nov 23 17:31:53 crc kubenswrapper[5050]: I1123 17:31:53.422397 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-6498cbf48f-h5gcg_b5fbd22d-ccfc-4a42-8b4b-f6566d36a8c1/manager/0.log" Nov 23 17:31:53 crc kubenswrapper[5050]: I1123 17:31:53.441281 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-767ccfd65f-cp2sk_0f64e767-a4c5-4f8c-9dc9-0c09328b5d83/kube-rbac-proxy/0.log" Nov 23 17:31:53 crc kubenswrapper[5050]: I1123 17:31:53.452177 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-6498cbf48f-h5gcg_b5fbd22d-ccfc-4a42-8b4b-f6566d36a8c1/kube-rbac-proxy/0.log" Nov 23 17:31:53 crc kubenswrapper[5050]: I1123 17:31:53.612463 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-767ccfd65f-cp2sk_0f64e767-a4c5-4f8c-9dc9-0c09328b5d83/manager/0.log" Nov 23 17:31:53 crc kubenswrapper[5050]: I1123 17:31:53.751715 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_e27345db6007af157f9c9720a7d718ea21605eb25c2fa6eb354135e7a5tshbh_ad085e2d-80ec-4ee9-a7af-6eb49955b26f/util/0.log" Nov 23 17:31:53 crc kubenswrapper[5050]: I1123 17:31:53.927150 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_e27345db6007af157f9c9720a7d718ea21605eb25c2fa6eb354135e7a5tshbh_ad085e2d-80ec-4ee9-a7af-6eb49955b26f/util/0.log" Nov 23 17:31:53 crc kubenswrapper[5050]: I1123 17:31:53.978503 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_e27345db6007af157f9c9720a7d718ea21605eb25c2fa6eb354135e7a5tshbh_ad085e2d-80ec-4ee9-a7af-6eb49955b26f/pull/0.log" Nov 23 17:31:54 crc kubenswrapper[5050]: I1123 17:31:54.006822 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_e27345db6007af157f9c9720a7d718ea21605eb25c2fa6eb354135e7a5tshbh_ad085e2d-80ec-4ee9-a7af-6eb49955b26f/pull/0.log" Nov 23 17:31:54 crc kubenswrapper[5050]: I1123 17:31:54.152998 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_e27345db6007af157f9c9720a7d718ea21605eb25c2fa6eb354135e7a5tshbh_ad085e2d-80ec-4ee9-a7af-6eb49955b26f/util/0.log" Nov 23 17:31:54 crc kubenswrapper[5050]: I1123 17:31:54.206958 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_e27345db6007af157f9c9720a7d718ea21605eb25c2fa6eb354135e7a5tshbh_ad085e2d-80ec-4ee9-a7af-6eb49955b26f/pull/0.log" Nov 23 17:31:54 crc kubenswrapper[5050]: I1123 17:31:54.268143 5050 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_e27345db6007af157f9c9720a7d718ea21605eb25c2fa6eb354135e7a5tshbh_ad085e2d-80ec-4ee9-a7af-6eb49955b26f/extract/0.log" Nov 23 17:31:54 crc kubenswrapper[5050]: I1123 17:31:54.387686 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-7969689c84-r5lpg_8f0ad77e-339a-4a2e-a700-c52790877f4c/kube-rbac-proxy/0.log" Nov 23 17:31:54 crc kubenswrapper[5050]: I1123 17:31:54.575063 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-56f54d6746-g2g7z_96580238-85b0-481c-91f2-be0888975fb7/kube-rbac-proxy/0.log" Nov 23 17:31:54 crc kubenswrapper[5050]: I1123 17:31:54.582387 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-7969689c84-r5lpg_8f0ad77e-339a-4a2e-a700-c52790877f4c/manager/0.log" Nov 23 17:31:55 crc kubenswrapper[5050]: I1123 17:31:55.296387 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-56f54d6746-g2g7z_96580238-85b0-481c-91f2-be0888975fb7/manager/0.log" Nov 23 17:31:55 crc kubenswrapper[5050]: I1123 17:31:55.339475 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-598f69df5d-t4m5g_17b553bf-a9b6-4807-be93-845263e46bda/kube-rbac-proxy/0.log" Nov 23 17:31:55 crc kubenswrapper[5050]: I1123 17:31:55.374904 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-598f69df5d-t4m5g_17b553bf-a9b6-4807-be93-845263e46bda/manager/0.log" Nov 23 17:31:55 crc kubenswrapper[5050]: I1123 17:31:55.570591 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-6dd8864d7c-pc65p_09386696-43fd-4bd4-9afd-bba22b85c546/kube-rbac-proxy/0.log" Nov 23 17:31:55 crc kubenswrapper[5050]: I1123 17:31:55.822352 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-99b499f4-4p57r_97c24c4c-3ca8-4c5b-8b81-a64edf49b0bb/kube-rbac-proxy/0.log" Nov 23 17:31:55 crc kubenswrapper[5050]: I1123 17:31:55.901871 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-6dd8864d7c-pc65p_09386696-43fd-4bd4-9afd-bba22b85c546/manager/0.log" Nov 23 17:31:55 crc kubenswrapper[5050]: I1123 17:31:55.906780 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-99b499f4-4p57r_97c24c4c-3ca8-4c5b-8b81-a64edf49b0bb/manager/0.log" Nov 23 17:31:56 crc kubenswrapper[5050]: I1123 17:31:56.038216 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-7454b96578-spv2t_e9c42c8d-d147-4b90-bcb4-5b2771121504/kube-rbac-proxy/0.log" Nov 23 17:31:56 crc kubenswrapper[5050]: I1123 17:31:56.155989 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-58f887965d-4856d_ef8b813f-cf8f-46a0-ad63-259d5aa2dbb2/kube-rbac-proxy/0.log" Nov 23 17:31:56 crc kubenswrapper[5050]: I1123 17:31:56.288706 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-58f887965d-4856d_ef8b813f-cf8f-46a0-ad63-259d5aa2dbb2/manager/0.log" Nov 23 17:31:56 crc kubenswrapper[5050]: I1123 17:31:56.342874 5050 log.go:25] 
"Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-7454b96578-spv2t_e9c42c8d-d147-4b90-bcb4-5b2771121504/manager/0.log" Nov 23 17:31:56 crc kubenswrapper[5050]: I1123 17:31:56.401333 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-54b5986bb8-8tnhz_1b42304a-8ccd-4015-9e74-295ea7abd0fd/manager/0.log" Nov 23 17:31:56 crc kubenswrapper[5050]: I1123 17:31:56.423915 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-54b5986bb8-8tnhz_1b42304a-8ccd-4015-9e74-295ea7abd0fd/kube-rbac-proxy/0.log" Nov 23 17:31:56 crc kubenswrapper[5050]: I1123 17:31:56.571800 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-78bd47f458-nqlmd_8c522f94-22c6-4569-a62b-18f9ed2f3b3f/kube-rbac-proxy/0.log" Nov 23 17:31:56 crc kubenswrapper[5050]: I1123 17:31:56.631100 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-78bd47f458-nqlmd_8c522f94-22c6-4569-a62b-18f9ed2f3b3f/manager/0.log" Nov 23 17:31:56 crc kubenswrapper[5050]: I1123 17:31:56.683944 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-cfbb9c588-d9m8q_d54a2a99-ffe4-422e-bbae-54371be5a57e/kube-rbac-proxy/0.log" Nov 23 17:31:56 crc kubenswrapper[5050]: I1123 17:31:56.933896 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-54cfbf4c7d-mzmmd_b886cbd7-ef95-46b5-b817-d041b5b96cec/kube-rbac-proxy/0.log" Nov 23 17:31:56 crc kubenswrapper[5050]: I1123 17:31:56.982307 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-cfbb9c588-d9m8q_d54a2a99-ffe4-422e-bbae-54371be5a57e/manager/0.log" Nov 23 17:31:57 crc kubenswrapper[5050]: I1123 17:31:57.052878 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-54cfbf4c7d-mzmmd_b886cbd7-ef95-46b5-b817-d041b5b96cec/manager/0.log" Nov 23 17:31:57 crc kubenswrapper[5050]: I1123 17:31:57.139505 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-8c7444f48-zksdq_9483bc3e-409c-426c-a0ea-34b75543ac3c/kube-rbac-proxy/0.log" Nov 23 17:31:57 crc kubenswrapper[5050]: I1123 17:31:57.461737 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-669b8498dc-87h5k_68864336-7cb2-4deb-9ef9-159d78b04fc1/kube-rbac-proxy/0.log" Nov 23 17:31:57 crc kubenswrapper[5050]: I1123 17:31:57.484777 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-8c7444f48-zksdq_9483bc3e-409c-426c-a0ea-34b75543ac3c/manager/0.log" Nov 23 17:31:57 crc kubenswrapper[5050]: I1123 17:31:57.697289 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-operator-597d69585c-7b9fh_0db89f87-ca55-4247-977c-f40ffe5ade4c/kube-rbac-proxy/0.log" Nov 23 17:31:57 crc kubenswrapper[5050]: I1123 17:31:57.951022 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-index-gp5f8_5c4bbe08-f458-46ae-bcbd-7457e59ffc18/registry-server/0.log" Nov 23 17:31:57 crc kubenswrapper[5050]: 
I1123 17:31:57.994824 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-operator-597d69585c-7b9fh_0db89f87-ca55-4247-977c-f40ffe5ade4c/operator/0.log" Nov 23 17:31:58 crc kubenswrapper[5050]: I1123 17:31:58.048121 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-54fc5f65b7-k9c9p_52c3e5f3-3fee-4095-97f2-e7260eb08d3d/kube-rbac-proxy/0.log" Nov 23 17:31:58 crc kubenswrapper[5050]: I1123 17:31:58.256224 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-5b797b8dff-xc9s4_5de89bc7-53c3-4bf9-a791-b9b0b6a30603/kube-rbac-proxy/0.log" Nov 23 17:31:58 crc kubenswrapper[5050]: I1123 17:31:58.384130 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-54fc5f65b7-k9c9p_52c3e5f3-3fee-4095-97f2-e7260eb08d3d/manager/0.log" Nov 23 17:31:58 crc kubenswrapper[5050]: I1123 17:31:58.390596 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-5b797b8dff-xc9s4_5de89bc7-53c3-4bf9-a791-b9b0b6a30603/manager/0.log" Nov 23 17:31:58 crc kubenswrapper[5050]: I1123 17:31:58.553312 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-5f97d8c699-k4khg_14e92b7f-abf0-4e72-b7b0-ff9b8d1e199b/operator/0.log" Nov 23 17:31:58 crc kubenswrapper[5050]: I1123 17:31:58.770271 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-d656998f4-fjsvj_c937d003-eeeb-49db-b2fe-8bc1ffde5f94/kube-rbac-proxy/0.log" Nov 23 17:31:58 crc kubenswrapper[5050]: I1123 17:31:58.916999 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-d656998f4-fjsvj_c937d003-eeeb-49db-b2fe-8bc1ffde5f94/manager/0.log" Nov 23 17:31:58 crc kubenswrapper[5050]: I1123 17:31:58.950015 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-6d4bf84b58-8wkkq_78533aec-d2a4-4691-8960-a5ca3abd34a8/kube-rbac-proxy/0.log" Nov 23 17:31:59 crc kubenswrapper[5050]: I1123 17:31:59.196383 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-b4c496f69-zdcb4_4b036715-70f2-493c-9e9c-27228473af65/kube-rbac-proxy/0.log" Nov 23 17:31:59 crc kubenswrapper[5050]: I1123 17:31:59.224325 5050 patch_prober.go:28] interesting pod/machine-config-daemon-hlrlq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 23 17:31:59 crc kubenswrapper[5050]: I1123 17:31:59.224390 5050 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 23 17:31:59 crc kubenswrapper[5050]: I1123 17:31:59.224465 5050 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" Nov 23 17:31:59 crc kubenswrapper[5050]: I1123 17:31:59.226760 5050 
kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"170f3900bfb04c6dadaf7839b3a6badf07704122b830e42ebf9f1e980b5ea8f4"} pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 23 17:31:59 crc kubenswrapper[5050]: I1123 17:31:59.226828 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" containerID="cri-o://170f3900bfb04c6dadaf7839b3a6badf07704122b830e42ebf9f1e980b5ea8f4" gracePeriod=600 Nov 23 17:31:59 crc kubenswrapper[5050]: I1123 17:31:59.285307 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-b4c496f69-zdcb4_4b036715-70f2-493c-9e9c-27228473af65/manager/0.log" Nov 23 17:31:59 crc kubenswrapper[5050]: I1123 17:31:59.354406 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-6d4bf84b58-8wkkq_78533aec-d2a4-4691-8960-a5ca3abd34a8/manager/0.log" Nov 23 17:31:59 crc kubenswrapper[5050]: I1123 17:31:59.539158 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-8c6448b9f-m7f26_9e9d6f21-c5ab-438c-99ee-433f1eaa9093/kube-rbac-proxy/0.log" Nov 23 17:31:59 crc kubenswrapper[5050]: I1123 17:31:59.644868 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-8c6448b9f-m7f26_9e9d6f21-c5ab-438c-99ee-433f1eaa9093/manager/0.log" Nov 23 17:31:59 crc kubenswrapper[5050]: I1123 17:31:59.826628 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-669b8498dc-87h5k_68864336-7cb2-4deb-9ef9-159d78b04fc1/manager/0.log" Nov 23 17:32:00 crc kubenswrapper[5050]: I1123 17:32:00.216007 5050 generic.go:334] "Generic (PLEG): container finished" podID="1d998909-9470-47ef-87e8-b34f0473682f" containerID="170f3900bfb04c6dadaf7839b3a6badf07704122b830e42ebf9f1e980b5ea8f4" exitCode=0 Nov 23 17:32:00 crc kubenswrapper[5050]: I1123 17:32:00.216233 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" event={"ID":"1d998909-9470-47ef-87e8-b34f0473682f","Type":"ContainerDied","Data":"170f3900bfb04c6dadaf7839b3a6badf07704122b830e42ebf9f1e980b5ea8f4"} Nov 23 17:32:00 crc kubenswrapper[5050]: I1123 17:32:00.216521 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" event={"ID":"1d998909-9470-47ef-87e8-b34f0473682f","Type":"ContainerStarted","Data":"094e7d5a208023d03b696c393552ca70524a11cddf16b3f1347e40ec9e667a1c"} Nov 23 17:32:00 crc kubenswrapper[5050]: I1123 17:32:00.216554 5050 scope.go:117] "RemoveContainer" containerID="2bfde5e08e12dfda92d97543906fc7b88ff567a0a0bd2d168349eaebb48d00e3" Nov 23 17:32:20 crc kubenswrapper[5050]: I1123 17:32:20.319717 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-v4fb4"] Nov 23 17:32:20 crc kubenswrapper[5050]: E1123 17:32:20.321273 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4f145c40-55d4-4ef1-b28f-a1e8afe43363" containerName="collect-profiles" Nov 23 17:32:20 crc kubenswrapper[5050]: I1123 
17:32:20.321289 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="4f145c40-55d4-4ef1-b28f-a1e8afe43363" containerName="collect-profiles" Nov 23 17:32:20 crc kubenswrapper[5050]: I1123 17:32:20.321619 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="4f145c40-55d4-4ef1-b28f-a1e8afe43363" containerName="collect-profiles" Nov 23 17:32:20 crc kubenswrapper[5050]: I1123 17:32:20.323488 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-v4fb4" Nov 23 17:32:20 crc kubenswrapper[5050]: I1123 17:32:20.347360 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-v4fb4"] Nov 23 17:32:20 crc kubenswrapper[5050]: I1123 17:32:20.481766 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-njl66\" (UniqueName: \"kubernetes.io/projected/f55be819-bd7a-42aa-b1ef-d4a3f44c8599-kube-api-access-njl66\") pod \"community-operators-v4fb4\" (UID: \"f55be819-bd7a-42aa-b1ef-d4a3f44c8599\") " pod="openshift-marketplace/community-operators-v4fb4" Nov 23 17:32:20 crc kubenswrapper[5050]: I1123 17:32:20.481857 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f55be819-bd7a-42aa-b1ef-d4a3f44c8599-catalog-content\") pod \"community-operators-v4fb4\" (UID: \"f55be819-bd7a-42aa-b1ef-d4a3f44c8599\") " pod="openshift-marketplace/community-operators-v4fb4" Nov 23 17:32:20 crc kubenswrapper[5050]: I1123 17:32:20.482024 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f55be819-bd7a-42aa-b1ef-d4a3f44c8599-utilities\") pod \"community-operators-v4fb4\" (UID: \"f55be819-bd7a-42aa-b1ef-d4a3f44c8599\") " pod="openshift-marketplace/community-operators-v4fb4" Nov 23 17:32:20 crc kubenswrapper[5050]: I1123 17:32:20.584514 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f55be819-bd7a-42aa-b1ef-d4a3f44c8599-catalog-content\") pod \"community-operators-v4fb4\" (UID: \"f55be819-bd7a-42aa-b1ef-d4a3f44c8599\") " pod="openshift-marketplace/community-operators-v4fb4" Nov 23 17:32:20 crc kubenswrapper[5050]: I1123 17:32:20.584990 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f55be819-bd7a-42aa-b1ef-d4a3f44c8599-utilities\") pod \"community-operators-v4fb4\" (UID: \"f55be819-bd7a-42aa-b1ef-d4a3f44c8599\") " pod="openshift-marketplace/community-operators-v4fb4" Nov 23 17:32:20 crc kubenswrapper[5050]: I1123 17:32:20.585114 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-njl66\" (UniqueName: \"kubernetes.io/projected/f55be819-bd7a-42aa-b1ef-d4a3f44c8599-kube-api-access-njl66\") pod \"community-operators-v4fb4\" (UID: \"f55be819-bd7a-42aa-b1ef-d4a3f44c8599\") " pod="openshift-marketplace/community-operators-v4fb4" Nov 23 17:32:20 crc kubenswrapper[5050]: I1123 17:32:20.585555 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f55be819-bd7a-42aa-b1ef-d4a3f44c8599-catalog-content\") pod \"community-operators-v4fb4\" (UID: \"f55be819-bd7a-42aa-b1ef-d4a3f44c8599\") " pod="openshift-marketplace/community-operators-v4fb4" Nov 23 
17:32:20 crc kubenswrapper[5050]: I1123 17:32:20.585577 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f55be819-bd7a-42aa-b1ef-d4a3f44c8599-utilities\") pod \"community-operators-v4fb4\" (UID: \"f55be819-bd7a-42aa-b1ef-d4a3f44c8599\") " pod="openshift-marketplace/community-operators-v4fb4" Nov 23 17:32:20 crc kubenswrapper[5050]: I1123 17:32:20.610768 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-njl66\" (UniqueName: \"kubernetes.io/projected/f55be819-bd7a-42aa-b1ef-d4a3f44c8599-kube-api-access-njl66\") pod \"community-operators-v4fb4\" (UID: \"f55be819-bd7a-42aa-b1ef-d4a3f44c8599\") " pod="openshift-marketplace/community-operators-v4fb4" Nov 23 17:32:20 crc kubenswrapper[5050]: I1123 17:32:20.660113 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-v4fb4" Nov 23 17:32:20 crc kubenswrapper[5050]: I1123 17:32:20.713766 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-tcs6j_400aa1f3-7e05-407e-90dc-ad2e99f54e61/control-plane-machine-set-operator/0.log" Nov 23 17:32:21 crc kubenswrapper[5050]: I1123 17:32:21.187743 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-zfnk9_a934d47f-f19d-4d29-aed8-20141e5bcf2b/kube-rbac-proxy/0.log" Nov 23 17:32:21 crc kubenswrapper[5050]: I1123 17:32:21.218247 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-zfnk9_a934d47f-f19d-4d29-aed8-20141e5bcf2b/machine-api-operator/0.log" Nov 23 17:32:21 crc kubenswrapper[5050]: I1123 17:32:21.390420 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-v4fb4"] Nov 23 17:32:21 crc kubenswrapper[5050]: I1123 17:32:21.480113 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-v4fb4" event={"ID":"f55be819-bd7a-42aa-b1ef-d4a3f44c8599","Type":"ContainerStarted","Data":"b072bd30183e905cf1f53f400f6b5e4c81fa66548afe8594388beb9cafe9ba42"} Nov 23 17:32:22 crc kubenswrapper[5050]: I1123 17:32:22.495729 5050 generic.go:334] "Generic (PLEG): container finished" podID="f55be819-bd7a-42aa-b1ef-d4a3f44c8599" containerID="abf08068e1f8fb3678caafbbfc862be366800407978fee8e7f5ce5618610869d" exitCode=0 Nov 23 17:32:22 crc kubenswrapper[5050]: I1123 17:32:22.495801 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-v4fb4" event={"ID":"f55be819-bd7a-42aa-b1ef-d4a3f44c8599","Type":"ContainerDied","Data":"abf08068e1f8fb3678caafbbfc862be366800407978fee8e7f5ce5618610869d"} Nov 23 17:32:22 crc kubenswrapper[5050]: I1123 17:32:22.500871 5050 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 23 17:32:23 crc kubenswrapper[5050]: I1123 17:32:23.513727 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-v4fb4" event={"ID":"f55be819-bd7a-42aa-b1ef-d4a3f44c8599","Type":"ContainerStarted","Data":"e6769825d1e9c7a100550736b9f44f02e1b48166ca969b4509c7a4ef480f3ee7"} Nov 23 17:32:25 crc kubenswrapper[5050]: I1123 17:32:25.543783 5050 generic.go:334] "Generic (PLEG): container finished" podID="f55be819-bd7a-42aa-b1ef-d4a3f44c8599" containerID="e6769825d1e9c7a100550736b9f44f02e1b48166ca969b4509c7a4ef480f3ee7" 
exitCode=0 Nov 23 17:32:25 crc kubenswrapper[5050]: I1123 17:32:25.543958 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-v4fb4" event={"ID":"f55be819-bd7a-42aa-b1ef-d4a3f44c8599","Type":"ContainerDied","Data":"e6769825d1e9c7a100550736b9f44f02e1b48166ca969b4509c7a4ef480f3ee7"} Nov 23 17:32:26 crc kubenswrapper[5050]: I1123 17:32:26.559995 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-v4fb4" event={"ID":"f55be819-bd7a-42aa-b1ef-d4a3f44c8599","Type":"ContainerStarted","Data":"ca6fef57ab720c41edeaf0bba4d28f924df4d043c983c0ccd0b3b75e6ddb7ba2"} Nov 23 17:32:26 crc kubenswrapper[5050]: I1123 17:32:26.587092 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-v4fb4" podStartSLOduration=3.065329553 podStartE2EDuration="6.587062061s" podCreationTimestamp="2025-11-23 17:32:20 +0000 UTC" firstStartedPulling="2025-11-23 17:32:22.500568445 +0000 UTC m=+10237.667564930" lastFinishedPulling="2025-11-23 17:32:26.022300953 +0000 UTC m=+10241.189297438" observedRunningTime="2025-11-23 17:32:26.577938763 +0000 UTC m=+10241.744935278" watchObservedRunningTime="2025-11-23 17:32:26.587062061 +0000 UTC m=+10241.754058546" Nov 23 17:32:30 crc kubenswrapper[5050]: I1123 17:32:30.660707 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-v4fb4" Nov 23 17:32:30 crc kubenswrapper[5050]: I1123 17:32:30.661713 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-v4fb4" Nov 23 17:32:31 crc kubenswrapper[5050]: I1123 17:32:31.342256 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-v4fb4" Nov 23 17:32:31 crc kubenswrapper[5050]: I1123 17:32:31.714912 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-v4fb4" Nov 23 17:32:31 crc kubenswrapper[5050]: I1123 17:32:31.782654 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-v4fb4"] Nov 23 17:32:33 crc kubenswrapper[5050]: I1123 17:32:33.654186 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-v4fb4" podUID="f55be819-bd7a-42aa-b1ef-d4a3f44c8599" containerName="registry-server" containerID="cri-o://ca6fef57ab720c41edeaf0bba4d28f924df4d043c983c0ccd0b3b75e6ddb7ba2" gracePeriod=2 Nov 23 17:32:34 crc kubenswrapper[5050]: I1123 17:32:34.679513 5050 generic.go:334] "Generic (PLEG): container finished" podID="f55be819-bd7a-42aa-b1ef-d4a3f44c8599" containerID="ca6fef57ab720c41edeaf0bba4d28f924df4d043c983c0ccd0b3b75e6ddb7ba2" exitCode=0 Nov 23 17:32:34 crc kubenswrapper[5050]: I1123 17:32:34.679624 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-v4fb4" event={"ID":"f55be819-bd7a-42aa-b1ef-d4a3f44c8599","Type":"ContainerDied","Data":"ca6fef57ab720c41edeaf0bba4d28f924df4d043c983c0ccd0b3b75e6ddb7ba2"} Nov 23 17:32:34 crc kubenswrapper[5050]: I1123 17:32:34.998664 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-v4fb4" Nov 23 17:32:35 crc kubenswrapper[5050]: I1123 17:32:35.125681 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f55be819-bd7a-42aa-b1ef-d4a3f44c8599-utilities\") pod \"f55be819-bd7a-42aa-b1ef-d4a3f44c8599\" (UID: \"f55be819-bd7a-42aa-b1ef-d4a3f44c8599\") " Nov 23 17:32:35 crc kubenswrapper[5050]: I1123 17:32:35.125968 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f55be819-bd7a-42aa-b1ef-d4a3f44c8599-catalog-content\") pod \"f55be819-bd7a-42aa-b1ef-d4a3f44c8599\" (UID: \"f55be819-bd7a-42aa-b1ef-d4a3f44c8599\") " Nov 23 17:32:35 crc kubenswrapper[5050]: I1123 17:32:35.126008 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-njl66\" (UniqueName: \"kubernetes.io/projected/f55be819-bd7a-42aa-b1ef-d4a3f44c8599-kube-api-access-njl66\") pod \"f55be819-bd7a-42aa-b1ef-d4a3f44c8599\" (UID: \"f55be819-bd7a-42aa-b1ef-d4a3f44c8599\") " Nov 23 17:32:35 crc kubenswrapper[5050]: I1123 17:32:35.127573 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f55be819-bd7a-42aa-b1ef-d4a3f44c8599-utilities" (OuterVolumeSpecName: "utilities") pod "f55be819-bd7a-42aa-b1ef-d4a3f44c8599" (UID: "f55be819-bd7a-42aa-b1ef-d4a3f44c8599"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 17:32:35 crc kubenswrapper[5050]: I1123 17:32:35.140935 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f55be819-bd7a-42aa-b1ef-d4a3f44c8599-kube-api-access-njl66" (OuterVolumeSpecName: "kube-api-access-njl66") pod "f55be819-bd7a-42aa-b1ef-d4a3f44c8599" (UID: "f55be819-bd7a-42aa-b1ef-d4a3f44c8599"). InnerVolumeSpecName "kube-api-access-njl66". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 17:32:35 crc kubenswrapper[5050]: I1123 17:32:35.197911 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f55be819-bd7a-42aa-b1ef-d4a3f44c8599-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f55be819-bd7a-42aa-b1ef-d4a3f44c8599" (UID: "f55be819-bd7a-42aa-b1ef-d4a3f44c8599"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 17:32:35 crc kubenswrapper[5050]: I1123 17:32:35.229791 5050 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f55be819-bd7a-42aa-b1ef-d4a3f44c8599-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 23 17:32:35 crc kubenswrapper[5050]: I1123 17:32:35.229844 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-njl66\" (UniqueName: \"kubernetes.io/projected/f55be819-bd7a-42aa-b1ef-d4a3f44c8599-kube-api-access-njl66\") on node \"crc\" DevicePath \"\"" Nov 23 17:32:35 crc kubenswrapper[5050]: I1123 17:32:35.229860 5050 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f55be819-bd7a-42aa-b1ef-d4a3f44c8599-utilities\") on node \"crc\" DevicePath \"\"" Nov 23 17:32:35 crc kubenswrapper[5050]: I1123 17:32:35.691867 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-v4fb4" event={"ID":"f55be819-bd7a-42aa-b1ef-d4a3f44c8599","Type":"ContainerDied","Data":"b072bd30183e905cf1f53f400f6b5e4c81fa66548afe8594388beb9cafe9ba42"} Nov 23 17:32:35 crc kubenswrapper[5050]: I1123 17:32:35.691930 5050 scope.go:117] "RemoveContainer" containerID="ca6fef57ab720c41edeaf0bba4d28f924df4d043c983c0ccd0b3b75e6ddb7ba2" Nov 23 17:32:35 crc kubenswrapper[5050]: I1123 17:32:35.691953 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-v4fb4" Nov 23 17:32:35 crc kubenswrapper[5050]: I1123 17:32:35.722427 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-v4fb4"] Nov 23 17:32:35 crc kubenswrapper[5050]: I1123 17:32:35.734611 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-v4fb4"] Nov 23 17:32:35 crc kubenswrapper[5050]: I1123 17:32:35.743386 5050 scope.go:117] "RemoveContainer" containerID="e6769825d1e9c7a100550736b9f44f02e1b48166ca969b4509c7a4ef480f3ee7" Nov 23 17:32:35 crc kubenswrapper[5050]: I1123 17:32:35.766162 5050 scope.go:117] "RemoveContainer" containerID="abf08068e1f8fb3678caafbbfc862be366800407978fee8e7f5ce5618610869d" Nov 23 17:32:36 crc kubenswrapper[5050]: I1123 17:32:36.528336 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-86cb77c54b-htrhf_7870f636-5b79-41e3-a118-aa1d664760d9/cert-manager-controller/0.log" Nov 23 17:32:37 crc kubenswrapper[5050]: I1123 17:32:37.001508 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-855d9ccff4-2lqmr_d2eb929a-7d9e-4f85-845c-731255ed042d/cert-manager-cainjector/0.log" Nov 23 17:32:37 crc kubenswrapper[5050]: I1123 17:32:37.057696 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-webhook-f4fb5df64-cj4jv_4c3f2d51-526a-409b-ada4-4ea14e622593/cert-manager-webhook/0.log" Nov 23 17:32:37 crc kubenswrapper[5050]: I1123 17:32:37.564709 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f55be819-bd7a-42aa-b1ef-d4a3f44c8599" path="/var/lib/kubelet/pods/f55be819-bd7a-42aa-b1ef-d4a3f44c8599/volumes" Nov 23 17:32:52 crc kubenswrapper[5050]: I1123 17:32:52.182042 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-console-plugin-5874bd7bc5-87552_0cd5928a-1a84-425a-a57d-e03c10630d1a/nmstate-console-plugin/0.log" Nov 23 17:32:52 crc kubenswrapper[5050]: I1123 
17:32:52.480022 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-5dcf9c57c5-7vq97_c5b4cff3-6a04-4b06-a466-53b9c96b12bf/kube-rbac-proxy/0.log" Nov 23 17:32:52 crc kubenswrapper[5050]: I1123 17:32:52.482992 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-handler-wkrpm_e1705bf0-65ab-45ea-a7f0-b89f69d5622d/nmstate-handler/0.log" Nov 23 17:32:52 crc kubenswrapper[5050]: I1123 17:32:52.533035 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-5dcf9c57c5-7vq97_c5b4cff3-6a04-4b06-a466-53b9c96b12bf/nmstate-metrics/0.log" Nov 23 17:32:52 crc kubenswrapper[5050]: I1123 17:32:52.754951 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-operator-557fdffb88-6lxqw_aea782b8-e7a4-4d41-b608-44bc576aeebc/nmstate-operator/0.log" Nov 23 17:32:52 crc kubenswrapper[5050]: I1123 17:32:52.894459 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-webhook-6b89b748d8-nmb6b_50d94b05-cf38-4f9e-a9fd-6b93aac7646b/nmstate-webhook/0.log" Nov 23 17:33:12 crc kubenswrapper[5050]: I1123 17:33:12.262889 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6c7b4b5f48-xvbr9_de22d979-ac94-4839-b266-2dfa8ae79de2/kube-rbac-proxy/0.log" Nov 23 17:33:12 crc kubenswrapper[5050]: I1123 17:33:12.604974 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-sn9hl_4e8f95ac-d360-4b81-9bce-28577dce8e4c/cp-frr-files/0.log" Nov 23 17:33:12 crc kubenswrapper[5050]: I1123 17:33:12.702171 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6c7b4b5f48-xvbr9_de22d979-ac94-4839-b266-2dfa8ae79de2/controller/0.log" Nov 23 17:33:12 crc kubenswrapper[5050]: I1123 17:33:12.847726 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-sn9hl_4e8f95ac-d360-4b81-9bce-28577dce8e4c/cp-frr-files/0.log" Nov 23 17:33:12 crc kubenswrapper[5050]: I1123 17:33:12.884379 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-sn9hl_4e8f95ac-d360-4b81-9bce-28577dce8e4c/cp-reloader/0.log" Nov 23 17:33:12 crc kubenswrapper[5050]: I1123 17:33:12.936841 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-sn9hl_4e8f95ac-d360-4b81-9bce-28577dce8e4c/cp-metrics/0.log" Nov 23 17:33:12 crc kubenswrapper[5050]: I1123 17:33:12.950804 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-sn9hl_4e8f95ac-d360-4b81-9bce-28577dce8e4c/cp-reloader/0.log" Nov 23 17:33:13 crc kubenswrapper[5050]: I1123 17:33:13.244941 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-sn9hl_4e8f95ac-d360-4b81-9bce-28577dce8e4c/cp-metrics/0.log" Nov 23 17:33:13 crc kubenswrapper[5050]: I1123 17:33:13.245105 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-sn9hl_4e8f95ac-d360-4b81-9bce-28577dce8e4c/cp-frr-files/0.log" Nov 23 17:33:13 crc kubenswrapper[5050]: I1123 17:33:13.256089 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-sn9hl_4e8f95ac-d360-4b81-9bce-28577dce8e4c/cp-reloader/0.log" Nov 23 17:33:13 crc kubenswrapper[5050]: I1123 17:33:13.272178 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-sn9hl_4e8f95ac-d360-4b81-9bce-28577dce8e4c/cp-metrics/0.log" Nov 23 17:33:13 crc kubenswrapper[5050]: I1123 
17:33:13.470029 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-sn9hl_4e8f95ac-d360-4b81-9bce-28577dce8e4c/cp-metrics/0.log" Nov 23 17:33:13 crc kubenswrapper[5050]: I1123 17:33:13.529020 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-sn9hl_4e8f95ac-d360-4b81-9bce-28577dce8e4c/cp-frr-files/0.log" Nov 23 17:33:13 crc kubenswrapper[5050]: I1123 17:33:13.535985 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-sn9hl_4e8f95ac-d360-4b81-9bce-28577dce8e4c/controller/0.log" Nov 23 17:33:13 crc kubenswrapper[5050]: I1123 17:33:13.560846 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-sn9hl_4e8f95ac-d360-4b81-9bce-28577dce8e4c/cp-reloader/0.log" Nov 23 17:33:13 crc kubenswrapper[5050]: I1123 17:33:13.781849 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-sn9hl_4e8f95ac-d360-4b81-9bce-28577dce8e4c/frr-metrics/0.log" Nov 23 17:33:13 crc kubenswrapper[5050]: I1123 17:33:13.782273 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-sn9hl_4e8f95ac-d360-4b81-9bce-28577dce8e4c/kube-rbac-proxy/0.log" Nov 23 17:33:13 crc kubenswrapper[5050]: I1123 17:33:13.799474 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-sn9hl_4e8f95ac-d360-4b81-9bce-28577dce8e4c/kube-rbac-proxy-frr/0.log" Nov 23 17:33:14 crc kubenswrapper[5050]: I1123 17:33:14.358539 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-sn9hl_4e8f95ac-d360-4b81-9bce-28577dce8e4c/reloader/0.log" Nov 23 17:33:14 crc kubenswrapper[5050]: I1123 17:33:14.500312 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-webhook-server-6998585d5-c2rcn_4c8fffca-2f1a-4b47-beb9-66bf0c19f8e0/frr-k8s-webhook-server/0.log" Nov 23 17:33:15 crc kubenswrapper[5050]: I1123 17:33:15.388863 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-575dfc8b8d-ztv7m_7492d5b8-beb8-4678-bb7b-80dab041dcfc/manager/0.log" Nov 23 17:33:15 crc kubenswrapper[5050]: I1123 17:33:15.617273 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-webhook-server-777d788784-h2285_c5e8fb3c-c4fd-43e6-9def-6903243d2daa/webhook-server/0.log" Nov 23 17:33:15 crc kubenswrapper[5050]: I1123 17:33:15.664101 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-dglng_a29c3193-350c-4e8d-9c6f-bfb1e96a4527/kube-rbac-proxy/0.log" Nov 23 17:33:17 crc kubenswrapper[5050]: I1123 17:33:17.060682 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-dglng_a29c3193-350c-4e8d-9c6f-bfb1e96a4527/speaker/0.log" Nov 23 17:33:17 crc kubenswrapper[5050]: I1123 17:33:17.524552 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-sn9hl_4e8f95ac-d360-4b81-9bce-28577dce8e4c/frr/0.log" Nov 23 17:33:33 crc kubenswrapper[5050]: I1123 17:33:33.300270 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931afsmxx_15d146e6-8be3-42c8-a6b4-45dc085b53a4/util/0.log" Nov 23 17:33:33 crc kubenswrapper[5050]: I1123 17:33:33.552902 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931afsmxx_15d146e6-8be3-42c8-a6b4-45dc085b53a4/pull/0.log" 
Nov 23 17:33:33 crc kubenswrapper[5050]: I1123 17:33:33.560269 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931afsmxx_15d146e6-8be3-42c8-a6b4-45dc085b53a4/pull/0.log" Nov 23 17:33:33 crc kubenswrapper[5050]: I1123 17:33:33.618901 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931afsmxx_15d146e6-8be3-42c8-a6b4-45dc085b53a4/util/0.log" Nov 23 17:33:33 crc kubenswrapper[5050]: I1123 17:33:33.838547 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931afsmxx_15d146e6-8be3-42c8-a6b4-45dc085b53a4/util/0.log" Nov 23 17:33:33 crc kubenswrapper[5050]: I1123 17:33:33.848602 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931afsmxx_15d146e6-8be3-42c8-a6b4-45dc085b53a4/extract/0.log" Nov 23 17:33:33 crc kubenswrapper[5050]: I1123 17:33:33.883698 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931afsmxx_15d146e6-8be3-42c8-a6b4-45dc085b53a4/pull/0.log" Nov 23 17:33:34 crc kubenswrapper[5050]: I1123 17:33:34.035807 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enf8kg_ca5beca1-ae2a-475c-bb17-062dce3850d6/util/0.log" Nov 23 17:33:34 crc kubenswrapper[5050]: I1123 17:33:34.255826 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enf8kg_ca5beca1-ae2a-475c-bb17-062dce3850d6/util/0.log" Nov 23 17:33:34 crc kubenswrapper[5050]: I1123 17:33:34.300274 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enf8kg_ca5beca1-ae2a-475c-bb17-062dce3850d6/pull/0.log" Nov 23 17:33:34 crc kubenswrapper[5050]: I1123 17:33:34.339111 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enf8kg_ca5beca1-ae2a-475c-bb17-062dce3850d6/pull/0.log" Nov 23 17:33:34 crc kubenswrapper[5050]: I1123 17:33:34.491068 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enf8kg_ca5beca1-ae2a-475c-bb17-062dce3850d6/pull/0.log" Nov 23 17:33:34 crc kubenswrapper[5050]: I1123 17:33:34.499587 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enf8kg_ca5beca1-ae2a-475c-bb17-062dce3850d6/util/0.log" Nov 23 17:33:34 crc kubenswrapper[5050]: I1123 17:33:34.571825 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enf8kg_ca5beca1-ae2a-475c-bb17-062dce3850d6/extract/0.log" Nov 23 17:33:34 crc kubenswrapper[5050]: I1123 17:33:34.709282 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210cn9f5_a0d69f72-272a-4871-80ff-09f031c8019d/util/0.log" Nov 23 17:33:34 crc kubenswrapper[5050]: I1123 17:33:34.922116 5050 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210cn9f5_a0d69f72-272a-4871-80ff-09f031c8019d/util/0.log" Nov 23 17:33:34 crc kubenswrapper[5050]: I1123 17:33:34.961601 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210cn9f5_a0d69f72-272a-4871-80ff-09f031c8019d/pull/0.log" Nov 23 17:33:34 crc kubenswrapper[5050]: I1123 17:33:34.986886 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210cn9f5_a0d69f72-272a-4871-80ff-09f031c8019d/pull/0.log" Nov 23 17:33:35 crc kubenswrapper[5050]: I1123 17:33:35.164529 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210cn9f5_a0d69f72-272a-4871-80ff-09f031c8019d/util/0.log" Nov 23 17:33:35 crc kubenswrapper[5050]: I1123 17:33:35.192390 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210cn9f5_a0d69f72-272a-4871-80ff-09f031c8019d/pull/0.log" Nov 23 17:33:35 crc kubenswrapper[5050]: I1123 17:33:35.196996 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210cn9f5_a0d69f72-272a-4871-80ff-09f031c8019d/extract/0.log" Nov 23 17:33:35 crc kubenswrapper[5050]: I1123 17:33:35.366027 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-ztszf_e12fe3e5-acc0-402b-aa45-164b108828d8/extract-utilities/0.log" Nov 23 17:33:35 crc kubenswrapper[5050]: I1123 17:33:35.645366 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-ztszf_e12fe3e5-acc0-402b-aa45-164b108828d8/extract-content/0.log" Nov 23 17:33:35 crc kubenswrapper[5050]: I1123 17:33:35.660769 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-ztszf_e12fe3e5-acc0-402b-aa45-164b108828d8/extract-utilities/0.log" Nov 23 17:33:35 crc kubenswrapper[5050]: I1123 17:33:35.685274 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-ztszf_e12fe3e5-acc0-402b-aa45-164b108828d8/extract-content/0.log" Nov 23 17:33:35 crc kubenswrapper[5050]: I1123 17:33:35.961810 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-ztszf_e12fe3e5-acc0-402b-aa45-164b108828d8/extract-content/0.log" Nov 23 17:33:35 crc kubenswrapper[5050]: I1123 17:33:35.969876 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-ztszf_e12fe3e5-acc0-402b-aa45-164b108828d8/extract-utilities/0.log" Nov 23 17:33:36 crc kubenswrapper[5050]: I1123 17:33:36.229991 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-tff2j_5e8a1e80-a74e-4298-a915-aef2517690c7/extract-utilities/0.log" Nov 23 17:33:36 crc kubenswrapper[5050]: I1123 17:33:36.543037 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-tff2j_5e8a1e80-a74e-4298-a915-aef2517690c7/extract-content/0.log" Nov 23 17:33:36 crc kubenswrapper[5050]: I1123 17:33:36.549025 5050 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_community-operators-tff2j_5e8a1e80-a74e-4298-a915-aef2517690c7/extract-utilities/0.log" Nov 23 17:33:36 crc kubenswrapper[5050]: I1123 17:33:36.549108 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-tff2j_5e8a1e80-a74e-4298-a915-aef2517690c7/extract-content/0.log" Nov 23 17:33:36 crc kubenswrapper[5050]: I1123 17:33:36.784738 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-tff2j_5e8a1e80-a74e-4298-a915-aef2517690c7/extract-content/0.log" Nov 23 17:33:36 crc kubenswrapper[5050]: I1123 17:33:36.870861 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-tff2j_5e8a1e80-a74e-4298-a915-aef2517690c7/extract-utilities/0.log" Nov 23 17:33:37 crc kubenswrapper[5050]: I1123 17:33:37.070747 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6f6z6d_13fb76cc-ba40-4b0d-85cf-73a8fcd56f36/util/0.log" Nov 23 17:33:37 crc kubenswrapper[5050]: I1123 17:33:37.103365 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-ztszf_e12fe3e5-acc0-402b-aa45-164b108828d8/registry-server/0.log" Nov 23 17:33:37 crc kubenswrapper[5050]: I1123 17:33:37.321537 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6f6z6d_13fb76cc-ba40-4b0d-85cf-73a8fcd56f36/pull/0.log" Nov 23 17:33:37 crc kubenswrapper[5050]: I1123 17:33:37.321770 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6f6z6d_13fb76cc-ba40-4b0d-85cf-73a8fcd56f36/pull/0.log" Nov 23 17:33:37 crc kubenswrapper[5050]: I1123 17:33:37.331685 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6f6z6d_13fb76cc-ba40-4b0d-85cf-73a8fcd56f36/util/0.log" Nov 23 17:33:37 crc kubenswrapper[5050]: I1123 17:33:37.592558 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6f6z6d_13fb76cc-ba40-4b0d-85cf-73a8fcd56f36/util/0.log" Nov 23 17:33:37 crc kubenswrapper[5050]: I1123 17:33:37.634082 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6f6z6d_13fb76cc-ba40-4b0d-85cf-73a8fcd56f36/extract/0.log" Nov 23 17:33:37 crc kubenswrapper[5050]: I1123 17:33:37.638709 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6f6z6d_13fb76cc-ba40-4b0d-85cf-73a8fcd56f36/pull/0.log" Nov 23 17:33:37 crc kubenswrapper[5050]: I1123 17:33:37.858533 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-kdvnh_6c1a2056-be78-4ef3-b451-867a8b230645/marketplace-operator/0.log" Nov 23 17:33:37 crc kubenswrapper[5050]: I1123 17:33:37.867430 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-4jl4h_6f8a262c-2ff5-45aa-bd0a-d1c948021ca8/extract-utilities/0.log" Nov 23 17:33:38 crc kubenswrapper[5050]: I1123 17:33:38.217411 5050 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_redhat-marketplace-4jl4h_6f8a262c-2ff5-45aa-bd0a-d1c948021ca8/extract-content/0.log" Nov 23 17:33:38 crc kubenswrapper[5050]: I1123 17:33:38.218597 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-4jl4h_6f8a262c-2ff5-45aa-bd0a-d1c948021ca8/extract-content/0.log" Nov 23 17:33:38 crc kubenswrapper[5050]: I1123 17:33:38.277738 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-4jl4h_6f8a262c-2ff5-45aa-bd0a-d1c948021ca8/extract-utilities/0.log" Nov 23 17:33:38 crc kubenswrapper[5050]: I1123 17:33:38.512230 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-4jl4h_6f8a262c-2ff5-45aa-bd0a-d1c948021ca8/extract-utilities/0.log" Nov 23 17:33:38 crc kubenswrapper[5050]: I1123 17:33:38.524629 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-4jl4h_6f8a262c-2ff5-45aa-bd0a-d1c948021ca8/extract-content/0.log" Nov 23 17:33:38 crc kubenswrapper[5050]: I1123 17:33:38.801822 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-xwz4w_8c7ace55-a1c9-441f-8150-fb262b63bf29/extract-utilities/0.log" Nov 23 17:33:38 crc kubenswrapper[5050]: I1123 17:33:38.828582 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-tff2j_5e8a1e80-a74e-4298-a915-aef2517690c7/registry-server/0.log" Nov 23 17:33:38 crc kubenswrapper[5050]: I1123 17:33:38.946091 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-xwz4w_8c7ace55-a1c9-441f-8150-fb262b63bf29/extract-content/0.log" Nov 23 17:33:38 crc kubenswrapper[5050]: I1123 17:33:38.963711 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-xwz4w_8c7ace55-a1c9-441f-8150-fb262b63bf29/extract-utilities/0.log" Nov 23 17:33:38 crc kubenswrapper[5050]: I1123 17:33:38.976265 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-4jl4h_6f8a262c-2ff5-45aa-bd0a-d1c948021ca8/registry-server/0.log" Nov 23 17:33:39 crc kubenswrapper[5050]: I1123 17:33:39.040625 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-xwz4w_8c7ace55-a1c9-441f-8150-fb262b63bf29/extract-content/0.log" Nov 23 17:33:39 crc kubenswrapper[5050]: I1123 17:33:39.210961 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-xwz4w_8c7ace55-a1c9-441f-8150-fb262b63bf29/extract-utilities/0.log" Nov 23 17:33:39 crc kubenswrapper[5050]: I1123 17:33:39.257560 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-xwz4w_8c7ace55-a1c9-441f-8150-fb262b63bf29/extract-content/0.log" Nov 23 17:33:40 crc kubenswrapper[5050]: I1123 17:33:40.515805 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-xwz4w_8c7ace55-a1c9-441f-8150-fb262b63bf29/registry-server/0.log" Nov 23 17:33:56 crc kubenswrapper[5050]: I1123 17:33:56.448320 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-668cf9dfbb-tchkm_3832bef6-d18c-426a-85e0-3555819c7d47/prometheus-operator/0.log" Nov 23 17:33:56 crc kubenswrapper[5050]: I1123 17:33:56.566079 5050 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-c8fbbdfff-9n7z7_c1edd280-122b-4bc5-ae0e-fe9d8240d2e1/prometheus-operator-admission-webhook/0.log" Nov 23 17:33:56 crc kubenswrapper[5050]: I1123 17:33:56.673844 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-c8fbbdfff-vvzzg_c9fb72b9-7a4b-4b97-8098-bdb59bc12f78/prometheus-operator-admission-webhook/0.log" Nov 23 17:33:56 crc kubenswrapper[5050]: I1123 17:33:56.823619 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_observability-operator-d8bb48f5d-f5xvk_0500f9f7-d32f-4925-a791-7ce5cddcad30/operator/0.log" Nov 23 17:33:56 crc kubenswrapper[5050]: I1123 17:33:56.919656 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_perses-operator-5446b9c989-ffb9c_52ab3442-8aa0-46c1-b7f6-09d59cf23afc/perses-operator/0.log" Nov 23 17:33:59 crc kubenswrapper[5050]: I1123 17:33:59.224064 5050 patch_prober.go:28] interesting pod/machine-config-daemon-hlrlq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 23 17:33:59 crc kubenswrapper[5050]: I1123 17:33:59.224703 5050 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 23 17:34:02 crc kubenswrapper[5050]: I1123 17:34:02.339111 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-9ck5s"] Nov 23 17:34:02 crc kubenswrapper[5050]: E1123 17:34:02.340646 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f55be819-bd7a-42aa-b1ef-d4a3f44c8599" containerName="extract-content" Nov 23 17:34:02 crc kubenswrapper[5050]: I1123 17:34:02.340662 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="f55be819-bd7a-42aa-b1ef-d4a3f44c8599" containerName="extract-content" Nov 23 17:34:02 crc kubenswrapper[5050]: E1123 17:34:02.340694 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f55be819-bd7a-42aa-b1ef-d4a3f44c8599" containerName="extract-utilities" Nov 23 17:34:02 crc kubenswrapper[5050]: I1123 17:34:02.340701 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="f55be819-bd7a-42aa-b1ef-d4a3f44c8599" containerName="extract-utilities" Nov 23 17:34:02 crc kubenswrapper[5050]: E1123 17:34:02.340717 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f55be819-bd7a-42aa-b1ef-d4a3f44c8599" containerName="registry-server" Nov 23 17:34:02 crc kubenswrapper[5050]: I1123 17:34:02.340723 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="f55be819-bd7a-42aa-b1ef-d4a3f44c8599" containerName="registry-server" Nov 23 17:34:02 crc kubenswrapper[5050]: I1123 17:34:02.340971 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="f55be819-bd7a-42aa-b1ef-d4a3f44c8599" containerName="registry-server" Nov 23 17:34:02 crc kubenswrapper[5050]: I1123 17:34:02.342650 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-9ck5s" Nov 23 17:34:02 crc kubenswrapper[5050]: I1123 17:34:02.381692 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-9ck5s"] Nov 23 17:34:02 crc kubenswrapper[5050]: I1123 17:34:02.412780 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bwjwx\" (UniqueName: \"kubernetes.io/projected/82ca6325-f96f-4ff1-95db-27d7e860ee0c-kube-api-access-bwjwx\") pod \"redhat-operators-9ck5s\" (UID: \"82ca6325-f96f-4ff1-95db-27d7e860ee0c\") " pod="openshift-marketplace/redhat-operators-9ck5s" Nov 23 17:34:02 crc kubenswrapper[5050]: I1123 17:34:02.412937 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/82ca6325-f96f-4ff1-95db-27d7e860ee0c-utilities\") pod \"redhat-operators-9ck5s\" (UID: \"82ca6325-f96f-4ff1-95db-27d7e860ee0c\") " pod="openshift-marketplace/redhat-operators-9ck5s" Nov 23 17:34:02 crc kubenswrapper[5050]: I1123 17:34:02.412962 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/82ca6325-f96f-4ff1-95db-27d7e860ee0c-catalog-content\") pod \"redhat-operators-9ck5s\" (UID: \"82ca6325-f96f-4ff1-95db-27d7e860ee0c\") " pod="openshift-marketplace/redhat-operators-9ck5s" Nov 23 17:34:02 crc kubenswrapper[5050]: I1123 17:34:02.518533 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/82ca6325-f96f-4ff1-95db-27d7e860ee0c-utilities\") pod \"redhat-operators-9ck5s\" (UID: \"82ca6325-f96f-4ff1-95db-27d7e860ee0c\") " pod="openshift-marketplace/redhat-operators-9ck5s" Nov 23 17:34:02 crc kubenswrapper[5050]: I1123 17:34:02.518593 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/82ca6325-f96f-4ff1-95db-27d7e860ee0c-catalog-content\") pod \"redhat-operators-9ck5s\" (UID: \"82ca6325-f96f-4ff1-95db-27d7e860ee0c\") " pod="openshift-marketplace/redhat-operators-9ck5s" Nov 23 17:34:02 crc kubenswrapper[5050]: I1123 17:34:02.518695 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bwjwx\" (UniqueName: \"kubernetes.io/projected/82ca6325-f96f-4ff1-95db-27d7e860ee0c-kube-api-access-bwjwx\") pod \"redhat-operators-9ck5s\" (UID: \"82ca6325-f96f-4ff1-95db-27d7e860ee0c\") " pod="openshift-marketplace/redhat-operators-9ck5s" Nov 23 17:34:02 crc kubenswrapper[5050]: I1123 17:34:02.521220 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/82ca6325-f96f-4ff1-95db-27d7e860ee0c-utilities\") pod \"redhat-operators-9ck5s\" (UID: \"82ca6325-f96f-4ff1-95db-27d7e860ee0c\") " pod="openshift-marketplace/redhat-operators-9ck5s" Nov 23 17:34:02 crc kubenswrapper[5050]: I1123 17:34:02.521259 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/82ca6325-f96f-4ff1-95db-27d7e860ee0c-catalog-content\") pod \"redhat-operators-9ck5s\" (UID: \"82ca6325-f96f-4ff1-95db-27d7e860ee0c\") " pod="openshift-marketplace/redhat-operators-9ck5s" Nov 23 17:34:02 crc kubenswrapper[5050]: I1123 17:34:02.559942 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-bwjwx\" (UniqueName: \"kubernetes.io/projected/82ca6325-f96f-4ff1-95db-27d7e860ee0c-kube-api-access-bwjwx\") pod \"redhat-operators-9ck5s\" (UID: \"82ca6325-f96f-4ff1-95db-27d7e860ee0c\") " pod="openshift-marketplace/redhat-operators-9ck5s" Nov 23 17:34:02 crc kubenswrapper[5050]: I1123 17:34:02.683869 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-9ck5s" Nov 23 17:34:03 crc kubenswrapper[5050]: I1123 17:34:03.206090 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-9ck5s"] Nov 23 17:34:03 crc kubenswrapper[5050]: I1123 17:34:03.813249 5050 generic.go:334] "Generic (PLEG): container finished" podID="82ca6325-f96f-4ff1-95db-27d7e860ee0c" containerID="fe7180dbf1a8097cf095d105b9b1277bf2b3e8cd590a1b9ff573b7c786e22320" exitCode=0 Nov 23 17:34:03 crc kubenswrapper[5050]: I1123 17:34:03.813373 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9ck5s" event={"ID":"82ca6325-f96f-4ff1-95db-27d7e860ee0c","Type":"ContainerDied","Data":"fe7180dbf1a8097cf095d105b9b1277bf2b3e8cd590a1b9ff573b7c786e22320"} Nov 23 17:34:03 crc kubenswrapper[5050]: I1123 17:34:03.813702 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9ck5s" event={"ID":"82ca6325-f96f-4ff1-95db-27d7e860ee0c","Type":"ContainerStarted","Data":"f0885d49656661b6f26a450cc3956e1bff3c3e1706140d778af4a5186ce0221c"} Nov 23 17:34:04 crc kubenswrapper[5050]: I1123 17:34:04.826861 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9ck5s" event={"ID":"82ca6325-f96f-4ff1-95db-27d7e860ee0c","Type":"ContainerStarted","Data":"dd6ec2d786cb84be8c17b7a497ac095049b49362dfa20c4101e7a897bac8242f"} Nov 23 17:34:09 crc kubenswrapper[5050]: I1123 17:34:09.890603 5050 generic.go:334] "Generic (PLEG): container finished" podID="82ca6325-f96f-4ff1-95db-27d7e860ee0c" containerID="dd6ec2d786cb84be8c17b7a497ac095049b49362dfa20c4101e7a897bac8242f" exitCode=0 Nov 23 17:34:09 crc kubenswrapper[5050]: I1123 17:34:09.890872 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9ck5s" event={"ID":"82ca6325-f96f-4ff1-95db-27d7e860ee0c","Type":"ContainerDied","Data":"dd6ec2d786cb84be8c17b7a497ac095049b49362dfa20c4101e7a897bac8242f"} Nov 23 17:34:10 crc kubenswrapper[5050]: I1123 17:34:10.905002 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9ck5s" event={"ID":"82ca6325-f96f-4ff1-95db-27d7e860ee0c","Type":"ContainerStarted","Data":"75d4ae2f0d09f3de0d10a5b0049199c3608264aab9254306182e680932dc3016"} Nov 23 17:34:10 crc kubenswrapper[5050]: I1123 17:34:10.931312 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-9ck5s" podStartSLOduration=2.461429534 podStartE2EDuration="8.931286873s" podCreationTimestamp="2025-11-23 17:34:02 +0000 UTC" firstStartedPulling="2025-11-23 17:34:03.815207726 +0000 UTC m=+10338.982204211" lastFinishedPulling="2025-11-23 17:34:10.285065065 +0000 UTC m=+10345.452061550" observedRunningTime="2025-11-23 17:34:10.929228605 +0000 UTC m=+10346.096225090" watchObservedRunningTime="2025-11-23 17:34:10.931286873 +0000 UTC m=+10346.098283358" Nov 23 17:34:12 crc kubenswrapper[5050]: I1123 17:34:12.687716 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" 
pod="openshift-marketplace/redhat-operators-9ck5s" Nov 23 17:34:12 crc kubenswrapper[5050]: I1123 17:34:12.688506 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-9ck5s" Nov 23 17:34:13 crc kubenswrapper[5050]: I1123 17:34:13.773839 5050 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-9ck5s" podUID="82ca6325-f96f-4ff1-95db-27d7e860ee0c" containerName="registry-server" probeResult="failure" output=< Nov 23 17:34:13 crc kubenswrapper[5050]: timeout: failed to connect service ":50051" within 1s Nov 23 17:34:13 crc kubenswrapper[5050]: > Nov 23 17:34:22 crc kubenswrapper[5050]: I1123 17:34:22.579122 5050 scope.go:117] "RemoveContainer" containerID="238d1b8b4f31551a8b68ad0c7828b88c68f1864610db3145ccfd4ef50792f4ca" Nov 23 17:34:23 crc kubenswrapper[5050]: I1123 17:34:23.612929 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-9ck5s" Nov 23 17:34:23 crc kubenswrapper[5050]: I1123 17:34:23.678945 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-9ck5s" Nov 23 17:34:23 crc kubenswrapper[5050]: I1123 17:34:23.865808 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-9ck5s"] Nov 23 17:34:25 crc kubenswrapper[5050]: I1123 17:34:25.114308 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-9ck5s" podUID="82ca6325-f96f-4ff1-95db-27d7e860ee0c" containerName="registry-server" containerID="cri-o://75d4ae2f0d09f3de0d10a5b0049199c3608264aab9254306182e680932dc3016" gracePeriod=2 Nov 23 17:34:25 crc kubenswrapper[5050]: I1123 17:34:25.782413 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-9ck5s" Nov 23 17:34:25 crc kubenswrapper[5050]: I1123 17:34:25.840495 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/82ca6325-f96f-4ff1-95db-27d7e860ee0c-utilities\") pod \"82ca6325-f96f-4ff1-95db-27d7e860ee0c\" (UID: \"82ca6325-f96f-4ff1-95db-27d7e860ee0c\") " Nov 23 17:34:25 crc kubenswrapper[5050]: I1123 17:34:25.840702 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bwjwx\" (UniqueName: \"kubernetes.io/projected/82ca6325-f96f-4ff1-95db-27d7e860ee0c-kube-api-access-bwjwx\") pod \"82ca6325-f96f-4ff1-95db-27d7e860ee0c\" (UID: \"82ca6325-f96f-4ff1-95db-27d7e860ee0c\") " Nov 23 17:34:25 crc kubenswrapper[5050]: I1123 17:34:25.840759 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/82ca6325-f96f-4ff1-95db-27d7e860ee0c-catalog-content\") pod \"82ca6325-f96f-4ff1-95db-27d7e860ee0c\" (UID: \"82ca6325-f96f-4ff1-95db-27d7e860ee0c\") " Nov 23 17:34:25 crc kubenswrapper[5050]: I1123 17:34:25.841232 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/82ca6325-f96f-4ff1-95db-27d7e860ee0c-utilities" (OuterVolumeSpecName: "utilities") pod "82ca6325-f96f-4ff1-95db-27d7e860ee0c" (UID: "82ca6325-f96f-4ff1-95db-27d7e860ee0c"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 17:34:25 crc kubenswrapper[5050]: I1123 17:34:25.841938 5050 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/82ca6325-f96f-4ff1-95db-27d7e860ee0c-utilities\") on node \"crc\" DevicePath \"\"" Nov 23 17:34:25 crc kubenswrapper[5050]: I1123 17:34:25.875313 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/82ca6325-f96f-4ff1-95db-27d7e860ee0c-kube-api-access-bwjwx" (OuterVolumeSpecName: "kube-api-access-bwjwx") pod "82ca6325-f96f-4ff1-95db-27d7e860ee0c" (UID: "82ca6325-f96f-4ff1-95db-27d7e860ee0c"). InnerVolumeSpecName "kube-api-access-bwjwx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 17:34:25 crc kubenswrapper[5050]: I1123 17:34:25.945240 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bwjwx\" (UniqueName: \"kubernetes.io/projected/82ca6325-f96f-4ff1-95db-27d7e860ee0c-kube-api-access-bwjwx\") on node \"crc\" DevicePath \"\"" Nov 23 17:34:25 crc kubenswrapper[5050]: I1123 17:34:25.962314 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/82ca6325-f96f-4ff1-95db-27d7e860ee0c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "82ca6325-f96f-4ff1-95db-27d7e860ee0c" (UID: "82ca6325-f96f-4ff1-95db-27d7e860ee0c"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 17:34:26 crc kubenswrapper[5050]: I1123 17:34:26.048361 5050 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/82ca6325-f96f-4ff1-95db-27d7e860ee0c-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 23 17:34:26 crc kubenswrapper[5050]: I1123 17:34:26.130232 5050 generic.go:334] "Generic (PLEG): container finished" podID="82ca6325-f96f-4ff1-95db-27d7e860ee0c" containerID="75d4ae2f0d09f3de0d10a5b0049199c3608264aab9254306182e680932dc3016" exitCode=0 Nov 23 17:34:26 crc kubenswrapper[5050]: I1123 17:34:26.130288 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9ck5s" event={"ID":"82ca6325-f96f-4ff1-95db-27d7e860ee0c","Type":"ContainerDied","Data":"75d4ae2f0d09f3de0d10a5b0049199c3608264aab9254306182e680932dc3016"} Nov 23 17:34:26 crc kubenswrapper[5050]: I1123 17:34:26.130319 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9ck5s" event={"ID":"82ca6325-f96f-4ff1-95db-27d7e860ee0c","Type":"ContainerDied","Data":"f0885d49656661b6f26a450cc3956e1bff3c3e1706140d778af4a5186ce0221c"} Nov 23 17:34:26 crc kubenswrapper[5050]: I1123 17:34:26.130331 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-9ck5s" Nov 23 17:34:26 crc kubenswrapper[5050]: I1123 17:34:26.130340 5050 scope.go:117] "RemoveContainer" containerID="75d4ae2f0d09f3de0d10a5b0049199c3608264aab9254306182e680932dc3016" Nov 23 17:34:26 crc kubenswrapper[5050]: I1123 17:34:26.154000 5050 scope.go:117] "RemoveContainer" containerID="dd6ec2d786cb84be8c17b7a497ac095049b49362dfa20c4101e7a897bac8242f" Nov 23 17:34:26 crc kubenswrapper[5050]: I1123 17:34:26.177939 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-9ck5s"] Nov 23 17:34:26 crc kubenswrapper[5050]: I1123 17:34:26.192418 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-9ck5s"] Nov 23 17:34:26 crc kubenswrapper[5050]: I1123 17:34:26.196588 5050 scope.go:117] "RemoveContainer" containerID="fe7180dbf1a8097cf095d105b9b1277bf2b3e8cd590a1b9ff573b7c786e22320" Nov 23 17:34:26 crc kubenswrapper[5050]: I1123 17:34:26.232686 5050 scope.go:117] "RemoveContainer" containerID="75d4ae2f0d09f3de0d10a5b0049199c3608264aab9254306182e680932dc3016" Nov 23 17:34:26 crc kubenswrapper[5050]: E1123 17:34:26.233142 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"75d4ae2f0d09f3de0d10a5b0049199c3608264aab9254306182e680932dc3016\": container with ID starting with 75d4ae2f0d09f3de0d10a5b0049199c3608264aab9254306182e680932dc3016 not found: ID does not exist" containerID="75d4ae2f0d09f3de0d10a5b0049199c3608264aab9254306182e680932dc3016" Nov 23 17:34:26 crc kubenswrapper[5050]: I1123 17:34:26.233194 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"75d4ae2f0d09f3de0d10a5b0049199c3608264aab9254306182e680932dc3016"} err="failed to get container status \"75d4ae2f0d09f3de0d10a5b0049199c3608264aab9254306182e680932dc3016\": rpc error: code = NotFound desc = could not find container \"75d4ae2f0d09f3de0d10a5b0049199c3608264aab9254306182e680932dc3016\": container with ID starting with 75d4ae2f0d09f3de0d10a5b0049199c3608264aab9254306182e680932dc3016 not found: ID does not exist" Nov 23 17:34:26 crc kubenswrapper[5050]: I1123 17:34:26.233229 5050 scope.go:117] "RemoveContainer" containerID="dd6ec2d786cb84be8c17b7a497ac095049b49362dfa20c4101e7a897bac8242f" Nov 23 17:34:26 crc kubenswrapper[5050]: E1123 17:34:26.233765 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dd6ec2d786cb84be8c17b7a497ac095049b49362dfa20c4101e7a897bac8242f\": container with ID starting with dd6ec2d786cb84be8c17b7a497ac095049b49362dfa20c4101e7a897bac8242f not found: ID does not exist" containerID="dd6ec2d786cb84be8c17b7a497ac095049b49362dfa20c4101e7a897bac8242f" Nov 23 17:34:26 crc kubenswrapper[5050]: I1123 17:34:26.233791 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dd6ec2d786cb84be8c17b7a497ac095049b49362dfa20c4101e7a897bac8242f"} err="failed to get container status \"dd6ec2d786cb84be8c17b7a497ac095049b49362dfa20c4101e7a897bac8242f\": rpc error: code = NotFound desc = could not find container \"dd6ec2d786cb84be8c17b7a497ac095049b49362dfa20c4101e7a897bac8242f\": container with ID starting with dd6ec2d786cb84be8c17b7a497ac095049b49362dfa20c4101e7a897bac8242f not found: ID does not exist" Nov 23 17:34:26 crc kubenswrapper[5050]: I1123 17:34:26.233809 5050 scope.go:117] "RemoveContainer" 
containerID="fe7180dbf1a8097cf095d105b9b1277bf2b3e8cd590a1b9ff573b7c786e22320" Nov 23 17:34:26 crc kubenswrapper[5050]: E1123 17:34:26.234038 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fe7180dbf1a8097cf095d105b9b1277bf2b3e8cd590a1b9ff573b7c786e22320\": container with ID starting with fe7180dbf1a8097cf095d105b9b1277bf2b3e8cd590a1b9ff573b7c786e22320 not found: ID does not exist" containerID="fe7180dbf1a8097cf095d105b9b1277bf2b3e8cd590a1b9ff573b7c786e22320" Nov 23 17:34:26 crc kubenswrapper[5050]: I1123 17:34:26.234062 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fe7180dbf1a8097cf095d105b9b1277bf2b3e8cd590a1b9ff573b7c786e22320"} err="failed to get container status \"fe7180dbf1a8097cf095d105b9b1277bf2b3e8cd590a1b9ff573b7c786e22320\": rpc error: code = NotFound desc = could not find container \"fe7180dbf1a8097cf095d105b9b1277bf2b3e8cd590a1b9ff573b7c786e22320\": container with ID starting with fe7180dbf1a8097cf095d105b9b1277bf2b3e8cd590a1b9ff573b7c786e22320 not found: ID does not exist" Nov 23 17:34:27 crc kubenswrapper[5050]: I1123 17:34:27.568268 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="82ca6325-f96f-4ff1-95db-27d7e860ee0c" path="/var/lib/kubelet/pods/82ca6325-f96f-4ff1-95db-27d7e860ee0c/volumes" Nov 23 17:34:29 crc kubenswrapper[5050]: I1123 17:34:29.224903 5050 patch_prober.go:28] interesting pod/machine-config-daemon-hlrlq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 23 17:34:29 crc kubenswrapper[5050]: I1123 17:34:29.225740 5050 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 23 17:34:35 crc kubenswrapper[5050]: E1123 17:34:35.927413 5050 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.32:45150->38.102.83.32:33067: write tcp 38.102.83.32:45150->38.102.83.32:33067: write: broken pipe Nov 23 17:34:59 crc kubenswrapper[5050]: I1123 17:34:59.224560 5050 patch_prober.go:28] interesting pod/machine-config-daemon-hlrlq container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 23 17:34:59 crc kubenswrapper[5050]: I1123 17:34:59.225611 5050 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 23 17:34:59 crc kubenswrapper[5050]: I1123 17:34:59.225700 5050 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" Nov 23 17:34:59 crc kubenswrapper[5050]: I1123 17:34:59.227784 5050 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" 
containerStatusID={"Type":"cri-o","ID":"094e7d5a208023d03b696c393552ca70524a11cddf16b3f1347e40ec9e667a1c"} pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 23 17:34:59 crc kubenswrapper[5050]: I1123 17:34:59.227930 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" containerName="machine-config-daemon" containerID="cri-o://094e7d5a208023d03b696c393552ca70524a11cddf16b3f1347e40ec9e667a1c" gracePeriod=600 Nov 23 17:34:59 crc kubenswrapper[5050]: E1123 17:34:59.357095 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 17:34:59 crc kubenswrapper[5050]: I1123 17:34:59.646870 5050 generic.go:334] "Generic (PLEG): container finished" podID="1d998909-9470-47ef-87e8-b34f0473682f" containerID="094e7d5a208023d03b696c393552ca70524a11cddf16b3f1347e40ec9e667a1c" exitCode=0 Nov 23 17:34:59 crc kubenswrapper[5050]: I1123 17:34:59.646931 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" event={"ID":"1d998909-9470-47ef-87e8-b34f0473682f","Type":"ContainerDied","Data":"094e7d5a208023d03b696c393552ca70524a11cddf16b3f1347e40ec9e667a1c"} Nov 23 17:34:59 crc kubenswrapper[5050]: I1123 17:34:59.646973 5050 scope.go:117] "RemoveContainer" containerID="170f3900bfb04c6dadaf7839b3a6badf07704122b830e42ebf9f1e980b5ea8f4" Nov 23 17:34:59 crc kubenswrapper[5050]: I1123 17:34:59.647971 5050 scope.go:117] "RemoveContainer" containerID="094e7d5a208023d03b696c393552ca70524a11cddf16b3f1347e40ec9e667a1c" Nov 23 17:34:59 crc kubenswrapper[5050]: E1123 17:34:59.648336 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 17:35:12 crc kubenswrapper[5050]: I1123 17:35:12.548614 5050 scope.go:117] "RemoveContainer" containerID="094e7d5a208023d03b696c393552ca70524a11cddf16b3f1347e40ec9e667a1c" Nov 23 17:35:12 crc kubenswrapper[5050]: E1123 17:35:12.549855 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 17:35:26 crc kubenswrapper[5050]: I1123 17:35:26.575814 5050 scope.go:117] "RemoveContainer" containerID="094e7d5a208023d03b696c393552ca70524a11cddf16b3f1347e40ec9e667a1c" Nov 23 17:35:26 crc kubenswrapper[5050]: E1123 17:35:26.577167 5050 pod_workers.go:1301] "Error syncing pod, 
skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 17:35:27 crc kubenswrapper[5050]: I1123 17:35:27.357423 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-hn4wm"] Nov 23 17:35:27 crc kubenswrapper[5050]: E1123 17:35:27.360131 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="82ca6325-f96f-4ff1-95db-27d7e860ee0c" containerName="extract-utilities" Nov 23 17:35:27 crc kubenswrapper[5050]: I1123 17:35:27.360171 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="82ca6325-f96f-4ff1-95db-27d7e860ee0c" containerName="extract-utilities" Nov 23 17:35:27 crc kubenswrapper[5050]: E1123 17:35:27.360249 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="82ca6325-f96f-4ff1-95db-27d7e860ee0c" containerName="registry-server" Nov 23 17:35:27 crc kubenswrapper[5050]: I1123 17:35:27.360264 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="82ca6325-f96f-4ff1-95db-27d7e860ee0c" containerName="registry-server" Nov 23 17:35:27 crc kubenswrapper[5050]: E1123 17:35:27.360330 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="82ca6325-f96f-4ff1-95db-27d7e860ee0c" containerName="extract-content" Nov 23 17:35:27 crc kubenswrapper[5050]: I1123 17:35:27.360347 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="82ca6325-f96f-4ff1-95db-27d7e860ee0c" containerName="extract-content" Nov 23 17:35:27 crc kubenswrapper[5050]: I1123 17:35:27.360836 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="82ca6325-f96f-4ff1-95db-27d7e860ee0c" containerName="registry-server" Nov 23 17:35:27 crc kubenswrapper[5050]: I1123 17:35:27.365982 5050 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-hn4wm" Nov 23 17:35:27 crc kubenswrapper[5050]: I1123 17:35:27.407593 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-hn4wm"] Nov 23 17:35:27 crc kubenswrapper[5050]: I1123 17:35:27.506747 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5c36b66f-effa-4d89-9d9e-b18117396520-catalog-content\") pod \"certified-operators-hn4wm\" (UID: \"5c36b66f-effa-4d89-9d9e-b18117396520\") " pod="openshift-marketplace/certified-operators-hn4wm" Nov 23 17:35:27 crc kubenswrapper[5050]: I1123 17:35:27.506835 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5c36b66f-effa-4d89-9d9e-b18117396520-utilities\") pod \"certified-operators-hn4wm\" (UID: \"5c36b66f-effa-4d89-9d9e-b18117396520\") " pod="openshift-marketplace/certified-operators-hn4wm" Nov 23 17:35:27 crc kubenswrapper[5050]: I1123 17:35:27.507171 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fgtnz\" (UniqueName: \"kubernetes.io/projected/5c36b66f-effa-4d89-9d9e-b18117396520-kube-api-access-fgtnz\") pod \"certified-operators-hn4wm\" (UID: \"5c36b66f-effa-4d89-9d9e-b18117396520\") " pod="openshift-marketplace/certified-operators-hn4wm" Nov 23 17:35:27 crc kubenswrapper[5050]: I1123 17:35:27.610773 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fgtnz\" (UniqueName: \"kubernetes.io/projected/5c36b66f-effa-4d89-9d9e-b18117396520-kube-api-access-fgtnz\") pod \"certified-operators-hn4wm\" (UID: \"5c36b66f-effa-4d89-9d9e-b18117396520\") " pod="openshift-marketplace/certified-operators-hn4wm" Nov 23 17:35:27 crc kubenswrapper[5050]: I1123 17:35:27.610928 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5c36b66f-effa-4d89-9d9e-b18117396520-catalog-content\") pod \"certified-operators-hn4wm\" (UID: \"5c36b66f-effa-4d89-9d9e-b18117396520\") " pod="openshift-marketplace/certified-operators-hn4wm" Nov 23 17:35:27 crc kubenswrapper[5050]: I1123 17:35:27.610983 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5c36b66f-effa-4d89-9d9e-b18117396520-utilities\") pod \"certified-operators-hn4wm\" (UID: \"5c36b66f-effa-4d89-9d9e-b18117396520\") " pod="openshift-marketplace/certified-operators-hn4wm" Nov 23 17:35:27 crc kubenswrapper[5050]: I1123 17:35:27.612118 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5c36b66f-effa-4d89-9d9e-b18117396520-catalog-content\") pod \"certified-operators-hn4wm\" (UID: \"5c36b66f-effa-4d89-9d9e-b18117396520\") " pod="openshift-marketplace/certified-operators-hn4wm" Nov 23 17:35:27 crc kubenswrapper[5050]: I1123 17:35:27.612133 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5c36b66f-effa-4d89-9d9e-b18117396520-utilities\") pod \"certified-operators-hn4wm\" (UID: \"5c36b66f-effa-4d89-9d9e-b18117396520\") " pod="openshift-marketplace/certified-operators-hn4wm" Nov 23 17:35:27 crc kubenswrapper[5050]: I1123 17:35:27.652432 5050 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-fgtnz\" (UniqueName: \"kubernetes.io/projected/5c36b66f-effa-4d89-9d9e-b18117396520-kube-api-access-fgtnz\") pod \"certified-operators-hn4wm\" (UID: \"5c36b66f-effa-4d89-9d9e-b18117396520\") " pod="openshift-marketplace/certified-operators-hn4wm" Nov 23 17:35:27 crc kubenswrapper[5050]: I1123 17:35:27.705599 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-hn4wm" Nov 23 17:35:28 crc kubenswrapper[5050]: I1123 17:35:28.359950 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-hn4wm"] Nov 23 17:35:29 crc kubenswrapper[5050]: I1123 17:35:29.156368 5050 generic.go:334] "Generic (PLEG): container finished" podID="5c36b66f-effa-4d89-9d9e-b18117396520" containerID="eed4c750d2d63dc9200354027d5c671f2b553fdf57ea8f0a77f68b26b9e9eaa0" exitCode=0 Nov 23 17:35:29 crc kubenswrapper[5050]: I1123 17:35:29.156500 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hn4wm" event={"ID":"5c36b66f-effa-4d89-9d9e-b18117396520","Type":"ContainerDied","Data":"eed4c750d2d63dc9200354027d5c671f2b553fdf57ea8f0a77f68b26b9e9eaa0"} Nov 23 17:35:29 crc kubenswrapper[5050]: I1123 17:35:29.156888 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hn4wm" event={"ID":"5c36b66f-effa-4d89-9d9e-b18117396520","Type":"ContainerStarted","Data":"f3278b17645d952f161dcb8a14e2fc1acb731217038bc925b8b8502f9c804819"} Nov 23 17:35:31 crc kubenswrapper[5050]: I1123 17:35:31.186780 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hn4wm" event={"ID":"5c36b66f-effa-4d89-9d9e-b18117396520","Type":"ContainerStarted","Data":"9b27bc9e5c6496e6075b93a0a782740a7a7e450a1c8894f29bbce6fc46246932"} Nov 23 17:35:32 crc kubenswrapper[5050]: I1123 17:35:32.206635 5050 generic.go:334] "Generic (PLEG): container finished" podID="5c36b66f-effa-4d89-9d9e-b18117396520" containerID="9b27bc9e5c6496e6075b93a0a782740a7a7e450a1c8894f29bbce6fc46246932" exitCode=0 Nov 23 17:35:32 crc kubenswrapper[5050]: I1123 17:35:32.206727 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hn4wm" event={"ID":"5c36b66f-effa-4d89-9d9e-b18117396520","Type":"ContainerDied","Data":"9b27bc9e5c6496e6075b93a0a782740a7a7e450a1c8894f29bbce6fc46246932"} Nov 23 17:35:33 crc kubenswrapper[5050]: I1123 17:35:33.223993 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hn4wm" event={"ID":"5c36b66f-effa-4d89-9d9e-b18117396520","Type":"ContainerStarted","Data":"a9f7b978f2a43cb10adf25da307165ddd78374c1deeb779317209b8e836032eb"} Nov 23 17:35:33 crc kubenswrapper[5050]: I1123 17:35:33.255338 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-hn4wm" podStartSLOduration=2.768473988 podStartE2EDuration="6.255316241s" podCreationTimestamp="2025-11-23 17:35:27 +0000 UTC" firstStartedPulling="2025-11-23 17:35:29.164341358 +0000 UTC m=+10424.331337843" lastFinishedPulling="2025-11-23 17:35:32.651183611 +0000 UTC m=+10427.818180096" observedRunningTime="2025-11-23 17:35:33.251951286 +0000 UTC m=+10428.418947791" watchObservedRunningTime="2025-11-23 17:35:33.255316241 +0000 UTC m=+10428.422312736" Nov 23 17:35:37 crc kubenswrapper[5050]: I1123 17:35:37.552699 5050 scope.go:117] "RemoveContainer" 
containerID="094e7d5a208023d03b696c393552ca70524a11cddf16b3f1347e40ec9e667a1c" Nov 23 17:35:37 crc kubenswrapper[5050]: E1123 17:35:37.554870 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 17:35:37 crc kubenswrapper[5050]: I1123 17:35:37.706153 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-hn4wm" Nov 23 17:35:37 crc kubenswrapper[5050]: I1123 17:35:37.708593 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-hn4wm" Nov 23 17:35:38 crc kubenswrapper[5050]: I1123 17:35:38.444141 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-hn4wm" Nov 23 17:35:39 crc kubenswrapper[5050]: I1123 17:35:39.410835 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-hn4wm" Nov 23 17:35:39 crc kubenswrapper[5050]: I1123 17:35:39.502535 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-hn4wm"] Nov 23 17:35:41 crc kubenswrapper[5050]: I1123 17:35:41.342688 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-hn4wm" podUID="5c36b66f-effa-4d89-9d9e-b18117396520" containerName="registry-server" containerID="cri-o://a9f7b978f2a43cb10adf25da307165ddd78374c1deeb779317209b8e836032eb" gracePeriod=2 Nov 23 17:35:42 crc kubenswrapper[5050]: I1123 17:35:42.108206 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-hn4wm" Nov 23 17:35:42 crc kubenswrapper[5050]: I1123 17:35:42.230566 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fgtnz\" (UniqueName: \"kubernetes.io/projected/5c36b66f-effa-4d89-9d9e-b18117396520-kube-api-access-fgtnz\") pod \"5c36b66f-effa-4d89-9d9e-b18117396520\" (UID: \"5c36b66f-effa-4d89-9d9e-b18117396520\") " Nov 23 17:35:42 crc kubenswrapper[5050]: I1123 17:35:42.230786 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5c36b66f-effa-4d89-9d9e-b18117396520-catalog-content\") pod \"5c36b66f-effa-4d89-9d9e-b18117396520\" (UID: \"5c36b66f-effa-4d89-9d9e-b18117396520\") " Nov 23 17:35:42 crc kubenswrapper[5050]: I1123 17:35:42.231005 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5c36b66f-effa-4d89-9d9e-b18117396520-utilities\") pod \"5c36b66f-effa-4d89-9d9e-b18117396520\" (UID: \"5c36b66f-effa-4d89-9d9e-b18117396520\") " Nov 23 17:35:42 crc kubenswrapper[5050]: I1123 17:35:42.232389 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5c36b66f-effa-4d89-9d9e-b18117396520-utilities" (OuterVolumeSpecName: "utilities") pod "5c36b66f-effa-4d89-9d9e-b18117396520" (UID: "5c36b66f-effa-4d89-9d9e-b18117396520"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 17:35:42 crc kubenswrapper[5050]: I1123 17:35:42.247022 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5c36b66f-effa-4d89-9d9e-b18117396520-kube-api-access-fgtnz" (OuterVolumeSpecName: "kube-api-access-fgtnz") pod "5c36b66f-effa-4d89-9d9e-b18117396520" (UID: "5c36b66f-effa-4d89-9d9e-b18117396520"). InnerVolumeSpecName "kube-api-access-fgtnz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 17:35:42 crc kubenswrapper[5050]: I1123 17:35:42.290291 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5c36b66f-effa-4d89-9d9e-b18117396520-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5c36b66f-effa-4d89-9d9e-b18117396520" (UID: "5c36b66f-effa-4d89-9d9e-b18117396520"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 17:35:42 crc kubenswrapper[5050]: I1123 17:35:42.334903 5050 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5c36b66f-effa-4d89-9d9e-b18117396520-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 23 17:35:42 crc kubenswrapper[5050]: I1123 17:35:42.334951 5050 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5c36b66f-effa-4d89-9d9e-b18117396520-utilities\") on node \"crc\" DevicePath \"\"" Nov 23 17:35:42 crc kubenswrapper[5050]: I1123 17:35:42.334965 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fgtnz\" (UniqueName: \"kubernetes.io/projected/5c36b66f-effa-4d89-9d9e-b18117396520-kube-api-access-fgtnz\") on node \"crc\" DevicePath \"\"" Nov 23 17:35:42 crc kubenswrapper[5050]: I1123 17:35:42.367150 5050 generic.go:334] "Generic (PLEG): container finished" podID="5c36b66f-effa-4d89-9d9e-b18117396520" containerID="a9f7b978f2a43cb10adf25da307165ddd78374c1deeb779317209b8e836032eb" exitCode=0 Nov 23 17:35:42 crc kubenswrapper[5050]: I1123 17:35:42.367225 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hn4wm" event={"ID":"5c36b66f-effa-4d89-9d9e-b18117396520","Type":"ContainerDied","Data":"a9f7b978f2a43cb10adf25da307165ddd78374c1deeb779317209b8e836032eb"} Nov 23 17:35:42 crc kubenswrapper[5050]: I1123 17:35:42.367256 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-hn4wm" Nov 23 17:35:42 crc kubenswrapper[5050]: I1123 17:35:42.367288 5050 scope.go:117] "RemoveContainer" containerID="a9f7b978f2a43cb10adf25da307165ddd78374c1deeb779317209b8e836032eb" Nov 23 17:35:42 crc kubenswrapper[5050]: I1123 17:35:42.367270 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hn4wm" event={"ID":"5c36b66f-effa-4d89-9d9e-b18117396520","Type":"ContainerDied","Data":"f3278b17645d952f161dcb8a14e2fc1acb731217038bc925b8b8502f9c804819"} Nov 23 17:35:42 crc kubenswrapper[5050]: I1123 17:35:42.421384 5050 scope.go:117] "RemoveContainer" containerID="9b27bc9e5c6496e6075b93a0a782740a7a7e450a1c8894f29bbce6fc46246932" Nov 23 17:35:42 crc kubenswrapper[5050]: I1123 17:35:42.427085 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-hn4wm"] Nov 23 17:35:42 crc kubenswrapper[5050]: I1123 17:35:42.437830 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-hn4wm"] Nov 23 17:35:42 crc kubenswrapper[5050]: I1123 17:35:42.455948 5050 scope.go:117] "RemoveContainer" containerID="eed4c750d2d63dc9200354027d5c671f2b553fdf57ea8f0a77f68b26b9e9eaa0" Nov 23 17:35:42 crc kubenswrapper[5050]: I1123 17:35:42.510124 5050 scope.go:117] "RemoveContainer" containerID="a9f7b978f2a43cb10adf25da307165ddd78374c1deeb779317209b8e836032eb" Nov 23 17:35:42 crc kubenswrapper[5050]: E1123 17:35:42.510589 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a9f7b978f2a43cb10adf25da307165ddd78374c1deeb779317209b8e836032eb\": container with ID starting with a9f7b978f2a43cb10adf25da307165ddd78374c1deeb779317209b8e836032eb not found: ID does not exist" containerID="a9f7b978f2a43cb10adf25da307165ddd78374c1deeb779317209b8e836032eb" Nov 23 17:35:42 crc kubenswrapper[5050]: I1123 17:35:42.510635 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a9f7b978f2a43cb10adf25da307165ddd78374c1deeb779317209b8e836032eb"} err="failed to get container status \"a9f7b978f2a43cb10adf25da307165ddd78374c1deeb779317209b8e836032eb\": rpc error: code = NotFound desc = could not find container \"a9f7b978f2a43cb10adf25da307165ddd78374c1deeb779317209b8e836032eb\": container with ID starting with a9f7b978f2a43cb10adf25da307165ddd78374c1deeb779317209b8e836032eb not found: ID does not exist" Nov 23 17:35:42 crc kubenswrapper[5050]: I1123 17:35:42.510663 5050 scope.go:117] "RemoveContainer" containerID="9b27bc9e5c6496e6075b93a0a782740a7a7e450a1c8894f29bbce6fc46246932" Nov 23 17:35:42 crc kubenswrapper[5050]: E1123 17:35:42.511401 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9b27bc9e5c6496e6075b93a0a782740a7a7e450a1c8894f29bbce6fc46246932\": container with ID starting with 9b27bc9e5c6496e6075b93a0a782740a7a7e450a1c8894f29bbce6fc46246932 not found: ID does not exist" containerID="9b27bc9e5c6496e6075b93a0a782740a7a7e450a1c8894f29bbce6fc46246932" Nov 23 17:35:42 crc kubenswrapper[5050]: I1123 17:35:42.511503 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9b27bc9e5c6496e6075b93a0a782740a7a7e450a1c8894f29bbce6fc46246932"} err="failed to get container status \"9b27bc9e5c6496e6075b93a0a782740a7a7e450a1c8894f29bbce6fc46246932\": rpc error: code = NotFound desc = could not find 
container \"9b27bc9e5c6496e6075b93a0a782740a7a7e450a1c8894f29bbce6fc46246932\": container with ID starting with 9b27bc9e5c6496e6075b93a0a782740a7a7e450a1c8894f29bbce6fc46246932 not found: ID does not exist" Nov 23 17:35:42 crc kubenswrapper[5050]: I1123 17:35:42.511563 5050 scope.go:117] "RemoveContainer" containerID="eed4c750d2d63dc9200354027d5c671f2b553fdf57ea8f0a77f68b26b9e9eaa0" Nov 23 17:35:42 crc kubenswrapper[5050]: E1123 17:35:42.512029 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"eed4c750d2d63dc9200354027d5c671f2b553fdf57ea8f0a77f68b26b9e9eaa0\": container with ID starting with eed4c750d2d63dc9200354027d5c671f2b553fdf57ea8f0a77f68b26b9e9eaa0 not found: ID does not exist" containerID="eed4c750d2d63dc9200354027d5c671f2b553fdf57ea8f0a77f68b26b9e9eaa0" Nov 23 17:35:42 crc kubenswrapper[5050]: I1123 17:35:42.512100 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"eed4c750d2d63dc9200354027d5c671f2b553fdf57ea8f0a77f68b26b9e9eaa0"} err="failed to get container status \"eed4c750d2d63dc9200354027d5c671f2b553fdf57ea8f0a77f68b26b9e9eaa0\": rpc error: code = NotFound desc = could not find container \"eed4c750d2d63dc9200354027d5c671f2b553fdf57ea8f0a77f68b26b9e9eaa0\": container with ID starting with eed4c750d2d63dc9200354027d5c671f2b553fdf57ea8f0a77f68b26b9e9eaa0 not found: ID does not exist" Nov 23 17:35:43 crc kubenswrapper[5050]: I1123 17:35:43.575313 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5c36b66f-effa-4d89-9d9e-b18117396520" path="/var/lib/kubelet/pods/5c36b66f-effa-4d89-9d9e-b18117396520/volumes" Nov 23 17:35:52 crc kubenswrapper[5050]: I1123 17:35:52.549063 5050 scope.go:117] "RemoveContainer" containerID="094e7d5a208023d03b696c393552ca70524a11cddf16b3f1347e40ec9e667a1c" Nov 23 17:35:52 crc kubenswrapper[5050]: E1123 17:35:52.550870 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 17:36:03 crc kubenswrapper[5050]: I1123 17:36:03.470947 5050 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-67cc8"] Nov 23 17:36:03 crc kubenswrapper[5050]: E1123 17:36:03.472414 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5c36b66f-effa-4d89-9d9e-b18117396520" containerName="registry-server" Nov 23 17:36:03 crc kubenswrapper[5050]: I1123 17:36:03.472431 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="5c36b66f-effa-4d89-9d9e-b18117396520" containerName="registry-server" Nov 23 17:36:03 crc kubenswrapper[5050]: E1123 17:36:03.472499 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5c36b66f-effa-4d89-9d9e-b18117396520" containerName="extract-utilities" Nov 23 17:36:03 crc kubenswrapper[5050]: I1123 17:36:03.472513 5050 state_mem.go:107] "Deleted CPUSet assignment" podUID="5c36b66f-effa-4d89-9d9e-b18117396520" containerName="extract-utilities" Nov 23 17:36:03 crc kubenswrapper[5050]: E1123 17:36:03.472544 5050 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5c36b66f-effa-4d89-9d9e-b18117396520" containerName="extract-content" Nov 23 17:36:03 
Nov 23 17:36:03 crc kubenswrapper[5050]: I1123 17:36:03.472928 5050 memory_manager.go:354] "RemoveStaleState removing state" podUID="5c36b66f-effa-4d89-9d9e-b18117396520" containerName="registry-server"
Nov 23 17:36:03 crc kubenswrapper[5050]: I1123 17:36:03.475711 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-67cc8"
Nov 23 17:36:03 crc kubenswrapper[5050]: I1123 17:36:03.497525 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-67cc8"]
Nov 23 17:36:03 crc kubenswrapper[5050]: I1123 17:36:03.558081 5050 scope.go:117] "RemoveContainer" containerID="094e7d5a208023d03b696c393552ca70524a11cddf16b3f1347e40ec9e667a1c"
Nov 23 17:36:03 crc kubenswrapper[5050]: E1123 17:36:03.558828 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f"
Nov 23 17:36:03 crc kubenswrapper[5050]: I1123 17:36:03.581116 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-79p99\" (UniqueName: \"kubernetes.io/projected/08b05709-1a06-4f20-8eb1-ca35d26f9d47-kube-api-access-79p99\") pod \"redhat-marketplace-67cc8\" (UID: \"08b05709-1a06-4f20-8eb1-ca35d26f9d47\") " pod="openshift-marketplace/redhat-marketplace-67cc8"
Nov 23 17:36:03 crc kubenswrapper[5050]: I1123 17:36:03.581176 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/08b05709-1a06-4f20-8eb1-ca35d26f9d47-utilities\") pod \"redhat-marketplace-67cc8\" (UID: \"08b05709-1a06-4f20-8eb1-ca35d26f9d47\") " pod="openshift-marketplace/redhat-marketplace-67cc8"
Nov 23 17:36:03 crc kubenswrapper[5050]: I1123 17:36:03.581222 5050 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/08b05709-1a06-4f20-8eb1-ca35d26f9d47-catalog-content\") pod \"redhat-marketplace-67cc8\" (UID: \"08b05709-1a06-4f20-8eb1-ca35d26f9d47\") " pod="openshift-marketplace/redhat-marketplace-67cc8"
Nov 23 17:36:03 crc kubenswrapper[5050]: I1123 17:36:03.684502 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-79p99\" (UniqueName: \"kubernetes.io/projected/08b05709-1a06-4f20-8eb1-ca35d26f9d47-kube-api-access-79p99\") pod \"redhat-marketplace-67cc8\" (UID: \"08b05709-1a06-4f20-8eb1-ca35d26f9d47\") " pod="openshift-marketplace/redhat-marketplace-67cc8"
Nov 23 17:36:03 crc kubenswrapper[5050]: I1123 17:36:03.684583 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/08b05709-1a06-4f20-8eb1-ca35d26f9d47-utilities\") pod \"redhat-marketplace-67cc8\" (UID: \"08b05709-1a06-4f20-8eb1-ca35d26f9d47\") " pod="openshift-marketplace/redhat-marketplace-67cc8"
Nov 23 17:36:03 crc kubenswrapper[5050]: I1123 17:36:03.684674 5050 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/08b05709-1a06-4f20-8eb1-ca35d26f9d47-catalog-content\") pod \"redhat-marketplace-67cc8\" (UID: \"08b05709-1a06-4f20-8eb1-ca35d26f9d47\") " pod="openshift-marketplace/redhat-marketplace-67cc8"
"operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/08b05709-1a06-4f20-8eb1-ca35d26f9d47-catalog-content\") pod \"redhat-marketplace-67cc8\" (UID: \"08b05709-1a06-4f20-8eb1-ca35d26f9d47\") " pod="openshift-marketplace/redhat-marketplace-67cc8" Nov 23 17:36:03 crc kubenswrapper[5050]: I1123 17:36:03.685294 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/08b05709-1a06-4f20-8eb1-ca35d26f9d47-utilities\") pod \"redhat-marketplace-67cc8\" (UID: \"08b05709-1a06-4f20-8eb1-ca35d26f9d47\") " pod="openshift-marketplace/redhat-marketplace-67cc8" Nov 23 17:36:03 crc kubenswrapper[5050]: I1123 17:36:03.685752 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/08b05709-1a06-4f20-8eb1-ca35d26f9d47-catalog-content\") pod \"redhat-marketplace-67cc8\" (UID: \"08b05709-1a06-4f20-8eb1-ca35d26f9d47\") " pod="openshift-marketplace/redhat-marketplace-67cc8" Nov 23 17:36:03 crc kubenswrapper[5050]: I1123 17:36:03.715425 5050 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-79p99\" (UniqueName: \"kubernetes.io/projected/08b05709-1a06-4f20-8eb1-ca35d26f9d47-kube-api-access-79p99\") pod \"redhat-marketplace-67cc8\" (UID: \"08b05709-1a06-4f20-8eb1-ca35d26f9d47\") " pod="openshift-marketplace/redhat-marketplace-67cc8" Nov 23 17:36:03 crc kubenswrapper[5050]: I1123 17:36:03.823587 5050 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-67cc8" Nov 23 17:36:04 crc kubenswrapper[5050]: I1123 17:36:04.406112 5050 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-67cc8"] Nov 23 17:36:04 crc kubenswrapper[5050]: I1123 17:36:04.703352 5050 generic.go:334] "Generic (PLEG): container finished" podID="08b05709-1a06-4f20-8eb1-ca35d26f9d47" containerID="ff414533e9e06829a9ced1c10f5d63b91898946617117bd35341939ac68badf2" exitCode=0 Nov 23 17:36:04 crc kubenswrapper[5050]: I1123 17:36:04.703540 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-67cc8" event={"ID":"08b05709-1a06-4f20-8eb1-ca35d26f9d47","Type":"ContainerDied","Data":"ff414533e9e06829a9ced1c10f5d63b91898946617117bd35341939ac68badf2"} Nov 23 17:36:04 crc kubenswrapper[5050]: I1123 17:36:04.704031 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-67cc8" event={"ID":"08b05709-1a06-4f20-8eb1-ca35d26f9d47","Type":"ContainerStarted","Data":"1317b9f0cb0347c8e3e87a28827a728326004f13355719ed5891f60b05a06709"} Nov 23 17:36:05 crc kubenswrapper[5050]: I1123 17:36:05.731859 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-67cc8" event={"ID":"08b05709-1a06-4f20-8eb1-ca35d26f9d47","Type":"ContainerStarted","Data":"e88b64d27054740274b8fb4e68129e64f00e869621deb403753243ccc98772fb"} Nov 23 17:36:06 crc kubenswrapper[5050]: I1123 17:36:06.764764 5050 generic.go:334] "Generic (PLEG): container finished" podID="08b05709-1a06-4f20-8eb1-ca35d26f9d47" containerID="e88b64d27054740274b8fb4e68129e64f00e869621deb403753243ccc98772fb" exitCode=0 Nov 23 17:36:06 crc kubenswrapper[5050]: I1123 17:36:06.764918 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-67cc8" 
event={"ID":"08b05709-1a06-4f20-8eb1-ca35d26f9d47","Type":"ContainerDied","Data":"e88b64d27054740274b8fb4e68129e64f00e869621deb403753243ccc98772fb"} Nov 23 17:36:07 crc kubenswrapper[5050]: I1123 17:36:07.778634 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-67cc8" event={"ID":"08b05709-1a06-4f20-8eb1-ca35d26f9d47","Type":"ContainerStarted","Data":"94bf30e861fc2eb39f0796258e7aee3c49e1bb9ed6b3dbc0deb58d35cf4ecb95"} Nov 23 17:36:07 crc kubenswrapper[5050]: I1123 17:36:07.844508 5050 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-67cc8" podStartSLOduration=2.07508988 podStartE2EDuration="4.844478763s" podCreationTimestamp="2025-11-23 17:36:03 +0000 UTC" firstStartedPulling="2025-11-23 17:36:04.709315571 +0000 UTC m=+10459.876312096" lastFinishedPulling="2025-11-23 17:36:07.478704494 +0000 UTC m=+10462.645700979" observedRunningTime="2025-11-23 17:36:07.808528758 +0000 UTC m=+10462.975525273" watchObservedRunningTime="2025-11-23 17:36:07.844478763 +0000 UTC m=+10463.011475248" Nov 23 17:36:13 crc kubenswrapper[5050]: I1123 17:36:13.824137 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-67cc8" Nov 23 17:36:13 crc kubenswrapper[5050]: I1123 17:36:13.826905 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-67cc8" Nov 23 17:36:13 crc kubenswrapper[5050]: I1123 17:36:13.888499 5050 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-67cc8" Nov 23 17:36:14 crc kubenswrapper[5050]: I1123 17:36:14.739848 5050 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-67cc8" Nov 23 17:36:14 crc kubenswrapper[5050]: I1123 17:36:14.815016 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-67cc8"] Nov 23 17:36:15 crc kubenswrapper[5050]: I1123 17:36:15.896644 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-67cc8" podUID="08b05709-1a06-4f20-8eb1-ca35d26f9d47" containerName="registry-server" containerID="cri-o://94bf30e861fc2eb39f0796258e7aee3c49e1bb9ed6b3dbc0deb58d35cf4ecb95" gracePeriod=2 Nov 23 17:36:16 crc kubenswrapper[5050]: I1123 17:36:16.467546 5050 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-67cc8" Nov 23 17:36:16 crc kubenswrapper[5050]: I1123 17:36:16.551060 5050 scope.go:117] "RemoveContainer" containerID="094e7d5a208023d03b696c393552ca70524a11cddf16b3f1347e40ec9e667a1c" Nov 23 17:36:16 crc kubenswrapper[5050]: E1123 17:36:16.551764 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f" Nov 23 17:36:16 crc kubenswrapper[5050]: I1123 17:36:16.607352 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/08b05709-1a06-4f20-8eb1-ca35d26f9d47-catalog-content\") pod \"08b05709-1a06-4f20-8eb1-ca35d26f9d47\" (UID: \"08b05709-1a06-4f20-8eb1-ca35d26f9d47\") " Nov 23 17:36:16 crc kubenswrapper[5050]: I1123 17:36:16.607891 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-79p99\" (UniqueName: \"kubernetes.io/projected/08b05709-1a06-4f20-8eb1-ca35d26f9d47-kube-api-access-79p99\") pod \"08b05709-1a06-4f20-8eb1-ca35d26f9d47\" (UID: \"08b05709-1a06-4f20-8eb1-ca35d26f9d47\") " Nov 23 17:36:16 crc kubenswrapper[5050]: I1123 17:36:16.607990 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/08b05709-1a06-4f20-8eb1-ca35d26f9d47-utilities\") pod \"08b05709-1a06-4f20-8eb1-ca35d26f9d47\" (UID: \"08b05709-1a06-4f20-8eb1-ca35d26f9d47\") " Nov 23 17:36:16 crc kubenswrapper[5050]: I1123 17:36:16.609276 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/08b05709-1a06-4f20-8eb1-ca35d26f9d47-utilities" (OuterVolumeSpecName: "utilities") pod "08b05709-1a06-4f20-8eb1-ca35d26f9d47" (UID: "08b05709-1a06-4f20-8eb1-ca35d26f9d47"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 17:36:16 crc kubenswrapper[5050]: I1123 17:36:16.618616 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/08b05709-1a06-4f20-8eb1-ca35d26f9d47-kube-api-access-79p99" (OuterVolumeSpecName: "kube-api-access-79p99") pod "08b05709-1a06-4f20-8eb1-ca35d26f9d47" (UID: "08b05709-1a06-4f20-8eb1-ca35d26f9d47"). InnerVolumeSpecName "kube-api-access-79p99". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 23 17:36:16 crc kubenswrapper[5050]: I1123 17:36:16.642856 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/08b05709-1a06-4f20-8eb1-ca35d26f9d47-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "08b05709-1a06-4f20-8eb1-ca35d26f9d47" (UID: "08b05709-1a06-4f20-8eb1-ca35d26f9d47"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 23 17:36:16 crc kubenswrapper[5050]: I1123 17:36:16.726964 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-79p99\" (UniqueName: \"kubernetes.io/projected/08b05709-1a06-4f20-8eb1-ca35d26f9d47-kube-api-access-79p99\") on node \"crc\" DevicePath \"\"" Nov 23 17:36:16 crc kubenswrapper[5050]: I1123 17:36:16.727008 5050 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/08b05709-1a06-4f20-8eb1-ca35d26f9d47-utilities\") on node \"crc\" DevicePath \"\"" Nov 23 17:36:16 crc kubenswrapper[5050]: I1123 17:36:16.727021 5050 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/08b05709-1a06-4f20-8eb1-ca35d26f9d47-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 23 17:36:16 crc kubenswrapper[5050]: I1123 17:36:16.922609 5050 generic.go:334] "Generic (PLEG): container finished" podID="08b05709-1a06-4f20-8eb1-ca35d26f9d47" containerID="94bf30e861fc2eb39f0796258e7aee3c49e1bb9ed6b3dbc0deb58d35cf4ecb95" exitCode=0 Nov 23 17:36:16 crc kubenswrapper[5050]: I1123 17:36:16.922696 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-67cc8" event={"ID":"08b05709-1a06-4f20-8eb1-ca35d26f9d47","Type":"ContainerDied","Data":"94bf30e861fc2eb39f0796258e7aee3c49e1bb9ed6b3dbc0deb58d35cf4ecb95"} Nov 23 17:36:16 crc kubenswrapper[5050]: I1123 17:36:16.922743 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-67cc8" event={"ID":"08b05709-1a06-4f20-8eb1-ca35d26f9d47","Type":"ContainerDied","Data":"1317b9f0cb0347c8e3e87a28827a728326004f13355719ed5891f60b05a06709"} Nov 23 17:36:16 crc kubenswrapper[5050]: I1123 17:36:16.922772 5050 scope.go:117] "RemoveContainer" containerID="94bf30e861fc2eb39f0796258e7aee3c49e1bb9ed6b3dbc0deb58d35cf4ecb95" Nov 23 17:36:16 crc kubenswrapper[5050]: I1123 17:36:16.923027 5050 util.go:48] "No ready sandbox for pod can be found. 
Nov 23 17:36:16 crc kubenswrapper[5050]: I1123 17:36:16.983954 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-67cc8"]
Nov 23 17:36:16 crc kubenswrapper[5050]: I1123 17:36:16.984251 5050 scope.go:117] "RemoveContainer" containerID="e88b64d27054740274b8fb4e68129e64f00e869621deb403753243ccc98772fb"
Nov 23 17:36:16 crc kubenswrapper[5050]: I1123 17:36:16.996604 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-67cc8"]
Nov 23 17:36:17 crc kubenswrapper[5050]: I1123 17:36:17.014806 5050 scope.go:117] "RemoveContainer" containerID="ff414533e9e06829a9ced1c10f5d63b91898946617117bd35341939ac68badf2"
Nov 23 17:36:17 crc kubenswrapper[5050]: I1123 17:36:17.074844 5050 scope.go:117] "RemoveContainer" containerID="94bf30e861fc2eb39f0796258e7aee3c49e1bb9ed6b3dbc0deb58d35cf4ecb95"
Nov 23 17:36:17 crc kubenswrapper[5050]: E1123 17:36:17.075837 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"94bf30e861fc2eb39f0796258e7aee3c49e1bb9ed6b3dbc0deb58d35cf4ecb95\": container with ID starting with 94bf30e861fc2eb39f0796258e7aee3c49e1bb9ed6b3dbc0deb58d35cf4ecb95 not found: ID does not exist" containerID="94bf30e861fc2eb39f0796258e7aee3c49e1bb9ed6b3dbc0deb58d35cf4ecb95"
Nov 23 17:36:17 crc kubenswrapper[5050]: I1123 17:36:17.075946 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"94bf30e861fc2eb39f0796258e7aee3c49e1bb9ed6b3dbc0deb58d35cf4ecb95"} err="failed to get container status \"94bf30e861fc2eb39f0796258e7aee3c49e1bb9ed6b3dbc0deb58d35cf4ecb95\": rpc error: code = NotFound desc = could not find container \"94bf30e861fc2eb39f0796258e7aee3c49e1bb9ed6b3dbc0deb58d35cf4ecb95\": container with ID starting with 94bf30e861fc2eb39f0796258e7aee3c49e1bb9ed6b3dbc0deb58d35cf4ecb95 not found: ID does not exist"
Nov 23 17:36:17 crc kubenswrapper[5050]: I1123 17:36:17.076118 5050 scope.go:117] "RemoveContainer" containerID="e88b64d27054740274b8fb4e68129e64f00e869621deb403753243ccc98772fb"
Nov 23 17:36:17 crc kubenswrapper[5050]: E1123 17:36:17.076867 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e88b64d27054740274b8fb4e68129e64f00e869621deb403753243ccc98772fb\": container with ID starting with e88b64d27054740274b8fb4e68129e64f00e869621deb403753243ccc98772fb not found: ID does not exist" containerID="e88b64d27054740274b8fb4e68129e64f00e869621deb403753243ccc98772fb"
Nov 23 17:36:17 crc kubenswrapper[5050]: I1123 17:36:17.076913 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e88b64d27054740274b8fb4e68129e64f00e869621deb403753243ccc98772fb"} err="failed to get container status \"e88b64d27054740274b8fb4e68129e64f00e869621deb403753243ccc98772fb\": rpc error: code = NotFound desc = could not find container \"e88b64d27054740274b8fb4e68129e64f00e869621deb403753243ccc98772fb\": container with ID starting with e88b64d27054740274b8fb4e68129e64f00e869621deb403753243ccc98772fb not found: ID does not exist"
Nov 23 17:36:17 crc kubenswrapper[5050]: I1123 17:36:17.076930 5050 scope.go:117] "RemoveContainer" containerID="ff414533e9e06829a9ced1c10f5d63b91898946617117bd35341939ac68badf2"
Nov 23 17:36:17 crc kubenswrapper[5050]: E1123 17:36:17.077358 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ff414533e9e06829a9ced1c10f5d63b91898946617117bd35341939ac68badf2\": container with ID starting with ff414533e9e06829a9ced1c10f5d63b91898946617117bd35341939ac68badf2 not found: ID does not exist" containerID="ff414533e9e06829a9ced1c10f5d63b91898946617117bd35341939ac68badf2"
Nov 23 17:36:17 crc kubenswrapper[5050]: I1123 17:36:17.077413 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ff414533e9e06829a9ced1c10f5d63b91898946617117bd35341939ac68badf2"} err="failed to get container status \"ff414533e9e06829a9ced1c10f5d63b91898946617117bd35341939ac68badf2\": rpc error: code = NotFound desc = could not find container \"ff414533e9e06829a9ced1c10f5d63b91898946617117bd35341939ac68badf2\": container with ID starting with ff414533e9e06829a9ced1c10f5d63b91898946617117bd35341939ac68badf2 not found: ID does not exist"
Nov 23 17:36:17 crc kubenswrapper[5050]: I1123 17:36:17.584806 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="08b05709-1a06-4f20-8eb1-ca35d26f9d47" path="/var/lib/kubelet/pods/08b05709-1a06-4f20-8eb1-ca35d26f9d47/volumes"
Nov 23 17:36:24 crc kubenswrapper[5050]: I1123 17:36:24.027529 5050 generic.go:334] "Generic (PLEG): container finished" podID="b8fcd70b-4806-4f9f-9499-61e843f9918f" containerID="8386c34f4e8f764a9d7ba00e903042d23f9c5efff6c216e48bbbed78eeecd3ae" exitCode=0
Nov 23 17:36:24 crc kubenswrapper[5050]: I1123 17:36:24.027603 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-497tw/must-gather-n292h" event={"ID":"b8fcd70b-4806-4f9f-9499-61e843f9918f","Type":"ContainerDied","Data":"8386c34f4e8f764a9d7ba00e903042d23f9c5efff6c216e48bbbed78eeecd3ae"}
Nov 23 17:36:24 crc kubenswrapper[5050]: I1123 17:36:24.030946 5050 scope.go:117] "RemoveContainer" containerID="8386c34f4e8f764a9d7ba00e903042d23f9c5efff6c216e48bbbed78eeecd3ae"
Nov 23 17:36:24 crc kubenswrapper[5050]: I1123 17:36:24.256369 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-497tw_must-gather-n292h_b8fcd70b-4806-4f9f-9499-61e843f9918f/gather/0.log"
Nov 23 17:36:27 crc kubenswrapper[5050]: I1123 17:36:27.549999 5050 scope.go:117] "RemoveContainer" containerID="094e7d5a208023d03b696c393552ca70524a11cddf16b3f1347e40ec9e667a1c"
Nov 23 17:36:27 crc kubenswrapper[5050]: E1123 17:36:27.551219 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f"
Nov 23 17:36:34 crc kubenswrapper[5050]: I1123 17:36:34.293465 5050 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-497tw/must-gather-n292h"]
Nov 23 17:36:34 crc kubenswrapper[5050]: I1123 17:36:34.294664 5050 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-must-gather-497tw/must-gather-n292h" podUID="b8fcd70b-4806-4f9f-9499-61e843f9918f" containerName="copy" containerID="cri-o://51eb186ddb748786a91b4a38286b2942d8ba90f12328e1998757355713643df5" gracePeriod=2
Nov 23 17:36:34 crc kubenswrapper[5050]: I1123 17:36:34.308153 5050 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-497tw/must-gather-n292h"]
Nov 23 17:36:34 crc kubenswrapper[5050]: I1123 17:36:34.924372 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-497tw_must-gather-n292h_b8fcd70b-4806-4f9f-9499-61e843f9918f/copy/0.log"
Nov 23 17:36:34 crc kubenswrapper[5050]: I1123 17:36:34.925207 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-497tw/must-gather-n292h"
Nov 23 17:36:35 crc kubenswrapper[5050]: I1123 17:36:35.042384 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/b8fcd70b-4806-4f9f-9499-61e843f9918f-must-gather-output\") pod \"b8fcd70b-4806-4f9f-9499-61e843f9918f\" (UID: \"b8fcd70b-4806-4f9f-9499-61e843f9918f\") "
Nov 23 17:36:35 crc kubenswrapper[5050]: I1123 17:36:35.042614 5050 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fbfkl\" (UniqueName: \"kubernetes.io/projected/b8fcd70b-4806-4f9f-9499-61e843f9918f-kube-api-access-fbfkl\") pod \"b8fcd70b-4806-4f9f-9499-61e843f9918f\" (UID: \"b8fcd70b-4806-4f9f-9499-61e843f9918f\") "
Nov 23 17:36:35 crc kubenswrapper[5050]: I1123 17:36:35.067375 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b8fcd70b-4806-4f9f-9499-61e843f9918f-kube-api-access-fbfkl" (OuterVolumeSpecName: "kube-api-access-fbfkl") pod "b8fcd70b-4806-4f9f-9499-61e843f9918f" (UID: "b8fcd70b-4806-4f9f-9499-61e843f9918f"). InnerVolumeSpecName "kube-api-access-fbfkl". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 23 17:36:35 crc kubenswrapper[5050]: I1123 17:36:35.151515 5050 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fbfkl\" (UniqueName: \"kubernetes.io/projected/b8fcd70b-4806-4f9f-9499-61e843f9918f-kube-api-access-fbfkl\") on node \"crc\" DevicePath \"\""
Nov 23 17:36:35 crc kubenswrapper[5050]: I1123 17:36:35.174289 5050 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-497tw_must-gather-n292h_b8fcd70b-4806-4f9f-9499-61e843f9918f/copy/0.log"
Nov 23 17:36:35 crc kubenswrapper[5050]: I1123 17:36:35.174875 5050 generic.go:334] "Generic (PLEG): container finished" podID="b8fcd70b-4806-4f9f-9499-61e843f9918f" containerID="51eb186ddb748786a91b4a38286b2942d8ba90f12328e1998757355713643df5" exitCode=143
Nov 23 17:36:35 crc kubenswrapper[5050]: I1123 17:36:35.174954 5050 scope.go:117] "RemoveContainer" containerID="51eb186ddb748786a91b4a38286b2942d8ba90f12328e1998757355713643df5"
Nov 23 17:36:35 crc kubenswrapper[5050]: I1123 17:36:35.175253 5050 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-497tw/must-gather-n292h"
Nov 23 17:36:35 crc kubenswrapper[5050]: I1123 17:36:35.208506 5050 scope.go:117] "RemoveContainer" containerID="8386c34f4e8f764a9d7ba00e903042d23f9c5efff6c216e48bbbed78eeecd3ae"
Nov 23 17:36:35 crc kubenswrapper[5050]: I1123 17:36:35.261159 5050 scope.go:117] "RemoveContainer" containerID="51eb186ddb748786a91b4a38286b2942d8ba90f12328e1998757355713643df5"
Nov 23 17:36:35 crc kubenswrapper[5050]: I1123 17:36:35.263472 5050 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b8fcd70b-4806-4f9f-9499-61e843f9918f-must-gather-output" (OuterVolumeSpecName: "must-gather-output") pod "b8fcd70b-4806-4f9f-9499-61e843f9918f" (UID: "b8fcd70b-4806-4f9f-9499-61e843f9918f"). InnerVolumeSpecName "must-gather-output". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 23 17:36:35 crc kubenswrapper[5050]: E1123 17:36:35.264071 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"51eb186ddb748786a91b4a38286b2942d8ba90f12328e1998757355713643df5\": container with ID starting with 51eb186ddb748786a91b4a38286b2942d8ba90f12328e1998757355713643df5 not found: ID does not exist" containerID="51eb186ddb748786a91b4a38286b2942d8ba90f12328e1998757355713643df5"
Nov 23 17:36:35 crc kubenswrapper[5050]: I1123 17:36:35.264125 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"51eb186ddb748786a91b4a38286b2942d8ba90f12328e1998757355713643df5"} err="failed to get container status \"51eb186ddb748786a91b4a38286b2942d8ba90f12328e1998757355713643df5\": rpc error: code = NotFound desc = could not find container \"51eb186ddb748786a91b4a38286b2942d8ba90f12328e1998757355713643df5\": container with ID starting with 51eb186ddb748786a91b4a38286b2942d8ba90f12328e1998757355713643df5 not found: ID does not exist"
Nov 23 17:36:35 crc kubenswrapper[5050]: I1123 17:36:35.264156 5050 scope.go:117] "RemoveContainer" containerID="8386c34f4e8f764a9d7ba00e903042d23f9c5efff6c216e48bbbed78eeecd3ae"
Nov 23 17:36:35 crc kubenswrapper[5050]: E1123 17:36:35.264550 5050 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8386c34f4e8f764a9d7ba00e903042d23f9c5efff6c216e48bbbed78eeecd3ae\": container with ID starting with 8386c34f4e8f764a9d7ba00e903042d23f9c5efff6c216e48bbbed78eeecd3ae not found: ID does not exist" containerID="8386c34f4e8f764a9d7ba00e903042d23f9c5efff6c216e48bbbed78eeecd3ae"
Nov 23 17:36:35 crc kubenswrapper[5050]: I1123 17:36:35.264568 5050 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8386c34f4e8f764a9d7ba00e903042d23f9c5efff6c216e48bbbed78eeecd3ae"} err="failed to get container status \"8386c34f4e8f764a9d7ba00e903042d23f9c5efff6c216e48bbbed78eeecd3ae\": rpc error: code = NotFound desc = could not find container \"8386c34f4e8f764a9d7ba00e903042d23f9c5efff6c216e48bbbed78eeecd3ae\": container with ID starting with 8386c34f4e8f764a9d7ba00e903042d23f9c5efff6c216e48bbbed78eeecd3ae not found: ID does not exist"
Nov 23 17:36:35 crc kubenswrapper[5050]: I1123 17:36:35.357770 5050 reconciler_common.go:293] "Volume detached for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/b8fcd70b-4806-4f9f-9499-61e843f9918f-must-gather-output\") on node \"crc\" DevicePath \"\""
Nov 23 17:36:35 crc kubenswrapper[5050]: I1123 17:36:35.566208 5050 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b8fcd70b-4806-4f9f-9499-61e843f9918f" path="/var/lib/kubelet/pods/b8fcd70b-4806-4f9f-9499-61e843f9918f/volumes"
Nov 23 17:36:38 crc kubenswrapper[5050]: I1123 17:36:38.548760 5050 scope.go:117] "RemoveContainer" containerID="094e7d5a208023d03b696c393552ca70524a11cddf16b3f1347e40ec9e667a1c"
Nov 23 17:36:38 crc kubenswrapper[5050]: E1123 17:36:38.549892 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f"
Nov 23 17:36:51 crc kubenswrapper[5050]: I1123 17:36:51.551403 5050 scope.go:117] "RemoveContainer" containerID="094e7d5a208023d03b696c393552ca70524a11cddf16b3f1347e40ec9e667a1c"
Nov 23 17:36:51 crc kubenswrapper[5050]: E1123 17:36:51.552536 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f"
Nov 23 17:37:02 crc kubenswrapper[5050]: I1123 17:37:02.548876 5050 scope.go:117] "RemoveContainer" containerID="094e7d5a208023d03b696c393552ca70524a11cddf16b3f1347e40ec9e667a1c"
Nov 23 17:37:02 crc kubenswrapper[5050]: E1123 17:37:02.549980 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f"
Nov 23 17:37:14 crc kubenswrapper[5050]: I1123 17:37:14.550790 5050 scope.go:117] "RemoveContainer" containerID="094e7d5a208023d03b696c393552ca70524a11cddf16b3f1347e40ec9e667a1c"
Nov 23 17:37:14 crc kubenswrapper[5050]: E1123 17:37:14.553621 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f"
Nov 23 17:37:26 crc kubenswrapper[5050]: I1123 17:37:26.549744 5050 scope.go:117] "RemoveContainer" containerID="094e7d5a208023d03b696c393552ca70524a11cddf16b3f1347e40ec9e667a1c"
Nov 23 17:37:26 crc kubenswrapper[5050]: E1123 17:37:26.551039 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f"
Nov 23 17:37:41 crc kubenswrapper[5050]: I1123 17:37:41.550183 5050 scope.go:117] "RemoveContainer" containerID="094e7d5a208023d03b696c393552ca70524a11cddf16b3f1347e40ec9e667a1c"
Nov 23 17:37:41 crc kubenswrapper[5050]: E1123 17:37:41.551812 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f"
Nov 23 17:37:55 crc kubenswrapper[5050]: I1123 17:37:55.569093 5050 scope.go:117] "RemoveContainer" containerID="094e7d5a208023d03b696c393552ca70524a11cddf16b3f1347e40ec9e667a1c"
Nov 23 17:37:55 crc kubenswrapper[5050]: E1123 17:37:55.571118 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f"
Nov 23 17:38:10 crc kubenswrapper[5050]: I1123 17:38:10.549840 5050 scope.go:117] "RemoveContainer" containerID="094e7d5a208023d03b696c393552ca70524a11cddf16b3f1347e40ec9e667a1c"
Nov 23 17:38:10 crc kubenswrapper[5050]: E1123 17:38:10.551358 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f"
Nov 23 17:38:22 crc kubenswrapper[5050]: I1123 17:38:22.549911 5050 scope.go:117] "RemoveContainer" containerID="094e7d5a208023d03b696c393552ca70524a11cddf16b3f1347e40ec9e667a1c"
Nov 23 17:38:22 crc kubenswrapper[5050]: E1123 17:38:22.551067 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f"
Nov 23 17:38:34 crc kubenswrapper[5050]: I1123 17:38:34.550537 5050 scope.go:117] "RemoveContainer" containerID="094e7d5a208023d03b696c393552ca70524a11cddf16b3f1347e40ec9e667a1c"
Nov 23 17:38:34 crc kubenswrapper[5050]: E1123 17:38:34.552087 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f"
Nov 23 17:38:49 crc kubenswrapper[5050]: I1123 17:38:49.549850 5050 scope.go:117] "RemoveContainer" containerID="094e7d5a208023d03b696c393552ca70524a11cddf16b3f1347e40ec9e667a1c"
Nov 23 17:38:49 crc kubenswrapper[5050]: E1123 17:38:49.550838 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f"
Nov 23 17:39:00 crc kubenswrapper[5050]: I1123 17:39:00.550030 5050 scope.go:117] "RemoveContainer" containerID="094e7d5a208023d03b696c393552ca70524a11cddf16b3f1347e40ec9e667a1c"
Nov 23 17:39:00 crc kubenswrapper[5050]: E1123 17:39:00.551654 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f"
Nov 23 17:39:14 crc kubenswrapper[5050]: I1123 17:39:14.549068 5050 scope.go:117] "RemoveContainer" containerID="094e7d5a208023d03b696c393552ca70524a11cddf16b3f1347e40ec9e667a1c"
Nov 23 17:39:14 crc kubenswrapper[5050]: E1123 17:39:14.550246 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f"
Nov 23 17:39:25 crc kubenswrapper[5050]: I1123 17:39:25.563224 5050 scope.go:117] "RemoveContainer" containerID="094e7d5a208023d03b696c393552ca70524a11cddf16b3f1347e40ec9e667a1c"
Nov 23 17:39:25 crc kubenswrapper[5050]: E1123 17:39:25.566065 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f"
Nov 23 17:39:38 crc kubenswrapper[5050]: I1123 17:39:38.549979 5050 scope.go:117] "RemoveContainer" containerID="094e7d5a208023d03b696c393552ca70524a11cddf16b3f1347e40ec9e667a1c"
Nov 23 17:39:38 crc kubenswrapper[5050]: E1123 17:39:38.552471 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f"
Nov 23 17:39:52 crc kubenswrapper[5050]: I1123 17:39:52.549270 5050 scope.go:117] "RemoveContainer" containerID="094e7d5a208023d03b696c393552ca70524a11cddf16b3f1347e40ec9e667a1c"
Nov 23 17:39:52 crc kubenswrapper[5050]: E1123 17:39:52.550306 5050 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-hlrlq_openshift-machine-config-operator(1d998909-9470-47ef-87e8-b34f0473682f)\"" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" podUID="1d998909-9470-47ef-87e8-b34f0473682f"
Nov 23 17:40:07 crc kubenswrapper[5050]: I1123 17:40:07.549925 5050 scope.go:117] "RemoveContainer" containerID="094e7d5a208023d03b696c393552ca70524a11cddf16b3f1347e40ec9e667a1c"
Nov 23 17:40:08 crc kubenswrapper[5050]: I1123 17:40:08.326675 5050 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-hlrlq" event={"ID":"1d998909-9470-47ef-87e8-b34f0473682f","Type":"ContainerStarted","Data":"57c72d87b5e90b9446f5f9109ac0b09d7013453d3bfeab71129d68eef2d252cb"}